Dataset schema (one row per source file; ⌀ marks nullable columns):
hexsha: string (length 40)
size: int64 (3 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 972)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 972)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 116k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 972)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 3 to 1.03M)
avg_line_length: float64 (1.13 to 941k)
max_line_length: int64 (2 to 941k)
alphanum_fraction: float64 (0 to 1)
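The rows below follow this schema. As a hedged illustration of how such a dump might be consumed programmatically (the dataset path is a placeholder, not taken from this page), streaming a few rows with the Hugging Face `datasets` library could look like:
from itertools import islice
from datasets import load_dataset  # pip install datasets
# "org/dataset-name" is a placeholder; substitute the dataset this dump came from.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in islice(ds, 3):
    # Each row pairs repository metadata with the full file contents.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])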
0851d8ec021205dc7bb80669be4b6689a85558b0
| 2,253
|
py
|
Python
|
SqrMelon/gl_shaders.py
|
vslotman/sqrmelon
|
3ce3fc475d24b4b716091d3bdd9486c82118e93e
|
[
"MIT"
] | 93
|
2018-04-14T17:29:40.000Z
|
2021-05-09T23:04:51.000Z
|
SqrMelon/gl_shaders.py
|
vslotman/sqrmelon
|
3ce3fc475d24b4b716091d3bdd9486c82118e93e
|
[
"MIT"
] | 24
|
2020-04-17T18:51:02.000Z
|
2021-06-06T15:39:26.000Z
|
SqrMelon/gl_shaders.py
|
thijskruithof/sqrmelon
|
05c846540686055af3559158bf4ed9e905ab1f35
|
[
"MIT"
] | 8
|
2018-04-21T12:56:32.000Z
|
2020-02-05T12:01:20.000Z
|
from OpenGL.GL import *
from OpenGL.GL.shaders import ShaderProgram
# Needed for the 'separable' / 'retrievable' flags handled below.
from OpenGL.GL.ARB import separate_shader_objects, get_program_binary
def compileProgram(*shaders, **named):
"""Create a new program, attach shaders and validate
shaders -- arbitrary number of shaders to attach to the
generated program.
separable (keyword only) -- set the separable flag to allow
for partial installation of shader into the pipeline (see
glUseProgramStages)
retrievable (keyword only) -- set the retrievable flag to
allow retrieval of the program binary representation, (see
glProgramBinary, glGetProgramBinary)
validate (keyword only) -- if False, suppress automatic
validation against current GL state. In advanced usage
the validation can produce spurious errors. Note: this
function is *not* really intended for advanced usage,
if you're finding yourself specifying this flag you
likely should be using your own shader management code.
This convenience function is *not* standard OpenGL,
but it does wind up being fairly useful for demos
and the like. You may wish to copy it to your code
base to guard against PyOpenGL changes.
Usage:
shader = compileProgram(
compileShader( source, GL_VERTEX_SHADER ),
compileShader( source2, GL_FRAGMENT_SHADER ),
)
glUseProgram( shader )
Note:
If (and only if) validation of the linked program
*passes* then the passed-in shader objects will be
deleted from the GL.
returns ShaderProgram() (GLuint) program reference
raises RuntimeError when a link/validation failure occurs
"""
program = glCreateProgram()
if named.get('separable'):
glProgramParameteri( program, separate_shader_objects.GL_PROGRAM_SEPARABLE, GL_TRUE )
if named.get('retrievable'):
glProgramParameteri( program, get_program_binary.GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE )
for shader in shaders:
glAttachShader(program, shader)
program = ShaderProgram( program )
glLinkProgram(program)
if named.get('validate', True):
program.check_validate()
program.check_linked()
for shader in shaders:
glDeleteShader(shader)
return program
| 42.509434
| 102
| 0.700843
|
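As a hedged sketch of the keyword flags documented in the compileProgram helper above (assuming a current OpenGL context created elsewhere; the shader sources are minimal placeholders):
from OpenGL.GL import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, glUseProgram
from OpenGL.GL.shaders import compileShader
VERT = "#version 330 core\nvoid main() { gl_Position = vec4(0.0, 0.0, 0.0, 1.0); }"
FRAG = "#version 330 core\nout vec4 color;\nvoid main() { color = vec4(1.0); }"
program = compileProgram(                  # the helper defined above
    compileShader(VERT, GL_VERTEX_SHADER),
    compileShader(FRAG, GL_FRAGMENT_SHADER),
    separable=False,                       # True only when using glUseProgramStages
    retrievable=False,                     # True to allow glGetProgramBinary later
)
glUseProgram(program)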
05acbb558d2ff7abee7b43b1907e422b788068ea
| 835
|
py
|
Python
|
backend/models/consumer_event.py
|
SINTEF-SE/PySSMic
|
9f1883fb9309bc10946b5954437514d874a8c652
|
[
"MIT"
] | null | null | null |
backend/models/consumer_event.py
|
SINTEF-SE/PySSMic
|
9f1883fb9309bc10946b5954437514d874a8c652
|
[
"MIT"
] | 124
|
2018-09-11T10:44:34.000Z
|
2018-11-15T12:34:58.000Z
|
backend/models/consumer_event.py
|
SINTEF-SE/PySSMic
|
9f1883fb9309bc10946b5954437514d874a8c652
|
[
"MIT"
] | 3
|
2018-11-14T14:59:55.000Z
|
2018-11-16T08:16:31.000Z
|
class ConsumerEvent:
regTime = 0
startTime = 0
latestTime = 0
csvName = ""
houseId = 0
deviceId = 0
id = 0
def __init__(self, reg_time, start_time, latest_time, ids, csv_name):
self.regTime = int(reg_time)
self.startTime = int(start_time)
self.latestTime = int(latest_time)
self.csvName = csv_name
ids_list = list(map(int, ids.replace(
"[", "").replace("]", "").split(":")))
self.houseId = int(ids_list[0])
self.deviceId = int(ids_list[1])
self.id = int(ids_list[2])
def __str__(self):
return "RegTime %r, StartTime %r, latestTime %r, csvName %r, houseId %r, deviceId %r, id %r" %\
(self.regTime, self.startTime, self.latestTime,
self.csvName, self.houseId, self.deviceId, self.id)
| 33.4
| 103
| 0.579641
|
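A small usage example for the ConsumerEvent class above (all values are made up); note that ids is a bracketed, colon-separated string:
event = ConsumerEvent(
    reg_time="0",
    start_time="120",
    latest_time="600",
    ids="[3:7:42]",              # parsed into houseId=3, deviceId=7, id=42
    csv_name="profile_3_7.csv",
)
print(event)
# RegTime 0, StartTime 120, latestTime 600, csvName 'profile_3_7.csv', houseId 3, deviceId 7, id 42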
023bdf7d6ebefd2d7c50befd7d732ad266be4972
| 1,346
|
py
|
Python
|
examples/wmbarb.py
|
yang69can/pyngl
|
78a7040ce9de4b7a442b0c3b5faecccab2f01426
|
[
"Apache-2.0"
] | 125
|
2016-11-24T09:04:28.000Z
|
2022-01-22T14:06:56.000Z
|
examples/wmbarb.py
|
yang69can/pyngl
|
78a7040ce9de4b7a442b0c3b5faecccab2f01426
|
[
"Apache-2.0"
] | 52
|
2017-11-08T23:23:02.000Z
|
2022-03-20T03:17:39.000Z
|
examples/wmbarb.py
|
yang69can/pyngl
|
78a7040ce9de4b7a442b0c3b5faecccab2f01426
|
[
"Apache-2.0"
] | 25
|
2017-08-27T10:50:43.000Z
|
2022-01-29T14:56:05.000Z
|
#
# File:
# wmbarb.py
#
# Synopsis:
# Draws four different wind barbs at different positions.
#
# Category:
# Wind barbs.
#
# Author:
# Fred Clare
#
# Date of initial publication:
# March, 2005
#
# Description:
# Draws four different wind barbs at different positions.
# Scales the sizes.
#
# Effects illustrated:
# o Drawing wind barbs.
# o Setting and retrieving wind barb control parameters.
#
# Output:
# A single visualization is produced that draws the wind barbs.
# The retrieved control parameter WBS (for wind barb size) is
# printed to standard output.
#
# Notes:
#
from __future__ import print_function
import Ngl
#
# Draw four wind barbs of the same magnitude, but at different
# locations and in different directions.
#
# Open a workstation.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type, "wmbarb")
#
# Draw wind barbs.
#
x = [0.25, 0.75, 0.75, 0.25] # x,y,u,v can also be numpy arrays.
y = [0.25, 0.25, 0.75, 0.75]
u = [50., -50., -50., 50.0]
v = [50., 50., -50., -50.0]
Ngl.wmsetp("wbs", 0.2) # Scale the size.
Ngl.wmbarb(wks, x, y, u, v) # Draw barbs.
Ngl.frame(wks) # Draw plot.
#
# Retrieve the value of the wbs parameter.
#
size = Ngl.wmgetp("wbs")
print("Current scale factor for wind barb size = {:10.7f}".format((size)))
Ngl.end()
| 21.365079
| 74
| 0.637444
|
76daccb30107bf7843b57c1eb7f99f5bf509d66d
| 830
|
py
|
Python
|
tests/storage/conftest.py
|
gvtulder/vdirsyncer
|
7a92aa20b1910eb5cc3c451a9a2fa62e76c80141
|
[
"BSD-3-Clause"
] | 1
|
2020-11-27T04:17:28.000Z
|
2020-11-27T04:17:28.000Z
|
tests/storage/conftest.py
|
scheibler/vdirsyncer
|
7a92aa20b1910eb5cc3c451a9a2fa62e76c80141
|
[
"BSD-3-Clause"
] | null | null | null |
tests/storage/conftest.py
|
scheibler/vdirsyncer
|
7a92aa20b1910eb5cc3c451a9a2fa62e76c80141
|
[
"BSD-3-Clause"
] | 1
|
2019-09-19T09:51:36.000Z
|
2019-09-19T09:51:36.000Z
|
# -*- coding: utf-8 -*-
import pytest
import uuid
@pytest.fixture
def slow_create_collection(request):
# We need to properly clean up because otherwise we might run into
# storage limits.
to_delete = []
def delete_collections():
for s in to_delete:
s.delete_collection()
request.addfinalizer(delete_collections)
def inner(cls, args, collection):
assert collection.startswith('test')
collection += '-vdirsyncer-ci-' + str(uuid.uuid4())
args['collection'] = collection
args = cls.create_collection(**args)
s = cls(**args)
_clear_collection(s)
assert not list(s.list())
to_delete.append(s)
return args
return inner
def _clear_collection(s):
for href, etag in s.list():
s.delete(href, etag)
| 21.842105
| 70
| 0.625301
|
6ba95a8dff924b7fc03fa4a2d47562fac9c918ed
| 5,976
|
py
|
Python
|
utils/paginations.py
|
BattleWoLFz99/Twitchain
|
6f12aa932488063619482f0ffc1658064151e091
|
[
"MIT"
] | null | null | null |
utils/paginations.py
|
BattleWoLFz99/Twitchain
|
6f12aa932488063619482f0ffc1658064151e091
|
[
"MIT"
] | 1
|
2021-05-27T11:03:53.000Z
|
2021-05-27T11:03:53.000Z
|
utils/paginations.py
|
BattleWoLFz99/Twitchain
|
6f12aa932488063619482f0ffc1658064151e091
|
[
"MIT"
] | null | null | null |
from dateutil import parser
from django.conf import settings
from rest_framework.pagination import BasePagination
from rest_framework.response import Response
from utils.time_constants import MAX_TIMESTAMP
class EndlessPagination(BasePagination):
page_size = 20
def __init__(self):
super(EndlessPagination, self).__init__()
self.has_next_page = False
def to_html(self):
pass
def paginate_ordered_list(self, reverse_ordered_list, request):
if 'created_at__gt' in request.query_params:
            # Accept both ISO-format and integer timestamps
try:
created_at__gt = parser.isoparse(request.query_params['created_at__gt'])
except ValueError:
created_at__gt = int(request.query_params['created_at__gt'])
objects = []
for obj in reverse_ordered_list:
if obj.created_at > created_at__gt:
objects.append(obj)
else:
break
self.has_next_page = False
return objects
index = 0
if 'created_at__lt' in request.query_params:
            # Accept both ISO-format and integer timestamps
try:
created_at__lt = parser.isoparse(request.query_params['created_at__lt'])
except ValueError:
created_at__lt = int(request.query_params['created_at__lt'])
for index, obj in enumerate(reverse_ordered_list):
if obj.created_at < created_at__lt:
break
else:
                # No objects satisfied the condition; return an empty list.
                # Note: this else belongs to the for loop (Python's for-else syntax).
reverse_ordered_list = []
self.has_next_page = len(reverse_ordered_list) > index + self.page_size
return reverse_ordered_list[index: index + self.page_size]
def paginate_queryset(self, queryset, request, view=None):
if 'created_at__gt' in request.query_params:
            # created_at__gt is used by pull-to-refresh to load the newest content.
            # For simplicity, pull-to-refresh does not paginate; all newer data is loaded at once,
            # because if the data is long out of date the client reloads the newest data instead of refreshing.
created_at__gt = request.query_params['created_at__gt']
queryset = queryset.filter(created_at__gt=created_at__gt)
self.has_next_page = False
return queryset.order_by('-created_at')
if 'created_at__lt' in request.query_params:
            # created_at__lt is used when scrolling up (paging down) to load the next page.
            # Find the first page_size + 1 objects with created_at < created_at__lt,
            # ordered by created_at descending.
            # For example, if the current created_at list is [10, 9, 8, 7 .. 1] and created_at__lt=10
            # with page_size = 2, then [9, 8, 7] should be returned; the extra object tells us
            # whether there is a next page, which saves one empty load.
created_at__lt = request.query_params['created_at__lt']
queryset = queryset.filter(created_at__lt=created_at__lt)
queryset = queryset.order_by('-created_at')[:self.page_size + 1]
self.has_next_page = len(queryset) > self.page_size
return queryset[:self.page_size]
def paginate_hbase(self, hb_model, row_key_prefix, request):
if 'created_at__gt' in request.query_params:
            # created_at__gt is used by pull-to-refresh to load the newest content.
            # For simplicity, pull-to-refresh does not paginate; all newer data is loaded at once,
            # because if the data is long out of date the client reloads the newest data instead of refreshing.
created_at__gt = request.query_params['created_at__gt']
start = (*row_key_prefix, created_at__gt)
stop = (*row_key_prefix, MAX_TIMESTAMP)
objects = hb_model.filter(start=start, stop=stop)
if len(objects) and objects[0].created_at == int(created_at__gt):
objects = objects[:0:-1]
else:
objects = objects[::-1]
self.has_next_page = False
return objects
if 'created_at__lt' in request.query_params:
            # created_at__lt is used when scrolling up (paging down) to load the next page.
            # Find the first page_size + 1 objects with timestamp < created_at__lt, ordered by timestamp descending.
            # For example, if the current timestamp list is [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] and created_at__lt=5, page_size = 2,
            # then [4, 3, 2] should be returned; the extra object tells us whether there is a next page and saves one empty load.
            # Since HBase only supports <= (not <) queries, fetch one more item to guarantee page_size + 1 items strictly below created_at__lt.
created_at__lt = request.query_params['created_at__lt']
start = (*row_key_prefix, created_at__lt)
stop = (*row_key_prefix, None)
objects = hb_model.filter(start=start, stop=stop, limit=self.page_size + 2, reverse=True)
if len(objects) and objects[0].created_at == int(created_at__lt):
objects = objects[1:]
if len(objects) > self.page_size:
self.has_next_page = True
objects = objects[:-1]
else:
self.has_next_page = False
return objects
        # No parameters given; load the newest page by default.
prefix = (*row_key_prefix, None)
objects = hb_model.filter(prefix=prefix, limit=self.page_size + 1, reverse=True)
if len(objects) > self.page_size:
self.has_next_page = True
objects = objects[:-1]
else:
self.has_next_page = False
return objects
def paginate_cached_list(self, cached_list, request):
paginated_list = self.paginate_ordered_list(cached_list, request)
        # For a pull-to-refresh (upward page), paginated_list already holds all of the newest data; return it directly.
if 'created_at__gt' in request.query_params:
return paginated_list
        # If there is a next page, cached_list has not been exhausted yet; also return it directly.
if self.has_next_page:
return paginated_list
        # If cached_list is shorter than the maximum limit, it already contains all the data.
if len(cached_list) < settings.REDIS_LIST_LENGTH_LIMIT:
return paginated_list
        # Reaching this point means some data may exist in the database but not in the cache; fall back to a database query.
return None
def get_paginated_response(self, data):
return Response({
'has_next_page': self.has_next_page,
'results': data,
})
| 42.992806
| 101
| 0.621988
|
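A hedged sketch of how a Django REST Framework view might drive the EndlessPagination class above; Tweet and TweetSerializer are placeholder names, not part of this file:
from rest_framework.views import APIView
class TweetListView(APIView):
    def get(self, request):
        paginator = EndlessPagination()
        queryset = Tweet.objects.all()                  # placeholder model
        page = paginator.paginate_queryset(queryset, request)
        data = TweetSerializer(page, many=True).data    # placeholder serializer
        return paginator.get_paginated_response(data)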
0e2dfc6ea4090182068fcd2a46e977d84abf473d
| 1,490
|
py
|
Python
|
pdfstream/vend/qt_kicker.py
|
st3107/pdfstream
|
6e1829d889e5f5400386513efe993ad0596da8a5
|
[
"BSD-3-Clause"
] | null | null | null |
pdfstream/vend/qt_kicker.py
|
st3107/pdfstream
|
6e1829d889e5f5400386513efe993ad0596da8a5
|
[
"BSD-3-Clause"
] | 34
|
2020-07-08T16:24:52.000Z
|
2020-11-21T17:55:13.000Z
|
pdfstream/vend/qt_kicker.py
|
xpdAcq/PDFstream
|
dcd9a368ab80cfb61c4198b9f06d8c972b2e2538
|
[
"BSD-3-Clause"
] | 5
|
2020-12-02T11:26:06.000Z
|
2022-03-30T00:25:30.000Z
|
import asyncio
import sys
_QT_KICKER_INSTALLED = {}
def install_qt_kicker(loop=None, update_rate=0.03):
"""Install a periodic callback to integrate Qt and asyncio event loops.
If a version of the Qt bindings are not already imported, this function
will do nothing.
It is safe to call this function multiple times.
Parameters
----------
loop : event loop, optional
update_rate : number
Seconds between periodic updates. Default is 0.03.
"""
if loop is None:
loop = asyncio.get_event_loop()
global _QT_KICKER_INSTALLED
if loop in _QT_KICKER_INSTALLED:
return
if not any(p in sys.modules for p in ['PyQt4', 'pyside', 'PyQt5']):
return
import matplotlib.backends.backend_qt5
from matplotlib.backends.backend_qt5 import _create_qApp
from matplotlib._pylab_helpers import Gcf
_create_qApp()
qApp = matplotlib.backends.backend_qt5.qApp
try:
_draw_all = Gcf.draw_all # mpl version >= 1.5
except AttributeError:
# slower, but backward-compatible
def _draw_all():
for f_mgr in Gcf.get_all_fig_managers():
f_mgr.canvas.draw_idle()
def _qt_kicker():
# The RunEngine Event Loop interferes with the qt event loop. Here we
# kick it to keep it going.
_draw_all()
qApp.processEvents()
loop.call_later(update_rate, _qt_kicker)
_QT_KICKER_INSTALLED[loop] = loop.call_soon(_qt_kicker)
| 28.653846
| 77
| 0.671141
|
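A hedged usage sketch for install_qt_kicker above; it assumes a Qt binding (e.g. PyQt5) and Matplotlib are already imported by the application, otherwise the call is a no-op:
import asyncio
loop = asyncio.get_event_loop()
install_qt_kicker(loop=loop, update_rate=0.03)
# ... schedule coroutines that update Matplotlib figures on this loop, then:
# loop.run_forever()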
7c5e8470126deafaa3ffba9bb33d28fceece41ae
| 5,244
|
py
|
Python
|
src/niveristand/_decorators.py
|
robertkarol/niveristand-python
|
dd47ea4024a23c4a978636e34e0ffc8de6e2c47a
|
[
"MIT"
] | null | null | null |
src/niveristand/_decorators.py
|
robertkarol/niveristand-python
|
dd47ea4024a23c4a978636e34e0ffc8de6e2c47a
|
[
"MIT"
] | null | null | null |
src/niveristand/_decorators.py
|
robertkarol/niveristand-python
|
dd47ea4024a23c4a978636e34e0ffc8de6e2c47a
|
[
"MIT"
] | null | null | null |
from functools import wraps
import inspect
from niveristand import _errormessages, errors
from niveristand.clientapi._datatypes import DataType
from niveristand.clientapi._datatypes import rtprimitives
rt_seq_mode_id = '__rtseq_mode__'
def nivs_rt_sequence(func):
from niveristand.library._tasks import get_scheduler, nivs_yield
@wraps(func)
def ret_func(*args, **kwargs):
is_top_level = False
this_task = get_scheduler().try_get_task_for_curr_thread()
if this_task is None:
is_top_level = True
this_task = get_scheduler().create_and_register_task_for_top_level()
get_scheduler().sched()
this_task.wait_for_turn()
        retval = None  # ensure a defined return value if a SequenceError is swallowed below
        try:
if is_top_level:
from niveristand.clientapi import RealTimeSequence
RealTimeSequence(func)
retval = func(*args, **kwargs)
except errors.SequenceError:
# generate error already saved this error in the task, so we can just pass.
pass
finally:
if is_top_level:
this_task.mark_stopped()
this_task.iteration_counter.finished = True
nivs_yield()
if this_task.error and this_task.error.should_raise:
raise errors.RunError.RunErrorFactory(this_task.error)
return retval
_set_rtseq_attrs(func, ret_func)
return ret_func
class NivsParam:
"""
Describes a parameter passed down to a function.
Args:
param_name(str): Name of the parameter as it is found in the function definition.
default_elem: Default value and type. Refer to :ref:`api_datatypes_page` for valid values.
by_value(bool): Specifies whether to pass a parameter by value or by reference. Set to True to pass by value.
Set to False to pass by reference.
Refer to :any:`NivsParam.BY_REF` or :any:`NivsParam.BY_VALUE` for details.
"""
BY_REF = False #: Passes a parameter by reference. Allows the called function to modify the value.
BY_VALUE = True #: Passes a parameter by value. Creates a copy of the caller's value for use inside the function.
def __init__(self, param_name, default_elem, by_value):
self.param_name = param_name
self.default_elem = default_elem
self.by_value = by_value
def __call__(self, func):
@wraps(func)
def ret_func(*args, **kwargs):
args = _reconstruct_args(func, args, self)
return func(*args, **kwargs)
_set_rtseq_attrs(func, ret_func)
return ret_func
def _set_rtseq_attrs(func, ret_func):
wrapped = getattr(func, rt_seq_mode_id, None)
if wrapped is None:
wrapped = func
setattr(func, rt_seq_mode_id, wrapped)
setattr(ret_func, rt_seq_mode_id, wrapped)
def _reconstruct_args(f, args, new_param):
real_func = getattr(f, rt_seq_mode_id, f)
new_args = list(args)
arg_spec = inspect.getargspec(real_func)[0]
if new_param is not None:
if new_param.param_name in arg_spec:
idx = arg_spec.index(new_param.param_name)
datatype_name = new_param.default_elem.__class__.__name__
datatype = rtprimitives.get_class_by_name(datatype_name)
if new_param.by_value:
if isinstance(args[idx], DataType):
value = args[idx].value
else:
value = args[idx]
new_args[idx] = datatype(value)
else:
if not isinstance(args[idx], DataType):
value = args[idx]
new_args[idx] = datatype(value)
else:
raise errors.VeristandError(_errormessages.param_description_no_param)
return tuple(new_args)
def task(mt):
"""
Marks a nested function-definition as a task inside a :func:`niveristand.library.multitask`.
Args:
mt: the parent :func:`niveristand.library.multitask`
Use this function as a decorator.
Refer to :func:`niveristand.library.multitask` for more details on using tasks.
"""
def _add_task_to_list(func):
from niveristand.library._tasks import nivs_yield
@wraps(func)
def _internal_task(task_info):
# all tasks start waiting for their turn from the scheduler.
task_info.wait_for_turn()
try:
return func()
except (errors._StopTaskException, errors.SequenceError):
pass
finally:
# if the task was stopped or it finished execution mark it stopped, then yield.
# It won't get scheduled again, and the thread will be marked finished.
task_info.mark_stopped()
nivs_yield()
mt.add_func(_internal_task)
# return the original function, since we already added the wrapped one to the mt.
# this allows the user to call it normally if they choose outside an mt context.
return func
return _add_task_to_list
_VALID_DECORATORS = {
nivs_rt_sequence.__name__: nivs_rt_sequence,
NivsParam.__name__: NivsParam,
task.__name__: task,
}
| 35.432432
| 118
| 0.640351
|
686cfbcd990064cb3c3172a481093713183c09b4
| 15,108
|
py
|
Python
|
chunky3d/vtk_utils.py
|
dslawinski-fp/chunky3d
|
55ae4752f802b2aaf925a03a1048bf7dc292e809
|
[
"MIT"
] | 7
|
2020-02-13T15:40:49.000Z
|
2021-11-16T13:42:36.000Z
|
chunky3d/vtk_utils.py
|
dslawinski-fp/chunky3d
|
55ae4752f802b2aaf925a03a1048bf7dc292e809
|
[
"MIT"
] | 18
|
2020-02-18T11:30:32.000Z
|
2022-03-30T07:17:14.000Z
|
chunky3d/vtk_utils.py
|
dslawinski-fp/chunky3d
|
55ae4752f802b2aaf925a03a1048bf7dc292e809
|
[
"MIT"
] | 6
|
2020-02-13T21:23:16.000Z
|
2022-03-30T14:07:01.000Z
|
import logging
import os
import warnings
import vtk
from vtk.util import numpy_support
import numpy as np
logging.debug(("VTK version:", vtk.VTK_VERSION))
# region VTK I/O
# Great explanation of VTK file and data formats are in
# "VTK Textbook" and "VTK User Guide" chapter 12: "Readers" and 19: "VTK File Formats".
def read_vtk(path, return_reader=False):
path = os.path.abspath(path)
logging.debug('Reading: "{}".'.format(path))
_, file_extension = os.path.splitext(path)
if file_extension.endswith(".vti"):
reader = vtk.vtkXMLImageDataReader()
reader.SetFileName(path)
elif file_extension.endswith(".stl"):
reader = vtk.vtkSTLReader()
reader.SetFileName(path)
elif file_extension.endswith(".vtp"):
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(path)
elif file_extension.endswith(".vtu"):
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName(path)
elif file_extension.endswith(".mhd"):
reader = vtk.vtkMetaImageReader()
reader.SetFileName(path)
elif file_extension.endswith(".vtk"):
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(path)
reader.ReadAllVectorsOn()
reader.ReadAllScalarsOn()
else:
raise Exception("Unsupported file extension.")
reader.Update()
if not return_reader:
return reader.GetOutput()
return reader.GetOutput(), reader
def save_vti(vti, path):
_save(vti, path, vtk.vtkXMLImageDataWriter)
def save_vtp(vtp, path):
_save(vtp, path, vtk.vtkXMLPolyDataWriter)
def save_vtu(vtu, path):
_save(vtu, path, vtk.vtkXMLUnstructuredGridWriter)
def save_stl(vtp, path):
_save(vtp, path, vtk.vtkSTLWriter)
def _save(input_data, path, writer_type):
path = os.path.abspath(path)
logging.debug('Saving: "{}".'.format(path))
writer = writer_type()
writer.SetFileName(path)
writer.SetInputData(input_data)
writer.Write()
# endregion
# region Conversions
def add_np_to_vti(vti, arr_np, arr_name, arr_type=None):
with warnings.catch_warnings():
# FutureWarning:
# Conversion of the second argument of issubdtype from `complex` to `np.complexfloating` is deprecated.
# In future, it will be treated as `np.complex128 == np.dtype(complex).type`.
warnings.simplefilter("ignore")
arr = numpy_support.numpy_to_vtk(
num_array=arr_np.ravel(), deep=True, array_type=arr_type
)
arr.SetName(arr_name)
vti.GetPointData().AddArray(arr)
def vti_to_np(vti, array, components=1):
x, y, z = vti.GetDimensions()
arr_np = numpy_support.vtk_to_numpy(vti.GetPointData().GetArray(array))
if components == 1:
return arr_np.reshape(z, y, x)
else:
return arr_np.reshape(z, y, x, components)
def vtp_to_np(vtp, arrays):
if vtp.GetPolys().GetMaxCellSize() > 3:
cut_triangles = vtk.vtkTriangleFilter()
cut_triangles.SetInputData(vtp)
cut_triangles.Update()
vtp = cut_triangles.GetOutput()
result = [
(
"vertices",
numpy_support.vtk_to_numpy(vtp.GetPoints().GetData()).astype(np.float32),
),
(
"triangles",
numpy_support.vtk_to_numpy(vtp.GetPolys().GetData()).reshape(-1, 4)[:, 1:4],
),
]
result.extend(
[
(arr, numpy_support.vtk_to_numpy(vtp.GetPointData().GetArray(arr)))
for arr in arrays
]
)
return dict(result)
def vti_to_string(vti):
writer = vtk.vtkXMLImageDataWriter()
writer.SetInputData(vti)
writer.WriteToOutputStringOn() # must be before Update call
writer.Update()
return writer.GetOutputString()
def vtp_to_string(vtp):
polydata_writer = vtk.vtkPolyDataWriter()
polydata_writer.WriteToOutputStringOn()
polydata_writer.SetInputData(vtp)
polydata_writer.Update()
return polydata_writer.GetOutputString()
def string_to_vtp(s):
reader = vtk.vtkPolyDataReader()
reader.ReadFromInputStringOn()
reader.SetInputString(s)
reader.Update()
return reader.GetOutput()
def sailfish_vti_to_npy(vti_file, verbose=False, rho_name="rho", v_name="v"):
"""
Read vti files from sailfish and return
gets only rho and v
"""
vti, reader = read_vtk(vti_file, return_reader=True)
# info = reader.GetInformation()
field_names = [
reader.GetPointArrayName(i) for i in range(reader.GetNumberOfPointArrays())
]
logging.getLogger(vti_file).debug(("fields:", field_names))
assert rho_name in field_names and v_name in field_names
data_rho = vti_to_np(vti, rho_name)
data_v = vti_to_np(vti, v_name, components=3)
data_v = np.rollaxis(data_v, 3, 0)
# alt. data = data.transpose(2,1,0)
return data_rho, data_v
# endregion
# region SimpleITK interop.
def read_sitk(path):
import SimpleITK as sitk
path = os.path.abspath(path)
logging.debug('Reading "{}".'.format(path))
return sitk.ReadImage(path)
def save_sitk(img_sitk, path):
import SimpleITK as sitk
path = os.path.abspath(path)
logging.debug('Saving "{}".'.format(path))
sitk.WriteImage(img_sitk, path, False)
def save_sitk_as_vti(img_sitk, path, array_name="data"):
vti = sitk_to_vti(img_sitk, array_name)
save_vti(vti, path)
def sitk_to_vti(img, array_name, array_type=None):
import SimpleITK as sitk
vti = vtk.vtkImageData()
vti.SetSpacing(img.GetSpacing())
vti.SetOrigin(img.GetOrigin())
vti.SetDimensions(img.GetSize())
voxels = sitk.GetArrayFromImage(img)
arr = numpy_support.numpy_to_vtk(
num_array=voxels.ravel(), deep=True, array_type=array_type
)
arr.SetName(array_name)
vti.GetPointData().SetScalars(arr)
return vti
def vti_to_sitk(vti, array):
import SimpleITK as sitk
arr_np = vti_to_np(vti, array)
arr_sitk = sitk.GetImageFromArray(arr_np)
arr_sitk.SetOrigin(vti.GetOrigin())
arr_sitk.SetSpacing(vti.GetSpacing())
return arr_sitk
# endregion
# region Some strange functions
def probe_vt(vtX_file, point_data, fname=None, shape2d=None, verbose=False):
"""get values interpolated from vtu mesh on the set of points (N,3)"""
vtX = read_vtk(vtX_file)
points = vtk.vtkPoints()
for point in point_data:
points.InsertNextPoint(*point)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
probe = vtk.vtkProbeFilter()
probe.SetSourceData(vtX)
probe.SetInputData(polydata)
probe.Update()
out = probe.GetOutput()
# out.GetBounds()
# to get points: numpy_support.vtk_to_numpy(out.GetPoints().GetData()).shape
pd = out.GetAttributesAsFieldData(0)
log = logging.getLogger(vtX_file)
if fname is None:
# all fields
output = dict()
for i in range(pd.GetNumberOfArrays()):
v_interp_on_grid = numpy_support.vtk_to_numpy(pd.GetArray(i))
if shape2d:
v_interp_on_grid = v_interp_on_grid.reshape(shape2d)
log.debug(("appending in output:", pd.GetArrayName(i)))
output[pd.GetArrayName(i)] = v_interp_on_grid
assert len(output) > 0
return output
else:
field_numbers = [
i for i in range(pd.GetNumberOfArrays()) if pd.GetArrayName(i) == fname
]
assert len(field_numbers) == 1
log.debug(("output:", pd.GetArrayName(field_numbers[0])))
v_interp_on_grid = numpy_support.vtk_to_numpy(pd.GetArray(field_numbers[0]))
if shape2d:
return v_interp_on_grid.reshape(shape2d)
return v_interp_on_grid
def probe_vtu(
vtu_file="output.vtu", point_data=[[0.050640, 0.027959, 0.05213]], *args, **kwargs
):
"""get values interpolated from vtu mesh on the set of points (N,3)"""
    return probe_vt(vtu_file, point_data, *args, **kwargs)
def probe_vti(
vti_file="output.vti", point_data=[[0.050640, 0.027959, 0.05213]], *args, **kwargs
):
"""get values of interpolated from vti file mesh on the set of points (N,3)"""
probe_vt(vti_file, *args, **kwargs)
def mod_mesh(du, stlfile="c0006.stl", output_fn="surface.vtp", write=False, sign=1):
"""
Move stl in normal direction
"""
if type(stlfile) is str:
stl = read_vtk(stlfile)
else:
stl = stlfile
vertices = numpy_support.vtk_to_numpy(stl.GetPoints().GetData())
# indices = numpy_support.vtk_to_numpy(stl.GetPolys().GetData()).reshape(-1, 4)[:, 1:4]
merged = vtk.vtkPolyData()
merged.DeepCopy(stl)
# Compute normals to vertices
normalGenerator = vtk.vtkPolyDataNormals()
normalGenerator.SetInputData(merged)
normalGenerator.ComputePointNormalsOn()
normalGenerator.ComputeCellNormalsOff()
normalGenerator.SetSplitting(0)
normalGenerator.SetConsistency(0)
normalGenerator.Update()
merged = normalGenerator.GetOutput()
normals = numpy_support.vtk_to_numpy(merged.GetPointData().GetNormals())
points = vtk.vtkPoints()
for normal, pos in zip(normals, vertices):
points.InsertNextPoint(pos + normal * (-sign * du))
merged.SetPoints(points)
return merged
def xyz_at_dx(du=0.001, stlfile="c0006.stl", sign=1):
"""
Returns equidistant points from stl in normal direction
"""
if type(stlfile) is str:
stl = read_vtk(stlfile)
else:
stl = stlfile
# stl = stlImageActor(stlfile)
vertices = numpy_support.vtk_to_numpy(stl.GetPoints().GetData())
indices = numpy_support.vtk_to_numpy(stl.GetPolys().GetData()).reshape(-1, 4)[
:, 1:4
]
merged = vtk.vtkPolyData()
merged.DeepCopy(stl)
# Compute normals to vertices
normalGenerator = vtk.vtkPolyDataNormals()
normalGenerator.SetInputData(merged)
normalGenerator.ComputePointNormalsOn()
normalGenerator.ComputeCellNormalsOff()
normalGenerator.SetSplitting(0)
normalGenerator.SetConsistency(0)
normalGenerator.Update()
merged = normalGenerator.GetOutput()
normals = numpy_support.vtk_to_numpy(merged.GetPointData().GetNormals())
points = []
for normal, pos in zip(normals, vertices):
points.append(pos + normal * (-sign * du))
return np.array(points)
def probe_at_dx(
du=0.001,
velocity_file=None,
stlfile="c0006.stl",
output_fn="surface.vtp",
velocity_name="v [m/s]",
write=False,
mu=1.0,
move_mesh=False,
sign=1,
):
"""
Equidistant points from stl in normal direction
"""
if type(stlfile) is str:
stl = read_vtk(stlfile)
else:
stl = stlfile
vertices = numpy_support.vtk_to_numpy(stl.GetPoints().GetData())
# indices = numpy_support.vtk_to_numpy(stl.GetPolys().GetData()).reshape(-1, 4)[:, 1:4]
merged = vtk.vtkPolyData()
merged.DeepCopy(stl)
vel_data = read_vtk(velocity_file)
# Compute normals to vertices
normalGenerator = vtk.vtkPolyDataNormals()
normalGenerator.SetInputData(merged)
normalGenerator.ComputePointNormalsOn()
normalGenerator.ComputeCellNormalsOff()
normalGenerator.SetSplitting(0)
normalGenerator.SetConsistency(0)
normalGenerator.Update()
merged = normalGenerator.GetOutput()
normals = numpy_support.vtk_to_numpy(merged.GetPointData().GetNormals())
points = vtk.vtkPoints()
pointsPolyData = vtk.vtkPolyData()
for normal, pos in zip(normals, vertices):
points.InsertNextPoint(pos + normal * (-sign * du))
pointsPolyData.SetPoints(points)
probe_filter = vtk.vtkProbeFilter()
probe_filter.SetInputData(pointsPolyData)
probe_filter.SetSourceData(vel_data)
probe_filter.GetOutputPort()
probe_filter.Update()
probed_data = probe_filter.GetOutput().GetPointData()
if isinstance(velocity_name, str):
v_vec = numpy_support.vtk_to_numpy(probed_data.GetArray(velocity_name))
elif isinstance(velocity_name, list):
vx = numpy_support.vtk_to_numpy(probed_data.GetArray(velocity_name[0]))
vy = numpy_support.vtk_to_numpy(probed_data.GetArray(velocity_name[1]))
vz = numpy_support.vtk_to_numpy(probed_data.GetArray(velocity_name[2]))
v_vec = np.stack((vx, vy, vz), 1).reshape((-1, 3))
else:
raise NotImplementedError
logging.debug(v_vec.shape)
velocity = vtk.vtkFloatArray()
velocity.SetNumberOfComponents(1)
velocity.SetName("X_at_epsilon")
for v in v_vec:
# if np.max(v) > 1e33 or np.min(v) < -1e33:
# s = np.array([0.00])
velocity.InsertNextTypedTuple([v])
merged.GetPointData().AddArray(velocity)
if move_mesh:
merged.SetPoints(points)
logging.debug("moving point by dx inside")
merged.Modified()
if write:
save_vtp(merged, output_fn)
return merged
def scale_and_trans(
vtk_data=None,
output=None,
scale=1000.0,
deltaxyz=[-14.52326308, 180.637182, 161.81502267],
):
"""
Performs scaling (e.g from meters to mm) and translation of the dataset.
Note that `vtk_data` is reader.GetOutput()
"""
transform = vtk.vtkTransform()
transform.Scale(scale, scale, scale)
transform.Translate(*deltaxyz)
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetTransform(transform)
transformFilter.SetInputData(vtk_data)
transformFilter.Update()
if output is None:
return transformFilter.GetOutput()
else:
save_vtu(transformFilter.GetOutput(), output)
def vtp_from_verts_faces(verts, faces):
"""
Make vtp polydata object from vertices and indices/faces
"""
poly = vtk.vtkPolyData()
Points = vtk.vtkPoints()
Points.SetData(numpy_support.numpy_to_vtk(verts.astype(np.float32)))
vtk_id_array = numpy_support.numpy_to_vtk(
np.pad(
faces.astype(np.int64), [(0, 0), (1, 0)], mode="constant", constant_values=3
).flatten(),
array_type=vtk.VTK_ID_TYPE,
)
vtk_cells = vtk.vtkCellArray()
vtk_cells.SetCells(faces.shape[0], vtk_id_array)
poly.SetPoints(Points)
poly.SetPolys(vtk_cells)
return poly
from scipy.ndimage.filters import gaussian_filter
from skimage.measure import marching_cubes
def marching_cubes_with_smooth(mask, sigma=1.2, level=0.5):
"""
Compute isosurface around mask, converts also boundaing box.
Paramteters
-----------
mask : Sparse mask
sigma : blur radius (in lattice units)
level : isolevel after gaussian blurring (e.g. 0.5 for binary mask)
Returns
-------
verts, faces : mesh vertices and triangles.
"""
from .chunky import Sparse
origin, spacing = mask.origin, mask.spacing
mask_s = Sparse.empty_like(mask, dtype=np.float32)
mask_s.copy_from(mask)
mask_s.run(lambda d, _: (gaussian_filter(d, sigma), None), envelope=(5, 5, 5))
    verts, faces, normals, values = marching_cubes(mask_s[:, :, :], level)  # honor the 'level' argument instead of a hard-coded 0.5
verts = np.array(origin, dtype=np.float32) + (
verts[:, ::-1].astype(np.float32) * spacing
)
return verts, faces
# endregion
| 27.619744
| 111
| 0.672756
|
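A hedged round-trip sketch using the helpers in the file above; the file path and the "data" array name are placeholders:
import numpy as np
vti = read_vtk("image.vti")                    # placeholder .vti file
arr = vti_to_np(vti, "data")                   # (z, y, x) view of a placeholder point-data array
add_np_to_vti(vti, np.sqrt(np.abs(arr)), "sqrt_data")
save_vti(vti, "image_with_sqrt.vti")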
66eafc31642f6e2f6be3fa027b4861c46aea96a8
| 5,684
|
py
|
Python
|
src/CCLM_OUTS.py
|
bijanfallah/OI_CCLM
|
831efc1092c70fb75048c467f2f3157e8e15ecb7
|
[
"MIT"
] | null | null | null |
src/CCLM_OUTS.py
|
bijanfallah/OI_CCLM
|
831efc1092c70fb75048c467f2f3157e8e15ecb7
|
[
"MIT"
] | null | null | null |
src/CCLM_OUTS.py
|
bijanfallah/OI_CCLM
|
831efc1092c70fb75048c467f2f3157e8e15ecb7
|
[
"MIT"
] | null | null | null |
from __future__ import division
__author__ = 'Bijan'
'''
This is a function to plot CCLM outputs.
'''
from netCDF4 import Dataset as NetCDFFile
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.backends.backend_pdf import PdfPages
import os
import cartopy.crs as ccrs
import cartopy.feature
def rand_station_locations(N=50,sed=777):
import requests
import random
import re
#data = requests.get(
#"http://www.ecad.eu/download/ensembles/data/ensembles_all_stations_v13.1.txt") # read only 20 000 chars
# "http://www.ecad.eu/download/ensembles/data/Version14.0/ensembles_tg_stations_v14.0.txt") # new version!
data = open("/scratch/users/fallah/OI_EOBS/OI_CCLM/ensembles_tg_stations_v14.0.txt")
Data = []
pattern = re.compile(r"[^-\d]*([\-]{0,1}\d+\.\d+)[^-\d]*")
results = []
for line in data:
        line = line.split("|")  # the file is opened in text mode, so lines are already str
for i in line:
match = pattern.match(i)
if match:
results.append(match.groups()[0])
pairs = []
i = 0
end = len(results)
while i < end - 1:
pairs.append((results[i], results[i + 1]))
i += 1
# # Choose N random stations
random.seed(sed)
rand_obs_number = random.sample(range(0, 8900), 7000)
k = 0
lat={}
lon={}
for i in rand_obs_number:
if 33 < float(pairs[i][0]) < 65 and -10 < float(pairs[i][1]) < 41: #TODO: make the limits smarter
lat[k]= float(pairs[i][0])
lon[k] = float(pairs[i][1])
k = k + 1
return(list(lat.values())[0:N],list(lon.values())[0:N])
if not os.path.exists('TEMP'):
os.makedirs('TEMP')
os.chdir('TEMP')
def Plot_CCLM(dir_mistral='/scratch/b/b324045/cclm-sp_2.1/data/ext/',name='europe_0440.nc',bcolor='red',var='HSURF',flag='TRUE'
,color_map='TRUE', alph=1, grids='TRUE', grids_color='red', rand_obs='FALSE', NN=500):
# type: (object, object, object, object, object, object, object, object, object) -> object
# type: (object, object, object, object, object, object) -> object
#CMD = 'scp $mistral:'+ dir_mistral+ name+' ./' # here I have commented as I copied the files on local
CMD = 'wget users.met.fu-berlin.de/~BijanFallah/'+ dir_mistral+ name
os.system(CMD)
nc = NetCDFFile(name)
os.remove(name)
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
rlats = nc.variables['rlat'][:] # extract/copy the data
rlons = nc.variables['rlon'][:]
t = nc.variables[var][:].squeeze()
nc.close()
fig = plt.figure('1')
fig.set_size_inches(14, 10)
#rp = ccrs.RotatedPole(pole_longitude=-162.0,
# pole_latitude=39.25,
# globe=ccrs.Globe(semimajor_axis=6370000,
# semiminor_axis=6370000))
rp = ccrs.RotatedPole(pole_longitude=-165.0,
pole_latitude=46.0,
globe=ccrs.Globe(semimajor_axis=6370000,
semiminor_axis=6370000))
pc = ccrs.PlateCarree()
ax = plt.axes(projection=rp)
ax.coastlines('50m', linewidth=0.8)
ax.add_feature(cartopy.feature.LAKES,
edgecolor='none', facecolor='lightblue',
linewidth=0.8)
t[t < 0] = 0
if flag=='TRUE':
v = np.linspace(0, 3000, 11, endpoint=True)
cs = plt.contourf(lons, lats, t, v, transform=ccrs.PlateCarree(), cmap=plt.cm.terrain)
if color_map=='TRUE':
cb = plt.colorbar(cs)
cb.set_label('topography [m]', fontsize=20)
cb.ax.tick_params(labelsize=20)
ax.add_feature(cartopy.feature.OCEAN,
edgecolor='black', facecolor='lightblue',
linewidth=0.8)
ax.gridlines()
ax.text(-45.14, 15.24, r'$45\degree N$',
fontsize=15)
ax.text(-45.14, 35.73, r'$60\degree N$',
fontsize=15)
ax.text(-45.14, -3.73, r'$30\degree N$',
fontsize=15)
ax.text(-45.14, -20.73, r'$15\degree N$',
fontsize=15)
ax.text(-19.83, -35.69, r'$0\degree $',
fontsize=15)
ax.text(15.106, -35.69, r'$20\degree E$',
fontsize=15)
#ax.text(26, -29.69, r'$40\degree E$',
# fontsize=15)
if grids=='TRUE':
rlonss, rlatss = np.meshgrid(rlons,rlats)
plt.scatter(rlonss, rlatss, marker='.', c=grids_color, s=2, alpha=.4)
if rand_obs=='TRUE':
s,t = rand_station_locations(NN, sed=777)
#tt,ss=np.meshgrid(t.values(),s.values())
from rotgrid import Rotgrid
mapping = Rotgrid(-165.0,46.0,0,0)
#TT=t.values()
#SS=s.values()
TT=t
SS=s
for i in range(0,NN):#just for sed -i
(TT[i], SS[i]) = mapping.transform(TT[i], SS[i])
plt.scatter(TT[i], SS[i], marker='+', c=grids_color, s=10, zorder=10)
# print(TT[i],SS[i])
plt.hlines(y=min(rlats), xmin=min(rlons), xmax=max(rlons), color=bcolor,linestyles= 'solid', linewidth=2, alpha=alph)
plt.hlines(y=max(rlats), xmin=min(rlons), xmax=max(rlons), color=bcolor,linestyles= 'solid', linewidth=2, alpha=alph)
plt.vlines(x=min(rlons), ymin=min(rlats), ymax=max(rlats), color=bcolor,linestyles= 'solid', linewidth=2, alpha=alph)
plt.vlines(x=max(rlons), ymin=min(rlats), ymax=max(rlats), color=bcolor,linestyles= 'solid', linewidth=2, alpha=alph)
xs, ys, zs = rp.transform_points(pc,
np.array([-17, 105.0]),
np.array([3, 60])).T
ax.set_xlim(xs)
ax.set_ylim(ys)
#os.chdir('../')
| 37.150327
| 127
| 0.57354
|
70e295b836c2e36852f53497506b46a1a6dd3900
| 152
|
py
|
Python
|
Code/TDATA2RDFANDV/converter/Functions/skip_addRows.py
|
oeg-upm/bimerr-epw
|
32f706378fc1953d74e820df498506dab92bbcd7
|
[
"Apache-2.0"
] | null | null | null |
Code/TDATA2RDFANDV/converter/Functions/skip_addRows.py
|
oeg-upm/bimerr-epw
|
32f706378fc1953d74e820df498506dab92bbcd7
|
[
"Apache-2.0"
] | null | null | null |
Code/TDATA2RDFANDV/converter/Functions/skip_addRows.py
|
oeg-upm/bimerr-epw
|
32f706378fc1953d74e820df498506dab92bbcd7
|
[
"Apache-2.0"
] | null | null | null |
def skipRows_addHeader(numberRowstoSkip,contents,headers):
contents = contents[numberRowstoSkip:]
contents.insert(0,headers)
return contents
| 38
| 58
| 0.789474
|
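A minimal example of skipRows_addHeader above (row values are made up):
rows = [["junk"], ["junk"], ["2021-01-01", "3.2"], ["2021-01-02", "4.1"]]
cleaned = skipRows_addHeader(2, rows, ["date", "value"])
# cleaned == [["date", "value"], ["2021-01-01", "3.2"], ["2021-01-02", "4.1"]]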
e0fe48e352f33e2543117a85c367b330f2484dca
| 1,135
|
py
|
Python
|
nemo_text_processing/text_normalization/__init__.py
|
jinsongpan/NeMo
|
27f5f2dc6ecf7e0fd4225eedb2500cee6284e7d7
|
[
"Apache-2.0"
] | null | null | null |
nemo_text_processing/text_normalization/__init__.py
|
jinsongpan/NeMo
|
27f5f2dc6ecf7e0fd4225eedb2500cee6284e7d7
|
[
"Apache-2.0"
] | null | null | null |
nemo_text_processing/text_normalization/__init__.py
|
jinsongpan/NeMo
|
27f5f2dc6ecf7e0fd4225eedb2500cee6284e7d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.verbalizers.verbalize_final import VerbalizeFinalFst
from nemo.utils import logging
try:
import pynini
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
logging.warning(
"`pynini` is not installed ! \n"
"Please run the `nemo_text_processing/setup.sh` script"
"prior to usage of this toolkit."
)
PYNINI_AVAILABLE = False
| 35.46875
| 97
| 0.762115
|
26a5ab4092b2585292625836464de2f3d112e896
| 918
|
py
|
Python
|
python/code_challenges/quick_sort/quick-sort.py
|
Edward-Regalado/data-structures-and-algorithms
|
64dde17da5946fafc82b1bf7e83fc212e6b2e4cf
|
[
"MIT"
] | null | null | null |
python/code_challenges/quick_sort/quick-sort.py
|
Edward-Regalado/data-structures-and-algorithms
|
64dde17da5946fafc82b1bf7e83fc212e6b2e4cf
|
[
"MIT"
] | 20
|
2021-04-02T20:02:34.000Z
|
2022-03-07T10:31:27.000Z
|
python/code_challenges/quick_sort/quick-sort.py
|
Edward-Regalado/data-structures-and-algorithms
|
64dde17da5946fafc82b1bf7e83fc212e6b2e4cf
|
[
"MIT"
] | 1
|
2022-01-25T03:19:12.000Z
|
2022-01-25T03:19:12.000Z
|
def partition(arr, low, high):
i = (low -1)
pivot = arr[high]
for j in range(low, high):
# if curr element is smaller than or equal to pivot
if arr[j] <= pivot:
# increment index of smaller element
i += 1
arr[i], arr[j] = arr[j], arr[i]
arr[i + 1], arr[high] = arr[high], arr[i + 1]
return (i + 1)
def quickSort(arr, low, high):
if len(arr) == 1:
return arr
if low < high:
        # pi is the partitioning index; arr[pi] is now in the right place
pi = partition(arr, low, high)
# separately sort elements before partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
# Driver Code
if __name__ == "__main__":
arr = [8,4,23,42,16,15]
n = len(arr)
quickSort(arr, 0, n - 1)
print("sorted array is: ")
for i in range(n):
print("%d" % arr[i])
| 23.538462
| 71
| 0.541394
|
e56a8b21b95b3e332b5072efbd6ca2fcc959a19d
| 7,570
|
py
|
Python
|
cloudmersive_validate_api_client/models/validate_postal_code_response.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Validate
|
894a3f578c3860db41b3eed179dcc52e02f565a0
|
[
"Apache-2.0"
] | 3
|
2018-06-23T21:37:21.000Z
|
2020-04-20T23:07:36.000Z
|
cloudmersive_validate_api_client/models/validate_postal_code_response.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Validate
|
894a3f578c3860db41b3eed179dcc52e02f565a0
|
[
"Apache-2.0"
] | 1
|
2019-02-04T17:03:35.000Z
|
2019-03-02T20:16:52.000Z
|
cloudmersive_validate_api_client/models/validate_postal_code_response.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Validate
|
894a3f578c3860db41b3eed179dcc52e02f565a0
|
[
"Apache-2.0"
] | 2
|
2019-03-21T15:54:15.000Z
|
2020-05-27T17:30:43.000Z
|
# coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ValidatePostalCodeResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'valid_postal_code': 'bool',
'city': 'str',
'state_or_province': 'str',
'latitude': 'float',
'longitude': 'float'
}
attribute_map = {
'valid_postal_code': 'ValidPostalCode',
'city': 'City',
'state_or_province': 'StateOrProvince',
'latitude': 'Latitude',
'longitude': 'Longitude'
}
def __init__(self, valid_postal_code=None, city=None, state_or_province=None, latitude=None, longitude=None): # noqa: E501
"""ValidatePostalCodeResponse - a model defined in Swagger""" # noqa: E501
self._valid_postal_code = None
self._city = None
self._state_or_province = None
self._latitude = None
self._longitude = None
self.discriminator = None
if valid_postal_code is not None:
self.valid_postal_code = valid_postal_code
if city is not None:
self.city = city
if state_or_province is not None:
self.state_or_province = state_or_province
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
@property
def valid_postal_code(self):
"""Gets the valid_postal_code of this ValidatePostalCodeResponse. # noqa: E501
True if the Postal Code is valid, false otherwise # noqa: E501
:return: The valid_postal_code of this ValidatePostalCodeResponse. # noqa: E501
:rtype: bool
"""
return self._valid_postal_code
@valid_postal_code.setter
def valid_postal_code(self, valid_postal_code):
"""Sets the valid_postal_code of this ValidatePostalCodeResponse.
True if the Postal Code is valid, false otherwise # noqa: E501
:param valid_postal_code: The valid_postal_code of this ValidatePostalCodeResponse. # noqa: E501
:type: bool
"""
self._valid_postal_code = valid_postal_code
@property
def city(self):
"""Gets the city of this ValidatePostalCodeResponse. # noqa: E501
If valid, City corresponding to the input postal code, such as 'Walnut Creek' # noqa: E501
:return: The city of this ValidatePostalCodeResponse. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this ValidatePostalCodeResponse.
If valid, City corresponding to the input postal code, such as 'Walnut Creek' # noqa: E501
:param city: The city of this ValidatePostalCodeResponse. # noqa: E501
:type: str
"""
self._city = city
@property
def state_or_province(self):
"""Gets the state_or_province of this ValidatePostalCodeResponse. # noqa: E501
If valid; State or province corresponding to the input postal code, such as 'CA' or 'California' # noqa: E501
:return: The state_or_province of this ValidatePostalCodeResponse. # noqa: E501
:rtype: str
"""
return self._state_or_province
@state_or_province.setter
def state_or_province(self, state_or_province):
"""Sets the state_or_province of this ValidatePostalCodeResponse.
If valid; State or province corresponding to the input postal code, such as 'CA' or 'California' # noqa: E501
:param state_or_province: The state_or_province of this ValidatePostalCodeResponse. # noqa: E501
:type: str
"""
self._state_or_province = state_or_province
@property
def latitude(self):
"""Gets the latitude of this ValidatePostalCodeResponse. # noqa: E501
If the postal code is valid, the degrees latitude of the centroid of the postal code, null otherwise # noqa: E501
:return: The latitude of this ValidatePostalCodeResponse. # noqa: E501
:rtype: float
"""
return self._latitude
@latitude.setter
def latitude(self, latitude):
"""Sets the latitude of this ValidatePostalCodeResponse.
If the postal code is valid, the degrees latitude of the centroid of the postal code, null otherwise # noqa: E501
:param latitude: The latitude of this ValidatePostalCodeResponse. # noqa: E501
:type: float
"""
self._latitude = latitude
@property
def longitude(self):
"""Gets the longitude of this ValidatePostalCodeResponse. # noqa: E501
If the postal code is valid, the degrees longitude of the centroid of the postal code, null otherwise # noqa: E501
:return: The longitude of this ValidatePostalCodeResponse. # noqa: E501
:rtype: float
"""
return self._longitude
@longitude.setter
def longitude(self, longitude):
"""Sets the longitude of this ValidatePostalCodeResponse.
If the postal code is valid, the degrees longitude of the centroid of the postal code, null otherwise # noqa: E501
:param longitude: The longitude of this ValidatePostalCodeResponse. # noqa: E501
:type: float
"""
self._longitude = longitude
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ValidatePostalCodeResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ValidatePostalCodeResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.913043
| 240
| 0.624571
|
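A hedged example of instantiating the generated model above directly; the values are made up, and in normal use the API client deserializes these from a response:
resp = ValidatePostalCodeResponse(
    valid_postal_code=True,
    city="Walnut Creek",
    state_or_province="CA",
    latitude=37.9,
    longitude=-122.06,
)
print(resp.to_dict())
# {'valid_postal_code': True, 'city': 'Walnut Creek', 'state_or_province': 'CA',
#  'latitude': 37.9, 'longitude': -122.06}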
a15bc11aa0e7f4299833980f93bcf33cde5e685b
| 841
|
py
|
Python
|
tests/test_discovery.py
|
jamescrowley321/py-oidc
|
0e0feb0d8301de66bf98e7fe274541d05e76103b
|
[
"Apache-2.0"
] | 2
|
2020-07-06T16:47:32.000Z
|
2020-07-30T16:44:18.000Z
|
tests/test_discovery.py
|
jamescrowley321/py-identity-model
|
0e0feb0d8301de66bf98e7fe274541d05e76103b
|
[
"Apache-2.0"
] | 18
|
2020-07-02T18:48:48.000Z
|
2021-08-01T21:53:35.000Z
|
tests/test_discovery.py
|
jamescrowley321/py-identity-model
|
0e0feb0d8301de66bf98e7fe274541d05e76103b
|
[
"Apache-2.0"
] | null | null | null |
from py_identity_model import DiscoveryDocumentRequest, get_discovery_document
from .test_utils import get_config
TEST_DISCO_ADDRESS = get_config()["TEST_DISCO_ADDRESS"]
def test_get_discovery_document_is_successful():
disco_doc_request = DiscoveryDocumentRequest(address=TEST_DISCO_ADDRESS)
disco_doc_response = get_discovery_document(disco_doc_request)
assert disco_doc_response.is_successful
assert disco_doc_response.issuer
assert disco_doc_response.jwks_uri
assert disco_doc_response.token_endpoint
assert disco_doc_response.authorization_endpoint
# def test_get_discovery_document_fails():
# disco_doc_request = DiscoveryDocumentRequest(address='http://not.a.real.address')
# disco_doc_response = get_discovery_document(disco_doc_request)
# assert disco_doc_response.is_successful is False
| 40.047619
| 87
| 0.83591
|
79d0d6238a0b80a693cd75b1fbf6e38dd518c185
| 6,810
|
py
|
Python
|
src/aceinna/devices/parsers/dum_packet_parser.py
|
xhaidong/python-openimu
|
9cd20ed61f62d0abd964e37700972bc97e3d0e8c
|
[
"Apache-2.0"
] | null | null | null |
src/aceinna/devices/parsers/dum_packet_parser.py
|
xhaidong/python-openimu
|
9cd20ed61f62d0abd964e37700972bc97e3d0e8c
|
[
"Apache-2.0"
] | null | null | null |
src/aceinna/devices/parsers/dum_packet_parser.py
|
xhaidong/python-openimu
|
9cd20ed61f62d0abd964e37700972bc97e3d0e8c
|
[
"Apache-2.0"
] | null | null | null |
import struct
import collections
from ..dmu.configuration_field import CONFIGURATION_FIELD_DEFINES_SINGLETON
from ..dmu.eeprom_field import EEPROM_FIELD_DEFINES_SINGLETON
from .dmu_field_parser import decode_value
class DMU_PACKET_STATUS(object):
PREV_PACKET_TYPE = ''
PREV_TIME_FIELD_VALUE = ''
PRE_ELAPSED_TIME_SEC = 0.0
def _extract_time_field(configuration):
time_field_dict = {
'S0': 'GPSITOW',
'S1': 'counter',
'A1': 'timeITOW',
'A2': 'timeITOW'
}
field_name = time_field_dict.get(configuration['name'])
field = next((item for item in configuration['payload']
if item['name'] == field_name), None)
return field
def _calculate_time_value(packet_type, payload, field):
if DMU_PACKET_STATUS.PREV_PACKET_TYPE != packet_type:
DMU_PACKET_STATUS.PREV_PACKET_TYPE = packet_type
DMU_PACKET_STATUS.PREV_TIME_FIELD_VALUE = ''
time_field_value = 0.0
now = 0.0
prev = 0.0
offset = int(field['offset'])
if packet_type == 'S0' or packet_type == 'S1':
now = struct.unpack('>H', struct.pack(
'>2B', *payload[offset:offset+2]))[0]
if packet_type == 'A1' or packet_type == 'A2':
now = struct.unpack('>I', struct.pack(
'>4B', *payload[offset:offset+4]))[0]
prev = now if DMU_PACKET_STATUS.PREV_TIME_FIELD_VALUE == '' else DMU_PACKET_STATUS.PREV_TIME_FIELD_VALUE
time_field_value = 1.0/65535.0 * \
(now - prev) if now > prev else 1 - 1.0/65535.0 * (now - prev)
DMU_PACKET_STATUS.PRE_ELAPSED_TIME_SEC += time_field_value
return DMU_PACKET_STATUS.PRE_ELAPSED_TIME_SEC
# input packet
def read_eeprom_parser(payload):
read_address = decode_value('uint16', payload[0:2])
eeprom_data = payload[3:]
eeprom_field = EEPROM_FIELD_DEFINES_SINGLETON.find(read_address)
value, parsed, error = eeprom_field.parse(eeprom_data)
return {
"name": eeprom_field.name,
"value": value,
"parsed": parsed
}, error
def read_field_parser(payload):
data = []
error = False
number_of_fields = payload[0]
data_payload = payload[1:]
for parameter_index in range(number_of_fields):
parameter_id = decode_value(
'uint16', data_payload[parameter_index * 4:parameter_index*4+2])
parameter_value = data_payload[parameter_index *
4+2:parameter_index*4+4]
configuration_field = CONFIGURATION_FIELD_DEFINES_SINGLETON.find(
parameter_id)
value, parsed, error = configuration_field.parse(parameter_value)
# value = unpack_value(configuration_field.field_type, parameter_value)
if error:
break
data.append({
"paramId": parameter_id,
"name": configuration_field.name,
"value": value,
"parsed": parsed
})
return data, error
def write_field_parser(payload):
data = 0
error = False
field_count = decode_value('uint8', payload[0:1])
if isinstance(field_count, bool) and not field_count:
error = True
data = -1
return data, error
# output packet
def id_parser(payload, *args):
'''
Parse id packet
'''
serial_num = int.from_bytes(struct.pack(
'4B', *payload[0:4]), byteorder='big')
mode_string_len = len(payload[4:])
model_string = struct.pack('{0}B'.format(
mode_string_len), *payload[4:]).decode()
split_text = model_string.split(' ')
return {
'name': split_text[0],
'pn': split_text[1],
'sn': serial_num
}
def version_data_parser(payload, *args):
'''
Parse version data
'''
version_string = '{0}.{1}.{2}.{3}.{4}'.format(*payload)
return {
'app_name': 'DMU',
'version': version_string
}
def common_continuous_parser(payload, configuration, scaling):
'''
Unpack output packet
'''
if configuration is None:
return
    data = None
    format_data = None  # defined up front so the function returns None if decoding fails
length = 0
pack_fmt = '>'
for value in configuration['payload']:
if value['type'] == 'float':
pack_fmt += 'f'
length += 4
elif value['type'] == 'uint32':
pack_fmt += 'I'
length += 4
elif value['type'] == 'int32':
pack_fmt += 'i'
length += 4
elif value['type'] == 'int16':
pack_fmt += 'h'
length += 2
elif value['type'] == 'uint16':
pack_fmt += 'H'
length += 2
elif value['type'] == 'double':
pack_fmt += 'd'
length += 8
elif value['type'] == 'int64':
pack_fmt += 'q'
length += 8
elif value['type'] == 'uint64':
pack_fmt += 'Q'
length += 8
elif value['type'] == 'char':
pack_fmt += 'c'
length += 1
elif value['type'] == 'uchar':
pack_fmt += 'B'
length += 1
elif value['type'] == 'uint8':
pack_fmt += 'B'
length += 1
len_fmt = '{0}B'.format(length)
try:
pack_item = struct.pack(len_fmt, *payload)
data = struct.unpack(pack_fmt, pack_item)
out = []
for idx, item in enumerate(configuration['payload']):
scaling_setting = None
scaling_value = 1
if item.__contains__('scaling'):
scaling_setting = scaling[item['scaling']]
if scaling_setting:
scaling_value = eval(scaling_setting)
format_value = data[idx]*scaling_value
out.append((item['name'], format_value))
time_field = _extract_time_field(configuration)
if time_field:
time_value = _calculate_time_value(
configuration['name'], payload, time_field)
out.append(('time', time_value))
format_data = collections.OrderedDict(out)
except Exception as ex: # pylint: disable=broad-except
print(
"error happened when decode the payload of packets, pls restart driver: {0}"
.format(ex))
return format_data
# packet handler
def match_command_handler(packet_type):
'''
Find the handler for specified packet
'''
parser_dict = {
'RF': read_field_parser,
'GF': read_field_parser,
'SF': write_field_parser,
'WF': write_field_parser,
'RE': read_eeprom_parser
}
return parser_dict.get(packet_type)
def match_continuous_handler(packet_type):
parser_dict = {
'ID': id_parser,
'VR': version_data_parser
}
if not parser_dict.__contains__(packet_type):
return common_continuous_parser
return parser_dict.get(packet_type)
| 27.24
| 108
| 0.59163
|
77e7284d70c27288d68de2f6522f0b367e0597cc
| 1,782
|
py
|
Python
|
test.py
|
Barbany/Multi-speaker-Neural-Vocoder
|
a3f5c266603b17bcbe264e750947140f302272c8
|
[
"MIT"
] | 13
|
2018-06-27T09:59:25.000Z
|
2021-05-28T08:05:52.000Z
|
test.py
|
Barbany/Multi-speaker-Neural-Vocoder
|
a3f5c266603b17bcbe264e750947140f302272c8
|
[
"MIT"
] | 2
|
2018-06-28T02:05:41.000Z
|
2020-03-02T14:00:04.000Z
|
test.py
|
Barbany/Multi-speaker-Neural-Vocoder
|
a3f5c266603b17bcbe264e750947140f302272c8
|
[
"MIT"
] | 3
|
2018-06-28T02:02:20.000Z
|
2019-08-11T04:04:32.000Z
|
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
import utils
class myDataset(Dataset):
def __init__(self):
ratio_min=0
ratio_max=0.8
npynamecond= 'conditioning'+'_'+str(ratio_min)+'_'+str(ratio_max)+'.npy'
npynamedata= 'cdatamatrix'+'_'+str(ratio_min)+'_'+str(ratio_max)+'.npy'
self.cond=np.load(npynamecond)
self.data=np.load(npynamedata)
self.num_samples=self.data.shape[0]*self.data.shape[1]
def __getitem__(self, index):
if index < 0 or index >= 100000000:
raise IndexError
nbatch, sample_in_batch = divmod(index, 128)
print('sample in batch', sample_in_batch)
begseq = nbatch * 1040 + 80
print('begseq', begseq)
fromseq = begseq - 80
        print('fromseq', fromseq)
        toseq = begseq + 1040
        print('toseq', toseq)
        reset = False
data = self.data[sample_in_batch][nbatch][fromseq:toseq-1]
print('data get item ', data)
target = self.data[sample_in_batch][nbatch][begseq:toseq]
print('target', target)
return (data, target)
def __len__(self):
return self.num_samples
dset = myDataset()
print('Size dataset:', len(dset))
dloader = DataLoader(dset, 128, shuffle=False, drop_last = True)
print ('END LOADING DATA', '*'*40)
test=dset[1]
print(test)
iteration, num_epochs = (0,2)
for epoch in range(num_epochs):
print('ep')
for (iteration, fulldata) in enumerate(dloader, iteration + 1):
print('op')
(data, target) = fulldata
print('Epoch=', epoch, ' Iteration=', iteration)
data2 = data
print('Data', data2)
target2 = target
print('Target', target2)
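A small worked example of the index arithmetic in myDataset.__getitem__ above (sketch only; 128, 1040 and 80 are the batch size, sequence length and overlap hard-coded in the class):
index = 300
nbatch, sample_in_batch = divmod(index, 128)    # -> (2, 44)
begseq = nbatch * 1040 + 80                     # -> 2160
fromseq, toseq = begseq - 80, begseq + 1040     # -> (2080, 3200)
print(nbatch, sample_in_batch, fromseq, toseq)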
| 27.415385
| 80
| 0.609989
|
7012401ed95c292ade5baa5f293d6fe2fc7ae2f6
| 3,598
|
py
|
Python
|
scripts/generateProjectionEvalData.py
|
dsbrown1331/safe-imitation-learning
|
dc4f40a7f51f4ff98994371d6aa026ec8181557a
|
[
"MIT"
] | 1
|
2020-05-22T14:04:50.000Z
|
2020-05-22T14:04:50.000Z
|
scripts/generateProjectionEvalData.py
|
dsbrown1331/safe-imitation-learning
|
dc4f40a7f51f4ff98994371d6aa026ec8181557a
|
[
"MIT"
] | null | null | null |
scripts/generateProjectionEvalData.py
|
dsbrown1331/safe-imitation-learning
|
dc4f40a7f51f4ff98994371d6aa026ec8181557a
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import bound_methods
from numpy import nan, inf
#plot results for experiment4_1
#rewards are feasible in that all start states end up at goal within 25 steps
sample_flag = 4
chain_length = 10000
step = 0.01
alpha = 100
size = 9
num_reps = 200
rolloutLength = 100
numDemos = [1,5,9]
tol = 0.0001
gamma = 0.95
burn = 100 #TODO play with this
skip = 20
stochastic = 1
delta_conf = 0.95
bounds = ["VaR 95", "VaR 99"]
bound_dict = {}
filePath = "./data/abbeel_projection/"
for bound_type in bounds:
accuracies = []
average_bound = []
for numDemo in numDemos:
predicted = []
bound_error = []
true_perf_ratio = []
#print("=========", numDemo, "=========")
for rep in range(num_reps):
filename = "ProjectionEval_numdemos" + str(numDemo) + "_alpha" + str(alpha) + "_chain" + str(chain_length) + "_step" + str(step) + "0000_L1sampleflag" + str(sample_flag) + "_rolloutLength" + str(rolloutLength) + "_stochastic" + str(stochastic) + "_rep" + str(rep)+ ".txt"
f = open(filePath + filename,'r')
f.readline() #clear out comment from buffer
actual = (float(f.readline())) #get the true ratio
#print "actual", actual
f.readline() #clear out ---
wfcb = (float(f.readline())) #get the worst-case feature count bound
f.readline() #clear out ---
samples = []
            for line in f:  # read in the mcmc chain
                samples.append(float(line))
#print samples
#burn
burned_samples = samples[burn::skip]
#print "max sample", np.max(burned_samples)
#compute confidence bound
if bound_type == "VaR 99":
upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.99, delta_conf)
elif bound_type == "VaR 95":
upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.95, delta_conf)
#print "upper bound", upper_bnd
predicted.append(upper_bnd)
true_perf_ratio.append(actual)
bound_error.append(upper_bnd - actual)
accuracy = 0.0
for i in range(len(predicted)):
if (predicted[i] >= true_perf_ratio[i]) or np.abs(predicted[i] - true_perf_ratio[i]) < tol:
accuracy += 1.0
accuracy = accuracy / len(predicted)
accuracies.append(accuracy)
average_bound.append(np.mean(predicted))
bound_dict[bound_type] = average_bound
print(bound_type)
print("over", numDemos, "demos")
print("bound", average_bound)
print("ave accuracy", np.mean(accuracies))
#figure out Syed and Schapire theoretical bounds to compare against
VaR95_bound = bound_dict["VaR 95"][0]
syed_bounds = []
k = 8 #num_features
for ndemo in numDemos:
syed_bounds.append(bound_methods.syed_schapire_bound(ndemo, gamma, k, delta_conf))
#calculate Syed and Schapire bound to match our bound with 1 demo
m = 0
eps = 100000
while eps > VaR95_bound:
m = m + 1
#print("count", m)
eps = bound_methods.syed_schapire_bound(m, gamma, k, delta_conf)
#print('syed', eps1)
syed_bounds.append(eps)
numDemos.append(m)
print("Syed and Schapire")
print("over", numDemos, "demos")
print("bound", syed_bounds)
| 33.943396
| 287
| 0.594497
|
c556cc90be67c813e6fe42c8e3201dfddb3021d1
| 3,306
|
py
|
Python
|
convlab/human_eval/run.py
|
ngduyanhece/ConvLab
|
a04582a77537c1a706fbf64715baa9ad0be1301a
|
[
"MIT"
] | 405
|
2019-06-17T05:38:47.000Z
|
2022-03-29T15:16:51.000Z
|
convlab/human_eval/run.py
|
ngduyanhece/ConvLab
|
a04582a77537c1a706fbf64715baa9ad0be1301a
|
[
"MIT"
] | 69
|
2019-06-20T22:57:41.000Z
|
2022-03-04T12:12:07.000Z
|
convlab/human_eval/run.py
|
ngduyanhece/ConvLab
|
a04582a77537c1a706fbf64715baa9ad0be1301a
|
[
"MIT"
] | 124
|
2019-06-17T05:11:23.000Z
|
2021-12-31T05:58:18.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from parlai.core.params import ParlaiParser
from parlai.mturk.core.mturk_manager import MTurkManager
from task_config import task_config
from worlds import MultiWozEvalWorld
MASTER_QUALIF = {
'QualificationTypeId': '2F1QJWKUDD8XADTFD2Q0G6UTO95ALH',
'Comparator': 'Exists',
'RequiredToPreview': True
}
MASTER_QUALIF_SDBOX = {
'QualificationTypeId': '2ARFPLSP75KLA8M8DH1HTEQVJT3SY6',
'Comparator': 'Exists',
'RequiredToPreview': True
}
LOCALE_QUALIF_SDBOX = {
'QualificationTypeId': '00000000000000000071',
"Comparator": "In",
'LocaleValues': [{'Country': "HK"}, {'Country': "US"}, {'Country': "CN"}]
}
def main():
"""This task consists of an MTurk agent evaluating a chit-chat model. They
are asked to chat to the model adopting a specific persona. After their
conversation, they are asked to evaluate their partner on several metrics.
"""
argparser = ParlaiParser(False, add_model_args=True)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
argparser.add_argument(
'-dp', '--datapath', default='./',
help='path to datasets, defaults to current directory')
opt = argparser.parse_args()
# add additional model args
opt['override'] = {
'no_cuda': True,
'interactive_mode': True,
'tensorboard_log': False
}
# Set the task name to be the folder name
opt['task'] = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
# append the contents of task_config.py to the configuration
opt.update(task_config)
mturk_agent_id = 'Tourist'
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=[mturk_agent_id]
)
mturk_manager.setup_server()
try:
mturk_manager.start_new_run()
mturk_manager.ready_to_accept_workers()
mturk_manager.create_hits([LOCALE_QUALIF_SDBOX])
mturk_manager.set_onboard_function(onboard_function=None)
# mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
# def assign_worker_roles(workers):
# for index, worker in enumerate(workers):
# worker.id = mturk_agent_ids[index % len(mturk_agent_ids)]
def assign_worker_roles(workers):
workers[0].id = mturk_agent_id
def run_conversation(mturk_manager, opt, workers):
agents = workers[:]
# workers[0].assignment_generator = assignment_generator
world = MultiWozEvalWorld(
opt=opt,
agent=workers[0]
)
while not world.episode_done():
print("parley")
world.parley()
print("save data")
world.save_data()
print("world shutdown")
world.shutdown()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except BaseException:
raise
finally:
mturk_manager.expire_all_unassigned_hits()
mturk_manager.shutdown()
if __name__ == '__main__':
main()
| 28.016949
| 78
| 0.652753
|
4a4f7f114fd195de82b345d89cdc0b90326b77d6
| 1,205
|
py
|
Python
|
python/Ip scanner/main.py
|
Abhishek-op/Awful-Scripts
|
28902890155f54631ab0fd0c341ad7cba24ac425
|
[
"MIT"
] | 2
|
2021-07-20T12:56:28.000Z
|
2021-12-13T02:53:51.000Z
|
python/Ip scanner/main.py
|
Abhishek-op/Awful-Scripts
|
28902890155f54631ab0fd0c341ad7cba24ac425
|
[
"MIT"
] | null | null | null |
python/Ip scanner/main.py
|
Abhishek-op/Awful-Scripts
|
28902890155f54631ab0fd0c341ad7cba24ac425
|
[
"MIT"
] | null | null | null |
import requests
def findip():
print("[--]Enter Hostname")
IP = input()
try:
hurl = 'http://ip-api.com/json/'
        url = hurl + IP
response = requests.get(url)
return response.json()
except Exception as e:
print("[-]Error: check your net connection")
return {}
def main():
data = findip()
print('[-]Fetching details about hostname')
print(
'%-12s => %24s' % ("[-]GLOBAL IP", data.get("query")),
'%-12s => %24s' % ("[-]ISP", data.get("isp")),
'%-12s => %24s' % ("[-]ORG", data.get("org")),
'%-12s => %24s' % ("[-]CITY", data.get("city")),
'%-12s => %24s' % ("[-]Zip code", data.get("zip")),
'%-12s => %24s' % ("[-]COUNTRY", data.get("country")),
'%-12s => %24s' % ("[-]TIMEZONE", data.get("timezone")),
'%-12s => %24s' % ("[-]LATITUDE", data.get("lat")),
'%-12s => %24s' % ("[-]LONGITUDE", data.get("lon")),
'%-12s => %24s' % ("[-]TIMEZONE", data.get("timezone")),
'%-12s => %24s' % ("[-]ORG", data.get("org")),
'%-12s => %24s' % ("[-]As", data.get("as")),
sep="\n"
)
if __name__ == '__main__':
main()
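The printing in main() above assumes findip() returns the ip-api.com JSON fields shown below; the values in this sketch are placeholders, only the keys come from the code.
sample_response = {
    "query": "203.0.113.7", "isp": "ExampleNet", "org": "Example Org",
    "city": "Example City", "zip": "00000", "country": "Exampleland",
    "timezone": "UTC", "lat": 0.0, "lon": 0.0, "as": "AS64500 Example",
}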
| 29.390244
| 64
| 0.443983
|
119c66f00cdef1d73152bbf228b93ffe981f27c1
| 3,253
|
py
|
Python
|
mercury_engine_data_structures/object.py
|
Antidote/mercury-engine-data-structures
|
d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64
|
[
"MIT"
] | null | null | null |
mercury_engine_data_structures/object.py
|
Antidote/mercury-engine-data-structures
|
d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64
|
[
"MIT"
] | null | null | null |
mercury_engine_data_structures/object.py
|
Antidote/mercury-engine-data-structures
|
d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64
|
[
"MIT"
] | null | null | null |
from typing import Dict, Union, Type
import construct
from construct import Construct, Probe, Struct, Adapter
from construct.core import FocusedSeq, Byte, If, IfThenElse, Optional, Peek
import mercury_engine_data_structures.dread_data
from mercury_engine_data_structures.common_types import make_vector
from mercury_engine_data_structures.construct_extensions.misc import ErrorWithMessage, ForceQuit
from mercury_engine_data_structures.formats.property_enum import PropertyEnum
def ConfirmType(name: str):
def check(ctx):
return ctx[f"{name}_type"] != name
return construct.If(
check,
ErrorWithMessage(
lambda ctx: f"Expected {name}, got {ctx[f'{name}_type']} ("
f"{mercury_engine_data_structures.dread_data.all_property_id_to_name().get(ctx[f'{name}_type'])}) "
"without assigned type"
),
)
def _has_duplicated_keys(obj):
seen = set()
for item in obj:
if item.type in seen:
return True
seen.add(item.type)
return False
class ObjectAdapter(Adapter):
def _decode(self, obj: construct.ListContainer, context, path):
if _has_duplicated_keys(obj):
return obj
result = construct.Container()
for item in obj:
if item.type in result:
raise construct.ConstructError(f"Type {item.type} found twice in object", path)
result[item.type] = item.item
return result
def _encode(self, obj: construct.Container, context, path):
if isinstance(obj, construct.ListContainer):
return obj
return construct.ListContainer(
construct.Container(
type=type_,
item=item
)
for type_, item in obj.items()
)
def Object(fields: Dict[str, Union[Construct, Type[Construct]]], *,
debug=False) -> Construct:
all_types = list(fields)
fields = {
name: FocusedSeq(
name,
"next_field" / Optional(Peek(PropertyEnum)),
"remaining" / Optional(Peek(Byte)),
name / IfThenElse(
construct.this._parsing,
If(lambda this: this.remaining is not None and (this.next_field is None or this.next_field not in fields.keys()), conn),
Optional(conn)
)
)
for name, conn in fields.items()
}
for type_name in all_types:
if type_name not in mercury_engine_data_structures.dread_data.all_name_to_property_id():
raise ValueError(f"Unknown type name: {type_name}, not in hashes database")
switch = construct.Switch(
construct.this.type,
fields,
ErrorWithMessage(
lambda ctx: f"Type {ctx.type} not known, valid types are {all_types}."
)
)
switch.name = "item"
r = ObjectAdapter(make_vector(
Struct(
"type" / PropertyEnum,
switch,
)
))
if debug:
r.name = "fields"
r = construct.FocusedSeq(
"fields",
r,
"next_enum" / PropertyEnum,
"probe" / Probe(lookahead=0x8),
ForceQuit(),
)
return r
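A small illustrative check (not part of the module) of the duplicate-key guard that ObjectAdapter relies on when deciding whether to keep the decoded list as-is:
if __name__ == "__main__":
    items = construct.ListContainer([
        construct.Container(type="fScale", item=1.0),
        construct.Container(type="fScale", item=2.0),
    ])
    print(_has_duplicated_keys(items))   # True -> decode keeps the raw list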
| 30.401869
| 136
| 0.602828
|
adf627b923234f97f86ede0badcba405dbf2ea52
| 2,555
|
py
|
Python
|
scraper.py
|
dietofworms/img-scraper
|
81a90a5354b14d8fd4c3a2c4d6798a18bd5859a5
|
[
"MIT"
] | null | null | null |
scraper.py
|
dietofworms/img-scraper
|
81a90a5354b14d8fd4c3a2c4d6798a18bd5859a5
|
[
"MIT"
] | 1
|
2019-05-02T07:22:52.000Z
|
2019-05-02T07:28:19.000Z
|
scraper.py
|
skarmaniolos/img-scraper
|
81a90a5354b14d8fd4c3a2c4d6798a18bd5859a5
|
[
"MIT"
] | null | null | null |
import sys
import os
import requests
from re import sub
from urllib.parse import urlsplit  # needed by download_file when no filename is given
from bs4 import BeautifulSoup
from io import BytesIO
from PIL import Image
# gets img from url and save to destination
def download_file(url, dest_dir, filename=None):
if(filename is None):
filename = urlsplit(url).path.split("/")[-1]
request = requests.get(url)
image = Image.open(BytesIO(request.content))
if image.mode in ('RGBA', 'LA'):
#background = Image.new(image.mode[:-1], image.size, fill_color)
#background.paste(image, image.split()[-1])
background = img_alpha_to_colour(image)
image = background
image.save(os.path.join(dest_dir, filename))
# handles img that have no alpha layer
def img_alpha_to_colour(image, color=(255, 255, 255)):
image.load() # needed for split()
background = Image.new('RGB', image.size, color)
background.paste(image, mask=image.split()[3]) # 3 is the alpha channel
return background
# takes html as input, creates list, appends img elements to list
def get_file_urls(soup):
file_urls = []
# fix href errors
for anchor in soup.find_all(attrs={'class':'fileThumb'}):
file_urls.append(sub("//", "https://", anchor.get('href')))
return file_urls
# iterate through posts, find img filenames
def get_filenames(soup):
filenames = []
for anchor in soup.find_all(attrs={'class': 'fileText'}):
filenames.append(anchor.get_text().split(" ")[1])
        print(filenames[-1])
return filenames
# primary function
def main(url):
print("┌─────────────────────────────────────────────────────────────────────┐")
print("│ Image Scraping Tool built by Steven Karmaniolos │")
print("└─────────────────────────────────────────────────────────────────────┘")
print("Downloading images from " + url + " ...")
    dest_dir = os.path.join(os.getcwd(), 'scraped_imgs')
    os.makedirs(dest_dir, exist_ok=True)  # make sure the output folder exists
html = requests.get(url).text
soup = BeautifulSoup(html, "html.parser")
file_urls = get_file_urls(soup)
filenames = get_filenames(soup)
for i in range(len(file_urls)):
print("Downloading file: " + filenames[i])
download_file(file_urls[i], dest_dir, filenames[i])
print("┌─────────────────────────────────────────────────────────────────────┐")
print("│ Scrape completed. Your files are now ready. │")
print("└─────────────────────────────────────────────────────────────────────┘")
main(sys.argv[1])  # main() requires the target page URL; take it from the command line
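A quick illustration of the two helpers on a made-up page snippet; the markup and host are hypothetical, but the selectors match the ones used above.
example_html = ('<a class="fileThumb" href="//i.example.net/123.png"></a>'
                '<div class="fileText">File: 123.png (12 KB)</div>')
example_soup = BeautifulSoup(example_html, "html.parser")
print(get_file_urls(example_soup))   # ['https://i.example.net/123.png']
print(get_filenames(example_soup))   # ['123.png']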
| 31.9375
| 84
| 0.564384
|
5afe7c32e42242dfd29aff70628d15841341e088
| 1,706
|
py
|
Python
|
sssh/db.py
|
phizaz/ssher
|
a273fac6b8d9016dd5cc59dc32c888658ee8172c
|
[
"MIT"
] | 3
|
2016-06-12T07:41:58.000Z
|
2018-04-13T13:36:01.000Z
|
sssh/db.py
|
phizaz/ssher
|
a273fac6b8d9016dd5cc59dc32c888658ee8172c
|
[
"MIT"
] | null | null | null |
sssh/db.py
|
phizaz/ssher
|
a273fac6b8d9016dd5cc59dc32c888658ee8172c
|
[
"MIT"
] | null | null | null |
from os.path import join, exists, dirname
DB_PATH = join(dirname(__file__), 'ssher_db.txt')
class DB:
def __init__(self):
self.db = self.read_db()
def get_name(self, name):
names = list(map(lambda x: x[0], self.db))
if name not in names:
return False
return self.db[names.index(name)]
def apply_usernames(self, name, usernames):
row = self.get_name(name)
if not row:
print('name not found')
return False
row[2] = usernames
self.apply_db()
return True
def add_row(self, name, host, usernames):
row = self.get_name(name)
if row:
print('duplicate name')
return False
self.db.append([name, host, usernames])
self.apply_db()
return True
def remove_name(self, name):
row = self.get_name(name)
if not row:
print('name not found')
return False
self.db.remove(row)
self.apply_db()
return True
def read_db(self):
# name host username(s)
if not exists(DB_PATH):
with open(DB_PATH, 'w') as _:
pass
db = []
with open(DB_PATH, 'r') as handle:
for line in handle:
name, host, user = line.strip().split(' ')
usernames = user.split(',')
db.append([name, host, usernames])
return db
def apply_db(self):
with open(DB_PATH, 'w') as handle:
for name, host, usernames in self.db:
handle.write('{}\n'.format(
' '.join([name, host, ','.join(usernames)])
))
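A minimal usage sketch; the name, address and users are made up, and note that constructing DB() reads/writes ssher_db.txt next to this module.
if __name__ == '__main__':
    db = DB()
    db.add_row('example', '192.0.2.10', ['alice', 'bob'])
    print(db.get_name('example'))    # ['example', '192.0.2.10', ['alice', 'bob']]
    db.remove_name('example')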
| 27.516129
| 63
| 0.516999
|
dffed03eb0f9a2b84c31874e6b95e239366cb3eb
| 3,297
|
py
|
Python
|
tests/attacks/evasion/test_auto_projected_gradient_descent.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | 1
|
2020-07-12T03:45:23.000Z
|
2020-07-12T03:45:23.000Z
|
tests/attacks/evasion/test_auto_projected_gradient_descent.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | 105
|
2020-08-24T06:15:43.000Z
|
2022-03-24T08:03:16.000Z
|
tests/attacks/evasion/test_auto_projected_gradient_descent.py
|
mcguires5/adversarial-robustness-toolbox
|
f8b0552859eaf31c5b66e1d14d28b89178795ad0
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import pytest
import numpy as np
from art.attacks.evasion import AutoProjectedGradientDescent
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.estimators.classification.classifier import ClassifierMixin
from tests.attacks.utils import backend_test_classifier_type_check_fail
logger = logging.getLogger(__name__)
@pytest.fixture()
def fix_get_mnist_subset(get_mnist_dataset):
(x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_mnist_dataset
n_train = 100
n_test = 10
yield (x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test])
@pytest.mark.only_with_platform("tensorflow")
def test_generate(is_tf_version_2, fix_get_mnist_subset, get_image_classifier_list_for_attack):
if is_tf_version_2:
classifier_list = get_image_classifier_list_for_attack(AutoProjectedGradientDescent)
if classifier_list is None:
logging.warning("Couldn't perform this test because no classifier is defined")
return
for classifier in classifier_list:
attack = AutoProjectedGradientDescent(
estimator=classifier,
norm=np.inf,
eps=0.3,
eps_step=0.1,
max_iter=5,
targeted=False,
nb_random_init=1,
batch_size=32,
loss_type="cross_entropy",
)
(x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
x_train_mnist_adv = attack.generate(x=x_train_mnist, y=y_train_mnist)
assert np.mean(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(0.0329, abs=0.005)
assert np.max(np.abs(x_train_mnist_adv - x_train_mnist)) == pytest.approx(0.3, abs=0.01)
def test_classifier_type_check_fail():
backend_test_classifier_type_check_fail(
AutoProjectedGradientDescent, [BaseEstimator, LossGradientsMixin, ClassifierMixin]
)
if __name__ == "__main__":
pytest.cmdline.main("-q -s {} --mlFramework=tensorflow --durations=0".format(__file__).split(" "))
| 41.734177
| 120
| 0.733394
|
dde99939b5d5fb2db0fe5f3efbc004af3fc48ac1
| 491
|
py
|
Python
|
src/utils/config.py
|
dciborow/SubscriptionPolicy
|
100718bca552fb92edcb1867a94aba1f2d131edc
|
[
"MIT"
] | 1
|
2021-07-23T16:37:17.000Z
|
2021-07-23T16:37:17.000Z
|
src/utils/config.py
|
dciborow/SubscriptionPolicy
|
100718bca552fb92edcb1867a94aba1f2d131edc
|
[
"MIT"
] | null | null | null |
src/utils/config.py
|
dciborow/SubscriptionPolicy
|
100718bca552fb92edcb1867a94aba1f2d131edc
|
[
"MIT"
] | 1
|
2021-06-16T20:12:35.000Z
|
2021-06-16T20:12:35.000Z
|
import os
import json
class Configuration:
def __init__(self, config_path: str):
if not os.path.exists(config_path):
raise Exception("Config file not found: ", config_path)
config = None
with open(config_path, "r") as input:
config = input.readlines()
config = "\n".join(config)
config = json.loads(config)
if config:
for key in config:
setattr(self, key, config[key])
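A hypothetical usage sketch: every key in the JSON file becomes an attribute on the Configuration instance (config.json and its keys below are made up).
if __name__ == "__main__":
    # config.json is assumed to contain e.g. {"host": "localhost", "port": 8080}
    cfg = Configuration("config.json")
    print(cfg.host, cfg.port)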
| 28.882353
| 67
| 0.562118
|
c3e46c53bb8d3a50ec9f534c5fa382dde246b2d5
| 4,782
|
py
|
Python
|
src/reservations/azext_reservations/vendored_sdks/quota/operations/_operation_operations.py
|
rahuls-microsoft/azure-cli-extensions
|
815a8bcf3d025af1c191498ad29456006fc30e14
|
[
"MIT"
] | null | null | null |
src/reservations/azext_reservations/vendored_sdks/quota/operations/_operation_operations.py
|
rahuls-microsoft/azure-cli-extensions
|
815a8bcf3d025af1c191498ad29456006fc30e14
|
[
"MIT"
] | null | null | null |
src/reservations/azext_reservations/vendored_sdks/quota/operations/_operation_operations.py
|
rahuls-microsoft/azure-cli-extensions
|
815a8bcf3d025af1c191498ad29456006fc30e14
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class OperationOperations(object):
"""OperationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure_reservation_api.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.OperationList"]
"""Get operations.
List all the operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure_reservation_api.models.OperationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-10-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Capacity/operations'} # type: ignore
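A self-contained illustration (not part of the generated client) of the ItemPaged pattern used by list(): get_next fetches one "page" per continuation token, and extract_data returns (next_token, iterator_of_items); the page data here is made up.
if __name__ == "__main__":
    from azure.core.paging import ItemPaged

    pages = {None: (["op1", "op2"], "page2"), "page2": (["op3"], None)}

    def get_next(continuation_token=None):
        return pages[continuation_token]

    def extract_data(page):
        items, next_token = page
        return next_token, iter(items)

    for item in ItemPaged(get_next, extract_data):
        print(item)   # op1, op2, op3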
| 42.318584
| 133
| 0.652656
|
734e5fd26caff99e6745ceb278598defd5d1f38e
| 774
|
py
|
Python
|
main.py
|
KidsInMyBasement/Halloween-Art-Project-21
|
616bd1254eaa81148d381b5a63613a92f91eda67
|
[
"MIT"
] | null | null | null |
main.py
|
KidsInMyBasement/Halloween-Art-Project-21
|
616bd1254eaa81148d381b5a63613a92f91eda67
|
[
"MIT"
] | null | null | null |
main.py
|
KidsInMyBasement/Halloween-Art-Project-21
|
616bd1254eaa81148d381b5a63613a92f91eda67
|
[
"MIT"
] | null | null | null |
import turtle
import random as rand
score = 0
#-----game configuration----
"""def score():"""
# shape = "turtle"
color = "purple"
size = 4
tr = turtle.Turtle()
wn = turtle.Screen()
wn.addshape('bg2.gif')
wn.bgcolor("black")
wn.addshape('ghost.gif')
# x = tr.shape('ghost.gif')
#-----initialize turtle-----
t = turtle.Turtle()
t.shape('ghost.gif')
t.fillcolor(color)
t.shapesize(size)
#-----game functions--------
def change_position():
    new_x_pos = rand.randint(-200,200)
new_y_pos = rand.randint(-150,150)
t.penup()
t.goto(new_x_pos,new_y_pos)
def squaro_clicked(x,y):
change_position()
#-----events----------------
t.onclick(squaro_clicked)
wn = turtle.Screen()
wn.mainloop()
| 22.114286
| 37
| 0.625323
|
3e77d7b0f59879e19f461e32e9049ae0ed623923
| 137,185
|
py
|
Python
|
dojo/forms.py
|
maerifat/django-DefectDojo
|
ba1a415219ff20e8b4e909ef14f750de9b80297e
|
[
"BSD-3-Clause"
] | null | null | null |
dojo/forms.py
|
maerifat/django-DefectDojo
|
ba1a415219ff20e8b4e909ef14f750de9b80297e
|
[
"BSD-3-Clause"
] | 206
|
2020-04-20T16:03:18.000Z
|
2022-01-15T23:07:48.000Z
|
dojo/forms.py
|
maerifat/django-DefectDojo
|
ba1a415219ff20e8b4e909ef14f750de9b80297e
|
[
"BSD-3-Clause"
] | 1
|
2020-12-06T15:44:44.000Z
|
2020-12-06T15:44:44.000Z
|
import os
import re
from datetime import datetime, date
import pickle
from crispy_forms.bootstrap import InlineRadios, InlineCheckboxes
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from django.db.models import Count, Q
from dateutil.relativedelta import relativedelta
from django import forms
from django.contrib.auth.password_validation import validate_password
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms import modelformset_factory
from django.forms import utils as form_utils
from django.forms.widgets import Widget, Select
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils import timezone
import tagulous
from dojo.endpoint.utils import endpoint_get_or_create, endpoint_filter, \
validate_endpoints_to_add
from dojo.models import Finding, Finding_Group, Product_Type, Product, Note_Type, \
Check_List, User, Engagement, Test, Test_Type, Notes, Risk_Acceptance, \
Development_Environment, Dojo_User, Endpoint, Stub_Finding, Finding_Template, \
JIRA_Issue, JIRA_Project, JIRA_Instance, GITHUB_Issue, GITHUB_PKey, GITHUB_Conf, UserContactInfo, Tool_Type, \
Tool_Configuration, Tool_Product_Settings, Cred_User, Cred_Mapping, System_Settings, Notifications, \
Languages, Language_Type, App_Analysis, Objects_Product, Benchmark_Product, Benchmark_Requirement, \
Benchmark_Product_Summary, Rule, Child_Rule, Engagement_Presets, DojoMeta, \
Engagement_Survey, Answered_Survey, TextAnswer, ChoiceAnswer, Choice, Question, TextQuestion, \
ChoiceQuestion, General_Survey, Regulation, FileUpload, SEVERITY_CHOICES, Product_Type_Member, \
Product_Member, Global_Role, Dojo_Group, Product_Group, Product_Type_Group, Dojo_Group_Member, \
Product_API_Scan_Configuration
from dojo.tools.factory import requires_file, get_choices_sorted, requires_tool_type
from django.urls import reverse
from tagulous.forms import TagField
import logging
from crum import get_current_user
from dojo.utils import get_system_setting, get_product
from django.conf import settings
from dojo.authorization.roles_permissions import Permissions
from dojo.product_type.queries import get_authorized_product_types
from dojo.product.queries import get_authorized_products
from dojo.finding.queries import get_authorized_findings
from dojo.user.queries import get_authorized_users_for_product_and_product_type
from dojo.group.queries import get_authorized_groups, get_group_member_roles
logger = logging.getLogger(__name__)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
FINDING_STATUS = (('verified', 'Verified'),
('false_p', 'False Positive'),
('duplicate', 'Duplicate'),
('out_of_scope', 'Out of Scope'))
class MultipleSelectWithPop(forms.SelectMultiple):
def render(self, name, *args, **kwargs):
html = super(MultipleSelectWithPop, self).render(name, *args, **kwargs)
popup_plus = '<div class="input-group dojo-input-group">' + html + '<span class="input-group-btn"><a href="/' + name + '/add" class="btn btn-primary" class="add-another" id="add_id_' + name + '" onclick="return showAddAnotherPopup(this);"><span class="glyphicon glyphicon-plus"></span></a></span></div>'
return mark_safe(popup_plus)
class MonthYearWidget(Widget):
"""
A Widget that splits date input into two <select> boxes for month and year,
with 'day' defaulting to the first of the month.
Based on SelectDateWidget, in
django/trunk/django/forms/extras/widgets.py
"""
none_value = (0, '---')
month_field = '%s_month'
year_field = '%s_year'
def __init__(self, attrs=None, years=None, required=True):
# years is an optional list/tuple of years to use in the
# "year" select box.
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = date.today().year
self.years = list(range(this_year - 10, this_year + 1))
def render(self, name, value, attrs=None, renderer=None):
try:
year_val, month_val = value.year, value.month
except AttributeError:
year_val = month_val = None
if isinstance(value, str):
match = RE_DATE.match(value)
if match:
                year_val, month_val, day_val = [int(v) for v in match.groups()]
output = []
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
month_choices = list(MONTHS.items())
if not (self.required and value):
month_choices.append(self.none_value)
month_choices.sort()
local_attrs = self.build_attrs({'id': self.month_field % id_})
s = Select(choices=month_choices)
select_html = s.render(self.month_field % name, month_val, local_attrs)
output.append(select_html)
year_choices = [(i, i) for i in self.years]
if not (self.required and value):
year_choices.insert(0, self.none_value)
local_attrs['id'] = self.year_field % id_
s = Select(choices=year_choices)
select_html = s.render(self.year_field % name, year_val, local_attrs)
output.append(select_html)
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
return '%s_month' % id_
id_for_label = classmethod(id_for_label)
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
if y == m == "0":
return None
if y and m:
return '%s-%s-%s' % (y, m, 1)
return data.get(name, None)
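# Hypothetical usage sketch (not an existing DefectDojo form): MonthYearWidget
# renders month/year <select> boxes, and value_from_datadict above rebuilds a
# "YYYY-M-1" date string from the two posted values.
class ExampleMonthYearForm(forms.Form):
    start = forms.DateField(widget=MonthYearWidget(years=range(2015, 2026)))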
class Product_TypeForm(forms.ModelForm):
description = forms.CharField(widget=forms.Textarea(attrs={}),
required=False)
class Meta:
model = Product_Type
fields = ['name', 'description', 'critical_product', 'key_product']
class Delete_Product_TypeForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Product_Type
fields = ['id']
class Edit_Product_Type_MemberForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(Edit_Product_Type_MemberForm, self).__init__(*args, **kwargs)
self.fields['product_type'].disabled = True
self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name')
self.fields['user'].disabled = True
class Meta:
model = Product_Type_Member
fields = ['product_type', 'user', 'role']
class Add_Product_Type_MemberForm(forms.ModelForm):
users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users')
def __init__(self, *args, **kwargs):
super(Add_Product_Type_MemberForm, self).__init__(*args, **kwargs)
current_members = Product_Type_Member.objects.filter(product_type=self.initial["product_type"]).values_list('user', flat=True)
self.fields['users'].queryset = Dojo_User.objects.exclude(
Q(is_superuser=True) |
Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name')
self.fields['product_type'].disabled = True
class Meta:
model = Product_Type_Member
fields = ['product_type', 'users', 'role']
class Add_Product_Type_Member_UserForm(forms.ModelForm):
product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types')
def __init__(self, *args, **kwargs):
super(Add_Product_Type_Member_UserForm, self).__init__(*args, **kwargs)
current_members = Product_Type_Member.objects.filter(user=self.initial['user']).values_list('product_type', flat=True)
self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \
.exclude(id__in=current_members)
self.fields['user'].disabled = True
class Meta:
model = Product_Type_Member
fields = ['product_types', 'user', 'role']
class Delete_Product_Type_MemberForm(Edit_Product_Type_MemberForm):
def __init__(self, *args, **kwargs):
super(Delete_Product_Type_MemberForm, self).__init__(*args, **kwargs)
self.fields['role'].disabled = True
class Test_TypeForm(forms.ModelForm):
class Meta:
model = Test_Type
exclude = ['']
class Development_EnvironmentForm(forms.ModelForm):
class Meta:
model = Development_Environment
fields = ['name']
class Delete_Dev_EnvironmentForm(forms.ModelForm):
class Meta:
model = Development_Environment
fields = ['id']
class ProductForm(forms.ModelForm):
name = forms.CharField(max_length=255, required=True)
description = forms.CharField(widget=forms.Textarea(attrs={}),
required=True)
prod_type = forms.ModelChoiceField(label='Product Type',
queryset=Product_Type.objects.none(),
required=True)
product_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
technical_contact = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
team_manager = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=False)
def __init__(self, *args, **kwargs):
super(ProductForm, self).__init__(*args, **kwargs)
self.fields['prod_type'].queryset = get_authorized_product_types(Permissions.Product_Type_Add_Product)
class Meta:
model = Product
fields = ['name', 'description', 'tags', 'product_manager', 'technical_contact', 'team_manager', 'prod_type', 'regulations',
'business_criticality', 'platform', 'lifecycle', 'origin', 'user_records', 'revenue', 'external_audience',
'internet_accessible', 'enable_simple_risk_acceptance', 'enable_full_risk_acceptance']
class DeleteProductForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Product
fields = ['id']
class DeleteFindingGroupForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Finding_Group
fields = ['id']
class Edit_Product_MemberForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(Edit_Product_MemberForm, self).__init__(*args, **kwargs)
self.fields['product'].disabled = True
self.fields['user'].queryset = Dojo_User.objects.order_by('first_name', 'last_name')
self.fields['user'].disabled = True
class Meta:
model = Product_Member
fields = ['product', 'user', 'role']
class Add_Product_MemberForm(forms.ModelForm):
users = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.none(), required=True, label='Users')
def __init__(self, *args, **kwargs):
super(Add_Product_MemberForm, self).__init__(*args, **kwargs)
self.fields['product'].disabled = True
current_members = Product_Member.objects.filter(product=self.initial["product"]).values_list('user', flat=True)
self.fields['users'].queryset = Dojo_User.objects.exclude(
Q(is_superuser=True) |
Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name')
class Meta:
model = Product_Member
fields = ['product', 'users', 'role']
class Add_Product_Member_UserForm(forms.ModelForm):
products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products')
def __init__(self, *args, **kwargs):
super(Add_Product_Member_UserForm, self).__init__(*args, **kwargs)
current_members = Product_Member.objects.filter(user=self.initial["user"]).values_list('product', flat=True)
self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \
.exclude(id__in=current_members)
self.fields['user'].disabled = True
class Meta:
model = Product_Member
fields = ['products', 'user', 'role']
class Delete_Product_MemberForm(Edit_Product_MemberForm):
def __init__(self, *args, **kwargs):
super(Delete_Product_MemberForm, self).__init__(*args, **kwargs)
self.fields['role'].disabled = True
class NoteTypeForm(forms.ModelForm):
description = forms.CharField(widget=forms.Textarea(attrs={}),
required=True)
class Meta:
model = Note_Type
fields = ['name', 'description', 'is_single', 'is_mandatory']
class EditNoteTypeForm(NoteTypeForm):
def __init__(self, *args, **kwargs):
is_single = kwargs.pop('is_single')
super(EditNoteTypeForm, self).__init__(*args, **kwargs)
if is_single is False:
self.fields['is_single'].widget = forms.HiddenInput()
class DisableOrEnableNoteTypeForm(NoteTypeForm):
def __init__(self, *args, **kwargs):
super(DisableOrEnableNoteTypeForm, self).__init__(*args, **kwargs)
self.fields['name'].disabled = True
self.fields['description'].disabled = True
self.fields['is_single'].disabled = True
self.fields['is_mandatory'].disabled = True
self.fields['is_active'].disabled = True
class Meta:
model = Note_Type
fields = '__all__'
class DojoMetaDataForm(forms.ModelForm):
value = forms.CharField(widget=forms.Textarea(attrs={}),
required=True)
def full_clean(self):
super(DojoMetaDataForm, self).full_clean()
try:
self.instance.validate_unique()
except ValidationError:
msg = "A metadata entry with the same name exists already for this object."
self.add_error('name', msg)
class Meta:
model = DojoMeta
fields = '__all__'
class ImportScanForm(forms.Form):
scan_date = forms.DateTimeField(
required=False,
label="Scan Completion Date",
help_text="Scan completion date will be used on all findings.",
widget=forms.TextInput(attrs={'class': 'datepicker'}))
minimum_severity = forms.ChoiceField(help_text='Minimum severity level to be imported',
required=True,
choices=SEVERITY_CHOICES)
active = forms.BooleanField(help_text="Select if these findings are currently active.", required=False, initial=True)
verified = forms.BooleanField(help_text="Select if these findings have been verified.", required=False)
scan_type = forms.ChoiceField(required=True, choices=get_choices_sorted)
environment = forms.ModelChoiceField(
queryset=Development_Environment.objects.all().order_by('name'))
endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label='Systems / Endpoints')
endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
version = forms.CharField(max_length=100, required=False, help_text="Version that was scanned.")
branch_tag = forms.CharField(max_length=100, required=False, help_text="Branch or Tag that was scanned.")
commit_hash = forms.CharField(max_length=100, required=False, help_text="Commit that was scanned.")
build_id = forms.CharField(max_length=100, required=False, help_text="ID of the build that was scanned.")
api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label='API Scan Configuration')
service = forms.CharField(max_length=200, required=False, help_text="A service is a self-contained piece of functionality within a Product. This is an optional field which is used in deduplication of findings when set.")
tags = TagField(required=False, help_text="Add tags that help describe this scan. "
"Choose from the list or add new tags. Press Enter key to add.")
file = forms.FileField(widget=forms.widgets.FileInput(
attrs={"accept": ".xml, .csv, .nessus, .json, .html, .js, .zip, .xlsx, .txt, .sarif"}),
label="Choose report file",
required=False)
close_old_findings = forms.BooleanField(help_text="Select if old findings no longer present in the report get closed as mitigated when importing."
"This affects the whole engagement/product depending on your deduplication scope.",
required=False, initial=False)
if settings.FEATURE_FINDING_GROUPS:
group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option.')
def __init__(self, *args, **kwargs):
super(ImportScanForm, self).__init__(*args, **kwargs)
# couldn't find a cleaner way to add empty default
if 'group_by' in self.fields:
choices = self.fields['group_by'].choices
choices.insert(0, ('', '---------'))
self.fields['group_by'].choices = choices
self.endpoints_to_add_list = []
def clean(self):
cleaned_data = super().clean()
scan_type = cleaned_data.get("scan_type")
file = cleaned_data.get("file")
if requires_file(scan_type) and not file:
raise forms.ValidationError('Uploading a Report File is required for {}'.format(scan_type))
tool_type = requires_tool_type(scan_type)
if tool_type:
api_scan_configuration = cleaned_data.get('api_scan_configuration')
if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name:
raise forms.ValidationError(f'API scan configuration must be of tool type {tool_type}')
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
# date can only be today or in the past, not the future
def clean_scan_date(self):
date = self.cleaned_data['scan_date']
        # scan_date is no longer defaulted to "today" at import time, so set it here if necessary
if not date:
return None
if date.date() > datetime.today().date():
raise forms.ValidationError("The date cannot be in the future!")
return date
def get_scan_type(self):
TGT_scan = self.cleaned_data['scan_type']
return TGT_scan
class ReImportScanForm(forms.Form):
scan_date = forms.DateTimeField(
required=True,
label="Scan Completion Date",
help_text="Scan completion date will be used on all findings.",
initial=datetime.now().strftime("%m/%d/%Y"),
widget=forms.TextInput(attrs={'class': 'datepicker'}))
minimum_severity = forms.ChoiceField(help_text='Minimum severity level to be imported',
required=True,
choices=SEVERITY_CHOICES[0:4])
active = forms.BooleanField(help_text="Select if these findings are currently active.", required=False, initial=True)
verified = forms.BooleanField(help_text="Select if these findings have been verified.", required=False)
endpoints = forms.ModelMultipleChoiceField(Endpoint.objects, required=False, label='Systems / Endpoints')
tags = TagField(required=False, help_text="Modify existing tags that help describe this scan. "
"Choose from the list or add new tags. Press Enter key to add.")
file = forms.FileField(widget=forms.widgets.FileInput(
attrs={"accept": ".xml, .csv, .nessus, .json, .html, .js, .zip, .xlsx, .txt, .sarif"}),
label="Choose report file",
required=False)
close_old_findings = forms.BooleanField(help_text="Select if old findings no longer present in the report get closed as mitigated when importing.",
required=False, initial=True)
version = forms.CharField(max_length=100, required=False, help_text="Version that will be set on existing Test object. Leave empty to leave existing value in place.")
branch_tag = forms.CharField(max_length=100, required=False, help_text="Branch or Tag that was scanned.")
commit_hash = forms.CharField(max_length=100, required=False, help_text="Commit that was scanned.")
build_id = forms.CharField(max_length=100, required=False, help_text="ID of the build that was scanned.")
api_scan_configuration = forms.ModelChoiceField(Product_API_Scan_Configuration.objects, required=False, label='API Scan Configuration')
service = forms.CharField(max_length=200, required=False, help_text="A service is a self-contained piece of functionality within a Product. This is an optional field which is used in deduplication of findings when set.")
if settings.FEATURE_FINDING_GROUPS:
group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option')
def __init__(self, *args, test=None, **kwargs):
super(ReImportScanForm, self).__init__(*args, **kwargs)
self.scan_type = None
if test:
self.scan_type = test.test_type.name
self.fields['tags'].initial = test.tags.all()
# couldn't find a cleaner way to add empty default
if 'group_by' in self.fields:
choices = self.fields['group_by'].choices
choices.insert(0, ('', '---------'))
self.fields['group_by'].choices = choices
def clean(self):
cleaned_data = super().clean()
file = cleaned_data.get("file")
if requires_file(self.scan_type) and not file:
raise forms.ValidationError("Uploading a report file is required for re-uploading findings.")
tool_type = requires_tool_type(self.scan_type)
if tool_type:
api_scan_configuration = cleaned_data.get('api_scan_configuration')
if api_scan_configuration and tool_type != api_scan_configuration.tool_configuration.tool_type.name:
raise forms.ValidationError(f'API scan configuration must be of tool type {tool_type}')
return cleaned_data
# date can only be today or in the past, not the future
def clean_scan_date(self):
date = self.cleaned_data['scan_date']
if date.date() > timezone.localtime(timezone.now()).date():
raise forms.ValidationError("The date cannot be in the future!")
return date
class ImportEndpointMetaForm(forms.Form):
file = forms.FileField(widget=forms.widgets.FileInput(
attrs={"accept": ".csv"}),
label="Choose meta file",
required=True) # Could not get required=True to actually accept the file as present
create_endpoints = forms.BooleanField(
label="Create nonexisting Endpoint",
initial=True,
required=False,
help_text="Create endpoints that do not already exist",)
create_tags = forms.BooleanField(
label="Add Tags",
initial=True,
required=False,
help_text="Add meta from file as tags in the format key:value",)
create_dojo_meta = forms.BooleanField(
label="Add Meta",
initial=False,
required=False,
help_text="Add data from file as Metadata. Metadata is used for displaying custom fields",)
def __init__(self, *args, **kwargs):
super(ImportEndpointMetaForm, self).__init__(*args, **kwargs)
class DoneForm(forms.Form):
done = forms.BooleanField()
class UploadThreatForm(forms.Form):
file = forms.FileField(widget=forms.widgets.FileInput(
attrs={"accept": ".jpg,.png,.pdf"}),
label="Select Threat Model")
class MergeFindings(forms.ModelForm):
FINDING_ACTION = (('', 'Select an Action'), ('inactive', 'Inactive'), ('delete', 'Delete'))
append_description = forms.BooleanField(label="Append Description", initial=True, required=False,
help_text="Description in all findings will be appended into the merged finding.")
add_endpoints = forms.BooleanField(label="Add Endpoints", initial=True, required=False,
help_text="Endpoints in all findings will be merged into the merged finding.")
dynamic_raw = forms.BooleanField(label="Dynamic Scanner Raw Requests", initial=True, required=False,
help_text="Dynamic scanner raw requests in all findings will be merged into the merged finding.")
tag_finding = forms.BooleanField(label="Add Tags", initial=True, required=False,
help_text="Tags in all findings will be merged into the merged finding.")
mark_tag_finding = forms.BooleanField(label="Tag Merged Finding", initial=True, required=False,
help_text="Creates a tag titled 'merged' for the finding that will be merged. If the 'Finding Action' is set to 'inactive' the inactive findings will be tagged with 'merged-inactive'.")
append_reference = forms.BooleanField(label="Append Reference", initial=True, required=False,
help_text="Reference in all findings will be appended into the merged finding.")
finding_action = forms.ChoiceField(
required=True,
choices=FINDING_ACTION,
label="Finding Action",
help_text="The action to take on the merged finding. Set the findings to inactive or delete the findings.")
def __init__(self, *args, **kwargs):
finding = kwargs.pop('finding')
findings = kwargs.pop('findings')
super(MergeFindings, self).__init__(*args, **kwargs)
self.fields['finding_to_merge_into'] = forms.ModelChoiceField(
            queryset=findings, initial=0, required=False, label="Finding to Merge Into", help_text="Findings selected below will be merged into this finding.")
# Exclude the finding to merge into from the findings to merge into
self.fields['findings_to_merge'] = forms.ModelMultipleChoiceField(
queryset=findings, required=True, label="Findings to Merge",
widget=forms.widgets.SelectMultiple(attrs={'size': 10}),
help_text=('Select the findings to merge.'))
self.field_order = ['finding_to_merge_into', 'findings_to_merge', 'append_description', 'add_endpoints', 'append_reference']
class Meta:
model = Finding
fields = ['append_description', 'add_endpoints', 'append_reference']
class EditRiskAcceptanceForm(forms.ModelForm):
# unfortunately django forces us to repeat many things here. choices, default, required etc.
recommendation = forms.ChoiceField(choices=Risk_Acceptance.TREATMENT_CHOICES, initial=Risk_Acceptance.TREATMENT_ACCEPT, widget=forms.RadioSelect, label="Security Recommendation")
decision = forms.ChoiceField(choices=Risk_Acceptance.TREATMENT_CHOICES, initial=Risk_Acceptance.TREATMENT_ACCEPT, widget=forms.RadioSelect)
path = forms.FileField(label="Proof", required=False, widget=forms.widgets.FileInput(attrs={"accept": ".jpg,.png,.pdf"}))
expiration_date = forms.DateTimeField(required=False, widget=forms.TextInput(attrs={'class': 'datepicker'}))
class Meta:
model = Risk_Acceptance
exclude = ['accepted_findings', 'notes']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['path'].help_text = 'Existing proof uploaded: %s' % self.instance.filename() if self.instance.filename() else 'None'
self.fields['expiration_date_warned'].disabled = True
self.fields['expiration_date_handled'].disabled = True
class RiskAcceptanceForm(EditRiskAcceptanceForm):
# path = forms.FileField(label="Proof", required=False, widget=forms.widgets.FileInput(attrs={"accept": ".jpg,.png,.pdf"}))
# expiration_date = forms.DateTimeField(required=False, widget=forms.TextInput(attrs={'class': 'datepicker'}))
accepted_findings = forms.ModelMultipleChoiceField(
queryset=Finding.objects.none(), required=True,
widget=forms.widgets.SelectMultiple(attrs={'size': 10}),
help_text=('Active, verified findings listed, please select to add findings.'))
notes = forms.CharField(required=False, max_length=2400,
widget=forms.Textarea,
label='Notes')
class Meta:
model = Risk_Acceptance
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
expiration_delta_days = get_system_setting('risk_acceptance_form_default_days')
logger.debug('expiration_delta_days: %i', expiration_delta_days)
if expiration_delta_days > 0:
expiration_date = timezone.now().date() + relativedelta(days=expiration_delta_days)
# logger.debug('setting default expiration_date: %s', expiration_date)
self.fields['expiration_date'].initial = expiration_date
# self.fields['path'].help_text = 'Existing proof uploaded: %s' % self.instance.filename() if self.instance.filename() else 'None'
self.fields['accepted_findings'].queryset = get_authorized_findings(Permissions.Risk_Acceptance)
class UploadFileForm(forms.ModelForm):
class Meta:
model = FileUpload
fields = ['title', 'file']
ManageFileFormSet = modelformset_factory(FileUpload, extra=3, max_num=10, fields=['title', 'file'], can_delete=True)
class ReplaceRiskAcceptanceProofForm(forms.ModelForm):
path = forms.FileField(label="Proof", required=True, widget=forms.widgets.FileInput(attrs={"accept": ".jpg,.png,.pdf"}))
class Meta:
model = Risk_Acceptance
fields = ['path']
class AddFindingsRiskAcceptanceForm(forms.ModelForm):
accepted_findings = forms.ModelMultipleChoiceField(
queryset=Finding.objects.none(), required=True,
widget=forms.widgets.SelectMultiple(attrs={'size': 10}),
help_text=('Select to add findings.'), label="Add findings as accepted:")
class Meta:
model = Risk_Acceptance
fields = ['accepted_findings']
# exclude = ('name', 'owner', 'path', 'notes', 'accepted_by', 'expiration_date', 'compensating_control')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['accepted_findings'].queryset = get_authorized_findings(Permissions.Risk_Acceptance)
class CheckForm(forms.ModelForm):
options = (('Pass', 'Pass'), ('Fail', 'Fail'), ('N/A', 'N/A'))
session_management = forms.ChoiceField(choices=options)
encryption_crypto = forms.ChoiceField(choices=options)
configuration_management = forms.ChoiceField(choices=options)
authentication = forms.ChoiceField(choices=options)
authorization_and_access_control = forms.ChoiceField(choices=options)
data_input_sanitization_validation = forms.ChoiceField(choices=options)
sensitive_data = forms.ChoiceField(choices=options)
other = forms.ChoiceField(choices=options)
def __init__(self, *args, **kwargs):
findings = kwargs.pop('findings')
super(CheckForm, self).__init__(*args, **kwargs)
self.fields['session_issues'].queryset = findings
self.fields['crypto_issues'].queryset = findings
self.fields['config_issues'].queryset = findings
self.fields['auth_issues'].queryset = findings
self.fields['author_issues'].queryset = findings
self.fields['data_issues'].queryset = findings
self.fields['sensitive_issues'].queryset = findings
self.fields['other_issues'].queryset = findings
class Meta:
model = Check_List
fields = ['session_management', 'session_issues', 'encryption_crypto', 'crypto_issues',
'configuration_management', 'config_issues', 'authentication', 'auth_issues',
'authorization_and_access_control', 'author_issues',
'data_input_sanitization_validation', 'data_issues',
'sensitive_data', 'sensitive_issues', 'other', 'other_issues', ]
class EngForm(forms.ModelForm):
name = forms.CharField(
max_length=300, required=False,
help_text="Add a descriptive name to identify this engagement. " +
"Without a name the target start date will be set.")
description = forms.CharField(widget=forms.Textarea(attrs={}),
required=False, help_text="Description of the engagement and details regarding the engagement.")
product = forms.ModelChoiceField(label='Product',
queryset=Product.objects.none(),
required=True)
target_start = forms.DateField(widget=forms.TextInput(
attrs={'class': 'datepicker', 'autocomplete': 'off'}))
target_end = forms.DateField(widget=forms.TextInput(
attrs={'class': 'datepicker', 'autocomplete': 'off'}))
lead = forms.ModelChoiceField(
queryset=None,
required=True, label="Testing Lead")
test_strategy = forms.URLField(required=False, label="Test Strategy URL")
def __init__(self, *args, **kwargs):
cicd = False
product = None
if 'cicd' in kwargs:
cicd = kwargs.pop('cicd')
if 'product' in kwargs:
product = kwargs.pop('product')
self.user = None
if 'user' in kwargs:
self.user = kwargs.pop('user')
super(EngForm, self).__init__(*args, **kwargs)
if product:
self.fields['preset'] = forms.ModelChoiceField(help_text="Settings and notes for performing this engagement.", required=False, queryset=Engagement_Presets.objects.filter(product=product))
self.fields['lead'].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View)
else:
self.fields['lead'].queryset = User.objects.exclude(is_staff=False)
self.fields['product'].queryset = get_authorized_products(Permissions.Engagement_Add)
        # Don't show CICD fields on an interactive engagement
if cicd is False:
del self.fields['build_id']
del self.fields['commit_hash']
del self.fields['branch_tag']
del self.fields['build_server']
del self.fields['source_code_management_server']
# del self.fields['source_code_management_uri']
del self.fields['orchestration_engine']
else:
del self.fields['test_strategy']
del self.fields['status']
def is_valid(self):
valid = super(EngForm, self).is_valid()
# we're done now if not valid
if not valid:
return valid
if self.cleaned_data['target_start'] > self.cleaned_data['target_end']:
self.add_error('target_start', 'Your target start date exceeds your target end date')
self.add_error('target_end', 'Your target start date exceeds your target end date')
return False
return True
class Meta:
model = Engagement
exclude = ('first_contacted', 'real_start', 'engagement_type',
'real_end', 'requester', 'reason', 'updated', 'report_type',
'product', 'threat_model', 'api_test', 'pen_test', 'check_list')
class DeleteEngagementForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Engagement
fields = ['id']
class TestForm(forms.ModelForm):
title = forms.CharField(max_length=255, required=False)
description = forms.CharField(widget=forms.Textarea(attrs={'rows': '3'}), required=False)
test_type = forms.ModelChoiceField(queryset=Test_Type.objects.all().order_by('name'))
environment = forms.ModelChoiceField(
queryset=Development_Environment.objects.all().order_by('name'))
# credential = forms.ModelChoiceField(Cred_User.objects.all(), required=False)
target_start = forms.DateTimeField(widget=forms.TextInput(
attrs={'class': 'datepicker', 'autocomplete': 'off'}))
target_end = forms.DateTimeField(widget=forms.TextInput(
attrs={'class': 'datepicker', 'autocomplete': 'off'}))
lead = forms.ModelChoiceField(
queryset=None,
required=False, label="Testing Lead")
def __init__(self, *args, **kwargs):
obj = None
if 'engagement' in kwargs:
obj = kwargs.pop('engagement')
if 'instance' in kwargs:
obj = kwargs.get('instance')
super(TestForm, self).__init__(*args, **kwargs)
if obj:
product = get_product(obj)
self.fields['lead'].queryset = get_authorized_users_for_product_and_product_type(None, product, Permissions.Product_View)
self.fields['api_scan_configuration'].queryset = Product_API_Scan_Configuration.objects.filter(product=product)
else:
self.fields['lead'].queryset = User.objects.exclude(is_staff=False)
class Meta:
model = Test
fields = ['title', 'test_type', 'target_start', 'target_end', 'description',
'environment', 'percent_complete', 'tags', 'lead', 'version', 'branch_tag', 'build_id', 'commit_hash',
'api_scan_configuration']
class DeleteTestForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Test
fields = ['id']
class AddFindingForm(forms.ModelForm):
title = forms.CharField(max_length=1000)
date = forms.DateField(required=True,
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
cwe = forms.IntegerField(required=False)
cve = forms.CharField(max_length=28, required=False)
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
description = forms.CharField(widget=forms.Textarea)
severity = forms.ChoiceField(
choices=SEVERITY_CHOICES,
error_messages={
            'required': 'Severity is required',
            'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
mitigation = forms.CharField(widget=forms.Textarea, required=False)
impact = forms.CharField(widget=forms.Textarea, required=False)
request = forms.CharField(widget=forms.Textarea, required=False)
response = forms.CharField(widget=forms.Textarea, required=False)
    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
references = forms.CharField(widget=forms.Textarea, required=False)
publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
    # the only reliable way to get predictable ordering without hacking internal fields is to make it explicit
field_order = ('title', 'date', 'cwe', 'cve', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce',
'severity_justification', 'endpoints', 'endpoints_to_add', 'references', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope',
'risk_accepted', 'under_defect_review')
def __init__(self, *args, **kwargs):
req_resp = kwargs.pop('req_resp')
product = None
if 'product' in kwargs:
product = kwargs.pop('product')
super(AddFindingForm, self).__init__(*args, **kwargs)
if product:
self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
if req_resp:
self.fields['request'].initial = req_resp[0]
self.fields['response'].initial = req_resp[1]
self.endpoints_to_add_list = []
def clean(self):
cleaned_data = super(AddFindingForm, self).clean()
if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']):
raise forms.ValidationError('Duplicate findings cannot be'
' verified or active')
if cleaned_data['false_p'] and cleaned_data['verified']:
raise forms.ValidationError('False positive findings cannot '
'be verified.')
if cleaned_data['active'] and 'risk_accepted' in cleaned_data and cleaned_data['risk_accepted']:
raise forms.ValidationError('Active findings cannot '
'be risk accepted.')
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
class Meta:
model = Finding
exclude = ('reporter', 'url', 'numerical_severity', 'endpoint', 'under_review', 'reviewers',
'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'endpoint_status', 'sla_start_date')
class AdHocFindingForm(forms.ModelForm):
title = forms.CharField(max_length=1000)
date = forms.DateField(required=True,
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
cwe = forms.IntegerField(required=False)
cve = forms.CharField(max_length=28, required=False)
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
description = forms.CharField(widget=forms.Textarea)
severity = forms.ChoiceField(
choices=SEVERITY_CHOICES,
error_messages={
            'required': 'Severity is required',
            'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
mitigation = forms.CharField(widget=forms.Textarea, required=False)
impact = forms.CharField(widget=forms.Textarea, required=False)
request = forms.CharField(widget=forms.Textarea, required=False)
response = forms.CharField(widget=forms.Textarea, required=False)
endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
references = forms.CharField(widget=forms.Textarea, required=False)
publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
    # the only reliable way to get predictable ordering without hacking internal fields is to make it explicit
field_order = ('title', 'date', 'cwe', 'cve', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'request', 'response', 'steps_to_reproduce',
'severity_justification', 'endpoints', 'endpoints_to_add', 'references', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope',
'risk_accepted', 'under_defect_review', 'sla_start_date')
def __init__(self, *args, **kwargs):
req_resp = kwargs.pop('req_resp')
product = None
if 'product' in kwargs:
product = kwargs.pop('product')
super(AdHocFindingForm, self).__init__(*args, **kwargs)
if product:
self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
if req_resp:
self.fields['request'].initial = req_resp[0]
self.fields['response'].initial = req_resp[1]
self.endpoints_to_add_list = []
def clean(self):
cleaned_data = super(AdHocFindingForm, self).clean()
if ((cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']):
raise forms.ValidationError('Duplicate findings cannot be'
' verified or active')
if cleaned_data['false_p'] and cleaned_data['verified']:
raise forms.ValidationError('False positive findings cannot '
'be verified.')
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
class Meta:
model = Finding
exclude = ('reporter', 'url', 'numerical_severity', 'under_review', 'reviewers',
'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'endpoint_status', 'sla_start_date')
class PromoteFindingForm(forms.ModelForm):
title = forms.CharField(max_length=1000)
date = forms.DateField(required=True,
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
cwe = forms.IntegerField(required=False)
cve = forms.CharField(max_length=28, required=False)
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
description = forms.CharField(widget=forms.Textarea)
severity = forms.ChoiceField(
choices=SEVERITY_CHOICES,
error_messages={
            'required': 'Severity is required',
            'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
mitigation = forms.CharField(widget=forms.Textarea, required=False)
impact = forms.CharField(widget=forms.Textarea, required=False)
    endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
references = forms.CharField(widget=forms.Textarea, required=False)
    # the only reliable way to get predictable ordering without hacking internal fields is to make it explicit
field_order = ('title', 'group', 'date', 'sla_start_date', 'cwe', 'cve', 'severity', 'cvssv3', 'cvssv3_score', 'description', 'mitigation', 'impact',
'request', 'response', 'steps_to_reproduce', 'severity_justification', 'endpoints', 'endpoints_to_add', 'references',
'active', 'mitigated', 'mitigated_by', 'verified', 'false_p', 'duplicate',
'out_of_scope', 'risk_accept', 'under_defect_review')
def __init__(self, *args, **kwargs):
product = None
if 'product' in kwargs:
product = kwargs.pop('product')
super(PromoteFindingForm, self).__init__(*args, **kwargs)
if product:
self.fields['endpoints'].queryset = Endpoint.objects.filter(product=product)
self.endpoints_to_add_list = []
def clean(self):
cleaned_data = super(PromoteFindingForm, self).clean()
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
class Meta:
model = Finding
exclude = ('reporter', 'url', 'numerical_severity', 'active', 'false_p', 'verified', 'endpoint_status',
'duplicate', 'out_of_scope', 'under_review', 'reviewers', 'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change')
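# Widget/field pair that renders a single datetime value as separate date and time inputs.
# FindingForm below uses SplitDateTimeField for its editable `mitigated` timestamp.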
class SplitDateTimeWidget(forms.MultiWidget):
supports_microseconds = False
template_name = 'dojo/field-datetime.html'
def __init__(self):
widgets = (
forms.TextInput(attrs={'type': 'date', 'autocomplete': 'off'}),
forms.TextInput(attrs={'type': 'time', 'autocomplete': 'off'}),
)
super().__init__(widgets)
def decompress(self, value):
if value:
value = form_utils.to_current_timezone(value)
return [value.date(), value.time()]
return [None, None]
class SplitDateTimeField(forms.MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = forms.SplitHiddenDateTimeWidget
def __init__(self, **kwargs):
fields = (
forms.DateField(),
forms.TimeField(),
)
super().__init__(fields, **kwargs)
def compress(self, data_list):
if data_list:
# preserve default dojo behavior and set current time if any part is empty
if data_list[0] in self.empty_values:
selected_date = date.today()
else:
selected_date = data_list[0]
if data_list[1] in self.empty_values:
selected_time = datetime.now().time()
else:
selected_time = data_list[1]
# keep potential tzinfo
return form_utils.from_current_timezone(datetime.combine(selected_date, selected_time, *data_list[2:]))
return None
class FindingForm(forms.ModelForm):
title = forms.CharField(max_length=1000)
group = forms.ModelChoiceField(required=False, queryset=Finding_Group.objects.none(), help_text='The Finding Group to which this finding belongs, leave empty to remove the finding from the group. Groups can only be created via Bulk Edit for now.')
date = forms.DateField(required=True,
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
cwe = forms.IntegerField(required=False)
cve = forms.CharField(max_length=28, required=False, strip=False)
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
description = forms.CharField(widget=forms.Textarea)
severity = forms.ChoiceField(
choices=SEVERITY_CHOICES,
error_messages={
            'required': 'Severity is required',
            'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
mitigation = forms.CharField(widget=forms.Textarea, required=False)
impact = forms.CharField(widget=forms.Textarea, required=False)
request = forms.CharField(widget=forms.Textarea, required=False)
response = forms.CharField(widget=forms.Textarea, required=False)
endpoints = forms.ModelMultipleChoiceField(queryset=Endpoint.objects.none(), required=False, label='Systems / Endpoints')
endpoints_to_add = forms.CharField(max_length=5000, required=False, label="Endpoints to add",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '3', 'cols': '400'}))
references = forms.CharField(widget=forms.Textarea, required=False)
mitigated = SplitDateTimeField(required=False, help_text='Date and time when the flaw has been fixed')
mitigated_by = forms.ModelChoiceField(required=True, queryset=User.objects.all(), initial=get_current_user)
publish_date = forms.DateField(widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}), required=False)
    # the only reliable way to get predictable ordering without hacking internal fields is to make it explicit
field_order = ('title', 'group', 'date', 'sla_start_date', 'cwe', 'cve', 'severity', 'cvssv3', 'cvssv3_score', 'description', 'mitigation', 'impact',
'request', 'response', 'steps_to_reproduce', 'severity_justification', 'endpoints', 'endpoints_to_add', 'references',
'active', 'mitigated', 'mitigated_by', 'verified', 'false_p', 'duplicate',
'out_of_scope', 'risk_accept', 'under_defect_review')
def __init__(self, *args, **kwargs):
req_resp = None
if 'req_resp' in kwargs:
req_resp = kwargs.pop('req_resp')
        self.can_edit_mitigated_data = kwargs.pop('can_edit_mitigated_data', False)
super(FindingForm, self).__init__(*args, **kwargs)
self.fields['endpoints'].queryset = Endpoint.objects.filter(product=self.instance.test.engagement.product)
# do not show checkbox if finding is not accepted and simple risk acceptance is disabled
# if checked, always show to allow unaccept also with full risk acceptance enabled
# when adding from template, we don't have access to the test. quickfix for now to just hide simple risk acceptance
if not hasattr(self.instance, 'test') or (not self.instance.risk_accepted and not self.instance.test.engagement.product.enable_simple_risk_acceptance):
del self.fields['risk_accepted']
else:
if self.instance.risk_accepted:
self.fields['risk_accepted'].help_text = "Uncheck to unaccept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."
elif self.instance.test.engagement.product.enable_simple_risk_acceptance:
self.fields['risk_accepted'].help_text = "Check to accept the risk. Use full risk acceptance from the dropdown menu if you need advanced settings such as an expiry date."
# self.fields['tags'].widget.choices = t
if req_resp:
self.fields['request'].initial = req_resp[0]
self.fields['response'].initial = req_resp[1]
if self.instance.duplicate:
self.fields['duplicate'].help_text = "Original finding that is being duplicated here (readonly). Use view finding page to manage duplicate relationships. Unchecking duplicate here will reset this findings duplicate status, but will trigger deduplication logic."
else:
self.fields['duplicate'].help_text = "You can mark findings as duplicate only from the view finding page."
self.fields['sla_start_date'].disabled = True
if self.can_edit_mitigated_data:
if hasattr(self, 'instance'):
self.fields['mitigated'].initial = self.instance.mitigated
self.fields['mitigated_by'].initial = self.instance.mitigated_by
else:
del self.fields['mitigated']
del self.fields['mitigated_by']
if not settings.FEATURE_FINDING_GROUPS or not hasattr(self.instance, 'test'):
del self.fields['group']
else:
self.fields['group'].queryset = self.instance.test.finding_group_set.all()
self.fields['group'].initial = self.instance.finding_group
self.endpoints_to_add_list = []
def clean(self):
cleaned_data = super(FindingForm, self).clean()
cleaned_data['cve'] = None if cleaned_data['cve'] == '' else cleaned_data['cve']
if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']:
raise forms.ValidationError('Duplicate findings cannot be'
' verified or active')
if cleaned_data['false_p'] and cleaned_data['verified']:
raise forms.ValidationError('False positive findings cannot '
'be verified.')
if cleaned_data['active'] and 'risk_accepted' in cleaned_data and cleaned_data['risk_accepted']:
raise forms.ValidationError('Active findings cannot '
'be risk accepted.')
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data['endpoints_to_add'])
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
def _post_clean(self):
super(FindingForm, self)._post_clean()
if self.can_edit_mitigated_data:
opts = self.instance._meta
try:
opts.get_field('mitigated').save_form_data(self.instance, self.cleaned_data.get('mitigated'))
opts.get_field('mitigated_by').save_form_data(self.instance, self.cleaned_data.get('mitigated_by'))
except forms.ValidationError as e:
self._update_errors(e)
class Meta:
model = Finding
exclude = ('reporter', 'url', 'numerical_severity', 'under_review', 'reviewers',
'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'sonarqube_issue', 'endpoint_status')
class StubFindingForm(forms.ModelForm):
title = forms.CharField(required=True, max_length=1000)
class Meta:
model = Stub_Finding
order = ('title',)
exclude = (
'date', 'description', 'severity', 'reporter', 'test', 'is_mitigated')
def clean(self):
cleaned_data = super(StubFindingForm, self).clean()
        if not cleaned_data.get('title'):
            raise forms.ValidationError("The title is required.")
return cleaned_data
class ApplyFindingTemplateForm(forms.Form):
title = forms.CharField(max_length=1000, required=True)
cwe = forms.IntegerField(label="CWE", required=False)
cve = forms.CharField(label="CVE", max_length=28, required=False)
cvssv3 = forms.CharField(label="CVSSv3", max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'btn btn-secondary dropdown-toggle', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
    severity = forms.ChoiceField(required=False, choices=SEVERITY_CHOICES, error_messages={'required': 'Severity is required', 'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
description = forms.CharField(widget=forms.Textarea)
mitigation = forms.CharField(widget=forms.Textarea, required=False)
impact = forms.CharField(widget=forms.Textarea, required=False)
references = forms.CharField(widget=forms.Textarea, required=False)
tags = TagField(required=False, help_text="Add tags that help describe this finding template. Choose from the list or add new tags. Press Enter key to add.", initial=Finding.tags.tag_model.objects.all().order_by('name'))
def __init__(self, template=None, *args, **kwargs):
super(ApplyFindingTemplateForm, self).__init__(*args, **kwargs)
self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name')
self.template = template
def clean(self):
cleaned_data = super(ApplyFindingTemplateForm, self).clean()
        if not cleaned_data.get('title'):
            raise forms.ValidationError("The title is required.")
return cleaned_data
class Meta:
fields = ['title', 'cwe', 'cve', 'cvssv3', 'severity', 'description', 'mitigation', 'impact', 'references', 'tags']
order = ('title', 'cwe', 'cve', 'cvssv3', 'severity', 'description', 'impact', 'is_mitigated')
class FindingTemplateForm(forms.ModelForm):
apply_to_findings = forms.BooleanField(required=False, help_text="Apply template to all findings that match this CWE. (Update will overwrite mitigation, impact and references for any active, verified findings.)")
title = forms.CharField(max_length=1000, required=True)
cwe = forms.IntegerField(label="CWE", required=False)
cve = forms.CharField(label="CVE", max_length=28, required=False)
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'btn btn-secondary dropdown-toggle', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
severity = forms.ChoiceField(
required=False,
choices=SEVERITY_CHOICES,
error_messages={
            'required': 'Severity is required',
            'invalid_choice': 'Select valid choice: Critical, High, Medium, Low'})
field_order = ['title', 'cwe', 'cve', 'severity', 'cvssv3', 'description', 'mitigation', 'impact', 'references', 'tags', 'template_match', 'template_match_cwe', 'template_match_title', 'apply_to_findings']
def __init__(self, *args, **kwargs):
super(FindingTemplateForm, self).__init__(*args, **kwargs)
self.fields['tags'].autocomplete_tags = Finding.tags.tag_model.objects.all().order_by('name')
class Meta:
model = Finding_Template
order = ('title', 'cwe', 'cve', 'cvssv3', 'severity', 'description', 'impact')
exclude = ('numerical_severity', 'is_mitigated', 'last_used', 'endpoint_status')
class DeleteFindingTemplateForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Finding_Template
fields = ['id']
class FindingBulkUpdateForm(forms.ModelForm):
status = forms.BooleanField(required=False)
risk_acceptance = forms.BooleanField(required=False)
risk_accept = forms.BooleanField(required=False)
risk_unaccept = forms.BooleanField(required=False)
finding_group = forms.BooleanField(required=False)
finding_group_create = forms.BooleanField(required=False)
finding_group_create_name = forms.CharField(required=False)
finding_group_add = forms.BooleanField(required=False)
add_to_finding_group = forms.BooleanField(required=False)
finding_group_remove = forms.BooleanField(required=False)
finding_group_by = forms.BooleanField(required=False)
finding_group_by_option = forms.CharField(required=False)
push_to_jira = forms.BooleanField(required=False)
# unlink_from_jira = forms.BooleanField(required=False)
push_to_github = forms.BooleanField(required=False)
tags = TagField(required=False, autocomplete_tags=Finding.tags.tag_model.objects.all().order_by('name'))
def __init__(self, *args, **kwargs):
super(FindingBulkUpdateForm, self).__init__(*args, **kwargs)
self.fields['severity'].required = False
# we need to defer initialization to prevent multiple initializations if other forms are shown
self.fields['tags'].widget.tag_options = tagulous.models.options.TagOptions(autocomplete_settings={'width': '200px', 'defer': True})
def clean(self):
cleaned_data = super(FindingBulkUpdateForm, self).clean()
if (cleaned_data['active'] or cleaned_data['verified']) and cleaned_data['duplicate']:
raise forms.ValidationError('Duplicate findings cannot be'
' verified or active')
if cleaned_data['false_p'] and cleaned_data['verified']:
raise forms.ValidationError('False positive findings cannot '
'be verified.')
return cleaned_data
class Meta:
model = Finding
fields = ('severity', 'active', 'verified', 'false_p', 'duplicate', 'out_of_scope', 'is_mitigated')
class EditEndpointForm(forms.ModelForm):
class Meta:
model = Endpoint
exclude = ['product', 'endpoint_status']
def __init__(self, *args, **kwargs):
self.product = None
self.endpoint_instance = None
super(EditEndpointForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
self.endpoint_instance = kwargs.pop('instance')
self.product = self.endpoint_instance.product
def clean(self):
cleaned_data = super(EditEndpointForm, self).clean()
protocol = cleaned_data['protocol']
userinfo = cleaned_data['userinfo']
host = cleaned_data['host']
port = cleaned_data['port']
path = cleaned_data['path']
query = cleaned_data['query']
fragment = cleaned_data['fragment']
endpoint = endpoint_filter(
protocol=protocol,
userinfo=userinfo,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
product=self.product
)
if endpoint.count() > 1 or (endpoint.count() == 1 and endpoint.first().pk != self.endpoint_instance.pk):
raise forms.ValidationError(
'It appears as though an endpoint with this data already exists for this product.',
code='invalid')
return cleaned_data
class AddEndpointForm(forms.Form):
endpoint = forms.CharField(max_length=5000, required=True, label="Endpoint(s)",
help_text="The IP address, host name or full URL. You may enter one endpoint per line. "
"Each must be valid.",
widget=forms.widgets.Textarea(attrs={'rows': '15', 'cols': '400'}))
product = forms.CharField(required=True,
widget=forms.widgets.HiddenInput(), help_text="The product this endpoint should be "
"associated with.")
tags = TagField(required=False,
help_text="Add tags that help describe this endpoint. "
"Choose from the list or add new tags. Press Enter key to add.")
def __init__(self, *args, **kwargs):
product = None
if 'product' in kwargs:
product = kwargs.pop('product')
super(AddEndpointForm, self).__init__(*args, **kwargs)
self.fields['product'] = forms.ModelChoiceField(queryset=get_authorized_products(Permissions.Endpoint_Add))
if product is not None:
self.fields['product'].initial = product.id
self.product = product
self.endpoints_to_process = []
def save(self):
processed_endpoints = []
for e in self.endpoints_to_process:
endpoint, created = endpoint_get_or_create(
protocol=e[0],
userinfo=e[1],
host=e[2],
port=e[3],
path=e[4],
query=e[5],
fragment=e[6],
product=self.product
)
processed_endpoints.append(endpoint)
return processed_endpoints
def clean(self):
cleaned_data = super(AddEndpointForm, self).clean()
if 'endpoint' in cleaned_data and 'product' in cleaned_data:
endpoint = cleaned_data['endpoint']
product = cleaned_data['product']
if isinstance(product, Product):
self.product = product
else:
self.product = Product.objects.get(id=int(product))
else:
raise forms.ValidationError('Please enter a valid URL or IP address.',
code='invalid')
endpoints_to_add_list, errors = validate_endpoints_to_add(endpoint)
if errors:
raise forms.ValidationError(errors)
else:
self.endpoints_to_process = endpoints_to_add_list
return cleaned_data
class DeleteEndpointForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Endpoint
fields = ['id']
class NoteForm(forms.ModelForm):
entry = forms.CharField(max_length=2400, widget=forms.Textarea(attrs={'rows': 4, 'cols': 15}),
label='Notes:')
class Meta:
model = Notes
fields = ['entry', 'private']
class TypedNoteForm(NoteForm):
def __init__(self, *args, **kwargs):
queryset = kwargs.pop('available_note_types')
super(TypedNoteForm, self).__init__(*args, **kwargs)
self.fields['note_type'] = forms.ModelChoiceField(queryset=queryset, label='Note Type', required=True)
    class Meta:
model = Notes
fields = ['note_type', 'entry', 'private']
class DeleteNoteForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Notes
fields = ['id']
class CloseFindingForm(forms.ModelForm):
entry = forms.CharField(
required=True, max_length=2400,
widget=forms.Textarea, label='Notes:',
error_messages={'required': ('The reason for closing a finding is '
'required, please use the text area '
'below to provide documentation.')})
def __init__(self, *args, **kwargs):
queryset = kwargs.pop('missing_note_types')
super(CloseFindingForm, self).__init__(*args, **kwargs)
if len(queryset) == 0:
self.fields['note_type'].widget = forms.HiddenInput()
else:
self.fields['note_type'] = forms.ModelChoiceField(queryset=queryset, label='Note Type', required=True)
class Meta:
model = Notes
fields = ['note_type', 'entry']
class DefectFindingForm(forms.ModelForm):
CLOSE_CHOICES = (("Close Finding", "Close Finding"), ("Not Fixed", "Not Fixed"))
defect_choice = forms.ChoiceField(required=True, choices=CLOSE_CHOICES)
entry = forms.CharField(
required=True, max_length=2400,
widget=forms.Textarea, label='Notes:',
error_messages={'required': ('The reason for closing a finding is '
'required, please use the text area '
'below to provide documentation.')})
class Meta:
model = Notes
fields = ['entry']
class ClearFindingReviewForm(forms.ModelForm):
entry = forms.CharField(
required=True, max_length=2400,
help_text='Please provide a message.',
widget=forms.Textarea, label='Notes:',
error_messages={'required': ('The reason for clearing a review is '
'required, please use the text area '
'below to provide documentation.')})
class Meta:
model = Finding
fields = ['active', 'verified', 'false_p', 'out_of_scope', 'duplicate']
class ReviewFindingForm(forms.Form):
reviewers = forms.ModelMultipleChoiceField(queryset=Dojo_User.objects.filter(is_staff=True, is_active=True),
help_text="Select all users who can review Finding.")
entry = forms.CharField(
required=True, max_length=2400,
help_text='Please provide a message for reviewers.',
widget=forms.Textarea, label='Notes:',
error_messages={'required': ('The reason for requesting a review is '
'required, please use the text area '
'below to provide documentation.')})
def __init__(self, *args, **kwargs):
finding = None
if 'finding' in kwargs:
finding = kwargs.pop('finding')
super(ReviewFindingForm, self).__init__(*args, **kwargs)
if finding is not None:
self.fields['reviewers'].queryset = get_authorized_users_for_product_and_product_type(None, finding.test.engagement.product, Permissions.Finding_Edit)
class Meta:
fields = ['reviewers', 'entry']
class WeeklyMetricsForm(forms.Form):
dates = forms.ChoiceField()
def __init__(self, *args, **kwargs):
super(WeeklyMetricsForm, self).__init__(*args, **kwargs)
wmf_options = []
for i in range(6):
# Weeks start on Monday
curr = datetime.now() - relativedelta(weeks=i)
start_of_period = curr - relativedelta(weeks=1, weekday=0,
hour=0, minute=0, second=0)
end_of_period = curr + relativedelta(weeks=0, weekday=0,
hour=0, minute=0, second=0)
wmf_options.append((end_of_period.strftime("%b %d %Y %H %M %S %Z"),
start_of_period.strftime("%b %d") +
" - " + end_of_period.strftime("%b %d")))
wmf_options = tuple(wmf_options)
self.fields['dates'].choices = wmf_options
class SimpleMetricsForm(forms.Form):
date = forms.DateField(
label="",
widget=MonthYearWidget())
class SimpleSearchForm(forms.Form):
query = forms.CharField(required=False)
class DateRangeMetrics(forms.Form):
    start_date = forms.DateField(required=True, label="From",
                                 widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
    end_date = forms.DateField(required=True,
                               label="To",
                               widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
class MetricsFilterForm(forms.Form):
start_date = forms.DateField(required=False,
label="To",
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
end_date = forms.DateField(required=False,
label="From",
widget=forms.TextInput(attrs={'class': 'datepicker', 'autocomplete': 'off'}))
finding_status = forms.MultipleChoiceField(
required=False,
widget=forms.CheckboxSelectMultiple,
choices=FINDING_STATUS,
label="Status")
severity = forms.MultipleChoiceField(required=False,
choices=(('Low', 'Low'),
('Medium', 'Medium'),
('High', 'High'),
('Critical', 'Critical')),
help_text=('Hold down "Control", or '
'"Command" on a Mac, to '
'select more than one.'))
exclude_product_types = forms.ModelMultipleChoiceField(
required=False, queryset=Product_Type.objects.all().order_by('name'))
# add the ability to exclude the exclude_product_types field
def __init__(self, *args, **kwargs):
        exclude_product_types = kwargs.pop('exclude_product_types', False)
super(MetricsFilterForm, self).__init__(*args, **kwargs)
if exclude_product_types:
del self.fields['exclude_product_types']
class DojoGroupForm(forms.ModelForm):
name = forms.CharField(max_length=255, required=True)
description = forms.CharField(widget=forms.Textarea(attrs={}), required=False)
class Meta:
model = Dojo_Group
fields = ['name', 'description']
exclude = ['users']
class DeleteGroupForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Dojo_Group
fields = ['id']
class Add_Group_MemberForm(forms.ModelForm):
users = forms.ModelMultipleChoiceField(queryset=Dojo_Group_Member.objects.none(), required=True, label='Users')
def __init__(self, *args, **kwargs):
super(Add_Group_MemberForm, self).__init__(*args, **kwargs)
self.fields['group'].disabled = True
current_members = Dojo_Group_Member.objects.filter(group=self.initial['group']).values_list('user', flat=True)
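        # offer only active, non-superuser users that are not already members of this group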
self.fields['users'].queryset = Dojo_User.objects.exclude(
Q(is_superuser=True) |
Q(id__in=current_members)).exclude(is_active=False).order_by('first_name', 'last_name')
self.fields['role'].queryset = get_group_member_roles()
class Meta:
model = Dojo_Group_Member
fields = ['group', 'users', 'role']
class Add_Group_Member_UserForm(forms.ModelForm):
groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups')
def __init__(self, *args, **kwargs):
super(Add_Group_Member_UserForm, self).__init__(*args, **kwargs)
self.fields['user'].disabled = True
current_groups = Dojo_Group_Member.objects.filter(user=self.initial['user']).values_list('group', flat=True)
self.fields['groups'].queryset = Dojo_Group.objects.exclude(id__in=current_groups)
self.fields['role'].queryset = get_group_member_roles()
class Meta:
model = Dojo_Group_Member
fields = ['groups', 'user', 'role']
class Edit_Group_MemberForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(Edit_Group_MemberForm, self).__init__(*args, **kwargs)
self.fields['group'].disabled = True
self.fields['user'].disabled = True
self.fields['role'].queryset = get_group_member_roles()
class Meta:
model = Dojo_Group_Member
fields = ['group', 'user', 'role']
class Delete_Group_MemberForm(Edit_Group_MemberForm):
def __init__(self, *args, **kwargs):
super(Delete_Group_MemberForm, self).__init__(*args, **kwargs)
self.fields['role'].disabled = True
class Add_Product_GroupForm(forms.ModelForm):
groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups')
def __init__(self, *args, **kwargs):
super(Add_Product_GroupForm, self).__init__(*args, **kwargs)
self.fields['product'].disabled = True
current_groups = Product_Group.objects.filter(product=self.initial["product"]).values_list('group', flat=True)
authorized_groups = get_authorized_groups(Permissions.Group_View)
authorized_groups = authorized_groups.exclude(id__in=current_groups)
self.fields['groups'].queryset = authorized_groups
class Meta:
model = Product_Group
fields = ['product', 'groups', 'role']
class Add_Product_Group_GroupForm(forms.ModelForm):
products = forms.ModelMultipleChoiceField(queryset=Product.objects.none(), required=True, label='Products')
def __init__(self, *args, **kwargs):
super(Add_Product_Group_GroupForm, self).__init__(*args, **kwargs)
current_members = Product_Group.objects.filter(group=self.initial["group"]).values_list('product', flat=True)
self.fields['products'].queryset = get_authorized_products(Permissions.Product_Member_Add_Owner) \
.exclude(id__in=current_members)
self.fields['group'].disabled = True
class Meta:
model = Product_Group
fields = ['products', 'group', 'role']
class Edit_Product_Group_Form(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(Edit_Product_Group_Form, self).__init__(*args, **kwargs)
self.fields['product'].disabled = True
self.fields['group'].disabled = True
class Meta:
model = Product_Group
fields = ['product', 'group', 'role']
class Delete_Product_GroupForm(Edit_Product_Group_Form):
def __init__(self, *args, **kwargs):
super(Delete_Product_GroupForm, self).__init__(*args, **kwargs)
self.fields['role'].disabled = True
class Add_Product_Type_GroupForm(forms.ModelForm):
groups = forms.ModelMultipleChoiceField(queryset=Dojo_Group.objects.none(), required=True, label='Groups')
def __init__(self, *args, **kwargs):
super(Add_Product_Type_GroupForm, self).__init__(*args, **kwargs)
current_groups = Product_Type_Group.objects.filter(product_type=self.initial["product_type"]).values_list('group', flat=True)
authorized_groups = get_authorized_groups(Permissions.Group_View)
authorized_groups = authorized_groups.exclude(id__in=current_groups)
self.fields['groups'].queryset = authorized_groups
self.fields['product_type'].disabled = True
class Meta:
model = Product_Type_Group
fields = ['product_type', 'groups', 'role']
class Add_Product_Type_Group_GroupForm(forms.ModelForm):
product_types = forms.ModelMultipleChoiceField(queryset=Product_Type.objects.none(), required=True, label='Product Types')
def __init__(self, *args, **kwargs):
super(Add_Product_Type_Group_GroupForm, self).__init__(*args, **kwargs)
current_members = Product_Type_Group.objects.filter(group=self.initial['group']).values_list('product_type', flat=True)
self.fields['product_types'].queryset = get_authorized_product_types(Permissions.Product_Type_Member_Add_Owner) \
.exclude(id__in=current_members)
self.fields['group'].disabled = True
class Meta:
model = Product_Type_Group
fields = ['product_types', 'group', 'role']
class Edit_Product_Type_Group_Form(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(Edit_Product_Type_Group_Form, self).__init__(*args, **kwargs)
self.fields['product_type'].disabled = True
self.fields['group'].disabled = True
class Meta:
model = Product_Type_Group
fields = ['product_type', 'group', 'role']
class Delete_Product_Type_GroupForm(Edit_Product_Type_Group_Form):
def __init__(self, *args, **kwargs):
super(Delete_Product_Type_GroupForm, self).__init__(*args, **kwargs)
self.fields['role'].disabled = True
class DojoUserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(DojoUserForm, self).__init__(*args, **kwargs)
if not get_current_user().is_superuser and not settings.USER_PROFILE_EDITABLE:
for field in self.fields:
self.fields[field].disabled = True
class Meta:
model = Dojo_User
exclude = ['password', 'last_login', 'is_superuser', 'groups',
'username', 'is_staff', 'is_active', 'date_joined',
'user_permissions']
class ChangePasswordForm(forms.Form):
current_password = forms.CharField(widget=forms.PasswordInput,
required=True)
new_password = forms.CharField(widget=forms.PasswordInput,
required=True, validators=[validate_password],
help_text='Password must contain at least 9 characters, one lowercase (a-z) and one uppercase (A-Z) letter, one number (0-9), \
and one symbol (()[]{}|\`~!@#$%^&*_-+=;:\'\",<>./?).') # noqa W605
confirm_password = forms.CharField(widget=forms.PasswordInput,
required=True, validators=[validate_password],
help_text='Password must match the new password entered above, following the same password rules.')
def __init__(self, *args, **kwargs):
self.user = None
if 'user' in kwargs:
self.user = kwargs.pop('user')
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super().clean()
current_password = self.cleaned_data.get('current_password')
new_password = self.cleaned_data.get('new_password')
confirm_password = self.cleaned_data.get('confirm_password')
if not self.user.check_password(current_password):
raise forms.ValidationError('Current password is incorrect.')
if new_password == current_password:
raise forms.ValidationError('New password must be different from current password.')
if new_password != confirm_password:
raise forms.ValidationError('Passwords do not match.')
return cleaned_data
class AddDojoUserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput,
required=False, validators=[validate_password],
help_text='Password must contain at least 9 characters, one lowercase (a-z) and one uppercase (A-Z) letter, one number (0-9), \
and one symbol (()[]{}|\`~!@#$%^&*_-+=;:\'\",<>./?). Leave blank to set an unusable password for this user.') # noqa W605
class Meta:
model = Dojo_User
fields = ['username', 'password', 'first_name', 'last_name', 'email', 'is_active',
'is_staff', 'is_superuser']
exclude = ['last_login', 'groups', 'date_joined', 'user_permissions',
'authorized_products', 'authorized_product_types']
class EditDojoUserForm(forms.ModelForm):
class Meta:
model = Dojo_User
fields = ['username', 'first_name', 'last_name', 'email', 'is_active',
'is_staff', 'is_superuser']
exclude = ['password', 'last_login', 'groups', 'date_joined', 'user_permissions',
'authorized_products', 'authorized_product_types']
class DeleteUserForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = User
fields = ['id']
class UserContactInfoForm(forms.ModelForm):
class Meta:
model = UserContactInfo
exclude = ['user', 'slack_user_id']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
current_user = get_current_user()
if not current_user.is_superuser:
del self.fields['force_password_reset']
if not settings.USER_PROFILE_EDITABLE:
for field in self.fields:
self.fields[field].disabled = True
class GlobalRoleForm(forms.ModelForm):
class Meta:
model = Global_Role
exclude = ['user', 'group']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
current_user = get_current_user()
if not current_user.is_superuser:
self.fields['role'].disabled = True
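# Year choices for ProductTypeCountsForm: the current year and the two preceding years.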
def get_years():
now = timezone.now()
return [(now.year, now.year), (now.year - 1, now.year - 1), (now.year - 2, now.year - 2)]
class ProductTypeCountsForm(forms.Form):
month = forms.ChoiceField(choices=list(MONTHS.items()), required=True, error_messages={
'required': '*'})
year = forms.ChoiceField(choices=get_years, required=True, error_messages={
'required': '*'})
product_type = forms.ModelChoiceField(required=True,
queryset=Product_Type.objects.none(),
error_messages={
'required': '*'})
def __init__(self, *args, **kwargs):
super(ProductTypeCountsForm, self).__init__(*args, **kwargs)
self.fields['product_type'].queryset = get_authorized_product_types(Permissions.Product_Type_View)
class APIKeyForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = User
exclude = ['username', 'first_name', 'last_name', 'email', 'is_active',
'is_staff', 'is_superuser', 'password', 'last_login', 'groups',
'date_joined', 'user_permissions']
class ReportOptionsForm(forms.Form):
yes_no = (('0', 'No'), ('1', 'Yes'))
include_finding_notes = forms.ChoiceField(choices=yes_no, label="Finding Notes")
include_finding_images = forms.ChoiceField(choices=yes_no, label="Finding Images")
include_executive_summary = forms.ChoiceField(choices=yes_no, label="Executive Summary")
include_table_of_contents = forms.ChoiceField(choices=yes_no, label="Table of Contents")
include_disclaimer = forms.ChoiceField(choices=yes_no, label="Disclaimer")
report_type = forms.ChoiceField(choices=(('HTML', 'HTML'), ('AsciiDoc', 'AsciiDoc')))
class CustomReportOptionsForm(forms.Form):
yes_no = (('0', 'No'), ('1', 'Yes'))
report_name = forms.CharField(required=False, max_length=100)
include_finding_notes = forms.ChoiceField(required=False, choices=yes_no)
include_finding_images = forms.ChoiceField(choices=yes_no, label="Finding Images")
report_type = forms.ChoiceField(choices=(('HTML', 'HTML'), ('AsciiDoc', 'AsciiDoc')))
class DeleteFindingForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Finding
fields = ['id']
class FindingFormID(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Finding
fields = ('id',)
class DeleteStubFindingForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Stub_Finding
fields = ['id']
class GITHUB_IssueForm(forms.ModelForm):
class Meta:
model = GITHUB_Issue
exclude = ['product']
class GITHUBForm(forms.ModelForm):
api_key = forms.CharField(widget=forms.PasswordInput, required=True)
class Meta:
model = GITHUB_Conf
exclude = ['product']
class DeleteGITHUBConfForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = GITHUB_Conf
fields = ['id']
class ExpressGITHUBForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput, required=True)
issue_key = forms.CharField(required=True, help_text='A valid issue ID is required to gather the necessary information.')
class Meta:
model = GITHUB_Conf
exclude = ['product', 'epic_name_id', 'open_status_key',
'close_status_key', 'info_mapping_severity',
'low_mapping_severity', 'medium_mapping_severity',
'high_mapping_severity', 'critical_mapping_severity', 'finding_text']
def get_jira_issue_template_dir_choices():
template_root = settings.JIRA_TEMPLATE_ROOT
template_dir_list = [('', '---')]
for base_dir, dirnames, filenames in os.walk(template_root):
# for filename in filenames:
# if base_dir.startswith(settings.TEMPLATE_DIR_PREFIX):
# base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):]
# template_list.append((os.path.join(base_dir, filename), filename))
for dirname in dirnames:
if base_dir.startswith(settings.TEMPLATE_DIR_PREFIX):
base_dir = base_dir[len(settings.TEMPLATE_DIR_PREFIX):]
template_dir_list.append((os.path.join(base_dir, dirname), dirname))
logger.debug('templates: %s', template_dir_list)
return template_dir_list
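# Rough shape of the resulting choices (hypothetical example; the actual entries depend on the
# template directories present under JIRA_TEMPLATE_ROOT):
#   [('', '---'), ('issue-trackers/jira_full', 'jira_full'), ...]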
JIRA_TEMPLATE_CHOICES = sorted(get_jira_issue_template_dir_choices())
class JIRA_IssueForm(forms.ModelForm):
class Meta:
model = JIRA_Issue
exclude = ['product']
class JIRAForm(forms.ModelForm):
issue_template_dir = forms.ChoiceField(required=False,
choices=JIRA_TEMPLATE_CHOICES,
help_text='Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.')
password = forms.CharField(widget=forms.PasswordInput, required=True)
def __init__(self, *args, **kwargs):
super(JIRAForm, self).__init__(*args, **kwargs)
if self.instance:
self.fields['password'].required = False
class Meta:
model = JIRA_Instance
exclude = ['']
def clean(self):
import dojo.jira_link.helper as jira_helper
form_data = self.cleaned_data
try:
jira = jira_helper.get_jira_connection_raw(form_data['url'], form_data['username'], form_data['password'])
logger.debug('valid JIRA config!')
except Exception as e:
# form only used by admins, so we can show full error message using str(e) which can help debug any problems
            message = 'Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge and network connection. Details in alert on top right. ' + str(e)
self.add_error('username', message)
self.add_error('password', message)
return form_data
class ExpressJIRAForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput, required=True)
issue_key = forms.CharField(required=True, help_text='A valid issue ID is required to gather the necessary information.')
class Meta:
model = JIRA_Instance
exclude = ['product', 'epic_name_id', 'open_status_key',
'close_status_key', 'info_mapping_severity',
'low_mapping_severity', 'medium_mapping_severity',
'high_mapping_severity', 'critical_mapping_severity', 'finding_text']
def clean(self):
import dojo.jira_link.helper as jira_helper
form_data = self.cleaned_data
try:
jira = jira_helper.get_jira_connection_raw(form_data['url'], form_data['username'], form_data['password'],)
logger.debug('valid JIRA config!')
except Exception as e:
# form only used by admins, so we can show full error message using str(e) which can help debug any problems
            message = 'Unable to authenticate to JIRA. Please check the URL, username, password, captcha challenge and network connection. Details in alert on top right. ' + str(e)
self.add_error('username', message)
self.add_error('password', message)
return form_data
class Benchmark_Product_SummaryForm(forms.ModelForm):
class Meta:
model = Benchmark_Product_Summary
exclude = ['product', 'current_level', 'benchmark_type', 'asvs_level_1_benchmark', 'asvs_level_1_score', 'asvs_level_2_benchmark', 'asvs_level_2_score', 'asvs_level_3_benchmark', 'asvs_level_3_score']
class DeleteBenchmarkForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Benchmark_Product_Summary
fields = ['id']
# class JIRA_ProjectForm(forms.ModelForm):
# class Meta:
# model = JIRA_Project
# exclude = ['product']
class Product_API_Scan_ConfigurationForm(forms.ModelForm):
    tool_configuration = forms.ModelChoiceField(
        label='Tool Configuration',
        queryset=Tool_Configuration.objects.all().order_by('name'),
        required=True,
    )
class Meta:
model = Product_API_Scan_Configuration
exclude = ['product']
class DeleteProduct_API_Scan_ConfigurationForm(forms.ModelForm):
id = forms.IntegerField(required=True, widget=forms.widgets.HiddenInput())
class Meta:
model = Product_API_Scan_Configuration
fields = ['id']
class DeleteJIRAInstanceForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = JIRA_Instance
fields = ['id']
class ToolTypeForm(forms.ModelForm):
class Meta:
model = Tool_Type
exclude = ['product']
class RegulationForm(forms.ModelForm):
class Meta:
model = Regulation
exclude = ['product']
class LanguagesTypeForm(forms.ModelForm):
class Meta:
model = Languages
exclude = ['product']
class Languages_TypeTypeForm(forms.ModelForm):
class Meta:
model = Language_Type
exclude = ['product']
class AppAnalysisForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=Dojo_User.objects.exclude(is_active=False).order_by('first_name', 'last_name'), required=True)
class Meta:
model = App_Analysis
exclude = ['product']
class DeleteAppAnalysisForm(forms.ModelForm):
class Meta:
model = App_Analysis
exclude = ['product', 'tags']
def __init__(self, *args, **kwargs):
super(DeleteAppAnalysisForm, self).__init__(*args, **kwargs)
self.fields['name'].disabled = True
self.fields['user'].disabled = True
self.fields['confidence'].disabled = True
self.fields['version'].disabled = True
self.fields['icon'].disabled = True
self.fields['website'].disabled = True
self.fields['website_found'].disabled = True
class ToolConfigForm(forms.ModelForm):
tool_type = forms.ModelChoiceField(queryset=Tool_Type.objects.all(), label='Tool Type')
ssh = forms.CharField(widget=forms.Textarea(attrs={}), required=False, label='SSH Key')
class Meta:
model = Tool_Configuration
exclude = ['product']
def clean(self):
from django.core.validators import URLValidator
form_data = self.cleaned_data
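        # 'ssh' is accepted alongside http/https since tool configurations may reference SSH endpoints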
try:
if form_data["url"] is not None:
url_validator = URLValidator(schemes=['ssh', 'http', 'https'])
url_validator(form_data["url"])
except forms.ValidationError:
raise forms.ValidationError(
'It does not appear as though this endpoint is a valid URL/SSH or IP address.',
code='invalid')
return form_data
class DeleteObjectsSettingsForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Objects_Product
fields = ['id']
class DeleteToolProductSettingsForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Tool_Product_Settings
fields = ['id']
class ToolProductSettingsForm(forms.ModelForm):
tool_configuration = forms.ModelChoiceField(queryset=Tool_Configuration.objects.all(), label='Tool Configuration')
class Meta:
model = Tool_Product_Settings
fields = ['name', 'description', 'url', 'tool_configuration', 'tool_project_id']
exclude = ['tool_type']
order = ['name']
def clean(self):
from django.core.validators import URLValidator
form_data = self.cleaned_data
try:
if form_data["url"] is not None:
url_validator = URLValidator(schemes=['ssh', 'http', 'https'])
url_validator(form_data["url"])
except forms.ValidationError:
raise forms.ValidationError(
'It does not appear as though this endpoint is a valid URL/SSH or IP address.',
code='invalid')
return form_data
class ObjectSettingsForm(forms.ModelForm):
# tags = forms.CharField(widget=forms.SelectMultiple(choices=[]),
# required=False,
# help_text="Add tags that help describe this object. "
# "Choose from the list or add new tags. Press TAB key to add.")
class Meta:
model = Objects_Product
fields = ['path', 'folder', 'artifact', 'name', 'review_status', 'tags']
exclude = ['product']
def __init__(self, *args, **kwargs):
super(ObjectSettingsForm, self).__init__(*args, **kwargs)
def clean(self):
form_data = self.cleaned_data
return form_data
class CredMappingForm(forms.ModelForm):
cred_user = forms.ModelChoiceField(queryset=Cred_Mapping.objects.all().select_related('cred_id'), required=False,
label='Select a Credential')
class Meta:
model = Cred_Mapping
fields = ['cred_user']
exclude = ['product', 'finding', 'engagement', 'test', 'url', 'is_authn_provider']
class CredMappingFormProd(forms.ModelForm):
class Meta:
model = Cred_Mapping
fields = ['cred_id', 'url', 'is_authn_provider']
exclude = ['product', 'finding', 'engagement', 'test']
class EngagementPresetsForm(forms.ModelForm):
    notes = forms.CharField(widget=forms.Textarea(attrs={}),
                            required=False, help_text="Description of what needs to be tested or how to set up the environment for testing")
    scope = forms.CharField(widget=forms.Textarea(attrs={}),
                            required=False, help_text="Scope of engagement testing (IPs/Resources/URLs)")
class Meta:
model = Engagement_Presets
exclude = ['product']
class DeleteEngagementPresetsForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Engagement_Presets
fields = ['id']
class SystemSettingsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(SystemSettingsForm, self).__init__(*args, **kwargs)
self.fields['default_group_role'].queryset = get_group_member_roles()
class Meta:
model = System_Settings
exclude = ['product_grade', 'credentials', 'column_widths', 'drive_folder_ID', 'enable_google_sheets']
class BenchmarkForm(forms.ModelForm):
class Meta:
model = Benchmark_Product
exclude = ['product', 'control']
class Benchmark_RequirementForm(forms.ModelForm):
class Meta:
model = Benchmark_Requirement
exclude = ['']
class NotificationsForm(forms.ModelForm):
class Meta:
model = Notifications
exclude = ['']
class ProductNotificationsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProductNotificationsForm, self).__init__(*args, **kwargs)
if not self.instance.id:
self.initial['engagement_added'] = ''
self.initial['close_engagement'] = ''
self.initial['test_added'] = ''
self.initial['scan_added'] = ''
self.initial['sla_breach'] = ''
self.initial['risk_acceptance_expiration'] = ''
class Meta:
model = Notifications
fields = ['engagement_added', 'close_engagement', 'test_added', 'scan_added', 'sla_breach', 'risk_acceptance_expiration']
class AjaxChoiceField(forms.ChoiceField):
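    # Accepts any submitted value: the choices are presumably populated client-side
    # (via AJAX), so the usual server-side choice validation is skipped.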
def valid_value(self, value):
return True
class RuleForm(forms.ModelForm):
class Meta:
model = Rule
exclude = ['key_product']
class ChildRuleForm(forms.ModelForm):
class Meta:
model = Child_Rule
exclude = ['key_product']
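# Formset of Child_Rule forms: renders two extra blank forms, allows at most ten
# child rules, and supports deleting existing ones.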
RuleFormSet = modelformset_factory(Child_Rule, extra=2, max_num=10, exclude=[''], can_delete=True)
class DeleteRuleForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Rule
fields = ['id']
class CredUserForm(forms.ModelForm):
# selenium_script = forms.FileField(widget=forms.widgets.FileInput(
# attrs={"accept": ".py"}),
# label="Select a Selenium Script", required=False)
class Meta:
model = Cred_User
exclude = ['']
# fields = ['selenium_script']
class GITHUB_Product_Form(forms.ModelForm):
git_conf = forms.ModelChoiceField(queryset=GITHUB_Conf.objects.all(), label='GITHUB Configuration', required=False)
class Meta:
model = GITHUB_PKey
exclude = ['product']
class JIRAProjectForm(forms.ModelForm):
inherit_from_product = forms.BooleanField(label='inherit JIRA settings from product', required=False)
jira_instance = forms.ModelChoiceField(queryset=JIRA_Instance.objects.all(), label='JIRA Instance', required=False)
issue_template_dir = forms.ChoiceField(required=False,
choices=JIRA_TEMPLATE_CHOICES,
help_text='Choose the folder containing the Django templates used to render the JIRA issue description. These are stored in dojo/templates/issue-trackers. Leave empty to use the default jira_full templates.')
prefix = 'jira-project-form'
class Meta:
model = JIRA_Project
exclude = ['product', 'engagement']
fields = ['inherit_from_product', 'jira_instance', 'project_key', 'issue_template_dir', 'component', 'push_all_issues', 'enable_engagement_epic_mapping', 'push_notes', 'product_jira_sla_notification', 'risk_acceptance_expiration_notification']
def __init__(self, *args, **kwargs):
from dojo.jira_link import helper as jira_helper
# if the form is shown for an engagement, we set a placeholder text around inherited settings from product
self.target = kwargs.pop('target', 'product')
self.product = kwargs.pop('product', None)
self.engagement = kwargs.pop('engagement', None)
super().__init__(*args, **kwargs)
logger.debug('self.target: %s, self.product: %s, self.instance: %s', self.target, self.product, self.instance)
logger.debug('data: %s', self.data)
if self.target == 'engagement':
product_name = self.product.name if self.product else self.engagement.product.name if self.engagement.product else ''
            self.fields['project_key'].widget = forms.TextInput(attrs={'placeholder': "JIRA settings inherited from product '%s'" % product_name})
            self.fields['project_key'].help_text = "JIRA settings are inherited from product '%s', unless configured differently here." % product_name
            self.fields['jira_instance'].help_text = "JIRA settings are inherited from product '%s', unless configured differently here." % product_name
# if we don't have an instance, django will insert a blank empty one :-(
# so we have to check for id to make sure we only trigger this when there is a real instance from db
if self.instance.id:
logger.debug('jira project instance found for engagement, unchecking inherit checkbox')
self.fields['jira_instance'].required = True
self.fields['project_key'].required = True
self.initial['inherit_from_product'] = False
# once a jira project config is attached to an engagement, we can't go back to inheriting
# because the config needs to remain in place for the existing jira issues
self.fields['inherit_from_product'].disabled = True
self.fields['inherit_from_product'].help_text = 'Once an engagement has a JIRA Project stored, you cannot switch back to inheritance to avoid breaking existing JIRA issues'
self.fields['jira_instance'].disabled = False
self.fields['project_key'].disabled = False
self.fields['issue_template_dir'].disabled = False
self.fields['component'].disabled = False
self.fields['push_all_issues'].disabled = False
self.fields['enable_engagement_epic_mapping'].disabled = False
self.fields['push_notes'].disabled = False
self.fields['product_jira_sla_notification'].disabled = False
self.fields['risk_acceptance_expiration_notification'].disabled = False
elif self.product:
logger.debug('setting jira project fields from product1')
self.initial['inherit_from_product'] = True
jira_project_product = jira_helper.get_jira_project(self.product)
# we have to check that we are not in a POST request where jira project config data is posted
# this is because initial values will overwrite the actual values entered by the user
# makes no sense, but seems to be accepted behaviour: https://code.djangoproject.com/ticket/30407
            if jira_project_product and (self.prefix + '-jira_instance') not in self.data:
logger.debug('setting jira project fields from product2')
self.initial['jira_instance'] = jira_project_product.jira_instance.id if jira_project_product.jira_instance else None
self.initial['project_key'] = jira_project_product.project_key
self.initial['issue_template_dir'] = jira_project_product.issue_template_dir
self.initial['component'] = jira_project_product.component
self.initial['push_all_issues'] = jira_project_product.push_all_issues
self.initial['enable_engagement_epic_mapping'] = jira_project_product.enable_engagement_epic_mapping
self.initial['push_notes'] = jira_project_product.push_notes
self.initial['product_jira_sla_notification'] = jira_project_product.product_jira_sla_notification
self.initial['risk_acceptance_expiration_notification'] = jira_project_product.risk_acceptance_expiration_notification
self.fields['jira_instance'].disabled = True
self.fields['project_key'].disabled = True
self.fields['issue_template_dir'].disabled = True
self.fields['component'].disabled = True
self.fields['push_all_issues'].disabled = True
self.fields['enable_engagement_epic_mapping'].disabled = True
self.fields['push_notes'].disabled = True
self.fields['product_jira_sla_notification'].disabled = True
self.fields['risk_acceptance_expiration_notification'].disabled = True
else:
del self.fields['inherit_from_product']
# if we don't have an instance, django will insert a blank empty one :-(
# so we have to check for id to make sure we only trigger this when there is a real instance from db
if self.instance.id:
self.fields['jira_instance'].required = True
self.fields['project_key'].required = True
def clean(self):
logger.debug('validating jira project form')
cleaned_data = super().clean()
logger.debug('clean: inherit: %s', self.cleaned_data.get('inherit_from_product', False))
if not self.cleaned_data.get('inherit_from_product', False):
jira_instance = self.cleaned_data.get('jira_instance')
project_key = self.cleaned_data.get('project_key')
if project_key and jira_instance:
return cleaned_data
if not project_key and not jira_instance:
return cleaned_data
if self.target == 'engagement':
raise ValidationError('JIRA Project needs a JIRA Instance and JIRA Project Key, or choose to inherit settings from product')
else:
raise ValidationError('JIRA Project needs a JIRA Instance and JIRA Project Key, leave empty to have no JIRA integration setup')
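    # Illustrative usage (the surrounding view code is an assumption, not taken from
    # this file):
    #   JIRAProjectForm(request.POST, target='engagement', engagement=engagement,
    #                   prefix='jira-project-form')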
class GITHUBFindingForm(forms.Form):
def __init__(self, *args, **kwargs):
self.enabled = kwargs.pop('enabled')
super(GITHUBFindingForm, self).__init__(*args, **kwargs)
self.fields['push_to_github'] = forms.BooleanField()
self.fields['push_to_github'].required = False
self.fields['push_to_github'].help_text = "Checking this will overwrite content of your Github issue, or create one."
push_to_github = forms.BooleanField(required=False)
class JIRAFindingForm(forms.Form):
def __init__(self, *args, **kwargs):
self.push_all = kwargs.pop('push_all', False)
self.instance = kwargs.pop('instance', None)
self.jira_project = kwargs.pop('jira_project', None)
# we provide the finding_form from the same page so we can add validation errors
# if the finding doesn't satisfy the rules to be pushed to JIRA
self.finding_form = kwargs.pop('finding_form', None)
if self.instance is None and self.jira_project is None:
            raise ValueError('either a finding instance or a jira_project is needed')
super(JIRAFindingForm, self).__init__(*args, **kwargs)
self.fields['push_to_jira'] = forms.BooleanField()
self.fields['push_to_jira'].required = False
if settings.FEATURE_FINDING_GROUPS:
self.fields['push_to_jira'].help_text = "Checking this will overwrite content of your JIRA issue, or create one. If this finding is part of a Finding Group, the group will pushed instead of the finding."
else:
self.fields['push_to_jira'].help_text = "Checking this will overwrite content of your JIRA issue, or create one."
self.fields['push_to_jira'].label = "Push to JIRA"
if self.push_all:
# This will show the checkbox as checked and greyed out, this way the user is aware
# that issues will be pushed to JIRA, given their product-level settings.
self.fields['push_to_jira'].help_text = \
"Push all issues is enabled on this product. If you do not wish to push all issues" \
" to JIRA, please disable Push all issues on this product."
self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
self.fields['push_to_jira'].disabled = True
if self.instance:
if hasattr(self.instance, 'has_jira_issue') and self.instance.has_jira_issue:
self.initial['jira_issue'] = self.instance.jira_issue.jira_key
self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
if settings.FEATURE_FINDING_GROUPS:
self.fields['jira_issue'].widget = forms.TextInput(attrs={'placeholder': 'Leave empty and check push to jira to create a new JIRA issue for this finding, or the group this finding is in.'})
else:
self.fields['jira_issue'].widget = forms.TextInput(attrs={'placeholder': 'Leave empty and check push to jira to create a new JIRA issue for this finding.'})
if self.instance and self.instance.has_jira_group_issue:
self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
self.fields['jira_issue'].help_text = 'Changing the linked JIRA issue for finding groups is not (yet) supported.'
self.initial['jira_issue'] = self.instance.finding_group.jira_issue.jira_key
self.fields['jira_issue'].disabled = True
def clean(self):
logger.debug('jform clean')
import dojo.jira_link.helper as jira_helper
cleaned_data = super(JIRAFindingForm, self).clean()
jira_issue_key_new = self.cleaned_data.get('jira_issue')
finding = self.instance
jira_project = self.jira_project
logger.debug('self.cleaned_data.push_to_jira: %s', self.cleaned_data.get('push_to_jira', None))
if self.cleaned_data.get('push_to_jira', None) and finding.has_jira_group_issue:
can_be_pushed_to_jira, error_message, error_code = jira_helper.can_be_pushed_to_jira(self.instance.finding_group, self.finding_form)
if not can_be_pushed_to_jira:
self.add_error('push_to_jira', ValidationError(error_message, code=error_code))
# for field in error_fields:
# self.finding_form.add_error(field, error)
elif self.cleaned_data.get('push_to_jira', None):
can_be_pushed_to_jira, error_message, error_code = jira_helper.can_be_pushed_to_jira(self.instance, self.finding_form)
if not can_be_pushed_to_jira:
self.add_error('push_to_jira', ValidationError(error_message, code=error_code))
# for field in error_fields:
# self.finding_form.add_error(field, error)
if jira_issue_key_new and (not finding or not finding.has_jira_group_issue):
# when there is a group jira issue, we skip all the linking/unlinking as this is not supported (yet)
if finding:
                # in theory there can be multiple jira instances that have similar projects
# so checking by only the jira issue key can lead to false positives
# so we check also the jira internal id of the jira issue
# if the key and id are equal, it is probably the same jira instance and the same issue
# the database model is lacking some relations to also include the jira config name or url here
                # and I don't want to change too much now. This should cover most use cases.
jira_issue_need_to_exist = False
# changing jira link on finding
if finding.has_jira_issue and jira_issue_key_new != finding.jira_issue.jira_key:
jira_issue_need_to_exist = True
# adding existing jira issue to finding without jira link
if not finding.has_jira_issue:
jira_issue_need_to_exist = True
else:
jira_issue_need_to_exist = True
if jira_issue_need_to_exist:
jira_issue_new = jira_helper.jira_get_issue(jira_project, jira_issue_key_new)
if not jira_issue_new:
raise ValidationError('JIRA issue ' + jira_issue_key_new + ' does not exist or cannot be retrieved')
logger.debug('checking if provided jira issue id already is linked to another finding')
jira_issues = JIRA_Issue.objects.filter(jira_id=jira_issue_new.id, jira_key=jira_issue_key_new).exclude(engagement__isnull=False)
if self.instance:
# just be sure we exclude the finding that is being edited
jira_issues = jira_issues.exclude(finding=finding)
if len(jira_issues) > 0:
raise ValidationError('JIRA issue ' + jira_issue_key_new + ' already linked to ' + reverse('view_finding', args=(jira_issues[0].finding_id,)))
jira_issue = forms.CharField(required=False, label="Linked JIRA Issue",
validators=[validators.RegexValidator(
regex=r'^[A-Z][A-Z_0-9]+-\d+$',
message='JIRA issue key must be in XXXX-nnnn format ([A-Z][A-Z_0-9]+-\\d+)')])
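    # e.g. 'PROJ-123' or 'SEC_OPS-42' satisfy the key format above (illustrative values).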
push_to_jira = forms.BooleanField(required=False, label="Push to JIRA")
class JIRAImportScanForm(forms.Form):
def __init__(self, *args, **kwargs):
self.push_all = kwargs.pop('push_all', False)
super(JIRAImportScanForm, self).__init__(*args, **kwargs)
if self.push_all:
# This will show the checkbox as checked and greyed out, this way the user is aware
# that issues will be pushed to JIRA, given their product-level settings.
self.fields['push_to_jira'].help_text = \
"Push all issues is enabled on this product. If you do not wish to push all issues" \
" to JIRA, please disable Push all issues on this product."
self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
self.fields['push_to_jira'].disabled = True
push_to_jira = forms.BooleanField(required=False, label="Push to JIRA", help_text="Checking this will create a new jira issue for each new finding.")
class JIRAEngagementForm(forms.Form):
prefix = 'jira-epic-form'
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', None)
super(JIRAEngagementForm, self).__init__(*args, **kwargs)
if self.instance:
if self.instance.has_jira_issue:
self.fields['push_to_jira'].widget.attrs['checked'] = 'checked'
self.fields['push_to_jira'].label = 'Update JIRA Epic'
self.fields['push_to_jira'].help_text = 'Checking this will update the existing EPIC in JIRA.'
push_to_jira = forms.BooleanField(required=False, label="Create EPIC", help_text="Checking this will create an EPIC in JIRA for this engagement.")
class GoogleSheetFieldsForm(forms.Form):
cred_file = forms.FileField(widget=forms.widgets.FileInput(
attrs={"accept": ".json"}),
label="Google credentials file",
required=True,
allow_empty_file=False,
help_text="Upload the credentials file downloaded from the Google Developer Console")
drive_folder_ID = forms.CharField(
required=True,
label="Google Drive folder ID",
help_text="Extract the Drive folder ID from the URL and provide it here")
email_address = forms.EmailField(
required=True,
label="Email Address",
help_text="Enter the same email Address used to create the Service Account")
enable_service = forms.BooleanField(
initial=False,
required=False,
help_text='Tick this check box to enable Google Sheets Sync feature')
def __init__(self, *args, **kwargs):
self.credentials_required = kwargs.pop('credentials_required')
options = ((0, 'Hide'), (100, 'Small'), (200, 'Medium'), (400, 'Large'))
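        # Presumably the column widths (in pixels) used for the synced Google Sheet; 0 hides the column.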
protect = ['reporter', 'url', 'numerical_severity', 'endpoint', 'under_review', 'reviewers',
'review_requested_by', 'is_mitigated', 'jira_creation', 'jira_change', 'sonarqube_issue']
self.all_fields = kwargs.pop('all_fields')
super(GoogleSheetFieldsForm, self).__init__(*args, **kwargs)
if not self.credentials_required:
self.fields['cred_file'].required = False
for i in self.all_fields:
self.fields[i.name] = forms.ChoiceField(choices=options)
if i.name == 'id' or i.editable is False or i.many_to_one or i.name in protect:
self.fields['Protect ' + i.name] = forms.BooleanField(initial=True, required=True, disabled=True)
else:
self.fields['Protect ' + i.name] = forms.BooleanField(initial=False, required=False)
class LoginBanner(forms.Form):
banner_enable = forms.BooleanField(
label="Enable login banner",
initial=False,
required=False,
help_text='Tick this box to enable a text banner on the login page'
)
banner_message = forms.CharField(
required=False,
label="Message to display on the login page"
)
def clean(self):
cleaned_data = super().clean()
return cleaned_data
# ==============================
# Defect Dojo Engagement Surveys
# ==============================
# List of validator_name:func_name
# Show in admin a multichoice list of validator names
# pass this to form using field_name='validator_name' ?
class QuestionForm(forms.Form):
''' Base class for a Question
'''
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
# If true crispy-forms will render a <form>..</form> tags
self.helper.form_tag = kwargs.get('form_tag', True)
if 'form_tag' in kwargs:
del kwargs['form_tag']
self.engagement_survey = kwargs.get('engagement_survey')
self.answered_survey = kwargs.get('answered_survey')
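        # Exactly one of engagement_survey / answered_survey appears to be expected;
        # whichever was supplied is removed from kwargs before calling forms.Form.__init__.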
if not self.answered_survey:
del kwargs['engagement_survey']
else:
del kwargs['answered_survey']
self.helper.form_class = kwargs.get('form_class', '')
self.question = kwargs.get('question')
if not self.question:
raise ValueError('Need a question to render')
del kwargs['question']
super(QuestionForm, self).__init__(*args, **kwargs)
class TextQuestionForm(QuestionForm):
def __init__(self, *args, **kwargs):
super(TextQuestionForm, self).__init__(*args, **kwargs)
# work out initial data
initial_answer = TextAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question
)
if initial_answer.exists():
initial_answer = initial_answer[0].answer
else:
initial_answer = ''
self.fields['answer'] = forms.CharField(
label=self.question.text,
widget=forms.Textarea(attrs={"rows": 3, "cols": 10}),
required=not self.question.optional,
initial=initial_answer,
)
answer = self.fields['answer']
def save(self):
if not self.is_valid():
raise forms.ValidationError('form is not valid')
answer = self.cleaned_data.get('answer')
if not answer:
if self.fields['answer'].required:
raise forms.ValidationError('Required')
return
text_answer, created = TextAnswer.objects.get_or_create(
answered_survey=self.answered_survey,
question=self.question,
)
if created:
text_answer.answered_survey = self.answered_survey
text_answer.answer = answer
text_answer.save()
class ChoiceQuestionForm(QuestionForm):
def __init__(self, *args, **kwargs):
super(ChoiceQuestionForm, self).__init__(*args, **kwargs)
choices = [(c.id, c.label) for c in self.question.choices.all()]
# initial values
initial_choices = []
choice_answer = ChoiceAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question,
).annotate(a=Count('answer')).filter(a__gt=0)
# we have ChoiceAnswer instance
if choice_answer:
choice_answer = choice_answer[0]
initial_choices = choice_answer.answer.all().values_list('id',
flat=True)
if self.question.multichoice is False:
initial_choices = initial_choices[0]
# default classes
widget = forms.RadioSelect
field_type = forms.ChoiceField
inline_type = InlineRadios
if self.question.multichoice:
field_type = forms.MultipleChoiceField
widget = forms.CheckboxSelectMultiple
inline_type = InlineCheckboxes
field = field_type(
label=self.question.text,
required=not self.question.optional,
choices=choices,
initial=initial_choices,
widget=widget
)
self.fields['answer'] = field
# Render choice buttons inline
self.helper.layout = Layout(
inline_type('answer')
)
def clean_answer(self):
real_answer = self.cleaned_data.get('answer')
# for single choice questions, the selected answer is a single string
if type(real_answer) is not list:
real_answer = [real_answer]
return real_answer
def save(self):
if not self.is_valid():
raise forms.ValidationError('Form is not valid')
real_answer = self.cleaned_data.get('answer')
if not real_answer:
if self.fields['answer'].required:
raise forms.ValidationError('Required')
return
choices = Choice.objects.filter(id__in=real_answer)
# find ChoiceAnswer and filter in answer !
choice_answer = ChoiceAnswer.objects.filter(
answered_survey=self.answered_survey,
question=self.question,
)
# we have ChoiceAnswer instance
if choice_answer:
choice_answer = choice_answer[0]
if not choice_answer:
# create a ChoiceAnswer
choice_answer = ChoiceAnswer.objects.create(
answered_survey=self.answered_survey,
question=self.question
)
# re save out the choices
choice_answer.answered_survey = self.answered_survey
choice_answer.answer.set(choices)
choice_answer.save()
class Add_Questionnaire_Form(forms.ModelForm):
survey = forms.ModelChoiceField(
queryset=Engagement_Survey.objects.all(),
required=True,
widget=forms.widgets.Select(),
help_text='Select the Questionnaire to add.')
class Meta:
model = Answered_Survey
exclude = ('responder',
'completed',
'engagement',
'answered_on',
'assignee')
class AddGeneralQuestionnaireForm(forms.ModelForm):
survey = forms.ModelChoiceField(
queryset=Engagement_Survey.objects.all(),
required=True,
widget=forms.widgets.Select(),
help_text='Select the Questionnaire to add.')
expiration = forms.DateField(widget=forms.TextInput(
attrs={'class': 'datepicker', 'autocomplete': 'off'}))
class Meta:
model = General_Survey
exclude = ('num_responses', 'generated')
class Delete_Questionnaire_Form(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Answered_Survey
fields = ['id']
class DeleteGeneralQuestionnaireForm(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = General_Survey
fields = ['id']
class Delete_Eng_Survey_Form(forms.ModelForm):
id = forms.IntegerField(required=True,
widget=forms.widgets.HiddenInput())
class Meta:
model = Engagement_Survey
fields = ['id']
class CreateQuestionnaireForm(forms.ModelForm):
class Meta:
model = Engagement_Survey
exclude = ['questions']
class EditQuestionnaireQuestionsForm(forms.ModelForm):
questions = forms.ModelMultipleChoiceField(
Question.objects.all(),
required=True,
help_text="Select questions to include on this questionnaire. Field can be used to search available questions.",
widget=MultipleSelectWithPop(attrs={'size': '11'}))
class Meta:
model = Engagement_Survey
exclude = ['name', 'description', 'active']
class CreateQuestionForm(forms.Form):
type = forms.ChoiceField(choices=(("---", "-----"), ("text", "Text"), ("choice", "Choice")))
order = forms.IntegerField(min_value=1, widget=forms.TextInput(attrs={'data-type': 'both'}))
optional = forms.BooleanField(help_text="If selected, user doesn't have to answer this question",
initial=False,
required=False,
widget=forms.CheckboxInput(attrs={'data-type': 'both'}))
text = forms.CharField(widget=forms.Textarea(attrs={'data-type': 'text'}),
label="Question Text",
help_text="The actual question.")
class CreateTextQuestionForm(forms.Form):
class Meta:
model = TextQuestion
exclude = ['order', 'optional']
class MultiWidgetBasic(forms.widgets.MultiWidget):
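    # Renders six stacked text inputs for answer choices; the combined value is
    # round-tripped through pickle (see decompress below and MultiExampleField.compress).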
def __init__(self, attrs=None):
widgets = [forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'}),
forms.TextInput(attrs={'data-type': 'choice'})]
super(MultiWidgetBasic, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return pickle.loads(value)
else:
return [None, None, None, None, None, None]
def format_output(self, rendered_widgets):
return '<br/>'.join(rendered_widgets)
class MultiExampleField(forms.fields.MultiValueField):
widget = MultiWidgetBasic
def __init__(self, *args, **kwargs):
list_fields = [forms.fields.CharField(required=True),
forms.fields.CharField(required=True),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False),
forms.fields.CharField(required=False)]
super(MultiExampleField, self).__init__(list_fields, *args, **kwargs)
def compress(self, values):
return pickle.dumps(values)
class CreateChoiceQuestionForm(forms.Form):
multichoice = forms.BooleanField(required=False,
initial=False,
widget=forms.CheckboxInput(attrs={'data-type': 'choice'}),
help_text="Can more than one choice can be selected?")
answer_choices = MultiExampleField(required=False, widget=MultiWidgetBasic(attrs={'data-type': 'choice'}))
class Meta:
model = ChoiceQuestion
exclude = ['order', 'optional', 'choices']
class EditQuestionForm(forms.ModelForm):
class Meta:
model = Question
exclude = []
class EditTextQuestionForm(EditQuestionForm):
class Meta:
model = TextQuestion
exclude = []
class EditChoiceQuestionForm(EditQuestionForm):
choices = forms.ModelMultipleChoiceField(
Choice.objects.all(),
required=True,
help_text="Select choices to include on this question. Field can be used to search available choices.",
widget=MultipleSelectWithPop(attrs={'size': '11'}))
class Meta:
model = ChoiceQuestion
exclude = []
class AddChoicesForm(forms.ModelForm):
class Meta:
model = Choice
exclude = []
class AssignUserForm(forms.ModelForm):
assignee = forms.CharField(required=False,
widget=forms.widgets.HiddenInput())
def __init__(self, *args, **kwargs):
assignee = None
if 'assignee' in kwargs:
            assignee = kwargs.pop('assignee')
super(AssignUserForm, self).__init__(*args, **kwargs)
if assignee is None:
self.fields['assignee'] = forms.ModelChoiceField(queryset=Dojo_User.objects.all(), empty_label='Not Assigned', required=False)
else:
self.fields['assignee'].initial = assignee
class Meta:
model = Answered_Survey
exclude = ['engagement', 'survey', 'responder', 'completed', 'answered_on']
class AddEngagementForm(forms.Form):
product = forms.ModelChoiceField(
queryset=Product.objects.none(),
required=True,
widget=forms.widgets.Select(),
help_text='Select which product to attach Engagement')
def __init__(self, *args, **kwargs):
super(AddEngagementForm, self).__init__(*args, **kwargs)
self.fields['product'].queryset = get_authorized_products(Permissions.Engagement_Add)
| 43.371799
| 311
| 0.648941
|
e7f4fbe0133ad9947c40d9b0c4cf3cb72acd027c
| 2,133
|
py
|
Python
|
qa/rpc-tests/getchaintips.py
|
monsterdev13/MonsterNode
|
79be9557bb527f68eb78d944d5edb474b87cafda
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/getchaintips.py
|
monsterdev13/MonsterNode
|
79be9557bb527f68eb78d944d5edb474b87cafda
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/getchaintips.py
|
monsterdev13/MonsterNode
|
79be9557bb527f68eb78d944d5edb474b87cafda
|
[
"MIT"
] | 1
|
2018-05-13T20:34:51.000Z
|
2018-05-13T20:34:51.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips; we verify that they are reported correctly.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
def run_test (self):
BitcoinTestFramework.run_test (self)
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all ()
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| 35.55
| 70
| 0.643226
|
e4c0a1b2e3e59b803d696bb85aae48a0afa85d8d
| 4,473
|
py
|
Python
|
bokeh/models/graphs.py
|
TheLaw1337/bokeh
|
ca6c4abf3b0cef587136b8431e9d6ee6acbedc2f
|
[
"BSD-3-Clause"
] | 15,193
|
2015-01-01T05:11:45.000Z
|
2022-03-31T19:30:20.000Z
|
bokeh/models/graphs.py
|
TheLaw1337/bokeh
|
ca6c4abf3b0cef587136b8431e9d6ee6acbedc2f
|
[
"BSD-3-Clause"
] | 9,554
|
2015-01-01T03:16:54.000Z
|
2022-03-31T22:59:39.000Z
|
bokeh/models/graphs.py
|
TheLaw1337/bokeh
|
ca6c4abf3b0cef587136b8431e9d6ee6acbedc2f
|
[
"BSD-3-Clause"
] | 4,829
|
2015-01-02T03:35:32.000Z
|
2022-03-30T16:40:26.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import (
Any,
Dict,
Either,
Instance,
Int,
Seq,
String,
)
from ..model import Model
from .expressions import CoordinateTransform
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'EdgesAndLinkedNodes',
'EdgeCoordinates',
'EdgesOnly',
'GraphCoordinates',
'GraphHitTestPolicy',
'LayoutProvider',
'NodeCoordinates',
'NodesAndLinkedEdges',
'NodesOnly',
'StaticLayoutProvider',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@abstract
class LayoutProvider(Model):
'''
'''
@property
def node_coordinates(self) -> NodeCoordinates:
return NodeCoordinates(layout=self)
@property
def edge_coordinates(self) -> EdgeCoordinates:
return EdgeCoordinates(layout=self)
class StaticLayoutProvider(LayoutProvider):
'''
'''
graph_layout = Dict(Either(String, Int), Seq(Any), default={}, help="""
The coordinates of the graph nodes in cartesian space. The dictionary
keys correspond to a node index and the values are a two element sequence
containing the x and y coordinates of the node.
.. code-block:: python
{
0 : [0.5, 0.5],
1 : [1.0, 0.86],
2 : [0.86, 1],
}
""")
@abstract
class GraphCoordinates(CoordinateTransform):
'''
Abstract class for coordinate transform expression obtained from ``LayoutProvider``
'''
layout = Instance(LayoutProvider)
class NodeCoordinates(GraphCoordinates):
'''
Node coordinate expression obtained from ``LayoutProvider``
'''
pass
class EdgeCoordinates(GraphCoordinates):
'''
    Edge coordinate expression obtained from ``LayoutProvider``
'''
pass
@abstract
class GraphHitTestPolicy(Model):
'''
'''
pass
class EdgesOnly(GraphHitTestPolicy):
'''
With the ``EdgesOnly`` policy, only graph edges are able to be selected and
inspected. There is no selection or inspection of graph nodes.
'''
pass
class NodesOnly(GraphHitTestPolicy):
'''
With the ``NodesOnly`` policy, only graph nodes are able to be selected and
inspected. There is no selection or inspection of graph edges.
'''
pass
class NodesAndLinkedEdges(GraphHitTestPolicy):
'''
With the ``NodesAndLinkedEdges`` policy, inspection or selection of graph
nodes will result in the inspection or selection of the node and of the
linked graph edges. There is no direct selection or inspection of graph
edges.
'''
pass
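    # Illustrative usage (graph_renderer is assumed to be an existing GraphRenderer):
    #
    #     graph_renderer.selection_policy = NodesAndLinkedEdges()
    #     graph_renderer.inspection_policy = NodesAndLinkedEdges()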
class EdgesAndLinkedNodes(GraphHitTestPolicy):
'''
With the ``EdgesAndLinkedNodes`` policy, inspection or selection of graph
edges will result in the inspection or selection of the edge and of the
linked graph nodes. There is no direct selection or inspection of graph
nodes.
'''
pass
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 26.157895
| 87
| 0.484909
|
cba4135331ff19cb520c3b082edb2d2ff1a7d135
| 2,377
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractInfiniteNovelTranslations.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractInfiniteNovelTranslations.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractInfiniteNovelTranslations.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractInfiniteNovelTranslations(item):
"""
# Infinite Novel Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'),
('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'),
('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'),
('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'),
('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'),
('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'),
('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'),
('maou no utsuwa', 'Maou no Utsuwa', 'translated'),
('Maou no Ki', 'Maou no Ki', 'translated'),
('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'),
('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'),
('I work as Healer in Another World\'s Labyrinth City', 'I work as Healer in Another World\'s Labyrinth City', 'translated'),
('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'),
('Hakai no Miko', 'Hakai no Miko', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
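# The feed dispatcher is assumed to call this with a parsed item dict that provides
# at least 'title' and 'tags' keys (see the lookups above).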
| 72.030303
| 137
| 0.430374
|
05e9657894ebf91d149b92af51ca14c18270853a
| 1,391
|
py
|
Python
|
src/python/logic/Decision&Loop.py
|
hiteshsahu/Python4ML
|
be193f78035d669c2c8d9da4ddd7a791e6e625f6
|
[
"Apache-2.0"
] | null | null | null |
src/python/logic/Decision&Loop.py
|
hiteshsahu/Python4ML
|
be193f78035d669c2c8d9da4ddd7a791e6e625f6
|
[
"Apache-2.0"
] | null | null | null |
src/python/logic/Decision&Loop.py
|
hiteshsahu/Python4ML
|
be193f78035d669c2c8d9da4ddd7a791e6e625f6
|
[
"Apache-2.0"
] | null | null | null |
"""
elif = else if
Decision tree In Python
- if: elif: else:
. while : else:
- for x in : else:
Breaking & Passing
- pass : add to empty if condition
- break: breaks loop
- continue: skip loop
Range:
- range(n) : return sequence of 0 to n-1
- range(n,m): sequence of n+1,n+2 .. <m
- range(n,m,l): sequence of n, n+l,n+2l ...<m
"""
# ---------------IF ELIF---------------
a = 200
b = 33
if b > a:
print("b is greater than a")
elif a == b:
print("a and b are equal")
else:
print("a is greater than b")
# ---------------WHILE---------------
print("While")
i = 0
while i < 7:
i += 1
if i == 3:
        continue  # skip the rest of this iteration
    elif i == 7:
        pass  # decision branch with no code
        # break  # break would exit the loop immediately
print(i)
else:
print("i is no longer less than 7\n")
# ---------------FOR---------------
print("For in range(n)")
for x in range(5):  # range(M) returns a sequence of length M starting with 0, 1, 2, ... < M
print(x)
else:
print("Else reached!\n")
print("For in range(n,m)")
for x in range(2, 5):  # range(N, M) returns a sequence of length (M-N) starting with N, N+1, N+2, ... < M
print(x)
else:
print("Else reached!\n")
print("For in range(n,l,m)")
for x in range(1, 10, 1): # range(N, M, l) return Sequence starting with N, N+l, N+2*l...N+l*n < M
print(x)
else:
print("Else reached!\n")
| 18.302632
| 103
| 0.525521
|
18885bb5b36af526984077b4a5fce0438fea1c92
| 19,503
|
py
|
Python
|
renderer/rasterRenderer.py
|
WhereGroup/mapconverter
|
cd0aa5f533194c85cf6e098fadc079ea61b63fce
|
[
"MIT"
] | 9
|
2020-05-13T13:45:54.000Z
|
2022-03-30T10:43:45.000Z
|
renderer/rasterRenderer.py
|
WhereGroup/mapconverter
|
cd0aa5f533194c85cf6e098fadc079ea61b63fce
|
[
"MIT"
] | 4
|
2020-09-09T10:01:08.000Z
|
2022-01-15T17:59:11.000Z
|
renderer/rasterRenderer.py
|
WhereGroup/mapconverter
|
cd0aa5f533194c85cf6e098fadc079ea61b63fce
|
[
"MIT"
] | 1
|
2022-03-16T17:08:16.000Z
|
2022-03-16T17:08:16.000Z
|
import re
from dictionaries.raster_stretch import stretch_dict
from modules.functions import change_interface, convert_rgb_string_to_hex
from modules.arcGisModules import ArcGisModules
from feature.fills.gradientFillSymbol import FeatureGradientFillSymbol
class RasterRenderer:
def __init__(self):
pass
@staticmethod
def create_raster_renderer_basic(base):
""" This creates the basic raster-renderer-element in the DOM
:param base: is the self of the renderer object containing:
base.xml_document = xml_document
base.map_layer_element = map_layer_element
base.arcLayer = arc_layer
base.layer = layer
base.rendererType = renderer_type
:return: pipe_element, raster_renderer_element
"""
pipe_element = base.xml_document.createElement("pipe")
base.map_layer_element.appendChild(pipe_element)
raster_renderer_element = base.xml_document.createElement("rasterrenderer")
raster_renderer_element.setAttribute("alphaBand", "-1")
arc_raster_layer = change_interface(base.arcLayer, ArcGisModules.module_carto.IRasterLayer)
arc_raster_effect = change_interface(arc_raster_layer, ArcGisModules.module_carto.ILayerEffects)
try:
opacity = str(1 - arc_raster_effect.Transparency * 0.01)
except AttributeError:
opacity = "1"
raster_renderer_element.setAttribute("opacity", opacity)
pipe_element.appendChild(raster_renderer_element)
raster_transparency_element = base.xml_document.createElement("rasterTransparency")
raster_renderer_element.appendChild(raster_transparency_element)
raster_stretch = change_interface(arc_raster_layer.Renderer, ArcGisModules.module_carto.IRasterStretch2)
if raster_stretch and raster_stretch.Background and raster_stretch.BackgroundColor.NullColor:
values = raster_stretch.BackgroundValue
if isinstance(values, float):
single_value_pixel_element = base.xml_document.createElement("singleValuePixelList")
raster_transparency_element.appendChild(single_value_pixel_element)
pixel_list_entry_element = base.xml_document.createElement("pixelListEntry")
single_value_pixel_element.appendChild(pixel_list_entry_element)
pixel_list_entry_element.setAttribute("min", unicode(int(values)))
pixel_list_entry_element.setAttribute("max", unicode(int(values)))
pixel_list_entry_element.setAttribute("percentTransparent", "100")
else:
single_value_pixel_element = base.xml_document.createElement("threeValuePixelList")
raster_transparency_element.appendChild(single_value_pixel_element)
pixel_list_entry_element = base.xml_document.createElement("pixelListEntry")
single_value_pixel_element.appendChild(pixel_list_entry_element)
pixel_list_entry_element.setAttribute("red", unicode(int(values[0])))
pixel_list_entry_element.setAttribute("green", unicode(int(values[1])))
pixel_list_entry_element.setAttribute("blue", unicode(int(values[2])))
pixel_list_entry_element.setAttribute("percentTransparent", "100")
min_max_origin_element = base.xml_document.createElement("minMaxOrigin")
raster_renderer_element.appendChild(min_max_origin_element)
origin_limits_element = base.xml_document.createElement("limits")
origin_limits_element_content = base.xml_document.createTextNode("None")
origin_limits_element.appendChild(origin_limits_element_content)
min_max_origin_element.appendChild(origin_limits_element)
origin_extent_element = base.xml_document.createElement("extent")
origin_extent_element_content = base.xml_document.createTextNode("WholeRaster")
origin_extent_element.appendChild(origin_extent_element_content)
min_max_origin_element.appendChild(origin_extent_element)
origin_stat_accuracy_element = base.xml_document.createElement("statAccuracy")
origin_stat_accuracy_element_content = base.xml_document.createTextNode("Estimated")
origin_stat_accuracy_element.appendChild(origin_stat_accuracy_element_content)
min_max_origin_element.appendChild(origin_stat_accuracy_element)
origincumulative_cut_lower_element = base.xml_document.createElement("cumulativeCutLower")
origincumulative_cut_lower_element_content = base.xml_document.createTextNode("0.02")
origincumulative_cut_lower_element.appendChild(origincumulative_cut_lower_element_content)
min_max_origin_element.appendChild(origincumulative_cut_lower_element)
origincumulative_cut_upper_element = base.xml_document.createElement("cumulativeCutUpper")
origincumulative_cut_upper_element_content = base.xml_document.createTextNode("0.98")
origincumulative_cut_upper_element.appendChild(origincumulative_cut_upper_element_content)
min_max_origin_element.appendChild(origincumulative_cut_upper_element)
originstd_dev_factor_element = base.xml_document.createElement("stdDevFactor")
originstd_dev_factor_element_content = base.xml_document.createTextNode("2")
originstd_dev_factor_element.appendChild(originstd_dev_factor_element_content)
min_max_origin_element.appendChild(originstd_dev_factor_element)
try:
brightness_value = str(arc_raster_effect.Brightness)
contrasts_value = str(arc_raster_effect.Contrast)
except AttributeError:
brightness_value = "0"
contrasts_value = "0"
raster_brightnesscontrast_element = base.xml_document.createElement("brightnesscontrast")
raster_brightnesscontrast_element.setAttribute("brightness", brightness_value)
raster_brightnesscontrast_element.setAttribute("contrast", contrasts_value)
pipe_element.appendChild(raster_brightnesscontrast_element)
raster_huesaturation_element = base.xml_document.createElement("huesaturation")
raster_huesaturation_element.setAttribute("saturation ", "0")
raster_huesaturation_element.setAttribute("grayscaleMode", "0")
raster_huesaturation_element.setAttribute("colorizeRed", "255")
raster_huesaturation_element.setAttribute("colorizeBlue", "128")
raster_huesaturation_element.setAttribute("colorizeGreen", "128")
raster_huesaturation_element.setAttribute("colorizeStrength", "100")
raster_huesaturation_element.setAttribute("colorizeOn", "0")
pipe_element.appendChild(raster_huesaturation_element)
raster_resampler_element = base.xml_document.createElement("rasterresampler")
raster_resampler_element.setAttribute("maxOversampling", "2")
pipe_element.appendChild(raster_resampler_element)
return pipe_element, raster_renderer_element
@staticmethod
def adapt_raster_renderer(base, raster_renderer_element):
""" here the base renderer is adapted with the specific raster-renderer-content
:param base: is the self of the renderer object containing:
base.xml_document = xml_document
base.map_layer_element = map_layer_element
base.arcLayer = arc_layer
base.layer = layer
base.rendererType = renderer_type
:param raster_renderer_element: the raster_renderer_element of the DOM
"""
arc_raster_layer = change_interface(base.arcLayer, ArcGisModules.module_carto.IRasterLayer)
renderer_name = change_interface(arc_raster_layer.Renderer, ArcGisModules.module_carto.IRasterRendererInfo).Name
if renderer_name == "Stretched":
if arc_raster_layer.BandCount == 3:
RasterRenderer._create_singleband_pseudocolor_renderer(base, raster_renderer_element, arc_raster_layer)
else:
RasterRenderer._create_stretched_renderer(base, raster_renderer_element, arc_raster_layer)
if renderer_name == "RGB Composite":
RasterRenderer._create_rgb_composite_renderer(base, raster_renderer_element, arc_raster_layer)
@staticmethod
def _create_stretched_renderer(base, raster_renderer_element, arc_raster_layer):
""" This creates the stretched renderer content
:param base: is the self of the renderer object containing:
base.xml_document = xml_document
base.map_layer_element = map_layer_element
base.arcLayer = arc_layer
base.layer = layer
base.rendererType = renderer_type
:param raster_renderer_element: the raster_renderer_element of the DOM
:param arc_raster_layer: ArcObject of the raster_layer
"""
raster_renderer_element.setAttribute("type", "singlebandgray")
raster_renderer_element.setAttribute("grayBand", "1")
renderer = change_interface(
arc_raster_layer.Renderer,
ArcGisModules.module_carto.IRasterStretchColorRampRenderer
)
sbg_high = renderer.LabelHigh[7:].split(',')[0]
sbg_min = renderer.LabelLow[6:].split(',')[0]
sbg_gradient = re.sub(r'[\s+]', '', renderer.ColorScheme.title())
raster_renderer_element.setAttribute("gradient", sbg_gradient)
raster_contrast_enhancement_element = base.xml_document.createElement("contrastEnhancement")
raster_renderer_element.appendChild(raster_contrast_enhancement_element)
sbg_min_value_element = base.xml_document.createElement("minValue")
sbg_min_value_element_content = base.xml_document.createTextNode(sbg_min)
sbg_min_value_element.appendChild(sbg_min_value_element_content)
raster_contrast_enhancement_element.appendChild(sbg_min_value_element)
sbg_max_value_element = base.xml_document.createElement("maxValue")
sbg_max_value_element_content = base.xml_document.createTextNode(sbg_high)
sbg_max_value_element.appendChild(sbg_max_value_element_content)
raster_contrast_enhancement_element.appendChild(sbg_max_value_element)
sbg_algorithm_element = base.xml_document.createElement("algorithm")
sbg_algorithm_element_content = base.xml_document.createTextNode("StretchToMinimumMaximum")
sbg_algorithm_element.appendChild(sbg_algorithm_element_content)
raster_contrast_enhancement_element.appendChild(sbg_algorithm_element)
@staticmethod
def _create_rgb_composite_renderer(base, raster_renderer_element, arc_raster_layer):
""" This creates the rgb-composite renderer content
:param base: is the self of the renderer object containing:
base.xml_document = xml_document
base.map_layer_element = map_layer_element
base.arcLayer = arc_layer
base.layer = layer
base.rendererType = renderer_type
:param raster_renderer_element: the raster_renderer_element of the DOM
:param arc_raster_layer: ArcObject of the raster_layer
"""
raster_renderer_element.setAttribute("type", "multibandcolor")
raster_renderer_element.setAttribute("redBand", "1")
raster_renderer_element.setAttribute("greenBand", "2")
raster_renderer_element.setAttribute("blueBand", "3")
raster_stretch = change_interface(arc_raster_layer.Renderer, ArcGisModules.module_carto.IRasterStretch2)
limits_element = raster_renderer_element.getElementsByTagName('limits')[0]
limits_element.firstChild.nodeValue = stretch_dict.get(raster_stretch.StretchType, 'None')
stretch_params = raster_stretch.StandardDeviationsParam
raster_renderer_element.getElementsByTagName('stdDevFactor')[0].firstChild.nodeValue = unicode(stretch_params)
no_data_element = base.xml_document.createElement("noData")
for band_index in range(1, 4):
no_data_list_element = base.xml_document.createElement("noDataList")
no_data_list_element.setAttribute("bandNo", unicode(band_index))
no_data_list_element.setAttribute("useSrcNoData", "0")
no_data_range_element = base.xml_document.createElement("noDataRange")
no_data_range_element.setAttribute("min", "0")
no_data_range_element.setAttribute("max", "0")
no_data_list_element.appendChild(no_data_range_element)
no_data_element.appendChild(no_data_list_element)
base.map_layer_element.appendChild(no_data_element)
if not raster_stretch.StretchType == 0:
bandstats = RasterRenderer._get_band_stats(arc_raster_layer.Raster)
color = ["placeholder", "red", "green", "blue"]
for x in range(1, 4):
raster_contrast_enhancement_element = base.xml_document.createElement(color[x] + "ContrastEnhancement")
raster_renderer_element.appendChild(raster_contrast_enhancement_element)
renderer_contrast_min_element = base.xml_document.createElement("minValue")
renderer_contrast_min_element_content = base.xml_document.createTextNode(
bandstats.get(x, {}).get("min")
)
renderer_contrast_min_element.appendChild(renderer_contrast_min_element_content)
raster_contrast_enhancement_element.appendChild(renderer_contrast_min_element)
renderer_contrast_max_element = base.xml_document.createElement("maxValue")
renderer_contrast_max_element_content = base.xml_document.createTextNode(
bandstats.get(x, {}).get("max")
)
renderer_contrast_max_element.appendChild(renderer_contrast_max_element_content)
raster_contrast_enhancement_element.appendChild(renderer_contrast_max_element)
renderer_contrast_algorithm_element = base.xml_document.createElement("algorithm")
renderer_contrast_algorithm_element_content = base.xml_document.createTextNode(
'StretchToMinimumMaximum'
)
renderer_contrast_algorithm_element.appendChild(renderer_contrast_algorithm_element_content)
raster_contrast_enhancement_element.appendChild(renderer_contrast_algorithm_element)
@staticmethod
def _create_singleband_pseudocolor_renderer(base, raster_renderer_element, arc_raster_layer):
renderer = change_interface(
arc_raster_layer.Renderer,
ArcGisModules.module_carto.IRasterStretchColorRampRenderer
)
color_ramp_properties = FeatureGradientFillSymbol.create_color_ramp_properties(
renderer.ColorRamp,
False,
{'dict_symbols': {}}
)
bandstats = RasterRenderer._get_band_stats(arc_raster_layer.Raster)
used_band_number = renderer.BandIndex + 1
raster_renderer_element.setAttribute("type", "singlebandpseudocolor")
raster_renderer_element.setAttribute("band", unicode(used_band_number))
raster_renderer_element.setAttribute("alphaBand", unicode(used_band_number))
raster_renderer_element.setAttribute("classificationMin", bandstats.get(used_band_number, {}).get("min"))
raster_renderer_element.setAttribute("classificationMax", bandstats.get(used_band_number, {}).get("max"))
rastershader_element = base.xml_document.createElement("rastershader")
raster_renderer_element.appendChild(rastershader_element)
colorrampshader_element = base.xml_document.createElement("colorrampshader")
colorrampshader_element.setAttribute("maximumValue", renderer.LabelHigh)
colorrampshader_element.setAttribute("minimumValue", renderer.LabelLow)
colorrampshader_element.setAttribute("classificationMode", "2")
colorrampshader_element.setAttribute("clip", "0")
colorrampshader_element.setAttribute("colorRampType", "INTERPOLATED")
rastershader_element.appendChild(colorrampshader_element)
colorramp_element = base.xml_document.createElement("colorramp")
colorramp_element.setAttribute("name", "[source]")
colorramp_element.setAttribute("type", "gradient")
color1_prop = base.xml_document.createElement("prop")
color1_prop.setAttribute("k", "color1")
color1_prop.setAttribute("v", color_ramp_properties["dict_symbols"]["color1"])
color2_prop = base.xml_document.createElement("prop")
color2_prop.setAttribute("k", "color2")
color2_prop.setAttribute("v", color_ramp_properties["dict_symbols"]["color2"])
stops_prop = base.xml_document.createElement("prop")
stops_prop.setAttribute("k", "stops")
stops_prop.setAttribute("v", color_ramp_properties["dict_symbols"]["stops"])
discrete_prop = base.xml_document.createElement("prop")
discrete_prop.setAttribute("k", "discrete")
discrete_prop.setAttribute("v", "0")
ramp_type_prop = base.xml_document.createElement("prop")
ramp_type_prop.setAttribute("k", "rampType")
ramp_type_prop.setAttribute("v", "gradient")
colorramp_element.appendChild(color1_prop)
colorramp_element.appendChild(color2_prop)
colorramp_element.appendChild(stops_prop)
colorramp_element.appendChild(discrete_prop)
colorramp_element.appendChild(ramp_type_prop)
colorrampshader_element.appendChild(colorramp_element)
color_stops = color_ramp_properties["dict_symbols"]["stops"].split(":")
for index, color in enumerate(color_stops[:-1]):
position_double, color_value = color.split(";")
item_element = base.xml_document.createElement("item")
color_value_hex = convert_rgb_string_to_hex(color_value)
item_element.setAttribute("color", color_value_hex)
item_element.setAttribute("alpha", color_value[-3:])
value = unicode(255*float(position_double)) if index < len(color_stops) else u"255"
item_element.setAttribute("value", value)
item_element.setAttribute("label", value)
colorrampshader_element.appendChild(item_element)
@staticmethod
def _get_band_stats(arc_raster):
arc_raster = change_interface(arc_raster, ArcGisModules.module_data_source_raster.IRaster2)
arc_raster_dataset = change_interface(arc_raster.RasterDataset, ArcGisModules.module_gdb.IRasterDataset)
arc_raster_band_collection = change_interface(
arc_raster_dataset,
ArcGisModules.module_data_source_raster.IRasterBandCollection
)
bandstats = {}
for x in range(1, arc_raster_band_collection.Count + 1):
raster_band = arc_raster_band_collection.BandByName("Band_" + str(x))
try:
band_minimum = raster_band.Statistics.Minimum
band_maximum = raster_band.Statistics.Maximum
except ValueError:
band_minimum = 0
band_maximum = 255
bandstats.update(
{x:
{
"min": unicode(int(band_minimum)),
"max": unicode(int(band_maximum)),
}
}
)
return bandstats
| 52.286863
| 120
| 0.72153
|
b174fc4c4f6c2425d930fc815b949575d3d99dda
| 9,742
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20160601/express_route_circuit_authorization.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20160601/express_route_circuit_authorization.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20160601/express_route_circuit_authorization.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ExpressRouteCircuitAuthorization']
class ExpressRouteCircuitAuthorization(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
authorization_name: Optional[pulumi.Input[str]] = None,
authorization_use_status: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
        Authorization in an ExpressRouteCircuit resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_key: Gets or sets the authorization key
:param pulumi.Input[str] authorization_name: The name of the authorization.
:param pulumi.Input[str] authorization_use_status: Gets or sets AuthorizationUseStatus
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
:param pulumi.Input[str] id: Resource Id
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
:param pulumi.Input[str] provisioning_state: Gets provisioning state of the PublicIP resource Updating/Deleting/Failed
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['authorization_key'] = authorization_key
if authorization_name is None:
raise TypeError("Missing required property 'authorization_name'")
__props__['authorization_name'] = authorization_name
__props__['authorization_use_status'] = authorization_use_status
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['etag'] = etag
__props__['id'] = id
__props__['name'] = name
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitAuthorization"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitAuthorization")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitAuthorization, __self__).__init__(
'azure-nextgen:network/v20160601:ExpressRouteCircuitAuthorization',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitAuthorization':
"""
Get an existing ExpressRouteCircuitAuthorization resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitAuthorization(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets the authorization key
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="authorizationUseStatus")
def authorization_use_status(self) -> pulumi.Output[Optional[str]]:
"""
Gets or sets AuthorizationUseStatus
"""
return pulumi.get(self, "authorization_use_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets provisioning state of the PublicIP resource Updating/Deleting/Failed
"""
return pulumi.get(self, "provisioning_state")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
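# Usage sketch (hedged): a minimal Pulumi program built against the class above. The
# resource, circuit and resource-group names are placeholders, not values from this SDK.
#
#     import pulumi
#     from pulumi_azure_nextgen.network.v20160601 import ExpressRouteCircuitAuthorization
#
#     auth = ExpressRouteCircuitAuthorization(
#         "example-authorization",
#         authorization_name="example-authorization",
#         circuit_name="example-circuit",
#         resource_group_name="example-rg")
#     pulumi.export("authorization_key", auth.authorization_key)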
| 65.38255
| 3,049
| 0.722644
|
3d2c035b90f6b5b84751c78ea386c548a7766d44
| 70,468
|
py
|
Python
|
python/ccxt/async_support/cdax.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/cdax.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/cdax.py
|
pcriadoperez/ccxt
|
fd0db4bad42f4f937c401cdb4cd0bcc4e716282e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
class cdax(Exchange):
def describe(self):
return self.deep_extend(super(cdax, self).describe(), {
'id': 'cdax',
'name': 'CDAX',
'countries': ['RU'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome39'],
'certified': False,
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'cdax.io',
'pro': False,
'has': {
'CORS': None,
'spot': True,
'margin': None, # has but unimplemented
'swap': None,
'future': None,
'option': None,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': False,
'fetchDepositAddressesByNetwork': False,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTradingLimits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/102157692-fd406280-3e90-11eb-8d46-4511b617cd17.jpg',
'api': {
'market': 'https://{hostname}/api',
'public': 'https://{hostname}/api',
'private': 'https://{hostname}/api',
},
'www': 'https://cdax.io',
'referral': 'https://cdax.io/invite?invite_code=esc74',
'doc': 'https://github.com/cloudapidoc/API_Docs',
'fees': 'https://cdax.io/about/fee',
},
'api': {
'market': {
'get': {
'history/kline': 1, # 获取K线数据
'detail/merged': 1, # 获取聚合行情(Ticker)
'depth': 1, # 获取 Market Depth 数据
'trade': 1, # 获取 Trade Detail 数据
'history/trade': 1, # 批量获取最近的交易记录
'detail': 1, # 获取 Market Detail 24小时成交量数据
'tickers': 1,
'etp': 1, # 获取杠杆ETP实时净值
},
},
'public': {
'get': {
'common/symbols': 1, # 查询系统支持的所有交易对
'common/currencys': 1, # 查询系统支持的所有币种
'common/timestamp': 1, # 查询系统当前时间
'common/exchange': 1, # order limits
'settings/currencys': 1, # ?language=en-US
},
},
'private': {
'get': {
'account/accounts': 0.2, # 查询当前用户的所有账户(即account-id)
'account/accounts/{id}/balance': 0.2, # 查询指定账户的余额
'account/accounts/{sub-uid}': 1,
'account/history': 4,
'cross-margin/loan-info': 1,
'margin/loan-info': 1, # 查询借币币息率及额度
'fee/fee-rate/get': 1,
'order/openOrders': 0.4,
'order/orders': 0.4,
'order/orders/{id}': 0.4, # 查询某个订单详情
'order/orders/{id}/matchresults': 0.4, # 查询某个订单的成交明细
'order/orders/getClientOrder': 0.4,
'order/history': 1, # 查询当前委托、历史委托
'order/matchresults': 1, # 查询当前成交、历史成交
# 'dw/withdraw-virtual/addresses', # 查询虚拟币提现地址(Deprecated)
'query/deposit-withdraw': 1,
# 'margin/loan-info', # duplicate
'margin/loan-orders': 0.2, # 借贷订单
'margin/accounts/balance': 0.2, # 借贷账户详情
'cross-margin/loan-orders': 1, # 查询借币订单
'cross-margin/accounts/balance': 1, # 借币账户详情
'points/actions': 1,
'points/orders': 1,
'subuser/aggregate-balance': 10,
'stable-coin/exchange_rate': 1,
'stable-coin/quote': 1,
},
'post': {
'account/transfer': 1, # 资产划转(该节点为母用户和子用户进行资产划转的通用接口。)
'futures/transfer': 1,
'order/batch-orders': 0.4,
'order/orders/place': 0.2, # 创建并执行一个新订单(一步下单, 推荐使用)
'order/orders/submitCancelClientOrder': 0.2,
'order/orders/batchCancelOpenOrders': 0.4,
# 'order/orders', # 创建一个新的订单请求 (仅创建订单,不执行下单)
# 'order/orders/{id}/place', # 执行一个订单 (仅执行已创建的订单)
'order/orders/{id}/submitcancel': 0.2, # 申请撤销一个订单请求
'order/orders/batchcancel': 0.4, # 批量撤销订单
# 'dw/balance/transfer', # 资产划转
'dw/withdraw/api/create': 1, # 申请提现虚拟币
# 'dw/withdraw-virtual/create', # 申请提现虚拟币
# 'dw/withdraw-virtual/{id}/place', # 确认申请虚拟币提现(Deprecated)
'dw/withdraw-virtual/{id}/cancel': 1, # 申请取消提现虚拟币
'dw/transfer-in/margin': 10, # 现货账户划入至借贷账户
'dw/transfer-out/margin': 10, # 借贷账户划出至现货账户
'margin/orders': 10, # 申请借贷
'margin/orders/{id}/repay': 10, # 归还借贷
'cross-margin/transfer-in': 1, # 资产划转
'cross-margin/transfer-out': 1, # 资产划转
'cross-margin/orders': 1, # 申请借币
'cross-margin/orders/{id}/repay': 1, # 归还借币
'stable-coin/exchange': 1,
'subuser/transfer': 10,
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'bad-request': BadRequest,
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of self currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for self symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
},
},
'options': {
'defaultNetwork': 'ERC20',
'networks': {
'ETH': 'erc20',
'TRX': 'trc20',
'HRC20': 'hrc20',
'HECO': 'hrc20',
'HT': 'hrc20',
'ALGO': 'algo',
'OMNI': '',
},
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'private_get_order_orders', # 'private_get_order_history' # https://github.com/ccxt/ccxt/pull/5392
'fetchOpenOrdersMethod': 'fetch_open_orders_v1', # 'fetch_open_orders_v2' # https://github.com/ccxt/ccxt/issues/5388
'createMarketBuyOrderRequiresPrice': True,
'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
'createOrderMethod': 'privatePostOrderOrdersPlace',
'language': 'en-US',
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
async def fetch_time(self, params={}):
response = await self.publicGetCommonTimestamp(params)
return self.safe_integer(response, 'data')
async def fetch_trading_limits(self, symbols=None, params={}):
        # this method should not be called directly, use loadTradingLimits() instead
        # by default it will try to load the trading limits of all symbols (with separate requests)
# however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
await self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = await self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
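    # For illustration (the symbols are hypothetical): calling
    #     await exchange.fetch_trading_limits(['ETH/BTC', 'LTC/BTC'])
    # returns a dict keyed by those symbols, each value built by parse_trading_limits() below.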
async def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = await self.publicGetCommonExchange(self.extend(request, params))
#
# {
# status: "ok",
# data: {
# 'symbol': "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1
# }
# }
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# {
# 'symbol': "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1
# }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
async def fetch_markets(self, params={}):
response = await self.publicGetCommonSymbols(params)
#
# {
# "status": "ok",
# "data": [
# {
# "base-currency": "ckb",
# "quote-currency": "usdt",
# "price-precision": 6,
# "amount-precision": 2,
# "symbol-partition": "default",
# "symbol": "ckbusdt",
# "state": "online",
# "value-precision": 8,
# "min-order-amt": 1,
# "max-order-amt": 140000000,
# "min-order-value": 5,
# "limit-order-min-order-amt": 1,
# "limit-order-max-order-amt": 140000000,
# "limit-order-max-buy-amt": 140000000,
# "limit-order-max-sell-amt": 140000000,
# "sell-market-min-order-amt": 1,
# "sell-market-max-order-amt": 14000000,
# "buy-market-max-order-value": 200000,
# "api-trading": "enabled",
# "tags": ""
# },
# ]
# }
#
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = self.safe_string(market, 'base-currency')
quoteId = self.safe_string(market, 'quote-currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
state = self.safe_string(market, 'state')
result.append({
'id': baseId + quoteId,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': None,
'swap': False,
'future': False,
'option': False,
'active': (state == 'online'),
'contract': False,
'linear': None,
'inverse': None,
'taker': 0 if (base == 'OMG') else 0.002,
'maker': 0 if (base == 'OMG') else 0.002,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_integer(market, 'amount-precision'),
'price': self.safe_integer(market, 'price-precision'),
'cost': self.safe_integer(market, 'value-precision'),
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.safe_number(market, 'leverage-ratio', 1),
'superMax': self.safe_number(market, 'super-margin-leverage-ratio', 1),
},
'amount': {
'min': self.safe_number(market, 'min-order-amt'),
'max': self.safe_number(market, 'max-order-amt'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'min-order-value', 0),
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_string(ticker['bid'], 0)
bidVolume = self.safe_string(ticker['bid'], 1)
else:
bid = self.safe_string(ticker, 'bid')
bidVolume = self.safe_string(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_string(ticker['ask'], 0)
askVolume = self.safe_string(ticker['ask'], 1)
else:
ask = self.safe_string(ticker, 'ask')
askVolume = self.safe_string(ticker, 'askSize')
open = self.safe_string(ticker, 'open')
close = self.safe_string(ticker, 'close')
baseVolume = self.safe_string(ticker, 'amount')
quoteVolume = self.safe_string(ticker, 'vol')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': 'step0',
}
response = await self.marketGetDepth(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ts":1583474832008,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.marketGetDetailMerged(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
ticker = self.parse_ticker(response['tick'], market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetTickers(params)
tickers = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_market(marketId)
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id": "112522757755423628681413936",
# "ts": "1638457111917",
# "trade-id": "100454385963",
# "amount": "13.7962",
# "price": "1.697867",
# "direction": "buy"
# }
#
# fetchMyTrades(private)
#
# {
# "symbol": "adausdt",
# "fee-currency": "usdt",
# "source": "spot-api",
# "order-id": "423628498050504",
# "created-at": "1638455779233",
# "role": "taker",
# "price": "1.672487",
# "match-id": "112521868633",
# "trade-id": "100454375614",
# "filled-amount": "6.8",
# "filled-fees": "0.0227458232",
# "filled-points": "0.0",
# "fee-deduct-currency": "",
# "fee-deduct-state": "done",
# "id": "422419583501532",
# "type": "sell-market"
# },
#
# fetchOrderTrades(private)
#
# {
# "symbol": "adausdt",
# "fee-currency": "usdt",
# "source": "spot-api",
# "match-id": "112521868633",
# "trade-id": "100454375614",
# "role": "taker",
# "order-id": "423628498050504",
# "price": "1.672487",
# "created-at": "1638455779233",
# "filled-amount": "6.8",
# "filled-fees": "0.0227458232",
# "filled-points": "0.0",
# "fee-deduct-currency": "",
# "fee-deduct-state": "done",
# "id": "422419583501532",
# "type": "sell-market"
# }
#
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
order = self.safe_string(trade, 'order-id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string(trade, 'role')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
fee = None
feeCostString = self.safe_string(trade, 'filled-fees')
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-currency'))
filledPoints = self.safe_string(trade, 'filled-points')
if filledPoints is not None:
if (feeCostString is None) or (feeCostString == '0.0'):
feeCostString = filledPoints
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))
if feeCostString is not None:
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string(trade, 'id', tradeId)
return self.safe_trade({
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
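    # For illustration (hypothetical input): a private trade reported with "type": "sell-market"
    # is split on '-' above into side='sell' and type='market', while public trades carry the
    # side directly in their "direction" field.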
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrderOrdersIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # 1-100 orders, default is 100
if since is not None:
request['start-time'] = since # a date within 120 days from today
# request['end-time'] = self.sum(since, 172800000) # 48 hours window
response = await self.privateGetOrderMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryTrade(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, market['symbol'], since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryKline(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
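    # Example call (hypothetical symbol): await exchange.fetch_ohlcv('ETH/BTC', '1h', limit=200)
    # maps '1h' to the exchange period '60min' via self.timeframes and returns
    # [timestamp, open, high, low, close, volume] rows assembled by parse_ohlcv() above.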
async def fetch_accounts(self, params={}):
await self.load_markets()
response = await self.privateGetAccountAccounts(params)
return response['data']
async def fetch_currencies(self, params={}):
request = {
'language': self.options['language'],
}
response = await self.publicGetSettingsCurrencys(self.extend(request, params))
#
# {
# "status":"ok",
# "data":[
# {
# "currency-addr-with-tag":false,
# "fast-confirms":12,
# "safe-confirms":12,
# "currency-type":"eth",
# "quote-currency":true,
# "withdraw-enable-timestamp":1609430400000,
# "deposit-enable-timestamp":1609430400000,
# "currency-partition":"all",
# "support-sites":["OTC","INSTITUTION","MINEPOOL"],
# "withdraw-precision":6,
# "visible-assets-timestamp":1508839200000,
# "deposit-min-amount":"1",
# "withdraw-min-amount":"10",
# "show-precision":"8",
# "tags":"",
# "weight":23,
# "full-name":"Tether USDT",
# "otc-enable":1,
# "visible":true,
# "white-enabled":false,
# "country-disabled":false,
# "deposit-enabled":true,
# "withdraw-enabled":true,
# "name":"usdt",
# "state":"online",
# "display-name":"USDT",
# "suspend-withdraw-desc":null,
# "withdraw-desc":"Minimum withdrawal amount: 10 USDT(ERC20). not >_<not To ensure the safety of your funds, your withdrawal request will be manually reviewed if your security strategy or password is changed. Please wait for phone calls or emails from our staff.not >_<not Please make sure that your computer and browser are secure and your information is protected from being tampered or leaked.",
# "suspend-deposit-desc":null,
# "deposit-desc":"Please don’t deposit any other digital assets except USDT to the above address. Otherwise, you may lose your assets permanently. not >_<not Depositing to the above address requires confirmations of the entire network. It will arrive after 12 confirmations, and it will be available to withdraw after 12 confirmations. not >_<not Minimum deposit amount: 1 USDT. Any deposits less than the minimum will not be credited or refunded.not >_<not Your deposit address won’t change often. If there are any changes, we will notify you via announcement or email.not >_<not Please make sure that your computer and browser are secure and your information is protected from being tampered or leaked.",
# "suspend-visible-desc":null
# }
# ]
# }
#
currencies = self.safe_value(response, 'data')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_value(currency, 'name')
precision = self.safe_integer(currency, 'withdraw-precision')
code = self.safe_currency_code(id)
depositEnabled = self.safe_value(currency, 'deposit-enabled')
withdrawEnabled = self.safe_value(currency, 'withdraw-enabled')
countryDisabled = self.safe_value(currency, 'country-disabled')
visible = self.safe_value(currency, 'visible', False)
state = self.safe_string(currency, 'state')
active = visible and depositEnabled and withdrawEnabled and (state == 'online') and not countryDisabled
name = self.safe_string(currency, 'display-name')
result[code] = {
'id': id,
'code': code,
'type': 'crypto',
# 'payin': currency['deposit-enabled'],
# 'payout': currency['withdraw-enabled'],
# 'transfer': None,
'name': name,
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': None, # todo need to fetch from fee endpoint
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_number(currency, 'deposit-min-amount'),
'max': math.pow(10, precision),
},
'withdraw': {
'min': self.safe_number(currency, 'withdraw-min-amount'),
'max': math.pow(10, precision),
},
},
'info': currency,
}
return result
def parse_balance(self, response):
balances = self.safe_value(response['data'], 'list', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
return self.safe_balance(result)
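    # Balance rows arrive as separate 'trade' and 'frozen' entries per currency; the loop above
    # folds them into a single account, e.g. (hypothetical data) two usdt rows of type
    # 'trade' and 'frozen' end up as the 'free' and 'used' fields under the 'USDT' key.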
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
method = self.options['fetchBalanceMethod']
request = {
'id': self.accounts[0]['id'],
}
response = await getattr(self, method)(self.extend(request, params))
return self.parse_balance(response)
async def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'private_get_order_orders')
response = await getattr(self, method)(self.extend(request, params))
#
# {status: "ok",
# data: [{ id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 } ]}
#
return self.parse_orders(response['data'], market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrderOrdersId(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOpenOrdersMethod', 'fetch_open_orders_v1')
return await getattr(self, method)(symbol, since, limit, params)
async def fetch_open_orders_v1(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrdersV1() requires a symbol argument')
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders_v2(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
accountId = self.safe_string(params, 'account-id')
if accountId is None:
# pick the first account
await self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request['account-id'] = accountId
if limit is not None:
request['size'] = limit
omitted = self.omit(params, 'account-id')
response = await self.privateGetOrderOpenOrders(self.extend(request, omitted))
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# { id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
# { id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
id = self.safe_string(order, 'id')
side = None
type = None
status = self.parse_order_status(self.safe_string(order, 'state'))
orderType = self.safe_string(order, 'type')
if orderType is not None:
parts = orderType.split('-')
side = self.safe_string(parts, 0)
type = self.safe_string(parts, 1)
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.safe_integer(order, 'created-at')
clientOrderId = self.safe_string(order, 'client-order-id')
filledString = self.safe_string_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
priceString = self.safe_string(order, 'price')
costString = self.safe_string_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
amountString = self.safe_string(order, 'amount')
if orderType == 'buy-market':
amountString = None
feeCostString = self.safe_string_2(order, 'filled-fees', 'field-fees') # typo in their API, filled fees
fee = None
if feeCostString is not None:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': priceString,
'stopPrice': None,
'average': None,
'cost': costString,
'amount': amountString,
'filled': filledString,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
await self.load_accounts()
market = self.market(symbol)
request = {
'account-id': self.accounts[0]['id'],
'symbol': market['id'],
'type': side + '-' + type,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id'])
if (type == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
# despite that cost = amount * price is in quote currency and should have quote precision
# the exchange API requires the cost supplied in 'amount' to be of base precision
# more about it here:
# https://github.com/ccxt/ccxt/pull/4395
# https://github.com/ccxt/ccxt/issues/7611
                    # we use costToPrecision here because the exchange requires cost in base precision (see the commented example after this method)
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
if type == 'limit' or type == 'ioc' or type == 'limit-maker' or type == 'stop-limit' or type == 'stop-limit-fok':
request['price'] = self.price_to_precision(symbol, price)
method = self.options['createOrderMethod']
response = await getattr(self, method)(self.extend(request, params))
timestamp = self.milliseconds()
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
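    # Commented example (hypothetical symbol and numbers): with createMarketBuyOrderRequiresPrice
    # enabled, a market buy of 2 units at a price of 9000, e.g.
    #     await exchange.create_order('BTC/USDT', 'market', 'buy', 2, 9000)
    # sends request['amount'] = self.cost_to_precision(symbol, 2 * 9000), i.e. the quote-currency
    # cost 18000 truncated to the market's cost precision rather than the base amount.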
async def cancel_order(self, id, symbol=None, params={}):
response = await self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
clientOrderIds = self.safe_value_2(params, 'clientOrderIds', 'client-order-ids')
params = self.omit(params, ['clientOrderIds', 'client-order-ids'])
request = {}
if clientOrderIds is None:
request['order-ids'] = ids
else:
request['client-order-ids'] = clientOrderIds
response = await self.privatePostOrderOrdersBatchcancel(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
return response
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {
            # 'account-id' string False NA The account id used for this cancel Refer to GET /v1/account/accounts
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
# 'types' 'string', buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privatePostOrderOrdersBatchCancelOpenOrders(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def safe_network(self, networkId):
lastCharacterIndex = len(networkId) - 1
lastCharacter = networkId[lastCharacterIndex]
if lastCharacter == '1':
networkId = networkId[0:lastCharacterIndex]
networksById = {}
return self.safe_string(networksById, networkId, networkId)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "usdt",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "usdterc20", # trc20usdt, hrc20usdt, usdt, algousdt
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
if tag == '':
tag = None
currencyId = self.safe_string(depositAddress, 'currency')
currency = self.safe_currency(currencyId, currency)
code = self.safe_currency_code(currencyId, currency)
networkId = self.safe_string(depositAddress, 'chain')
networks = self.safe_value(currency, 'networks', {})
networksById = self.index_by(networks, 'id')
networkValue = self.safe_value(networksById, networkId, networkId)
network = self.safe_string(networkValue, 'network')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'info': depositAddress,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
address = self.safe_string(transaction, 'address')
network = self.safe_string_upper(transaction, 'chain')
return {
'info': transaction,
'id': self.safe_string_2(transaction, 'id', 'data'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if network == 'erc20':
request['chain'] = currency['id'] + network
else:
request['chain'] = network + currency['id']
params = self.omit(params, 'network')
response = await self.privatePostDwWithdrawApiCreate(self.extend(request, params))
return self.parse_transaction(response, currency)
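    # Chain-naming illustration (hypothetical currency ids): with the 'networks' mapping above,
    #     await exchange.withdraw('USDT', 10, address, params={'network': 'TRC20'})
    # resolves the network to 'trc20' and sends request['chain'] = 'trc20usdt', while
    # network='ERC20' resolves to 'erc20' and produces 'usdterc20'.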
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
# eslint-disable-next-line quotes
payload = "\n".join([method, self.hostname, url, auth])
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if params:
url += '?' + self.urlencode(params)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
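# Hedged standalone sketch (not part of the original class): how the signature built
# by sign() above can be reproduced. The key, secret and parameters below are
# hypothetical; only hmac/hashlib/base64/urllib from the standard library are used.
import base64
import hashlib
import hmac
from urllib.parse import urlencode

def huobi_style_signature(method, host, path, params, secret):
    # Sort the query parameters, URL-encode them and join method, host, path and
    # query with newlines -- the same payload layout as in sign() above -- then
    # compute a base64-encoded HMAC-SHA256 over that payload.
    query = urlencode(sorted(params.items()))
    payload = "\n".join([method, host, path, query])
    digest = hmac.new(secret.encode(), payload.encode(), hashlib.sha256).digest()
    return base64.b64encode(digest).decode()

# Example with hypothetical credentials:
# huobi_style_signature('GET', 'api.huobi.pro', '/v1/account/accounts',
#                       {'AccessKeyId': 'my-key', 'SignatureMethod': 'HmacSHA256',
#                        'SignatureVersion': '2', 'Timestamp': '2022-01-01T00:00:00'},
#                       'my-secret')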
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_integer(config, 'cost', 1)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
code = self.safe_string(response, 'err-code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string(response, 'err-msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
| 45.521964
| 730
| 0.493401
|
0dce87ab67dcd7ff0be9f03509718f1a96986424
| 1,249
|
py
|
Python
|
neural_compressor/experimental/common/metric.py
|
kevinintel/neural-compressor
|
b57645566aeff8d3c18dc49d2739a583c072f940
|
[
"Apache-2.0"
] | 172
|
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
neural_compressor/experimental/common/metric.py
|
kevinintel/neural-compressor
|
b57645566aeff8d3c18dc49d2739a583c072f940
|
[
"Apache-2.0"
] | 40
|
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
neural_compressor/experimental/common/metric.py
|
kevinintel/neural-compressor
|
b57645566aeff8d3c18dc49d2739a583c072f940
|
[
"Apache-2.0"
] | 33
|
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Metric(object):
"""common Metric just collect the infos to construct a Metric
"""
def __init__(self, metric_cls, name='user_metric', **kwargs):
"""The metric class should take the outputs of the model as the metric's inputs,
neural_compressor built-in metric always take (predictions, labels) as inputs, it's
recommended to design metric_cls to take (predictions, labels) as inputs.
metric_cls should be sub_class of neural_compressor.metric.BaseMetric.
"""
self.metric_cls = metric_cls
self.name = name
self.kwargs = kwargs
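# Hedged usage sketch (not part of the original file): wrapping a user-defined metric
# with the Metric container above. ``MyAccuracy`` is a hypothetical class; a real one
# should subclass neural_compressor.metric.BaseMetric as the docstring recommends.
class MyAccuracy:
    def __init__(self, top_k=1):
        self.top_k = top_k

user_metric = Metric(MyAccuracy, name='my_accuracy', top_k=1)
# user_metric.metric_cls, user_metric.name and user_metric.kwargs are later used by
# the framework to instantiate MyAccuracy(top_k=1) and feed it (predictions, labels).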
| 40.290323
| 94
| 0.710168
|
ffc466ed2934f9cb786337ae8b332b733c453f8c
| 1,494
|
py
|
Python
|
executionTimeCalculation/calculation.py
|
Leticia07/algorithm-analysis
|
a15592f9b26a63937bb4dadf7a3e1f1183359bed
|
[
"MIT"
] | null | null | null |
executionTimeCalculation/calculation.py
|
Leticia07/algorithm-analysis
|
a15592f9b26a63937bb4dadf7a3e1f1183359bed
|
[
"MIT"
] | null | null | null |
executionTimeCalculation/calculation.py
|
Leticia07/algorithm-analysis
|
a15592f9b26a63937bb4dadf7a3e1f1183359bed
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import math
class Calculation:
def __init__(self, elements, processing):
self.elements = elements
self.processing = processing
def sortCalculation(self, complexity):
try:
return complexity / self.processing
except Exception as e:
print(e)
def insertionSort(self):
complexity = 2 * (self.elements) ** 2
self.insertionSortExecution = self.sortCalculation(complexity)
return self.insertionSortExecution
def intercalationSort(self):
complexity = self.elements * math.log(self.elements, 10)
self.intercalationSortExecution = self.sortCalculation(complexity)
return self.intercalationSortExecution
def chart(self):
columns = ['type', 'value']
values = [["Insertion", round(self.insertionSort(), 2)],["Intercalation", round(self.intercalationSort(), 2)]]
sort = pd.DataFrame(values, columns=columns)
textstr = "Insertion - {}\nIntercalation - {}".format(round(self.insertionSort(), 2), round(self.intercalationSort(), 2))
print(sort)
ax = sort.plot('type', 'value', kind='bar', legend=False, rot=0)
plt.gcf().text(0.02, 0.5, textstr, fontsize=10)
plt.gcf().canvas.set_window_title('Execution Time')
ax.set_xlabel("Types", fontsize=6)
ax.set_ylabel("Execution time", fontsize=6)
plt.subplots_adjust(left=0.35)
plt.show()
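# Hedged usage sketch (not part of the original file): estimating execution times for
# 1000 elements on a machine doing 10**6 operations per second (hypothetical figures).
if __name__ == '__main__':
    calc = Calculation(elements=1000, processing=10**6)
    print(calc.insertionSort())      # 2 * 1000**2 / 10**6 = 2.0 seconds
    print(calc.intercalationSort())  # 1000 * log10(1000) / 10**6 = 0.003 seconds
    # calc.chart() would additionally render a bar chart comparing both estimates.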
| 38.307692
| 129
| 0.649264
|
9838595d34bd2b89d9c15c9b132c7e9ece48743d
| 14,558
|
py
|
Python
|
exchangelib/autodiscover/properties.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/autodiscover/properties.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/autodiscover/properties.py
|
mishmashclone/ecederstrand-exchangelib
|
1bbae0e527dc82a45bf3b5946b438d69de96c20f
|
[
"BSD-2-Clause"
] | null | null | null |
from ..errors import ErrorNonExistentMailbox, AutoDiscoverFailed
from ..fields import TextField, EmailAddressField, ChoiceField, Choice, EWSElementField, OnOffField, BooleanField, \
IntegerField, BuildField, ProtocolListField
from ..properties import EWSElement
from ..transport import DEFAULT_ENCODING, NOAUTH, NTLM, BASIC, GSSAPI, SSPI, CBA
from ..util import create_element, add_xml_child, to_xml, is_xml, xml_to_str, AUTODISCOVER_REQUEST_NS, \
AUTODISCOVER_BASE_NS, AUTODISCOVER_RESPONSE_NS as RNS, ParseError
class AutodiscoverBase(EWSElement):
NAMESPACE = RNS
class User(AutodiscoverBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/user-pox"""
ELEMENT_NAME = 'User'
display_name = TextField(field_uri='DisplayName', namespace=RNS)
legacy_dn = TextField(field_uri='LegacyDN', namespace=RNS)
deployment_id = TextField(field_uri='DeploymentId', namespace=RNS) # GUID format
autodiscover_smtp_address = EmailAddressField(field_uri='AutoDiscoverSMTPAddress', namespace=RNS)
class IntExtUrlBase(AutodiscoverBase):
external_url = TextField(field_uri='ExternalUrl', namespace=RNS)
internal_url = TextField(field_uri='InternalUrl', namespace=RNS)
class AddressBook(IntExtUrlBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/addressbook-pox"""
ELEMENT_NAME = 'AddressBook'
class MailStore(IntExtUrlBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/mailstore-pox"""
ELEMENT_NAME = 'MailStore'
class NetworkRequirements(AutodiscoverBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/networkrequirements-pox"""
ELEMENT_NAME = 'NetworkRequirements'
ipv4_start = TextField(field_uri='IPv4Start', namespace=RNS)
ipv4_end = TextField(field_uri='IPv4End', namespace=RNS)
ipv6_start = TextField(field_uri='IPv6Start', namespace=RNS)
ipv6_end = TextField(field_uri='IPv6End', namespace=RNS)
class SimpleProtocol(AutodiscoverBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/protocol-pox
Used for the 'Internal' and 'External' elements that may contain a stripped-down version of the Protocol element.
"""
ELEMENT_NAME = 'Protocol'
WEB = 'WEB'
EXCH = 'EXCH'
EXPR = 'EXPR'
EXHTTP = 'EXHTTP'
TYPES = (WEB, EXCH, EXPR, EXHTTP)
type = ChoiceField(field_uri='Type', choices={Choice(c) for c in TYPES}, namespace=RNS)
as_url = TextField(field_uri='ASUrl', namespace=RNS)
class IntExtBase(AutodiscoverBase):
# TODO: 'OWAUrl' also has an AuthenticationMethod enum-style XML attribute with values:
# WindowsIntegrated, FBA, NTLM, Digest, Basic
owa_url = TextField(field_uri='OWAUrl', namespace=RNS)
protocol = EWSElementField(value_cls=SimpleProtocol)
class Internal(IntExtBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/internal-pox"""
ELEMENT_NAME = 'Internal'
class External(IntExtBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/external-pox"""
ELEMENT_NAME = 'External'
class Protocol(SimpleProtocol):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/protocol-pox"""
# Attribute 'Type' is ignored here. Has a name conflict with the child element and does not seem useful.
version = TextField(field_uri='Version', is_attribute=True, namespace=RNS)
internal = EWSElementField(value_cls=Internal)
external = EWSElementField(value_cls=External)
ttl = IntegerField(field_uri='TTL', namespace=RNS, default=1) # TTL for this autodiscover response, in hours
server = TextField(field_uri='Server', namespace=RNS)
server_dn = TextField(field_uri='ServerDN', namespace=RNS)
server_version = BuildField(field_uri='ServerVersion', namespace=RNS)
mdb_dn = TextField(field_uri='MdbDN', namespace=RNS)
public_folder_server = TextField(field_uri='PublicFolderServer', namespace=RNS)
port = IntegerField(field_uri='Port', namespace=RNS, min=1, max=65535)
directory_port = IntegerField(field_uri='DirectoryPort', namespace=RNS, min=1, max=65535)
referral_port = IntegerField(field_uri='ReferralPort', namespace=RNS, min=1, max=65535)
ews_url = TextField(field_uri='EwsUrl', namespace=RNS)
emws_url = TextField(field_uri='EmwsUrl', namespace=RNS)
sharing_url = TextField(field_uri='SharingUrl', namespace=RNS)
ecp_url = TextField(field_uri='EcpUrl', namespace=RNS)
ecp_url_um = TextField(field_uri='EcpUrl-um', namespace=RNS)
ecp_url_aggr = TextField(field_uri='EcpUrl-aggr', namespace=RNS)
ecp_url_mt = TextField(field_uri='EcpUrl-mt', namespace=RNS)
ecp_url_ret = TextField(field_uri='EcpUrl-ret', namespace=RNS)
ecp_url_sms = TextField(field_uri='EcpUrl-sms', namespace=RNS)
ecp_url_publish = TextField(field_uri='EcpUrl-publish', namespace=RNS)
ecp_url_photo = TextField(field_uri='EcpUrl-photo', namespace=RNS)
ecp_url_tm = TextField(field_uri='EcpUrl-tm', namespace=RNS)
ecp_url_tm_creating = TextField(field_uri='EcpUrl-tmCreating', namespace=RNS)
ecp_url_tm_hiding = TextField(field_uri='EcpUrl-tmHiding', namespace=RNS)
ecp_url_tm_editing = TextField(field_uri='EcpUrl-tmEditing', namespace=RNS)
ecp_url_extinstall = TextField(field_uri='EcpUrl-extinstall', namespace=RNS)
oof_url = TextField(field_uri='OOFUrl', namespace=RNS)
oab_url = TextField(field_uri='OABUrl', namespace=RNS)
um_url = TextField(field_uri='UMUrl', namespace=RNS)
ews_partner_url = TextField(field_uri='EwsPartnerUrl', namespace=RNS)
login_name = TextField(field_uri='LoginName', namespace=RNS)
domain_required = OnOffField(field_uri='DomainRequired', namespace=RNS)
domain_name = TextField(field_uri='DomainName', namespace=RNS)
spa = OnOffField(field_uri='SPA', namespace=RNS, default=True)
auth_package = ChoiceField(field_uri='AuthPackage', namespace=RNS, choices={
Choice(c) for c in ('basic', 'kerb', 'kerbntlm', 'ntlm', 'certificate', 'negotiate', 'nego2')
})
cert_principal_name = TextField(field_uri='CertPrincipalName', namespace=RNS)
ssl = OnOffField(field_uri='SSL', namespace=RNS, default=True)
auth_required = OnOffField(field_uri='AuthRequired', namespace=RNS, default=True)
use_pop_path = OnOffField(field_uri='UsePOPAuth', namespace=RNS)
smtp_last = OnOffField(field_uri='SMTPLast', namespace=RNS, default=False)
network_requirements = EWSElementField(value_cls=NetworkRequirements)
address_book = EWSElementField(value_cls=AddressBook)
mail_store = EWSElementField(value_cls=MailStore)
@property
def auth_type(self):
# Translates 'auth_package' value to our own 'auth_type' enum vals
if not self.auth_required:
return NOAUTH
return {
# Missing in list are DIGEST and OAUTH2
'basic': BASIC,
'kerb': GSSAPI,
'kerbntlm': NTLM,  # Means the client can choose between NTLM and GSSAPI
'ntlm': NTLM,
'certificate': CBA,
'negotiate': SSPI, # Unsure about this one
'nego2': GSSAPI,
'anonymous': NOAUTH, # Seen in some docs even though it's not mentioned in MSDN
}.get(self.auth_package.lower(), NTLM) # Default to NTLM
class Error(EWSElement):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/error-pox"""
ELEMENT_NAME = 'Error'
NAMESPACE = AUTODISCOVER_BASE_NS
id = TextField(field_uri='Id', namespace=AUTODISCOVER_BASE_NS, is_attribute=True)
time = TextField(field_uri='Time', namespace=AUTODISCOVER_BASE_NS, is_attribute=True)
code = TextField(field_uri='ErrorCode', namespace=AUTODISCOVER_BASE_NS)
message = TextField(field_uri='Message', namespace=AUTODISCOVER_BASE_NS)
debug_data = TextField(field_uri='DebugData', namespace=AUTODISCOVER_BASE_NS)
class Account(AutodiscoverBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/account-pox"""
ELEMENT_NAME = 'Account'
REDIRECT_URL = 'redirectUrl'
REDIRECT_ADDR = 'redirectAddr'
SETTINGS = 'settings'
ACTIONS = (REDIRECT_URL, REDIRECT_ADDR, SETTINGS)
type = ChoiceField(field_uri='AccountType', namespace=RNS, choices={Choice('email')})
action = ChoiceField(field_uri='Action', namespace=RNS, choices={Choice(p) for p in ACTIONS})
microsoft_online = BooleanField(field_uri='MicrosoftOnline', namespace=RNS)
redirect_url = TextField(field_uri='RedirectURL', namespace=RNS)
redirect_address = EmailAddressField(field_uri='RedirectAddr', namespace=RNS)
image = TextField(field_uri='Image', namespace=RNS) # Path to image used for branding
service_home = TextField(field_uri='ServiceHome', namespace=RNS) # URL to website of ISP
protocols = ProtocolListField()
# 'SmtpAddress' is inside the 'PublicFolderInformation' element
public_folder_smtp_address = TextField(field_uri='SmtpAddress', namespace=RNS)
@classmethod
def from_xml(cls, elem, account):
kwargs = {}
public_folder_information = elem.find(f'{{{cls.NAMESPACE}}}PublicFolderInformation')
for f in cls.FIELDS:
if f.name == 'public_folder_smtp_address':
if public_folder_information is None:
continue
kwargs[f.name] = f.from_xml(elem=public_folder_information, account=account)
continue
kwargs[f.name] = f.from_xml(elem=elem, account=account)
cls._clear(elem)
return cls(**kwargs)
class Response(AutodiscoverBase):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/response-pox"""
ELEMENT_NAME = 'Response'
user = EWSElementField(value_cls=User)
account = EWSElementField(value_cls=Account)
@property
def redirect_address(self):
try:
if self.account.action != Account.REDIRECT_ADDR:
return None
return self.account.redirect_address
except AttributeError:
return None
@property
def redirect_url(self):
try:
if self.account.action != Account.REDIRECT_URL:
return None
return self.account.redirect_url
except AttributeError:
return None
@property
def autodiscover_smtp_address(self):
# AutoDiscoverSMTPAddress might not be present in the XML. In this case, use the original email address.
try:
if self.account.action != Account.SETTINGS:
return None
return self.user.autodiscover_smtp_address
except AttributeError:
return None
@property
def ews_url(self):
"""Return the EWS URL contained in the response.
A response may contain a number of possible protocol types. EXPR is meant for EWS. See
https://techcommunity.microsoft.com/t5/blogs/blogarticleprintpage/blog-id/Exchange/article-id/16
We allow fallback to EXCH to support installations where EXPR is not available.
Additionally, some responses may contain an EXPR with no EWS URL. In that case, return the URL from EXCH, if
available.
"""
protocols = {p.type: p for p in self.account.protocols if p.ews_url}
if Protocol.EXPR in protocols:
return protocols[Protocol.EXPR].ews_url
if Protocol.EXCH in protocols:
return protocols[Protocol.EXCH].ews_url
raise ValueError(
f'No EWS URL found in any of the available protocols: {[str(p) for p in self.account.protocols]}'
)
class ErrorResponse(EWSElement):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/response-pox
Like 'Response', but with a different namespace.
"""
ELEMENT_NAME = 'Response'
NAMESPACE = AUTODISCOVER_BASE_NS
error = EWSElementField(value_cls=Error)
class Autodiscover(EWSElement):
ELEMENT_NAME = 'Autodiscover'
NAMESPACE = AUTODISCOVER_BASE_NS
response = EWSElementField(value_cls=Response)
error_response = EWSElementField(value_cls=ErrorResponse)
@staticmethod
def _clear(elem):
# Parent implementation also clears the parent, but this element doesn't have one.
elem.clear()
@classmethod
def from_bytes(cls, bytes_content):
"""Create an instance from response bytes. An Autodiscover request and response example is available at:
https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/pox-autodiscover-response-for-exchange
:param bytes_content:
:return:
"""
if not is_xml(bytes_content) and not is_xml(bytes_content, expected_prefix=b'<Autodiscover '):
raise ValueError(f'Response is not XML: {bytes_content}')
try:
root = to_xml(bytes_content).getroot()
except ParseError:
raise ValueError(f'Error parsing XML: {bytes_content}')
if root.tag != cls.response_tag():
raise ValueError(f'Unknown root element in XML: {bytes_content}')
return cls.from_xml(elem=root, account=None)
def raise_errors(self):
# Find an error message in the response and raise the relevant exception
try:
errorcode = self.error_response.error.code
message = self.error_response.error.message
if message in ('The e-mail address cannot be found.', "The email address can't be found."):
raise ErrorNonExistentMailbox('The SMTP address has no mailbox associated with it')
raise AutoDiscoverFailed(f'Unknown error {errorcode}: {message}')
except AttributeError:
raise AutoDiscoverFailed(f'Unknown autodiscover error response: {self}')
@staticmethod
def payload(email):
# Builds a full Autodiscover XML request
payload = create_element('Autodiscover', attrs=dict(xmlns=AUTODISCOVER_REQUEST_NS))
request = create_element('Request')
add_xml_child(request, 'EMailAddress', email)
add_xml_child(request, 'AcceptableResponseSchema', RNS)
payload.append(request)
return xml_to_str(payload, encoding=DEFAULT_ENCODING, xml_declaration=True)
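# Hedged usage sketch (not part of the original module): building a POX Autodiscover
# request and parsing a server response with the classes above. The email address and
# the ``response_bytes`` variable are hypothetical.
#
#   request_body = Autodiscover.payload('user@example.com')
#   ad = Autodiscover.from_bytes(response_bytes)
#   if ad.response is None:
#       ad.raise_errors()          # raises ErrorNonExistentMailbox / AutoDiscoverFailed
#   ews_url = ad.response.ews_url  # EXPR protocol preferred, EXCH as fallback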
| 44.519878
| 127
| 0.711774
|
0e1b7225929c62869ea8df8d740fdfe69c5f6f42
| 13,488
|
py
|
Python
|
tests/hdx/freshness/test_freshness_CKAN.py
|
OCHA-DAP/hdx-data-freshness
|
c5957f16cf3907749f68f05e0dd3db791c1ae348
|
[
"MIT"
] | 5
|
2017-06-01T06:07:24.000Z
|
2020-02-09T14:55:29.000Z
|
tests/hdx/freshness/test_freshness_CKAN.py
|
OCHA-DAP/hdx-data-freshness
|
c5957f16cf3907749f68f05e0dd3db791c1ae348
|
[
"MIT"
] | 5
|
2017-02-28T12:19:41.000Z
|
2022-02-03T22:20:23.000Z
|
tests/hdx/freshness/test_freshness_CKAN.py
|
OCHA-DAP/hdx-data-freshness
|
c5957f16cf3907749f68f05e0dd3db791c1ae348
|
[
"MIT"
] | 1
|
2018-04-02T07:48:39.000Z
|
2018-04-02T07:48:39.000Z
|
"""
Unit tests for the freshness class.
"""
import json
import logging
import os
import random
from datetime import datetime, timedelta
from os.path import join
import gspread
import pytest
from gspread.urls import DRIVE_FILES_API_V3_URL
from hdx.data.dataset import Dataset
from hdx.database import Database
from hdx.hdx_configuration import Configuration
from hdx.freshness.database.dbdataset import DBDataset
from hdx.freshness.datafreshness import DataFreshness
logger = logging.getLogger(__name__)
class TestFreshnessCKAN:
@pytest.fixture(scope="class")
def configuration(self):
project_config_yaml = join(
"src", "hdx", "freshness", "project_configuration.yml"
)
hdx_key = os.getenv("HDX_KEY")
Configuration._create(
hdx_site="stage",
user_agent="test",
hdx_key=hdx_key,
project_config_yaml=project_config_yaml,
)
@pytest.fixture(scope="function")
def datasetmetadata(self):
return join("tests", "fixtures", "CKAN", "hdx_dataset_static.yml")
@pytest.fixture(scope="function")
def nodatabase(self):
dbpath = join("tests", "test_freshness.db")
try:
os.remove(dbpath)
except FileNotFoundError:
pass
return {"driver": "sqlite", "database": dbpath}
@pytest.fixture(scope="class")
def params(self):
return {
"corpora": "teamDrive",
"teamDriveId": "0AKCBfHI3H-hcUk9PVA",
"supportsAllDrives": True,
"includeItemsFromAllDrives": True,
}
@pytest.fixture(scope="function")
def gclient(self):
gsheet_auth = os.getenv("GSHEET_AUTH")
if not gsheet_auth:
raise ValueError("No gsheet authorisation supplied!")
info = json.loads(gsheet_auth)
scopes = [
"https://www.googleapis.com/auth/drive",
"https://www.googleapis.com/auth/spreadsheets",
]
gclient = gspread.service_account_from_dict(info, scopes=scopes)
return gclient
@pytest.fixture(scope="function")
def setup_teardown_folder(self, gclient, params):
payload = {
"name": "freshness_test_tmp",
"mimeType": "application/vnd.google-apps.folder",
"parents": ["1M8_Hv3myw9RpLq86kBL7QkMAYxcHjvb6"],
}
r = gclient.request("post", DRIVE_FILES_API_V3_URL, json=payload, params=params)
folderid = r.json()["id"]
yield gclient, folderid
payload = {"trashed": True}
url = f"{DRIVE_FILES_API_V3_URL}/{folderid}"
gclient.request("patch", url, json=payload, params=params)
def test_generate_dataset(
self, configuration, datasetmetadata, nodatabase, setup_teardown_folder, params
):
today = datetime.now()
gclient, folderid = setup_teardown_folder
def create_gsheet(name):
payload = {
"name": name,
"mimeType": "application/vnd.google-apps.spreadsheet",
"parents": [folderid],
}
r = gclient.request(
"post", DRIVE_FILES_API_V3_URL, json=payload, params=params
)
spreadsheetid = r.json()["id"]
gsheet = gclient.open_by_key(spreadsheetid)
gsheet.share("", role="reader", perm_type="anyone")
return gsheet.sheet1, f"{gsheet.url}/export?format=csv"
wks, unchanging_url = create_gsheet("unchanging")
# update the sheet with array
wks.update("A1", [[random.random() for i in range(4)] for j in range(3)])
changing_wks1, changing_url1 = create_gsheet("changing1")
# update the sheet with array
changing_wks1.update(
"A1", [[random.random() for i in range(5)] for j in range(2)]
)
changing_wks2, changing_url2 = create_gsheet("changing2")
# update the sheet with array
changing_wks2.update(
"A1", [[random.random() for i in range(3)] for j in range(6)]
)
datasets = list()
last_modifieds = list()
fresh_dt = datetime.utcnow() - timedelta(days=1)
due_dt = fresh_dt - timedelta(days=8)
days7 = timedelta(days=7)
overdue_dt = due_dt - days7
delinquent_dt = overdue_dt - days7
fresh = fresh_dt.isoformat()
due = due_dt.isoformat()
overdue = overdue_dt.isoformat()
delinquent = delinquent_dt.isoformat()
for i in range(8):
dataset = Dataset(
{"name": f"freshness_test_{i}", "title": f"freshness test {i}"}
)
dataset.update_from_yaml(datasetmetadata)
dataset.set_maintainer("196196be-6037-4488-8b71-d786adf4c081")
dataset.set_organization("5a63012e-6c41-420c-8c33-e84b277fdc90")
dataset.set_date_of_dataset(today)
if i == 6:
dataset.set_expected_update_frequency("Never")
else:
dataset.set_expected_update_frequency("Every week")
dataset.set_subnational(True)
dataset.add_country_location("AFG")
tags = ["protests"]
dataset.add_tags(tags)
resource = {
"name": f"test_resource_{i}",
"description": f"Test Resource {i}",
"format": "csv",
"url": unchanging_url,
}
switcher = {
0: (unchanging_url, fresh),
1: (changing_url1, overdue),
2: (unchanging_url, delinquent),
3: (unchanging_url, due),
4: (changing_url2, fresh),
5: (unchanging_url, overdue),
6: (unchanging_url, delinquent),
7: (changing_url1, fresh),
}
resource["url"], resource["last_modified"] = switcher.get(i)
dataset.add_update_resource(resource)
# add resources
dataset.create_in_hdx(updated_by_script="freshness_ignore")
datasets.append(dataset)
last_modifieds.append({"start": dataset["last_modified"]})
updated_by_script_dt = None
try:
with Database(**nodatabase) as session:
# first run
freshness = DataFreshness(
session=session, datasets=datasets, do_touch=True
)
freshness.spread_datasets()
freshness.add_new_run()
hash_ids = [
datasets[3].get_resource()["id"],
datasets[4].get_resource()["id"],
datasets[7].get_resource()["id"],
]
datasets_to_check, resources_to_check = freshness.process_datasets(
hash_ids=hash_ids
)
results, hash_results = freshness.check_urls(resources_to_check, "test")
datasets_lastmodified = freshness.process_results(results, hash_results)
freshness.update_dataset_latest_of_modifieds(
datasets_to_check, datasets_lastmodified
)
freshness.now.isoformat()
output1 = freshness.output_counts()
# change something
changing_wks1.update(
"A1", [[random.random() for i in range(5)] for j in range(2)]
)
changing_wks2.update(
"A1", [[random.random() for i in range(3)] for j in range(6)]
)
# second run
for i, dataset in enumerate(datasets):
dataset = Dataset.read_from_hdx(dataset["id"])
last_modifieds[i]["run1"] = dataset["last_modified"]
if i == 5:
dataset["review_date"] = due
if i == 7:
updated_by_script_dt = datetime.utcnow()
updated_by_script = updated_by_script_dt.isoformat()
dataset[
"updated_by_script"
] = f"freshness ({updated_by_script})"
datasets[i] = dataset
freshness = DataFreshness(
session=session, datasets=datasets, do_touch=True
)
freshness.spread_datasets()
freshness.add_new_run()
datasets_to_check, resources_to_check = freshness.process_datasets(
hash_ids=hash_ids
)
results, hash_results = freshness.check_urls(resources_to_check, "test")
datasets_lastmodified = freshness.process_results(results, hash_results)
freshness.update_dataset_latest_of_modifieds(
datasets_to_check, datasets_lastmodified
)
run2_last_modified_dt = freshness.now
run2_last_modified = run2_last_modified_dt.isoformat()
output2 = freshness.output_counts()
finally:
# tear down
for i, dataset in enumerate(datasets):
dataset = Dataset.read_from_hdx(dataset["id"])
if dataset:
last_modifieds[i]["run2"] = dataset["last_modified"]
dataset.delete_from_hdx()
assert (
output1
== """
*** Resources ***
* total: 8 *,
first hash: 6,
firstrun: 2
*** Datasets ***
* total: 8 *,
0: Fresh, Updated firstrun: 4,
1: Due, Updated firstrun: 1,
2: Overdue, Updated firstrun: 2,
3: Delinquent, Updated firstrun: 1
0 datasets have update frequency of Live
1 datasets have update frequency of Never
0 datasets have update frequency of Adhoc"""
)
assert (
output2
== """
*** Resources ***
* total: 8 *,
hash: 2,
nothing: 3,
same hash: 3
*** Datasets ***
* total: 8 *,
0: Fresh, Updated hash: 2,
0: Fresh, Updated nothing: 2,
0: Fresh, Updated script update: 1,
1: Due, Updated nothing: 1,
1: Due, Updated review date: 1,
3: Delinquent, Updated nothing: 1
0 datasets have update frequency of Live
1 datasets have update frequency of Never
0 datasets have update frequency of Adhoc"""
)
assert last_modifieds == [
{"start": fresh, "run1": fresh, "run2": fresh},
{"start": overdue, "run1": overdue, "run2": run2_last_modified},
{"start": delinquent, "run1": delinquent, "run2": delinquent},
{"start": due, "run1": due, "run2": due},
{"start": fresh, "run1": fresh, "run2": fresh},
{"start": overdue, "run1": overdue, "run2": overdue},
{"start": delinquent, "run1": delinquent, "run2": delinquent},
{"start": fresh, "run1": fresh, "run2": fresh},
]
assert updated_by_script_dt is not None
expected = [
{
"last_modified": fresh_dt,
"latest_of_modifieds": fresh_dt,
"what_updated": "nothing",
"last_resource_modified": fresh_dt,
"fresh": 0,
},
{
"last_modified": overdue_dt,
"latest_of_modifieds": run2_last_modified_dt,
"what_updated": "hash",
"fresh": 0,
},
{
"last_modified": delinquent_dt,
"latest_of_modifieds": delinquent_dt,
"what_updated": "nothing",
"last_resource_modified": delinquent_dt,
"fresh": 3,
},
{
"last_modified": due_dt,
"latest_of_modifieds": due_dt,
"what_updated": "nothing",
"last_resource_modified": due_dt,
"fresh": 1,
},
{"last_modified": fresh_dt, "what_updated": "hash", "fresh": 0},
{
"review_date": due_dt,
"last_modified": overdue_dt,
"latest_of_modifieds": due_dt,
"what_updated": "review date",
"last_resource_modified": overdue_dt,
"fresh": 1,
},
{
"update_frequency": -1,
"last_modified": delinquent_dt,
"latest_of_modifieds": delinquent_dt,
"what_updated": "nothing",
"last_resource_modified": delinquent_dt,
"fresh": 0,
},
{
"last_modified": fresh_dt,
"updated_by_script": updated_by_script_dt,
"latest_of_modifieds": updated_by_script_dt,
"what_updated": "script update",
"last_resource_modified": fresh_dt,
"fresh": 0,
},
]
nonmatching = list()
for i, dataset in enumerate(datasets):
dbdataset = (
session.query(DBDataset)
.filter_by(run_number=1, id=dataset["id"])
.one()
.__dict__
)
for key, expect in expected[i].items():
actual = dbdataset[key]
if actual != expect:
nonmatching.append(
f"Key {key} of dataset number {i} does not match! {actual} != {expect}"
)
assert nonmatching == list()
| 36.953425
| 95
| 0.544336
|
f2f7b345bbf4e408924db3b97c965d5c9505157f
| 20,569
|
py
|
Python
|
flask_multipass_cern.py
|
javfg/flask-multipass-cern
|
7456e1fe49248cb143d03b868f6244944daabd40
|
[
"MIT"
] | null | null | null |
flask_multipass_cern.py
|
javfg/flask-multipass-cern
|
7456e1fe49248cb143d03b868f6244944daabd40
|
[
"MIT"
] | null | null | null |
flask_multipass_cern.py
|
javfg/flask-multipass-cern
|
7456e1fe49248cb143d03b868f6244944daabd40
|
[
"MIT"
] | null | null | null |
# This file is part of Flask-Multipass-CERN.
# Copyright (C) 2020 - 2021 CERN
#
# Flask-Multipass-CERN is free software; you can redistribute
# it and/or modify it under the terms of the MIT License; see
# the LICENSE file for more details.
import logging
from datetime import datetime
from functools import wraps
from importlib import import_module
from inspect import getcallargs
from authlib.integrations.requests_client import OAuth2Session
from flask import current_app, g, has_request_context
from flask_multipass import IdentityRetrievalFailed
from flask_multipass.data import IdentityInfo
from flask_multipass.exceptions import MultipassException
from flask_multipass.group import Group
from flask_multipass.identity import IdentityProvider
from flask_multipass.providers.authlib import AuthlibAuthProvider, _authlib_oauth
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
from urllib3 import Retry
CACHE_LONG_TTL = 86400 * 2
CACHE_TTL = 1800
CERN_OIDC_WELLKNOWN_URL = 'https://auth.cern.ch/auth/realms/cern/.well-known/openid-configuration'
HTTP_RETRY_COUNT = 5
retry_config = HTTPAdapter(max_retries=Retry(total=HTTP_RETRY_COUNT,
status_forcelist=[503, 504],
allowed_methods=frozenset(['GET']),
raise_on_status=False))
_cache_miss = object()
class ExtendedCache:
def __init__(self, cache):
self.cache = self._init_cache(cache)
def _init_cache(self, cache):
if cache is None:
return None
elif callable(cache):
return cache()
elif isinstance(cache, str):
module_path, class_name = cache.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
else:
return cache
def get(self, key, default=None):
if self.cache is None:
return default
return self.cache.get(key, default)
def set(self, key, value, timeout=0, refresh_timeout=None):
if self.cache is None:
return
self.cache.set(key, value, timeout)
if refresh_timeout:
self.cache.set(f'{key}:timestamp', datetime.now(), refresh_timeout)
def should_refresh(self, key):
if self.cache is None:
return True
return self.cache.get(f'{key}:timestamp') is None
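# Hedged sketch (not part of the original module): how the long-TTL / short-refresh
# pattern above behaves with a minimal, hypothetical in-memory backend (timeouts are
# ignored here for brevity; a real backend would expire entries after `timeout`).
class _DictCache:
    def __init__(self):
        self._data = {}

    def get(self, key, default=None):
        return self._data.get(key, default)

    def set(self, key, value, timeout=None):
        self._data[key] = value

# Usage (hypothetical key):
#   cache = ExtendedCache(_DictCache())
#   cache.set('groups:jdoe', {'cern users'}, CACHE_LONG_TTL, CACHE_TTL)
#   cache.get('groups:jdoe')            -> {'cern users'}
#   cache.should_refresh('groups:jdoe') -> False until the timestamp entry expires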
def memoize_request(f):
@wraps(f)
def memoizer(*args, **kwargs):
if not has_request_context() or current_app.config['TESTING'] or current_app.config.get('REPL'):
# No memoization outside request context
return f(*args, **kwargs)
try:
cache = g._cern_multipass_memoize
except AttributeError:
g._cern_multipass_memoize = cache = {}
key = (f.__module__, f.__name__, make_hashable(getcallargs(f, *args, **kwargs)))
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return memoizer
def make_hashable(obj):
if isinstance(obj, (list, set)):
return tuple(obj)
elif isinstance(obj, dict):
return frozenset((k, make_hashable(v)) for k, v in obj.items())
return obj
def normalize_cern_person_id(value):
"""Normalize the CERN person ID.
We always want a string or None if it's missing.
"""
if value is None:
return None
elif isinstance(value, int):
return str(value)
elif not value:
return None
else:
return value
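# Hedged examples (not part of the original module) of the normalization above:
#   normalize_cern_person_id(12345)   -> '12345'
#   normalize_cern_person_id('12345') -> '12345'
#   normalize_cern_person_id('')      -> None
#   normalize_cern_person_id(None)    -> None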
class CERNAuthProvider(AuthlibAuthProvider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.include_token = True
@property
def authlib_settings(self):
settings = dict(self.settings['authlib_args'])
settings.setdefault('server_metadata_url', CERN_OIDC_WELLKNOWN_URL)
# XXX should we request any other scopes?
settings.setdefault('client_kwargs', {'scope': 'openid'})
return settings
class CERNGroup(Group):
supports_member_list = True
def get_members(self):
assert '/' not in self.name
with self.provider._get_api_session() as api_session:
group_data = self.provider._get_group_data(self.name)
if group_data is None:
return
gid = group_data['id']
params = {
'limit': 5000,
'field': [
'upn',
'firstName',
'lastName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
'recursive': 'true'
}
results = self.provider._fetch_all(api_session, f'/api/v1.0/Group/{gid}/memberidentities', params)[0]
for res in results:
del res['id'] # id is always included
self.provider._fix_phone(res)
identifier = res.pop('upn')
extra_data = self.provider._extract_extra_data(res)
yield IdentityInfo(self.provider, identifier, extra_data, **res)
def has_member(self, identifier):
cache = self.provider.cache
logger = self.provider.logger
cache_key = f'flask-multipass-cern:{self.provider.name}:groups:{identifier}'
all_groups = cache.get(cache_key)
if all_groups is None or cache.should_refresh(cache_key):
try:
all_groups = {g.name.lower() for g in self.provider.get_identity_groups(identifier)}
cache.set(cache_key, all_groups, CACHE_LONG_TTL, CACHE_TTL)
except RequestException:
logger.warning('Refreshing user groups failed for %s', identifier)
if all_groups is None:
logger.error('Getting user groups failed for %s, access will be denied', identifier)
return False
if self.provider.settings['cern_users_group'] and self.name.lower() == 'cern users':
return self.provider.settings['cern_users_group'].lower() in all_groups
return self.name.lower() in all_groups
class CERNIdentityProvider(IdentityProvider):
supports_refresh = True
supports_get = False
supports_search = True
supports_search_ex = True
supports_groups = True
supports_get_identity_groups = True
group_class = CERNGroup
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.authlib_client = _authlib_oauth.register(self.name + '-idp', **self.authlib_settings)
self.settings.setdefault('cache', None)
self.settings.setdefault('extra_search_filters', [])
self.settings.setdefault('authz_api', 'https://authorization-service-api.web.cern.ch')
self.settings.setdefault('phone_prefix', '+412276')
self.settings.setdefault('cern_users_group', None)
self.settings.setdefault('logger_name', 'multipass.cern')
self.logger = logging.getLogger(self.settings['logger_name'])
self.cache = ExtendedCache(self.settings['cache'])
if not self.settings.get('mapping'):
# usually mapping is empty, in that case we set some defaults
self.settings['mapping'] = {
'first_name': 'firstName',
'last_name': 'lastName',
'affiliation': 'instituteName',
'phone': 'telephone1',
'email': 'primaryAccountEmail',
}
@property
def authlib_settings(self):
settings = dict(self.settings['authlib_args'])
settings.setdefault('server_metadata_url', CERN_OIDC_WELLKNOWN_URL)
return settings
@property
def authz_api_base(self):
return self.settings['authz_api'].rstrip('/')
def refresh_identity(self, identifier, multipass_data):
data = self._get_identity_data(identifier)
self._fix_phone(data)
identifier = data.pop('upn')
extra_data = self._extract_extra_data(data)
return IdentityInfo(self, identifier, extra_data, **data)
def _fix_phone(self, data):
phone = data.get('telephone1')
if not phone or phone.startswith('+'):
return
data['telephone1'] = self.settings['phone_prefix'] + phone
def _extract_extra_data(self, data, default=None):
return {'cern_person_id': normalize_cern_person_id(data.pop('cernPersonId', default))}
def get_identity_from_auth(self, auth_info):
upn = auth_info.data.get('sub')
groups = auth_info.data.get('groups')
cache_key_prefix = f'flask-multipass-cern:{self.name}'
if groups is not None:
groups = {x.lower() for x in groups}
cache_key = f'{cache_key_prefix}:groups:{upn}'
self.cache.set(cache_key, groups, CACHE_LONG_TTL, CACHE_TTL)
try:
data = self._fetch_identity_data(auth_info)
# check for data mismatches between our id token and authz
self._compare_data(auth_info.data, data)
phone = data.get('telephone1')
affiliation = data.get('instituteName')
self.cache.set(f'{cache_key_prefix}:phone:{upn}', phone, CACHE_LONG_TTL)
self.cache.set(f'{cache_key_prefix}:affiliation:{upn}', affiliation, CACHE_LONG_TTL)
except RequestException:
self.logger.warning('Getting identity data for %s failed', upn)
phone = self.cache.get(f'{cache_key_prefix}:phone:{upn}', _cache_miss)
affiliation = self.cache.get(f'{cache_key_prefix}:affiliation:{upn}', _cache_miss)
if phone is _cache_miss or affiliation is _cache_miss:
self.logger.error('Getting identity data for %s failed without cache fallback', upn)
raise IdentityRetrievalFailed('Retrieving identity information from CERN SSO failed', provider=self)
data = {
'firstName': auth_info.data['given_name'],
'lastName': auth_info.data['family_name'],
'displayName': auth_info.data['name'],
'telephone1': phone,
'instituteName': affiliation,
'primaryAccountEmail': auth_info.data['email'],
}
self._fix_phone(data)
data.pop('upn', None)
extra_data = self._extract_extra_data(data, normalize_cern_person_id(auth_info.data.get('cern_person_id')))
return IdentityInfo(self, upn, extra_data, **data)
def search_identities(self, criteria, exact=False):
return iter(self.search_identities_ex(criteria, exact=exact)[0])
@memoize_request
def search_identities_ex(self, criteria, exact=False, limit=None):
emails_key = '-'.join(sorted(x.lower() for x in criteria['primaryAccountEmail']))
cache_key = f'flask-multipass-cern:{self.name}:email-identities:{emails_key}'
use_cache = exact and limit is None and len(criteria) == 1 and 'primaryAccountEmail' in criteria
if use_cache:
cached_data = self.cache.get(cache_key)
if cached_data:
cached_results = []
for res in cached_data[0]:
identifier = res.pop('upn')
extra_data = self._extract_extra_data(res)
cached_results.append(IdentityInfo(self, identifier, extra_data, **res))
if not self.cache.should_refresh(cache_key):
return cached_results, cached_data[1]
if any(len(x) != 1 for x in criteria.values()):
# Unfortunately the API does not support OR filters (yet?).
# Fortunately we never search for more than one value anyway, except for emails when
# looking up identities based on the user's email address.
if len(criteria) != 1:
raise MultipassException('This provider does not support multiple values for a search criterion',
provider=self)
field, values = dict(criteria).popitem()
seen = set()
total = 0
all_identities = []
for value in values:
identities = self.search_identities_ex({field: [value]}, exact=exact, limit=limit)[0]
for identity in identities:
if identity.identifier not in seen:
seen.add(identity.identifier)
all_identities.append(identity)
total += 1
return all_identities, total
criteria = {k: next(iter(v)) for k, v in criteria.items()}
op = 'eq' if exact else 'contains'
api_criteria = [f'{k}:{op}:{v}' for k, v in criteria.items()]
api_criteria.append('type:eq:Person')
api_criteria += self.settings['extra_search_filters']
params = {
'limit': limit or 5000,
'filter': api_criteria,
'field': [
'upn',
'firstName',
'lastName',
'displayName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
}
with self._get_api_session() as api_session:
results = []
total = 0
try:
results, total = self._fetch_all(api_session, '/api/v1.0/Identity', params, limit=limit)
except RequestException:
self.logger.warning('Refreshing identities failed for criteria %s', criteria)
if use_cache and cached_data:
return cached_results, cached_data[1]
else:
self.logger.error('Getting identities failed for criteria %s', criteria)
raise
identities = []
cache_data = []
for res in results:
if not res['upn']:
total -= 1
continue
del res['id']
self._fix_phone(res)
res_copy = dict(res)
identifier = res_copy.pop('upn')
extra_data = self._extract_extra_data(res_copy)
identities.append(IdentityInfo(self, identifier, extra_data, **res_copy))
if use_cache:
cache_data.append(res)
if use_cache:
self.cache.set(cache_key, (cache_data, total), CACHE_LONG_TTL, CACHE_TTL * 2)
return identities, total
def get_identity_groups(self, identifier):
with self._get_api_session() as api_session:
resp = api_session.get(f'{self.authz_api_base}/api/v1.0/IdentityMembership/{identifier}/precomputed')
if resp.status_code == 404 or resp.status_code == 500:
return set()
resp.raise_for_status()
results = resp.json()['data']
return {self.group_class(self, res['groupIdentifier']) for res in results}
def get_group(self, name):
return self.group_class(self, name)
def search_groups(self, name, exact=False):
op = 'eq' if exact else 'contains'
params = {
'limit': 5000,
'filter': [f'groupIdentifier:{op}:{name}'],
'field': ['groupIdentifier'],
}
with self._get_api_session() as api_session:
results = self._fetch_all(api_session, '/api/v1.0/Group', params)[0]
rv = {self.group_class(self, res['groupIdentifier']) for res in results}
if (
self.settings['cern_users_group'] and
(name.lower() == 'cern users' or (not exact and name.lower() in 'cern users'))
):
rv.add(self.group_class(self, 'CERN Users'))
return rv
@memoize_request
def _get_api_session(self):
cache_key = f'flask-multipass-cern:{self.name}:api-token'
token = self.cache.get(cache_key)
if token:
oauth_session = OAuth2Session(token=token)
oauth_session.mount(self.authz_api_base, retry_config)
return oauth_session
meta = self.authlib_client.load_server_metadata()
token_endpoint = meta['token_endpoint'].replace('protocol/openid-connect', 'api-access')
oauth_session = OAuth2Session(
self.authlib_client.client_id,
self.authlib_client.client_secret,
token_endpoint=token_endpoint,
grant_type='client_credentials',
)
oauth_session.mount(self.authz_api_base, retry_config)
oauth_session.fetch_access_token(
audience='authorization-service-api',
headers={'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'},
)
self.cache.set(cache_key, oauth_session.token, oauth_session.token['expires_in'] - 30)
return oauth_session
def _fetch_identity_data(self, auth_info):
# Exchange the user token to one for the authorization API
user_api_token = self.authlib_client.fetch_access_token(
grant_type='urn:ietf:params:oauth:grant-type:token-exchange',
subject_token_type='urn:ietf:params:oauth:token-type:access_token',
audience='authorization-service-api',
subject_token=auth_info.data['token']['access_token'],
)
params = {
'field': [
'upn',
'firstName',
'lastName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
],
}
resp = self.authlib_client.get(f'{self.authz_api_base}/api/v1.0/Identity/current', token=user_api_token,
params=params)
resp.raise_for_status()
data = resp.json()['data']
del data['id'] # id is always included
return data
def _fetch_all(self, api_session, endpoint, params, limit=None):
results = []
resp = api_session.get(self.authz_api_base + endpoint, params=params)
resp.raise_for_status()
data = resp.json()
total = data['pagination']['total']
while True:
results += data['data']
if not data['pagination']['next'] or (limit is not None and len(results) >= limit):
break
resp = api_session.get(self.authz_api_base + data['pagination']['next'])
resp.raise_for_status()
data = resp.json()
if limit is not None:
# in case we got too many results due to a large last page
results = results[:limit]
return results, total
@memoize_request
def _get_group_data(self, name):
params = {
'filter': [f'groupIdentifier:eq:{name}'],
'field': ['id', 'groupIdentifier'],
}
with self._get_api_session() as api_session:
resp = api_session.get(f'{self.authz_api_base}/api/v1.0/Group', params=params)
resp.raise_for_status()
data = resp.json()
if len(data['data']) != 1:
return None
return data['data'][0]
def _get_identity_data(self, identifier):
params = {
'field': [
'upn',
'firstName',
'lastName',
'displayName',
'instituteName',
'telephone1',
'primaryAccountEmail',
'cernPersonId',
]
}
with self._get_api_session() as api_session:
resp = api_session.get(f'{self.authz_api_base}/api/v1.0/Identity/{identifier}', params=params)
resp.raise_for_status()
data = resp.json()
return data['data']
def _compare_data(self, token_data, api_data):
fields_to_compare = [
('sub', 'upn'),
('given_name', 'firstName'),
('family_name', 'lastName'),
('email', 'primaryAccountEmail'),
('cern_person_id', 'cernPersonId'),
]
for token_field, api_field in fields_to_compare:
token_value = str(token_data.get(token_field, ''))
api_value = str(api_data.get(api_field, ''))
if token_value != api_value:
self.logger.warning('Field %s mismatch for %s: %s in id_token, %s in authz api',
token_field, token_data['sub'], token_value, api_value)
| 39.179048
| 116
| 0.597452
|
2f9763c327f4f396299cd3982123775f0c24a2f4
| 20,602
|
py
|
Python
|
cocoLRPapi-master/PythonAPI/pycocotools/cocoevalLRP.py
|
cancam/LRP
|
d7be1fd1ded2f828490bf1730c2db83d206a8d3a
|
[
"MIT"
] | 64
|
2018-07-05T13:39:55.000Z
|
2021-09-22T08:47:01.000Z
|
cocoLRPapi-master/PythonAPI/pycocotools/cocoevalLRP.py
|
cancam/LRP
|
d7be1fd1ded2f828490bf1730c2db83d206a8d3a
|
[
"MIT"
] | 7
|
2019-07-02T11:05:45.000Z
|
2020-10-17T16:58:38.000Z
|
cocoLRPapi-master/PythonAPI/pycocotools/cocoevalLRP.py
|
cancam/LRP
|
d7be1fd1ded2f828490bf1730c2db83d206a8d3a
|
[
"MIT"
] | 14
|
2018-07-11T06:30:24.000Z
|
2021-03-16T01:44:24.000Z
|
__author__ = 'tsungyi'
import numpy as np
import datetime
import time
from collections import defaultdict
from . import mask as maskUtils
import copy
import pdb
import sys
class COCOevalLRP:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.confScores = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# confScores - [0:.01:1] R=101 confidence score thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
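# Hedged Python usage sketch (not part of the original header), mirroring the recipe
# above; cocoGt/cocoDt are hypothetical COCO API objects loaded by the caller:
#   E = COCOevalLRP(cocoGt, cocoDt, tau=0.5)
#   E.evaluate()             # per-image, per-category matching
#   E.accumulate()           # LRP / oLRP / moLRP computation
#   E.summarize(detailed=1)  # print the summary metrics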
def __init__(self, cocoGt=None, cocoDt=None, tau=0.5):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.params = {} # evaluation parameters
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(tau) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
if not cocoGt is None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
computeIoU = self.computeIoU
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets:
dt=dt[0:p.maxDets]
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d,g,iscrowd)
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return None
for g in gt:
if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = 1
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
dtIoU = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
if not len(ious)==0:
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([p.iouThrs,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[0,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
# if match successful and best so far, store appropriately
iou=ious[dind,gind]
m=gind
# if match made store id of match for both dt and gt
if m ==-1:
continue
dtIg[0,dind] = gtIg[m]
dtIoU[0,dind]=iou
dtm[0,dind] = gt[m]['id']
gtm[0,m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
'dtIoUs' : dtIoU
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = 1
S = len(p.confScores)
K = len(p.catIds) if p.useCats else 1
omega=np.zeros((S,K))
nhat=np.zeros((S,K))
mhat=np.zeros((S,K))
LRPError=-np.ones((S,K))
LocError=-np.ones((S,K))
FPError=-np.ones((S,K))
FNError=-np.ones((S,K))
OptLRPError=-np.ones((1,K))
OptLocError=-np.ones((1,K))
OptFPError=-np.ones((1,K))
OptFNError=-np.ones((1,K))
Threshold=-np.ones((1,K))
index=np.zeros((1,K))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0*I0
E = [self.evalImgs[Nk + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:p.maxDets] for e in E])
# different sorting methods generate slightly different results.
# mergesort is used to be consistent with the Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:p.maxDets] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:p.maxDets] for e in E], axis=1)[:,inds]
IoUoverlap = np.squeeze(np.concatenate([e['dtIoUs'][:,0:p.maxDets] for e in E], axis=1)[:,inds], axis=0)
for i in range(len(IoUoverlap)):
if IoUoverlap[i]!=0:
IoUoverlap[i]=1-IoUoverlap[i]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0 )
if npig == 0:
continue
tps = np.squeeze(np.logical_and( dtm, np.logical_not(dtIg) )*1)
fps = np.squeeze(np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )*1)
IoUoverlap=np.multiply(IoUoverlap,tps)
np.set_printoptions(threshold=sys.maxsize)
for s, s0 in enumerate(_pe.confScores):
thrind=np.sum(dtScoresSorted>=s0)
omega[s,k]=np.sum(tps[0:thrind])
nhat[s,k]=np.sum(fps[0:thrind])
mhat[s,k]=npig-omega[s,k]
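                # Descriptive note (added): LRP error at score threshold s0, with tau = _pe.iouThrs,
                # omega = #TP, nhat = #FP, mhat = #FN, npig = #ground truth:
                #   LRP = [sum(1 - IoU over TPs) + (1-tau)*FP + (1-tau)*FN] / ((TP + FP + FN) * (1 - tau))
                # The normaliser l = max(TP + FP, #GT) used below cancels out of the final value;
                # the three components afterwards are the localisation, FP and FN terms.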
                l=np.maximum((omega[s,k]+nhat[s,k]),npig)
                FPError[s,k]=(1-_pe.iouThrs)*(nhat[s,k]/l)
                FNError[s,k]=(1-_pe.iouThrs)*(mhat[s,k]/l)
                Z=((omega[s,k]+mhat[s,k]+nhat[s,k])/l)
                LRPError[s,k]=(np.sum(IoUoverlap[:thrind])/l)+FPError[s,k]+FNError[s,k]
                LRPError[s,k]=LRPError[s,k]/Z
                LRPError[s,k]=LRPError[s,k]/(1-_pe.iouThrs)
                LocError[s,k]=np.sum(IoUoverlap[:thrind])/omega[s,k]
                FPError[s,k]=nhat[s,k]/(omega[s,k]+nhat[s,k])
FNError[s,k]=mhat[s,k]/npig
OptLRPError[0,k]=min(LRPError[:,k])
ind=np.argmin(LRPError[:,k])
OptLocError[0,k]=LocError[ind,k]
OptFPError[0,k]=FPError[ind,k]
OptFNError[0,k]=FNError[ind,k]
Threshold[0,k]=ind*0.01
no_gt = (OptLRPError == -1)
OptLRPError[no_gt] = np.nan
OptLocError[no_gt] = np.nan
OptFPError[no_gt] = np.nan
OptFNError[no_gt] = np.nan
Threshold[no_gt] = np.nan
moLRPLoc=np.nanmean(OptLocError)
moLRPFP=np.nanmean(OptFPError)
moLRPFN=np.nanmean(OptFNError)
moLRP=np.nanmean(OptLRPError)
self.eval = {
'params': p,
'counts': [S, K],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'LRPError': LRPError,
'BoxLocComp': LocError,
'FPComp': FPError,
'FNComp': FNError,
'oLRPError': OptLRPError,
'oBoxLocComp': OptLocError,
'oFPComp': OptFPError,
'oFNComp': OptFNError,
'moLRP': moLRP,
'moLRPLoc': moLRPLoc,
'moLRPFP': moLRPFP,
'moLRPFN': moLRPFN,
'OptThresholds':Threshold
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self, detailed=0):
'''
Compute and display summary metrics for evaluation results.
        Note this function can *only* be applied on the default parameter setting
'''
if detailed==1:
print('LRP, oLRP, moLRP and Class Specific Optimal Thresholds are as follows: \n ')
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
print('1.LRP and Components:\n')
print('LRP= \n'+str(self.eval['LRPError'])+'\n')
print('LRPLocalization=\n'+str(self.eval['BoxLocComp'])+'\n')
print('LRPFalsePositive=\n'+str(self.eval['FPComp'])+'\n')
print('LRPFalseNegative=\n'+str(self.eval['FNComp'])+'\n')
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
print('2.Optimal LRP and Components:')
print('------------------------------------------------------ \n ')
print('oLRP='+str(self.eval['oLRPError'])+'\n')
print('oLRPLocalization=\n'+str(self.eval['oBoxLocComp'])+'\n')
print('oLRPFalsePositive=\n'+str(self.eval['oFPComp'])+'\n')
print('oLRPFalseNegative=\n'+str(self.eval['oFNComp'])+'\n')
print('------------------------------------------------------ \n')
print('------------------------------------------------------ \n ')
print('3.Mean Optimal LRP and Components:')
print('------------------------------------------------------ \n ')
print('moLRP={:0.4f}, moLRP_LocComp={:0.4f}, moLRP_FPComp={:0.4f}, moLRP_FNComp={:0.4f} \n'.format(self.eval['moLRP'], self.eval['moLRPLoc'],self.eval['moLRPFP'],self.eval['moLRPFN']))
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
print('4.Optimal Class Specific Thresholds:\n')
print(self.eval['OptThresholds'])
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
else:
print('oLRP, moLRP and Class Specific Optimal Thresholds are as follows: \n ')
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
print('1.Optimal LRP and Components:')
print('------------------------------------------------------ \n ')
print('oLRP='+str(self.eval['oLRPError'])+'\n')
print('oLRPLocalization=\n'+str(self.eval['oBoxLocComp'])+'\n')
print('oLRPFalsePositive=\n'+str(self.eval['oFPComp'])+'\n')
print('oLRPFalseNegative=\n'+str(self.eval['oFNComp'])+'\n')
print('------------------------------------------------------ \n')
print('------------------------------------------------------ \n ')
print('2.Mean Optimal LRP and Components:')
print('------------------------------------------------------ \n ')
print('moLRP={:0.4f}, moLRP_LocComp={:0.4f}, moLRP_FPComp={:0.4f}, moLRP_FNComp={:0.4f} \n'.format(self.eval['moLRP'], self.eval['moLRPLoc'],self.eval['moLRPFP'],self.eval['moLRPFN']))
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
print('3.Optimal Class Specific Thresholds:\n')
print(self.eval['OptThresholds'])
print('------------------------------------------------------ \n ')
print('------------------------------------------------------ \n ')
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
        # np.arange causes trouble: the points it generates can be slightly larger
        # than the true values, so build the confidence-score grid with np.linspace.
        self.confScores = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = 100
self.areaRng = [[0 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all']
self.useCats = 1
def __init__(self, tau=0.5):
self.setDetParams()
self.iouThrs = tau
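# Hedged usage sketch (not part of the original file). The evaluator class that
# owns evaluateImg/accumulate/summarize is defined earlier in this file; the
# name "LRPEval" below is hypothetical. The flow mirrors pycocotools:
#
#   params = Params(tau=0.5)          # IoU threshold used by the LRP metric
#   ev = LRPEval(cocoGt, cocoDt)      # hypothetical constructor (GT + detections)
#   ev.params = params
#   ev.evaluate()
#   ev.accumulate()
#   ev.summarize(detailed=0)          # prints oLRP, moLRP and optimal thresholds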
| 45.179825 | 196 | 0.503301 |
6dd1445246a156412af150e9775acb8e388fd242 | 718 | py | Python | dt_cw.py | jaadeoye/oral-cancer-risk-predictor | bdc8f153242c61058fab2edd8c8a160511e6046f | ["MIT"] | null | null | null | dt_cw.py | jaadeoye/oral-cancer-risk-predictor | bdc8f153242c61058fab2edd8c8a160511e6046f | ["MIT"] | null | null | null | dt_cw.py | jaadeoye/oral-cancer-risk-predictor | bdc8f153242c61058fab2edd8c8a160511e6046f | ["MIT"] | null | null | null |
import pandas as pd
from sklearn import metrics
import numpy as np
from imblearn.combine import SMOTEENN
from imblearn.combine import SMOTETomek
from sklearn.tree import DecisionTreeClassifier
import pickle
#import data
df_train = pd.read_csv('/Users/jaadeoye/Desktop/screen_ml/screen_full3.csv')
features = ['V1', 'V3', 'V4', 'V6', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14',
'V15', 'V16', 'V17', 'V18', 'V20', 'V21', 'V22', 'V23', 'V25', 'V27',
'V28', 'V29', 'V30', 'V31', 'V33', 'V34', 'V35']
x = df_train[features]
y = df_train.Status
#train model
logreg = DecisionTreeClassifier(random_state=0, max_depth=3, class_weight="balanced")
logreg.fit(x,y)
#pickle
pickle.dump(logreg, open('dt2', 'wb'))
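# Hedged usage sketch (not part of the original script): reload the pickled
# tree and score new cases that provide the same 28 feature columns as above.
# The CSV path below is a placeholder.
#
#   model = pickle.load(open('dt2', 'rb'))
#   df_new = pd.read_csv('new_cases.csv')
#   print(model.predict(df_new[features]))
#   print(model.predict_proba(df_new[features]))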
| 35.9 | 85 | 0.683844 |
f4e6430a3889cbe3668e301f12aeda2306f09458 | 17,824 | py | Python | diagrams/__init__.py | vanife/diagrams | b58537bba4e954d49726e85b386acaa2bf6341c5 | ["MIT"] | null | null | null | diagrams/__init__.py | vanife/diagrams | b58537bba4e954d49726e85b386acaa2bf6341c5 | ["MIT"] | null | null | null | diagrams/__init__.py | vanife/diagrams | b58537bba4e954d49726e85b386acaa2bf6341c5 | ["MIT"] | null | null | null |
import contextvars
import os
import uuid
from pathlib import Path
from typing import List, Union, Dict
from graphviz import Digraph
# Global contexts for the current diagram and cluster.
#
# These global contexts let clusters and nodes know which context they belong
# to, so clusters and nodes do not need to be given the current diagram or
# cluster via parameters.
__diagram = contextvars.ContextVar("diagrams")
__cluster = contextvars.ContextVar("cluster")
def getdiagram() -> "Diagram":
try:
return __diagram.get()
except LookupError:
return None
def setdiagram(diagram: "Diagram"):
__diagram.set(diagram)
def getcluster() -> "Cluster":
try:
return __cluster.get()
except LookupError:
return None
def setcluster(cluster: "Cluster"):
__cluster.set(cluster)
class Diagram:
__directions = ("TB", "BT", "LR", "RL")
__curvestyles = ("ortho", "curved")
__outformats = ("png", "jpg", "svg", "pdf")
# fmt: off
_default_graph_attrs = {
"pad": "2.0",
"splines": "ortho",
"nodesep": "0.60",
"ranksep": "0.75",
"fontname": "Sans-Serif",
"fontsize": "15",
"fontcolor": "#2D3436",
}
_default_node_attrs = {
"shape": "box",
"style": "rounded",
"fixedsize": "true",
"width": "1.4",
"height": "1.4",
"labelloc": "b",
# imagepos attribute is not backward compatible
# TODO: check graphviz version to see if "imagepos" is available >= 2.40
# https://github.com/xflr6/graphviz/blob/master/graphviz/backend.py#L248
# "imagepos": "tc",
"imagescale": "true",
"fontname": "Sans-Serif",
"fontsize": "13",
"fontcolor": "#2D3436",
}
_default_edge_attrs = {
"color": "#7B8894",
}
# fmt: on
# TODO: Label position option
# TODO: Save directory option (filename + directory?)
def __init__(
self,
name: str = "",
filename: str = "",
direction: str = "LR",
curvestyle: str = "ortho",
outformat: str = "png",
show: bool = True,
graph_attr: dict = {},
node_attr: dict = {},
edge_attr: dict = {},
):
"""Diagram represents a global diagrams context.
:param name: Diagram name. It will be used for output filename if the
filename isn't given.
:param filename: The output filename, without the extension (.png).
If not given, it will be generated from the name.
:param direction: Data flow direction. Default is 'left to right'.
:param curvestyle: Curve bending style. One of "ortho" or "curved".
:param outformat: Output file format. Default is 'png'.
:param show: Open generated image after save if true, just only save otherwise.
:param graph_attr: Provide graph_attr dot config attributes.
:param node_attr: Provide node_attr dot config attributes.
:param edge_attr: Provide edge_attr dot config attributes.
"""
self.name = name
if not name and not filename:
filename = "diagrams_image"
elif not filename:
filename = "_".join(self.name.split()).lower()
self.filename = filename
self.dot = Digraph(self.name, filename=self.filename)
# Set attributes.
for k, v in self._default_graph_attrs.items():
self.dot.graph_attr[k] = v
self.dot.graph_attr["label"] = self.name
for k, v in self._default_node_attrs.items():
self.dot.node_attr[k] = v
for k, v in self._default_edge_attrs.items():
self.dot.edge_attr[k] = v
if not self._validate_direction(direction):
raise ValueError(f'"{direction}" is not a valid direction')
self.dot.graph_attr["rankdir"] = direction
if not self._validate_curvestyle(curvestyle):
raise ValueError(f'"{curvestyle}" is not a valid curvestyle')
self.dot.graph_attr["splines"] = curvestyle
if not self._validate_outformat(outformat):
raise ValueError(f'"{outformat}" is not a valid output format')
self.outformat = outformat
# Merge passed in attributes
self.dot.graph_attr.update(graph_attr)
self.dot.node_attr.update(node_attr)
self.dot.edge_attr.update(edge_attr)
self.show = show
def __str__(self) -> str:
return str(self.dot)
def __enter__(self):
setdiagram(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.render()
# Remove the graphviz file leaving only the image.
os.remove(self.filename)
setdiagram(None)
def _repr_png_(self):
return self.dot.pipe(format="png")
def _validate_direction(self, direction: str) -> bool:
direction = direction.upper()
for v in self.__directions:
if v == direction:
return True
return False
def _validate_curvestyle(self, curvestyle: str) -> bool:
curvestyle = curvestyle.lower()
for v in self.__curvestyles:
if v == curvestyle:
return True
return False
def _validate_outformat(self, outformat: str) -> bool:
outformat = outformat.lower()
for v in self.__outformats:
if v == outformat:
return True
return False
def node(self, nodeid: str, label: str, **attrs) -> None:
"""Create a new node."""
self.dot.node(nodeid, label=label, **attrs)
def connect(self, node: "Node", node2: "Node", edge: "Edge") -> None:
"""Connect the two Nodes."""
self.dot.edge(node.nodeid, node2.nodeid, **edge.attrs)
def subgraph(self, dot: Digraph) -> None:
"""Create a subgraph for clustering"""
self.dot.subgraph(dot)
def render(self) -> None:
self.dot.render(format=self.outformat, view=self.show, quiet=True)
class Cluster:
__directions = ("TB", "BT", "LR", "RL")
__bgcolors = ("#E5F5FD", "#EBF3E7", "#ECE8F6", "#FDF7E3")
# fmt: off
_default_graph_attrs = {
"shape": "box",
"style": "rounded",
"labeljust": "l",
"pencolor": "#AEB6BE",
"fontname": "Sans-Serif",
"fontsize": "12",
}
# fmt: on
# FIXME:
# Cluster direction does not work now. Graphviz couldn't render
# correctly for a subgraph that has a different rank direction.
def __init__(
self, label: str = "cluster", direction: str = "LR", graph_attr: dict = {},
):
"""Cluster represents a cluster context.
:param label: Cluster label.
:param direction: Data flow direction. Default is 'left to right'.
:param graph_attr: Provide graph_attr dot config attributes.
"""
self.label = label
self.name = "cluster_" + self.label
self.dot = Digraph(self.name)
# Set attributes.
for k, v in self._default_graph_attrs.items():
self.dot.graph_attr[k] = v
self.dot.graph_attr["label"] = self.label
if not self._validate_direction(direction):
raise ValueError(f'"{direction}" is not a valid direction')
self.dot.graph_attr["rankdir"] = direction
        # A cluster must belong to a diagram.
self._diagram = getdiagram()
if self._diagram is None:
raise EnvironmentError("Global diagrams context not set up")
self._parent = getcluster()
# Set cluster depth for distinguishing the background color
self.depth = self._parent.depth + 1 if self._parent else 0
coloridx = self.depth % len(self.__bgcolors)
self.dot.graph_attr["bgcolor"] = self.__bgcolors[coloridx]
# Merge passed in attributes
self.dot.graph_attr.update(graph_attr)
def __enter__(self):
setcluster(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._parent:
self._parent.subgraph(self.dot)
else:
self._diagram.subgraph(self.dot)
setcluster(self._parent)
def _validate_direction(self, direction: str):
direction = direction.upper()
for v in self.__directions:
if v == direction:
return True
return False
def node(self, nodeid: str, label: str, **attrs) -> None:
"""Create a new node in the cluster."""
self.dot.node(nodeid, label=label, **attrs)
def subgraph(self, dot: Digraph) -> None:
self.dot.subgraph(dot)
class Node:
"""Node represents a node for a specific backend service."""
_provider = None
_type = None
_icon_dir = None
_icon = None
_height = 1.9
def __init__(self, label: str = "", *, nodeid: str = None, **attrs: Dict):
"""Node represents a system component.
:param label: Node label.
"""
# Generates an ID for identifying a node, unless specified
self._id = nodeid or self._rand_id()
self.label = label
# fmt: off
# If a node has an icon, increase the height slightly to avoid
# that label being spanned between icon image and white space.
# Increase the height by the number of new lines included in the label.
padding = 0.4 * (label.count('\n'))
self._attrs = {
"shape": "none",
"height": str(self._height + padding),
"image": self._load_icon(),
} if self._icon else {}
# fmt: on
self._attrs.update(attrs)
        # A node must belong to a diagram.
self._diagram = getdiagram()
if self._diagram is None:
raise EnvironmentError("Global diagrams context not set up")
self._cluster = getcluster()
# If a node is in the cluster context, add it to cluster.
if self._cluster:
self._cluster.node(self._id, self.label, **self._attrs)
else:
self._diagram.node(self._id, self.label, **self._attrs)
def __repr__(self):
_name = self.__class__.__name__
return f"<{self._provider}.{self._type}.{_name}>"
def __sub__(self, other: Union["Node", List["Node"], "Edge"]):
"""Implement Self - Node, Self - [Nodes] and Self - Edge."""
if isinstance(other, list):
for node in other:
self.connect(node, Edge(self))
return other
elif isinstance(other, Node):
return self.connect(other, Edge(self))
else:
other.node = self
return other
def __rsub__(self, other: Union[List["Node"], List["Edge"]]):
""" Called for [Nodes] and [Edges] - Self because list don't have __sub__ operators. """
for o in other:
if isinstance(o, Edge):
o.connect(self)
else:
o.connect(self, Edge(self))
return self
def __rshift__(self, other: Union["Node", List["Node"], "Edge"]):
"""Implements Self >> Node, Self >> [Nodes] and Self Edge."""
if isinstance(other, list):
for node in other:
self.connect(node, Edge(self, forward=True))
return other
elif isinstance(other, Node):
return self.connect(other, Edge(self, forward=True))
else:
other.forward = True
other.node = self
return other
def __lshift__(self, other: Union["Node", List["Node"], "Edge"]):
"""Implements Self << Node, Self << [Nodes] and Self << Edge."""
if isinstance(other, list):
for node in other:
self.connect(node, Edge(self, reverse=True))
return other
elif isinstance(other, Node):
return self.connect(other, Edge(self, reverse=True))
else:
other.reverse = True
return other.connect(self)
def __rrshift__(self, other: Union[List["Node"], List["Edge"]]):
"""Called for [Nodes] and [Edges] >> Self because list don't have __rshift__ operators."""
for o in other:
if isinstance(o, Edge):
o.forward = True
o.connect(self)
else:
o.connect(self, Edge(self, forward=True))
return self
def __rlshift__(self, other: Union[List["Node"], List["Edge"]]):
"""Called for [Nodes] << Self because list of Nodes don't have __lshift__ operators."""
for o in other:
if isinstance(o, Edge):
o.reverse = True
o.connect(self)
else:
o.connect(self, Edge(self, reverse=True))
return self
@property
def nodeid(self):
return self._id
# TODO: option for adding flow description to the connection edge
def connect(self, node: "Node", edge: "Edge"):
"""Connect to other node.
:param node: Other node instance.
:param edge: Type of the edge.
:return: Connected node.
"""
        if not isinstance(node, Node):
            raise ValueError(f"{node} is not a valid Node")
        if not isinstance(edge, Edge):
            raise ValueError(f"{edge} is not a valid Edge")
# An edge must be added on the global diagrams, not a cluster.
self._diagram.connect(self, node, edge)
return node
@staticmethod
def _rand_id():
return uuid.uuid4().hex
def _load_icon(self):
basedir = Path(os.path.abspath(os.path.dirname(__file__)))
return os.path.join(basedir.parent, self._icon_dir, self._icon)
class Edge:
"""Edge represents an edge between two nodes."""
_default_edge_attrs = {
"fontcolor": "#2D3436",
"fontname": "Sans-Serif",
"fontsize": "13",
}
def __init__(
self,
node: "Node" = None,
forward: bool = False,
reverse: bool = False,
label: str = "",
color: str = "",
style: str = "",
**attrs: Dict,
):
"""Edge represents an edge between two nodes.
:param node: Parent node.
:param forward: Points forward.
:param reverse: Points backward.
:param label: Edge label.
:param color: Edge color.
:param style: Edge style.
:param attrs: Other edge attributes
"""
if node is not None:
assert isinstance(node, Node)
self.node = node
self.forward = forward
self.reverse = reverse
self._attrs = {}
# Set attributes.
for k, v in self._default_edge_attrs.items():
self._attrs[k] = v
if label:
# Graphviz complaining about using label for edges, so replace it with xlabel.
# Update: xlabel option causes the misaligned label position: https://github.com/mingrammer/diagrams/issues/83
self._attrs["label"] = label
if color:
self._attrs["color"] = color
if style:
self._attrs["style"] = style
self._attrs.update(attrs)
def __sub__(self, other: Union["Node", "Edge", List["Node"]]):
"""Implement Self - Node or Edge and Self - [Nodes]"""
return self.connect(other)
def __rsub__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]:
"""Called for [Nodes] or [Edges] - Self because list don't have __sub__ operators."""
return self.append(other)
def __rshift__(self, other: Union["Node", "Edge", List["Node"]]):
"""Implements Self >> Node or Edge and Self >> [Nodes]."""
self.forward = True
return self.connect(other)
def __lshift__(self, other: Union["Node", "Edge", List["Node"]]):
"""Implements Self << Node or Edge and Self << [Nodes]."""
self.reverse = True
return self.connect(other)
def __rrshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]:
"""Called for [Nodes] or [Edges] >> Self because list of Edges don't have __rshift__ operators."""
return self.append(other, forward=True)
def __rlshift__(self, other: Union[List["Node"], List["Edge"]]) -> List["Edge"]:
"""Called for [Nodes] or [Edges] << Self because list of Edges don't have __lshift__ operators."""
return self.append(other, reverse=True)
def append(self, other: Union[List["Node"], List["Edge"]], forward=None, reverse=None) -> List["Edge"]:
result = []
for o in other:
if isinstance(o, Edge):
o.forward = forward if forward else o.forward
                o.reverse = reverse if reverse else o.reverse
self._attrs = o.attrs.copy()
result.append(o)
else:
result.append(Edge(o, forward=forward, reverse=reverse, **self._attrs))
return result
def connect(self, other: Union["Node", "Edge", List["Node"]]):
if isinstance(other, list):
for node in other:
self.node.connect(node, self)
return other
elif isinstance(other, Edge):
self._attrs = other._attrs.copy()
return self
else:
if self.node is not None:
return self.node.connect(other, self)
else:
self.node = other
return self
@property
def attrs(self) -> Dict:
if self.forward and self.reverse:
direction = "both"
elif self.forward:
direction = "forward"
elif self.reverse:
direction = "back"
else:
direction = "none"
return {**self._attrs, "dir": direction}
Group = Cluster
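# A minimal, hedged usage sketch (not part of the original module). It wires two
# plain Nodes with an Edge inside a Diagram context using the operators defined
# above; rendering assumes the Graphviz binaries are installed. The names
# "sketch" and "demo" are illustrative.
if __name__ == "__main__":
    with Diagram("sketch", show=False):
        Node("a") >> Edge(label="demo") >> Node("b")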
| 33.007407 | 122 | 0.579387 |
440bbe92b9b9a5c3956e62bc18b4aabc3e162b48 | 418 | py | Python | CRF/venv/Scripts/pip-script.py | cadia-lvl/NER | d32e7d6b6817bf5c26d237fa61739f9ef4ccd710 | ["Apache-2.0"] | 2 | 2021-03-19T15:13:37.000Z | 2021-03-19T16:04:43.000Z | CRF/venv/Scripts/pip-script.py | cadia-lvl/NER | d32e7d6b6817bf5c26d237fa61739f9ef4ccd710 | ["Apache-2.0"] | 1 | 2021-05-14T13:26:03.000Z | 2021-05-17T09:31:12.000Z | CRF/venv/Scripts/pip-script.py | cadia-lvl/NER | d32e7d6b6817bf5c26d237fa61739f9ef4ccd710 | ["Apache-2.0"] | 1 | 2020-09-11T09:21:40.000Z | 2020-09-11T09:21:40.000Z |
#!C:\Users\auzi\PycharmProjects\CRF\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
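# Note (not part of the generated wrapper): load_entry_point resolves the 'pip'
# console_scripts entry point declared by the pip==19.0.3 distribution and
# returns its main callable; sys.exit() then runs it and exits with its
# return value.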
| 32.153846 | 70 | 0.645933 |
452d6a0d6432b4ac59b032f77e9d44d302bd8029 | 8,450 | py | Python | nengo/spa/spa.py | hunse/nengo | 5fcd7b18aa9496e5c47c38c6408430cd9f68a720 | ["BSD-2-Clause"] | null | null | null | nengo/spa/spa.py | hunse/nengo | 5fcd7b18aa9496e5c47c38c6408430cd9f68a720 | ["BSD-2-Clause"] | null | null | null | nengo/spa/spa.py | hunse/nengo | 5fcd7b18aa9496e5c47c38c6408430cd9f68a720 | ["BSD-2-Clause"] | null | null | null |
import warnings
import numpy as np
import nengo
from nengo.spa.vocab import Vocabulary
from nengo.spa.module import Module
from nengo.spa.utils import enable_spa_params
from nengo.utils.compat import iteritems
class SPA(nengo.Network):
"""Base class for SPA models.
This expands the standard Network system to support structured connections
that use Semantic Pointers and associated vocabularies in their
definitions.
To build a SPA model, you can either just use ``with`` or
create a subclass of this SPA class.
If you use the ``with`` statement, any attribute added to the SPA network
will be accessible for SPA connections.
    If you choose to create a subclass, any spa.Module object
that is assigned to a
member variable will automatically be accessible by the SPA connection
system.
As an example, the following code will build three modules
(two Buffers and a Memory) that can be referred to as a, b, and c,
respectively.
First, the example with a ``with`` statement::
        example = spa.SPA()
with example:
example.a = spa.Buffer(dimensions=8)
example.b = spa.Buffer(dimensions=16)
example.c = spa.Memory(dimensions=8)
Now, the example with a subclass::
class Example(spa.SPA):
def __init__(self):
self.a = spa.Buffer(dimensions=8)
self.b = spa.Buffer(dimensions=16)
self.c = spa.Memory(dimensions=8)
These names can be used by special Modules that are aware of these
names. As an example, the Cortical module allows you to form connections
between these modules in ways that are aware of semantic pointers::
with example:
example.a = spa.Buffer(dimensions=8)
example.b = spa.Buffer(dimensions=16)
example.c = spa.Memory(dimensions=8)
example.cortical = spa.Cortical(spa.Actions(
'b=a*CAT', 'c=b*~CAT'))
    For complex cognitive control, the key modules are the BasalGanglia
and the Thalamus. Together, these allow us to define complex actions
using the Action syntax::
class SequenceExample(spa.SPA):
def __init__(self):
self.state = spa.Memory(dimensions=32)
actions = spa.Actions('dot(state, A) --> state=B',
'dot(state, B) --> state=C',
'dot(state, C) --> state=D',
'dot(state, D) --> state=E',
'dot(state, E) --> state=A',
)
self.bg = spa.BasalGanglia(actions=actions)
self.thal = spa.Thalamus(self.bg)
"""
def __init__(self, label=None, seed=None, add_to_container=None,
vocabs=[]):
super(SPA, self).__init__(label, seed, add_to_container)
enable_spa_params(self)
self._modules = {}
self._default_vocabs = {}
for vo in vocabs:
if vo.dimensions in self._default_vocabs:
warnings.warn("Duplicate vocabularies with dimension %d. "
"Using the last entry in the vocab list with "
"that dimensionality." % (vo.dimensions))
self._default_vocabs[vo.dimensions] = vo
def __setattr__(self, key, value):
"""A setattr that handles Modules being added specially.
This is so that we can use the variable name for the Module as
the name that all of the SPA system will use to access that module.
"""
super(SPA, self).__setattr__(key, value)
if isinstance(value, Module):
if value.label is None:
value.label = key
self._modules[key] = value
for k, (obj, v) in iteritems(value.inputs):
if type(v) == int:
value.inputs[k] = (obj, self.get_default_vocab(v))
self.config[obj].vocab = value.inputs[k][1]
for k, (obj, v) in iteritems(value.outputs):
if type(v) == int:
value.outputs[k] = (obj, self.get_default_vocab(v))
self.config[obj].vocab = value.outputs[k][1]
value.on_add(self)
def __exit__(self, ex_type, ex_value, traceback):
super(SPA, self).__exit__(ex_type, ex_value, traceback)
if ex_type is not None:
# re-raise the exception that triggered this __exit__
return False
module_list = frozenset(self._modules.values())
for net in self.networks:
# Since there are no attributes to distinguish what's been added
# and what hasn't, we have to ask the network
if isinstance(net, Module) and (net not in module_list):
raise ValueError("%s was not added as an attribute of "
"the SPA network and won't be detected"
% (net))
def get_module(self, name):
"""Return the module for the given name."""
if name in self._modules:
return self._modules[name]
elif '_' in name:
module, name = name.rsplit('_', 1)
if module in self._modules:
return self._modules[module]
raise KeyError('Could not find module "%s"' % name)
def get_default_vocab(self, dimensions):
"""Return a Vocabulary with the desired dimensions.
This will create a new default Vocabulary if one doesn't exist.
"""
# If seed is set, create rng based off that seed.
# Otherwise, just use the default NumPy rng.
rng = None if self.seed is None else np.random.RandomState(self.seed)
if dimensions not in self._default_vocabs:
self._default_vocabs[dimensions] = Vocabulary(dimensions, rng=rng)
return self._default_vocabs[dimensions]
def get_module_input(self, name):
"""Return the object to connect into for the given name.
The name will be either the same as a module, or of the form
<module_name>_<input_name>.
"""
if name in self._modules:
return self._modules[name].inputs['default']
elif '_' in name:
module, name = name.rsplit('_', 1)
if module in self._modules:
m = self._modules[module]
if name in m.inputs:
return m.inputs[name]
raise KeyError('Could not find module input "%s"' % name)
def get_module_inputs(self):
for name, module in iteritems(self._modules):
for input in module.inputs:
if input == 'default':
yield name
else:
yield '%s_%s' % (name, input)
def get_input_vocab(self, name):
return self.get_module_input(name)[1]
def get_module_output(self, name):
"""Return the object to connect into for the given name.
The name will be either the same as a module, or of the form
<module_name>_<output_name>.
"""
if name in self._modules:
return self._modules[name].outputs['default']
elif '_' in name:
module, name = name.rsplit('_', 1)
if module in self._modules:
m = self._modules[module]
if name in m.outputs:
return m.outputs[name]
raise KeyError('Could not find module output "%s"' % name)
def get_module_outputs(self):
for name, module in iteritems(self._modules):
for output in module.outputs:
if output == 'default':
yield name
else:
yield '%s_%s' % (name, output)
def get_output_vocab(self, name):
return self.get_module_output(name)[1]
def similarity(self, data, probe, vocab=None):
"""Return the similarity between the probed data and corresponding
vocabulary.
Parameters
----------
data: ProbeDict
Collection of simulation data returned by sim.run() function call.
probe: Probe
Probe with desired data.
"""
if vocab is None:
vocab = self.config[probe.target].vocab
return nengo.spa.similarity(data[probe], vocab)
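# Hedged usage sketch (not part of the original module): build a minimal SPA
# model, probe a buffer, and compare the probed data with its vocabulary via
# SPA.similarity(). Module and probe names are illustrative and a working
# nengo simulator backend is assumed.
#
#   with SPA() as model:
#       model.buf = nengo.spa.Buffer(dimensions=16)
#       output, vocab = model.get_module_output('buf')
#       probe = nengo.Probe(output, synapse=0.03)
#   with nengo.Simulator(model) as sim:
#       sim.run(0.2)
#   print(model.similarity(sim.data, probe, vocab=vocab).shape)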
| 37.723214 | 78 | 0.579408 |
1e95975f9159db8ebb7648cbb2564a7e3d5e8bcf | 2,719 | py | Python | matroids/NulityMatroid.py | PotassiumIodide/matroid-theory-in-python | 51c06ba728c9d9002234fe98b1bc84bffb86a0cb | ["MIT"] | 2 | 2020-11-27T09:51:49.000Z | 2021-11-10T07:16:34.000Z | matroids/NulityMatroid.py | PotassiumIodide/matroid-theory-in-python | 51c06ba728c9d9002234fe98b1bc84bffb86a0cb | ["MIT"] | 1 | 2020-11-16T07:22:29.000Z | 2020-11-16T07:22:29.000Z | matroids/NulityMatroid.py | PotassiumIodide/matroid-theory-in-python | 51c06ba728c9d9002234fe98b1bc84bffb86a0cb | ["MIT"] | null | null | null |
from functools import cached_property
from typing import Callable, TypeVar
from .Matroid import Matroid
from matroids.construct import (
independent_sets,
dependent_sets,
bases,
circuits,
rank_function,
closure_function,
flats,
open_sets,
hyperplanes,
spanning_sets,
)
from .core.checker import satisfies_nulity_function_axiom
from .core.exception import MatroidAxiomError
from .core.types import MatroidAxiom
T = TypeVar("T")
class NulityMatroid(Matroid):
__axiom = MatroidAxiom.NULITY_FUNCTION
    def __init__(self, matroid: tuple[set[T], Callable[[set[T]], int]]):
if not satisfies_nulity_function_axiom(matroid):
raise MatroidAxiomError(f"The given family doesn't satisfy {self.axiom.value}!")
self.__ground_set = matroid[0]
self.__nulity_function = matroid[1]
@property
def axiom(self) -> MatroidAxiom:
return self.__axiom
@property
def ground_set(self) -> set[T]:
return self.__ground_set
@cached_property
def independent_sets(self) -> list[set[T]]:
return independent_sets.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def dependent_sets(self) -> list[set[T]]:
return dependent_sets.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def bases(self) -> list[set[T]]:
return bases.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def circuits(self) -> list[set[T]]:
return circuits.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def rank_function(self) -> Callable[[set[T]], int]:
return rank_function.from_nulity_matroid((self.ground_set, self.nulity_function))
@property
def nulity_function(self) -> Callable[[set[T]], int]:
return self.__nulity_function
@cached_property
def closure_function(self) -> Callable[[set[T]], set[T]]:
return closure_function.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def flats(self) -> list[set[T]]:
return flats.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def open_sets(self) -> list[set[T]]:
return open_sets.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def hyperplanes(self) -> list[set[T]]:
return hyperplanes.from_nulity_matroid((self.ground_set, self.nulity_function))
@cached_property
def spanning_sets(self) -> list[set[T]]:
return spanning_sets.from_nulity_matroid((self.ground_set, self.nulity_function))
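# Hedged usage sketch (not part of the original module): the nulity function is
# n(X) = |X| - r(X); for the uniform matroid U_{2,3} on E = {1, 2, 3} this is
# max(0, |X| - 2).
#
#   E = {1, 2, 3}
#   nulity = lambda X: max(0, len(X) - 2)
#   M = NulityMatroid((E, nulity))
#   print(M.bases)  # expected: the three 2-element subsets of E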
| 31.616279 | 92 | 0.69989 |
fcdf39a6af62f58b529253af6094fea2fe5f2541 | 122,066 | py | Python | env/lib/python3.6/site-packages/torch/_torch_docs.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | ["Apache-2.0"] | null | null | null | env/lib/python3.6/site-packages/torch/_torch_docs.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | ["Apache-2.0"] | null | null | null | env/lib/python3.6/site-packages/torch/_torch_docs.py | bopopescu/smart_contracts7 | 40a487cb3843e86ab5e4cb50b1aafa2095f648cd | ["Apache-2.0"] | 1 | 2020-07-24T17:53:25.000Z | 2020-07-24T17:53:25.000Z |
"""Adds docstrings to functions defined in the torch._C"""
import torch._C
from torch._C import _add_docstr as add_docstr
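# Descriptive note (added): every call below follows the same pattern,
# add_docstr(<torch._C function>, r"""<reStructuredText docstring with an
# optional Args section and an Example:: block>"""), which attaches the
# docstring to the C-implemented function at import time.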
add_docstr(torch._C.abs,
r"""abs(input, out=None) -> Tensor
Computes the element-wise absolute value of the given :attr:`input` tensor.
Example::
>>> torch.abs(torch.FloatTensor([-1, -2, 3]))
FloatTensor([1, 2, 3])
""")
add_docstr(torch._C.acos,
r"""
acos(input, out=None) -> Tensor
Returns a new tensor with the arccosine of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.acos(a)
2.2608
1.2956
1.1075
nan
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.add,
r"""
.. function:: add(input, value, out=None)
Adds the scalar :attr:`value` to each element of the input :attr:`input`
and returns a new resulting tensor.
.. math::
out = input + value
If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`value` must be
a real number, otherwise it should be an integer.
Args:
input (Tensor): the input tensor
value (Number): the number to be added to each element of :attr:`input`
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
0.4050
-1.2227
1.8688
-0.4185
[torch.FloatTensor of size 4]
>>> torch.add(a, 20)
20.4050
18.7773
21.8688
19.5815
[torch.FloatTensor of size 4]
.. function:: add(input, value=1, other, out=None)
Each element of the tensor :attr:`other` is multiplied by the scalar
:attr:`value` and added to each element of the tensor :attr:`input`.
The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
.. math::
out = input + value \times other
If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`value` must be
a real number, otherwise it should be an integer.
Args:
input (Tensor): the first input tensor
value (Number): the scalar multiplier for :attr:`other`
other (Tensor): the second input tensor
out (Tensor, optional): the output tensor
Example::
>>> import torch
>>> a = torch.randn(4)
>>> a
-0.9310
2.0330
0.0852
-0.2941
[torch.FloatTensor of size 4]
>>> b = torch.randn(2, 2)
>>> b
1.0663 0.2544
-0.1513 0.0749
[torch.FloatTensor of size 2x2]
>>> torch.add(a, 10, b)
9.7322
4.5770
-1.4279
0.4552
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.addbmm,
r"""
addbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`mat` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`mat` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ mat + \alpha\ (\sum_{i=0}^{b} batch1_i \mathbin{@} batch2_i)
For inputs of type `FloatTensor` or `DoubleTensor`, args `beta` and `alpha`
must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`mat`
mat (Tensor): matrix to be added
alpha (Number, optional): multiplier for `batch1 @ batch2`
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
-3.1162 11.0071 7.3102 0.1824 -7.6892
1.8265 6.0739 0.4589 -0.5641 -5.4283
-9.3387 -0.1794 -1.2318 -6.8841 -4.7239
[torch.FloatTensor of size 3x5]
""")
add_docstr(torch._C.addcdiv,
r"""
addcdiv(tensor, value=1, tensor1, tensor2, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiply the result by the scalar :attr:`value` and add it to :attr:`tensor`.
.. math::
out_i = tensor_i + value \times \frac{tensor1_i}{tensor2_i}
The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
tensor (Tensor): the tensor to be added
value (Number, optional): multiplier for :math:`tensor1 ./ tensor2`
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
out (Tensor, optional): the output tensor
Example::
>>> t = torch.randn(2, 3)
>>> t1 = torch.randn(1, 6)
>>> t2 = torch.randn(6, 1)
>>> torch.addcdiv(t, 0.1, t1, t2)
0.0122 -0.0188 -0.2354
0.7396 -1.5721 1.2878
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.addcmul,
r"""
addcmul(tensor, value=1, tensor1, tensor2, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiply the result by the scalar :attr:`value`
and add it to :attr:`tensor`.
.. math::
out_i = tensor_i + value \times tensor1_i \times tensor2_i
The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
tensor (Tensor): the tensor to be added
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> t = torch.randn(2, 3)
>>> t1 = torch.randn(1, 6)
>>> t2 = torch.randn(6, 1)
>>> torch.addcmul(t, 0.1, t1, t2)
0.0122 -0.0188 -0.2354
0.7396 -1.5721 1.2878
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.addmm,
r"""
addmm(beta=1, mat, alpha=1, mat1, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`mat` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`mat` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
`alpha` and `beta` are scaling factors on `mat1 @ mat2` and `mat` respectively.
.. math::
out = \beta\ mat + \alpha\ (mat1_i \mathbin{@} mat2_i)
For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`mat`
mat (Tensor): matrix to be added
alpha (Number, optional): multiplier for :math:`mat1 @ mat2`
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
-0.4095 -1.9703 1.3561
5.7674 -4.9760 2.7378
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.addmv,
r"""
addmv(beta=1, tensor, alpha=1, mat, vec, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`tensor` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`tensor` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be 1-D tensor of size `n`.
`alpha` and `beta` are scaling factors on `mat * vec` and `tensor` respectively.
.. math::
out = \beta\ tensor + \alpha\ (mat \mathbin{@} vec)
For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers
Args:
beta (Number, optional): multiplier for :attr:`tensor`
tensor (Tensor): vector to be added
alpha (Number, optional): multiplier for :math:`mat @ vec`
mat (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
-2.0939
-2.2950
[torch.FloatTensor of size 2]
""")
add_docstr(torch._C.addr,
r"""
addr(beta=1, mat, alpha=1, vec1, vec2, out=None) -> Tensor
Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`mat`.
Optional values :attr:`beta` and :attr:`alpha` are scalars that multiply
:attr:`mat` and :math:`(vec1 \otimes vec2)` respectively.
.. math::
out = \beta\ mat + \alpha\ (vec1 \otimes vec2)
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`mat` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers
Args:
beta (Number, optional): multiplier for :attr:`mat`
mat (Tensor): matrix to be added
alpha (Number, optional): multiplier for :math:`vec1 \otimes vec2`
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
out (Tensor, optional): the output tensor
Example::
>>> vec1 = torch.arange(1, 4)
>>> vec2 = torch.arange(1, 3)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
1 2
2 4
3 6
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.asin,
r"""
asin(input, out=None) -> Tensor
Returns a new tensor with the arcsine of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.asin(a)
-0.6900
0.2752
0.4633
nan
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.atan,
r"""
atan(input, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.atan(a)
-0.5669
0.2653
0.4203
0.9196
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.atan2,
r"""
atan2(input1, input2, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input1`
and :attr:`input2`.
The shapes of :attr:`input1` and :attr:`input2` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input1 (Tensor): the first input tensor
input2 (Tensor): the second input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.atan2(a, torch.randn(4))
-2.4167
2.9755
0.9363
1.6613
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.baddbmm,
r"""
baddbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`mat` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`mat` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
out_i = \beta\ mat_i + \alpha\ (batch1_i \mathbin{@} batch2_i)
For inputs of type `FloatTensor` or `DoubleTensor`, args :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`mat`
mat (Tensor): the tensor to be added
alpha (Number, optional): multiplier for `batch1 @ batch2`
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
""")
add_docstr(torch._C.bernoulli,
r"""
bernoulli(input, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq input_i \leq 1`
The `i-th` element of the output tensor will draw a value `1` according
to the `i-th` probability value given in :attr:`input`.
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
out (Tensor, optional): the output tensor
Example::
>>> a = torch.Tensor(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
0.7544 0.8140 0.9842
0.5282 0.0595 0.6445
0.1925 0.9553 0.9732
[torch.FloatTensor of size 3x3]
>>> torch.bernoulli(a)
1 1 1
0 0 1
0 1 1
[torch.FloatTensor of size 3x3]
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
1 1 1
1 1 1
1 1 1
[torch.FloatTensor of size 3x3]
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
0 0 0
0 0 0
0 0 0
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.bmm,
r"""
bmm(batch1, batch2, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored in :attr:`batch1`
and :attr:`batch2`.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing
the same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
out_i = batch1_i \mathbin{@} batch2_i
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> res = torch.bmm(batch1, batch2)
>>> res.size()
torch.Size([10, 3, 5])
""")
add_docstr(torch._C.cat,
r"""
cat(seq, dim=0, out=None) -> Tensor
Concatenates the given sequence of :attr:`seq` tensors in the given dimension.
All tensors must either have the same shape (except in the cat dimension) or be
empty.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`
:func:`cat` can be best understood via examples.
Args:
seq (sequence of tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
out (Tensor, optional): the output tensor
Example::
>>> x = torch.randn(2, 3)
>>> x
0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735
[torch.FloatTensor of size 2x3]
>>> torch.cat((x, x, x), 0)
0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735
0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735
0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735
[torch.FloatTensor of size 6x3]
>>> torch.cat((x, x, x), 1)
0.5983 -0.0341 2.4918 0.5983 -0.0341 2.4918 0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735 1.5981 -0.5265 -0.8735 1.5981 -0.5265 -0.8735
[torch.FloatTensor of size 2x9]
""")
add_docstr(torch._C.ceil,
r"""
ceil(input, out=None) -> Tensor
Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.ceil(a)
2
1
-0
-0
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.reciprocal,
r"""
reciprocal(input, out=None) -> Tensor
Returns a new tensor with the reciprocal of the elements of :attr:`input`,
i.e. :math:`x^{-1} = \frac{1}{x}`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.reciprocal(a)
0.7210
2.5565
-1.1583
-1.8289
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.clamp,
r"""
clamp(input, min, max, out=None) -> Tensor
Clamp all elements in :attr:`input` into the range `[min, max]` and return
a resulting tensor:
.. math::
y_i = \begin{cases}
min & \text{if } x_i < min \\
x_i & \text{if } min \leq x_i \leq max \\
max & \text{if } x_i > max
\end{cases}
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min`
and :attr:`max` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): the input tensor
min (Number): lower-bound of the range to be clamped to
max (Number): upper-bound of the range to be clamped to
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.clamp(a, min=-0.5, max=0.5)
0.5000
0.3912
-0.5000
-0.5000
[torch.FloatTensor of size 4]
.. function:: clamp(input, *, min, out=None) -> Tensor
Clamps all elements in :attr:`input` to be larger or equal :attr:`min`.
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer.
Args:
input (Tensor): the input tensor
value (Number): minimal value of each element in the output
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.clamp(a, min=0.5)
1.3869
0.5000
0.5000
0.5000
[torch.FloatTensor of size 4]
.. function:: clamp(input, *, max, out=None) -> Tensor
Clamps all elements in :attr:`input` to be smaller or equal :attr:`max`.
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer.
Args:
input (Tensor): the input tensor
value (Number): maximal value of each element in the output
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.clamp(a, max=0.5)
0.5000
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.cos,
r"""
cos(input, out=None) -> Tensor
Returns a new tensor with the cosine of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.cos(a)
0.8041
0.9633
0.9018
0.2557
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.cosh,
r"""
cosh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.cosh(a)
1.2095
1.0372
1.1015
1.9917
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.cross,
r"""
cross(input, other, dim=-1, out=None) -> Tensor
Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.
:attr:`input` and :attr:`other` must have the same size, and the size of their
:attr:`dim` dimension should be 3.
If :attr:`dim` is not given, it defaults to the first dimension found with the
size 3.
Args:
input (Tensor): the input tensor
other (Tensor): the second input tensor
dim (int, optional): the dimension to take the cross-product in.
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 3)
>>> a
-0.6652 -1.0116 -0.6857
0.2286 0.4446 -0.5272
0.0476 0.2321 1.9991
0.6199 1.1924 -0.9397
[torch.FloatTensor of size 4x3]
>>> b = torch.randn(4, 3)
>>> b
-0.1042 -1.1156 0.1947
0.9947 0.1149 0.4701
-1.0108 0.8319 -0.0750
0.9045 -1.3754 1.0976
[torch.FloatTensor of size 4x3]
>>> torch.cross(a, b, dim=1)
-0.9619 0.2009 0.6367
0.2696 -0.6318 -0.4160
-1.6805 -2.0171 0.2741
0.0163 -1.5304 -1.9311
[torch.FloatTensor of size 4x3]
>>> torch.cross(a, b)
-0.9619 0.2009 0.6367
0.2696 -0.6318 -0.4160
-1.6805 -2.0171 0.2741
0.0163 -1.5304 -1.9311
[torch.FloatTensor of size 4x3]
""")
add_docstr(torch._C.cumprod,
r"""
cumprod(input, dim, out=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
input (Tensor): the input tensor
dim (int): the dimension to do the operation over
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(10)
>>> a
1.1148
1.8423
1.4143
-0.4403
1.2859
-1.2514
-0.4748
1.1735
-1.6332
-0.4272
[torch.FloatTensor of size 10]
>>> torch.cumprod(a, dim=0)
1.1148
2.0537
2.9045
-1.2788
-1.6444
2.0578
-0.9770
-1.1466
1.8726
-0.8000
[torch.FloatTensor of size 10]
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
1.1148
2.0537
2.9045
-1.2788
-1.6444
-0.0000
0.0000
0.0000
-0.0000
0.0000
[torch.FloatTensor of size 10]
""")
add_docstr(torch._C.cumsum,
r"""
cumsum(input, dim, out=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
input (Tensor): the input tensor
dim (int): the dimension to do the operation over
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(10)
>>> a
-0.6039
-0.2214
-0.3705
-0.0169
1.3415
-0.1230
0.9719
0.6081
-0.1286
1.0947
[torch.FloatTensor of size 10]
>>> torch.cumsum(a, dim=0)
-0.6039
-0.8253
-1.1958
-1.2127
0.1288
0.0058
0.9777
1.5858
1.4572
2.5519
[torch.FloatTensor of size 10]
""")
add_docstr(torch._C.diag,
r"""
diag(input, diagonal=0, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
the diagonal elements of :attr:`input`.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor
diagonal (int, optional): the diagonal to consider
out (Tensor, optional): the output tensor
Example:
Get the square matrix where the input vector is the diagonal::
>>> a = torch.randn(3)
>>> a
1.0480
-2.3405
-1.1138
[torch.FloatTensor of size 3]
>>> torch.diag(a)
1.0480 0.0000 0.0000
0.0000 -2.3405 0.0000
0.0000 0.0000 -1.1138
[torch.FloatTensor of size 3x3]
>>> torch.diag(a, 1)
0.0000 1.0480 0.0000 0.0000
0.0000 0.0000 -2.3405 0.0000
0.0000 0.0000 0.0000 -1.1138
0.0000 0.0000 0.0000 0.0000
[torch.FloatTensor of size 4x4]
Get the k-th diagonal of a given matrix::
>>> a = torch.randn(3, 3)
>>> a
-1.5328 -1.3210 -1.5204
0.8596 0.0471 -0.2239
-0.6617 0.0146 -1.0817
[torch.FloatTensor of size 3x3]
>>> torch.diag(a, 0)
-1.5328
0.0471
-1.0817
[torch.FloatTensor of size 3]
>>> torch.diag(a, 1)
-1.3210
-0.2239
[torch.FloatTensor of size 2]
""")
add_docstr(torch._C.dist,
r"""
dist(input, other, p=2) -> float
Returns the p-norm of (:attr:`input` - :attr:`other`)
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor
other (Tensor): the right-hand-side input tensor
p (float, optional): the norm to be computed
Example::
>>> x = torch.randn(4)
>>> x
0.2505
-0.4571
-0.3733
0.7807
[torch.FloatTensor of size 4]
>>> y = torch.randn(4)
>>> y
0.7782
-0.5185
1.4106
-2.4063
[torch.FloatTensor of size 4]
>>> torch.dist(x, y, 3.5)
3.302832063224223
>>> torch.dist(x, y, 3)
3.3677282206393286
>>> torch.dist(x, y, 0)
inf
>>> torch.dist(x, y, 1)
5.560028076171875
""")
add_docstr(torch._C.div,
r"""
.. function:: div(input, value, out=None)
Divides each element of the input :attr:`input` by the scalar :attr:`value`
and returns a new resulting tensor.
.. math::
out_i = \frac{input_i}{value}
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer
Args:
input (Tensor): the input tensor
value (Number): the number to divide each element of :attr:`input` by
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(5)
>>> a
-0.6147
-1.1237
-0.1604
-0.6853
0.1063
[torch.FloatTensor of size 5]
>>> torch.div(a, 0.5)
-1.2294
-2.2474
-0.3208
-1.3706
0.2126
[torch.FloatTensor of size 5]
.. function:: div(input, other, out=None)
Each element of the tensor :attr:`input` is divided by each element
of the tensor :attr:`other`. The resulting tensor is returned. The shapes of
:attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
.. math::
out_i = \frac{input_i}{other_i}
Args:
input (Tensor): the numerator tensor
other (Tensor): the denominator tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4,4)
>>> a
-0.1810 0.4017 0.2863 -0.1013
0.6183 2.0696 0.9012 -1.5933
0.5679 0.4743 -0.0117 -0.1266
-0.1213 0.9629 0.2682 1.5968
[torch.FloatTensor of size 4x4]
>>> b = torch.randn(8, 2)
>>> b
0.8774 0.7650
0.8866 1.4805
-0.6490 1.1172
1.4259 -0.8146
1.4633 -0.1228
0.4643 -0.6029
0.3492 1.5270
1.6103 -0.6291
[torch.FloatTensor of size 8x2]
>>> torch.div(a, b)
-0.2062 0.5251 0.3229 -0.0684
-0.9528 1.8525 0.6320 1.9559
0.3881 -3.8625 -0.0253 0.2099
-0.3473 0.6306 0.1666 -2.5381
[torch.FloatTensor of size 4x4]
""")
add_docstr(torch._C.dot,
r"""
dot(tensor1, tensor2) -> float
Computes the dot product (inner product) of two tensors.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Example::
>>> torch.dot(torch.Tensor([2, 3]), torch.Tensor([2, 1]))
7.0
""")
add_docstr(torch._C.eig,
r"""
eig(a, eigenvectors=False, out=None) -> (Tensor, Tensor)
Computes the eigenvalues and eigenvectors of a real square matrix.
Args:
a (Tensor): the square matrix for which the eigenvalues and eigenvectors will be computed
eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors; otherwise, only eigenvalues will be computed
out (tuple, optional): the output tensors
Returns:
(Tensor, Tensor): A tuple containing
- **e** (*Tensor*): the right eigenvalues of ``a``
- **v** (*Tensor*): the eigenvectors of ``a`` if ``eigenvectors`` is ``True``; otherwise an empty tensor
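Example (a minimal sketch; the input below is random, so concrete output values are omitted)::
>>> a = torch.randn(4, 4)
>>> e, v = torch.eig(a, eigenvectors=True)
>>> # e holds the eigenvalues, v the corresponding right eigenvectors (see Returns above)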
""")
add_docstr(torch._C.eq,
r"""
eq(input, other, out=None) -> Tensor
Computes element-wise equality
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor. Must be a `ByteTensor` or the same type as `input`.
Returns:
Tensor: A ``torch.ByteTensor`` containing a 1 at each location where the tensors are equal and a 0 at every other location
Example::
>>> torch.eq(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
1 0
0 1
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.equal,
r"""
equal(tensor1, tensor2) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.
Example::
>>> torch.equal(torch.Tensor([1, 2]), torch.Tensor([1, 2]))
True
""")
add_docstr(torch._C.erf,
r"""
erf(tensor, out=None) -> Tensor
Computes the error function of each element.
Example::
>>> torch.erf(torch.Tensor([0, -1., 10.]))
torch.FloatTensor([0., -0.8427, 1.])
""")
add_docstr(torch._C.erfinv,
r"""
erfinv(tensor, out=None) -> Tensor
Computes the inverse error function of each element.
Example::
>>> torch.erfinv(torch.Tensor([0, 0.5, -1.]))
torch.FloatTensor([0., 0.4769, -inf])
""")
add_docstr(torch._C.exp,
r"""
exp(tensor, out=None) -> Tensor
Computes the exponential of each element.
Example::
>>> torch.exp(torch.Tensor([0, math.log(2)]))
torch.FloatTensor([1, 2])
""")
add_docstr(torch._C.eye,
r"""
eye(n, m=None, out=None)
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
out (Tensor, optional): the output tensor
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
1 0 0
0 1 0
0 0 1
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.floor,
r"""
floor(input, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> torch.floor(a)
1
0
-1
-1
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.fmod,
r"""
fmod(input, divisor, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both integer and floating point
numbers. The remainder has the same sign as the dividend :attr:`input`.
When :attr:`divisor` is a tensor, the shapes of :attr:`input` and
:attr:`divisor` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the dividend
divisor (Tensor or float): the divisor, which may be either a number or a tensor of the same shape as the dividend
out (Tensor, optional): the output tensor
Example::
>>> torch.fmod(torch.Tensor([-3, -2, -1, 1, 2, 3]), 2)
torch.FloatTensor([-1, -0, -1, 1, 0, 1])
>>> torch.fmod(torch.Tensor([1, 2, 3, 4, 5]), 1.5)
torch.FloatTensor([1.0, 0.5, 0.0, 1.0, 0.5])
.. seealso::
:func:`torch.remainder`, which computes the element-wise remainder of
division equivalently to Python's `%` operator
""")
add_docstr(torch._C.frac,
r"""
frac(tensor, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`tensor`.
Example::
>>> torch.frac(torch.Tensor([1, 2.5, -3.2]))
torch.FloatTensor([0, 0.5, -0.2])
""")
add_docstr(torch._C.from_numpy,
r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
The returned tensor and `ndarray` share the same memory. Modifications to the
tensor will be reflected in the `ndarray` and vice versa. The returned tensor
is not resizable.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.from_numpy(a)
>>> t
torch.LongTensor([1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
""")
add_docstr(torch._C.gather,
r"""
gather(input, dim, index, out=None) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
If :attr:`input` is an n-dimensional tensor with size
:math:`(x_0, x_1, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
and :attr:`dim` = i, then :attr:`index` must be an n-dimensional tensor with
size :math:`(x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})` where y >= 1 and
:attr:`out` will have the same size as :attr:`index`.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
out (Tensor, optional): the destination tensor
Example::
>>> t = torch.Tensor([[1,2],[3,4]])
>>> torch.gather(t, 1, torch.LongTensor([[0,0],[1,0]]))
1 1
4 3
[torch.FloatTensor of size 2x2]
""")
add_docstr(torch._C.ge,
r"""
ge(input, other, out=None) -> Tensor
Computes `input >= other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input`
Returns:
Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true
Example::
>>> torch.ge(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
1 1
0 1
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.gels,
r"""
gels(B, A, out=None) -> Tensor
Computes the solution to the least squares and least norm problems for a full
rank :math:`m` by :math:`n` matrix :math:`A`.
If :math:`m >= n`, :func:`gels` solves the least-squares problem:
.. math::
\begin{array}{ll}
\mbox{minimize} & \|AX-B\|_F.
\end{array}
If :math:`m < n`, :func:`gels` solves the least-norm problem:
.. math::
\begin{array}{ll}
\mbox{minimize} & \|X\|_F & \mbox{subject to} & AX = B.
\end{array}
The first :math:`n` rows of the returned matrix :math:`X` contain the
solution. The remaining rows contain residual information: the Euclidean norm
of each column starting at row :math:`n` is the residual for the corresponding
column.
Args:
B (Tensor): the matrix :math:`B`
A (Tensor): the :math:`m` by :math:`n` matrix :math:`A`
out (tuple, optional): the optional destination tensor
Returns:
(Tensor, Tensor): A tuple containing:
- **X** (*Tensor*): the least squares solution
- **qr** (*Tensor*): the details of the QR factorization
.. note::
The returned matrices will always be transposed, irrespective of the strides
of the input matrices. That is, they will have stride `(1, m)` instead of
`(m, 1)`.
Example::
>>> A = torch.Tensor([[1, 1, 1],
... [2, 3, 4],
... [3, 5, 2],
... [4, 2, 5],
... [5, 4, 3]])
>>> B = torch.Tensor([[-10, -3],
[ 12, 14],
[ 14, 12],
[ 16, 16],
[ 18, 16]])
>>> X, _ = torch.gels(B, A)
>>> X
2.0000 1.0000
1.0000 1.0000
1.0000 2.0000
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.geqrf,
r"""
geqrf(input, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK directly.
You'll generally want to use :func:`torch.qr` instead.
Computes a QR decomposition of :attr:`input`, but without constructing
`Q` and `R` as explicit separate matrices.
Rather, this directly calls the underlying LAPACK function `?geqrf`
which produces a sequence of 'elementary reflectors'.
See `LAPACK documentation`_ for further details.
Args:
input (Tensor): the input matrix
out (tuple, optional): the output tuple of (Tensor, Tensor)
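Example (an illustrative sketch; the packed `(a, tau)` output encodes elementary
reflectors, so only its use with :func:`torch.orgqr` is shown)::
>>> x = torch.randn(5, 3)
>>> a, tau = torch.geqrf(x)
>>> q = torch.orgqr(a, tau)  # rebuild the explicit Q factor from (a, tau)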
.. _LAPACK documentation:
https://software.intel.com/en-us/node/521004
""")
add_docstr(torch._C.ger,
r"""
ger(vec1, vec2, out=None) -> Tensor
Outer product of :attr:`vec1` and :attr:`vec2`.
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector of
size `m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
vec1 (Tensor): 1-D input vector
vec2 (Tensor): 1-D input vector
out (Tensor, optional): optional output matrix
Example::
>>> v1 = torch.arange(1, 5)
>>> v2 = torch.arange(1, 4)
>>> torch.ger(v1, v2)
1 2 3
2 4 6
3 6 9
4 8 12
[torch.FloatTensor of size 4x3]
""")
add_docstr(torch._C.gesv,
r"""
gesv(B, A, out=None) -> (Tensor, Tensor)
`X, LU = torch.gesv(B, A)` returns the solution to the system of linear
equations represented by :math:`AX = B`
`LU` contains `L` and `U` factors for LU factorization of `A`.
:attr:`A` has to be a square and non-singular matrix (2-D tensor).
If `A` is an :math:`(m \times m)` matrix and `B` is :math:`(m \times k)`,
the result `LU` is :math:`(m \times m)` and `X` is :math:`(m \times k)`.
.. note::
Irrespective of the original strides, the returned matrices
`X` and `LU` will be transposed, i.e. with strides `(1, m)`
instead of `(m, 1)`.
Args:
B (Tensor): input matrix of :math:`(m \times k)` dimensions
A (Tensor): input square matrix of :math:`(m \times m)` dimensions
out (Tensor, optional): optional output matrix
Example::
>>> A = torch.Tensor([[6.80, -2.11, 5.66, 5.97, 8.23],
... [-6.05, -3.30, 5.36, -4.44, 1.08],
... [-0.45, 2.58, -2.70, 0.27, 9.04],
... [8.32, 2.71, 4.35, -7.17, 2.14],
... [-9.67, -5.14, -7.26, 6.08, -6.87]]).t()
>>> B = torch.Tensor([[4.02, 6.19, -8.22, -7.57, -3.03],
... [-1.56, 4.00, -8.67, 1.75, 2.86],
... [9.81, -4.09, -4.57, -8.61, 8.99]]).t()
>>> X, LU = torch.gesv(B, A)
>>> torch.dist(B, torch.mm(A, X))
9.250057093890353e-06
""")
add_docstr(torch._C.get_num_threads,
r"""
get_num_threads() -> int
Gets the number of OpenMP threads used for parallelizing CPU operations
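Example (a small illustrative sketch; the returned value depends on the machine and build)::
>>> n = torch.get_num_threads()
>>> torch.set_num_threads(n)  # see :func:`torch.set_num_threads`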
""")
add_docstr(torch._C.gt,
r"""
gt(input, other, out=None) -> Tensor
Computes `input > other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input`
Returns:
Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true
Example::
>>> torch.gt(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
0 1
0 0
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.histc,
r"""
histc(input, bins=100, min=0, max=0, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Args:
input (Tensor): the input tensor
bins (int): number of histogram bins
min (int): lower end of the range (inclusive)
max (int): upper end of the range (inclusive)
out (Tensor, optional): the output tensor
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.FloatTensor([1, 2, 1]), bins=4, min=0, max=3)
FloatTensor([0, 2, 1, 0])
""")
add_docstr(torch._C.index_select,
r"""
index_select(input, dim, index, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
input (Tensor): the input tensor
dim (int): the dimension in which we index
index (LongTensor): the 1-D tensor containing the indices to index
out (Tensor, optional): the output tensor
Example::
>>> x = torch.randn(3, 4)
>>> x
1.2045 2.4084 0.4001 1.1372
0.5596 1.5677 0.6219 -0.7954
1.3635 -1.2313 -0.5414 -1.8478
[torch.FloatTensor of size 3x4]
>>> indices = torch.LongTensor([0, 2])
>>> torch.index_select(x, 0, indices)
1.2045 2.4084 0.4001 1.1372
1.3635 -1.2313 -0.5414 -1.8478
[torch.FloatTensor of size 2x4]
>>> torch.index_select(x, 1, indices)
1.2045 0.4001
0.5596 0.6219
1.3635 -0.5414
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.inverse,
r"""
inverse(input, out=None) -> Tensor
Takes the inverse of the square matrix :attr:`input`.
.. note::
Irrespective of the original strides, the returned matrix will be
transposed, i.e. with strides `(1, m)` instead of `(m, 1)`
Args:
input (Tensor): the input 2-D square tensor
out (Tensor, optional): the optional output tensor
Example::
>>> x = torch.rand(10, 10)
>>> x
0.7800 0.2267 0.7855 0.9479 0.5914 0.7119 0.4437 0.9131 0.1289 0.1982
0.0045 0.0425 0.2229 0.4626 0.6210 0.0207 0.6338 0.7067 0.6381 0.8196
0.8350 0.7810 0.8526 0.9364 0.7504 0.2737 0.0694 0.5899 0.8516 0.3883
0.6280 0.6016 0.5357 0.2936 0.7827 0.2772 0.0744 0.2627 0.6326 0.9153
0.7897 0.0226 0.3102 0.0198 0.9415 0.9896 0.3528 0.9397 0.2074 0.6980
0.5235 0.6119 0.6522 0.3399 0.3205 0.5555 0.8454 0.3792 0.4927 0.6086
0.1048 0.0328 0.5734 0.6318 0.9802 0.4458 0.0979 0.3320 0.3701 0.0909
0.2616 0.3485 0.4370 0.5620 0.5291 0.8295 0.7693 0.1807 0.0650 0.8497
0.1655 0.2192 0.6913 0.0093 0.0178 0.3064 0.6715 0.5101 0.2561 0.3396
0.4370 0.4695 0.8333 0.1180 0.4266 0.4161 0.0699 0.4263 0.8865 0.2578
[torch.FloatTensor of size 10x10]
>>> y = torch.inverse(x)
>>> z = torch.mm(x, y)
>>> z
1.0000 0.0000 0.0000 -0.0000 0.0000 0.0000 0.0000 0.0000 -0.0000 -0.0000
0.0000 1.0000 -0.0000 0.0000 0.0000 0.0000 -0.0000 -0.0000 -0.0000 -0.0000
0.0000 0.0000 1.0000 -0.0000 -0.0000 0.0000 0.0000 0.0000 -0.0000 -0.0000
0.0000 0.0000 0.0000 1.0000 0.0000 0.0000 0.0000 -0.0000 -0.0000 0.0000
0.0000 0.0000 -0.0000 -0.0000 1.0000 0.0000 0.0000 -0.0000 -0.0000 -0.0000
0.0000 0.0000 0.0000 -0.0000 0.0000 1.0000 -0.0000 -0.0000 -0.0000 -0.0000
0.0000 0.0000 0.0000 -0.0000 0.0000 0.0000 1.0000 0.0000 -0.0000 0.0000
0.0000 0.0000 -0.0000 -0.0000 0.0000 0.0000 -0.0000 1.0000 -0.0000 0.0000
-0.0000 0.0000 -0.0000 -0.0000 0.0000 0.0000 -0.0000 -0.0000 1.0000 -0.0000
-0.0000 0.0000 -0.0000 -0.0000 -0.0000 0.0000 -0.0000 -0.0000 0.0000 1.0000
[torch.FloatTensor of size 10x10]
>>> torch.max(torch.abs(z - torch.eye(10))) # Max nonzero
5.096662789583206e-07
""")
add_docstr(torch._C.kthvalue,
r"""
kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` th smallest element of the given :attr:`input` tensor
along a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
A tuple of `(values, indices)` is returned, where `indices` gives the index
of the k-th smallest element in the original `input` tensor along dimension `dim`.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
keepdim (bool): whether the output tensors have :attr:`dim` retained or not
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1, 6)
>>> x
1
2
3
4
5
[torch.FloatTensor of size 5]
>>> torch.kthvalue(x, 4)
(
4
[torch.FloatTensor of size 1]
,
3
[torch.LongTensor of size 1]
)
>>> x=torch.arange(1,7).resize_(2,3)
>>> x
1 2 3
4 5 6
[torch.FloatTensor of size 2x3]
>>> torch.kthvalue(x,2,0,True)
(
4 5 6
[torch.FloatTensor of size 1x3]
,
1 1 1
[torch.LongTensor of size 1x3]
)
""")
add_docstr(torch._C.le,
r"""
le(input, other, out=None) -> Tensor
Computes `input <= other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input`
Returns:
Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true
Example::
>>> torch.le(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
1 0
1 1
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.lerp,
r"""
lerp(start, end, weight, out=None)
Does a linear interpolation of two tensors :attr:`start` and :attr:`end` based
on a scalar :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
out_i = start_i + weight \times (end_i - start_i)
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
start (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float): the weight for the interpolation formula
out (Tensor, optional): the output tensor
Example::
>>> start = torch.arange(1, 5)
>>> end = torch.Tensor(4).fill_(10)
>>> start
1
2
3
4
[torch.FloatTensor of size 4]
>>> end
10
10
10
10
[torch.FloatTensor of size 4]
>>> torch.lerp(start, end, 0.5)
5.5000
6.0000
6.5000
7.0000
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.linspace,
r"""
linspace(start, end, steps=100, out=None) -> Tensor
Returns a one-dimensional tensor of :attr:`steps`
equally spaced points between :attr:`start` and :attr:`end`.
The output tensor is 1-D of size :attr:`steps`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): number of points to sample between :attr:`start`
and :attr:`end`
out (Tensor, optional): the output tensor
Example::
>>> torch.linspace(3, 10, steps=5)
3.0000
4.7500
6.5000
8.2500
10.0000
[torch.FloatTensor of size 5]
>>> torch.linspace(-10, 10, steps=5)
-10
-5
0
5
10
[torch.FloatTensor of size 5]
>>> torch.linspace(start=-10, end=10, steps=5)
-10
-5
0
5
10
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.log,
r"""
log(input, out=None) -> Tensor
Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(5)
>>> a
-0.4183
0.3722
-0.3091
0.4149
0.5857
[torch.FloatTensor of size 5]
>>> torch.log(a)
nan
-0.9883
nan
-0.8797
-0.5349
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.log1p,
r"""
log1p(input, out=None) -> Tensor
Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
.. math::
y_i = \log (x_i + 1)
.. note:: This function is more accurate than :func:`torch.log` for small
values of :attr:`input`
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(5)
>>> a
-0.4183
0.3722
-0.3091
0.4149
0.5857
[torch.FloatTensor of size 5]
>>> torch.log1p(a)
-0.5418
0.3164
-0.3697
0.3471
0.4611
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.logspace,
r"""
logspace(start, end, steps=100, out=None) -> Tensor
Returns a one-dimensional tensor of :attr:`steps` points
logarithmically spaced between :math:`10^{start}` and :math:`10^{end}`.
The output is a 1-D tensor of size :attr:`steps`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): number of points to sample between
:attr:`start` and :attr:`end`
out (Tensor, optional): the output tensor
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
1.0000e-10
1.0000e-05
1.0000e+00
1.0000e+05
1.0000e+10
[torch.FloatTensor of size 5]
>>> torch.logspace(start=0.1, end=1.0, steps=5)
1.2589
2.1135
3.5481
5.9566
10.0000
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.lt,
r"""
lt(input, other, out=None) -> Tensor
Computes `input < other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as :attr:`input`
Returns:
Tensor: A `torch.ByteTensor` containing a 1 at each location where comparison is true
Example::
>>> torch.lt(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
0 0
1 0
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.masked_select,
r"""
masked_select(input, mask, out=None) -> Tensor
Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the binary mask :attr:`mask` which is a `ByteTensor`.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. note:: The returned tensor does **not** use the same storage
as the original tensor
Args:
input (Tensor): the input data
mask (ByteTensor): the tensor containing the binary mask to index with
out (Tensor, optional): the output tensor
Example::
>>> x = torch.randn(3, 4)
>>> x
1.2045 2.4084 0.4001 1.1372
0.5596 1.5677 0.6219 -0.7954
1.3635 -1.2313 -0.5414 -1.8478
[torch.FloatTensor of size 3x4]
>>> mask = x.ge(0.5)
>>> mask
1 1 0 1
1 1 1 0
1 0 0 0
[torch.ByteTensor of size 3x4]
>>> torch.masked_select(x, mask)
1.2045
2.4084
1.1372
0.5596
1.5677
0.6219
1.3635
[torch.FloatTensor of size 7]
""")
add_docstr(torch._C.max,
r"""
.. function:: max(input) -> float
Returns the maximum value of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
0.4729 -0.2266 -0.2085
[torch.FloatTensor of size 1x3]
>>> torch.max(a)
0.4729
.. function:: max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns the maximum value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. The second return value is the index location of each
maximum value found (argmax).
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensors have :attr:`dim` retained or not
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
0.0692 0.3142 1.2513 -0.5428
0.9288 0.8552 -0.2073 0.6409
1.0695 -0.0101 -2.4507 -1.2230
0.7426 -0.7666 0.4862 -0.6628
[torch.FloatTensor of size 4x4]
>>> torch.max(a, 1)
(
1.2513
0.9288
1.0695
0.7426
[torch.FloatTensor of size 4]
,
2
0
0
0
[torch.LongTensor of size 4]
)
.. function:: max(input, other, out=None) -> Tensor
Each element of the tensor :attr:`input` is compared with the corresponding
element of the tensor :attr:`other` and an element-wise `max` is taken.
The shapes of :attr:`input` and :attr:`other` don't need to match,
but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. math::
out_i = \max(input_i, other_i)
.. note:: When the shapes do not match, the shape of the returned output tensor
follows the :ref:`broadcasting rules <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor
other (Tensor): the second input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> b = torch.randn(4)
>>> b
1.0067
-0.8010
0.6258
0.3627
[torch.FloatTensor of size 4]
>>> torch.max(a, b)
1.3869
0.3912
0.6258
0.3627
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.mean,
r"""
.. function:: mean(input) -> float
Returns the mean value of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
-0.2946 -0.9143 2.1809
[torch.FloatTensor of size 1x3]
>>> torch.mean(a)
0.32398951053619385
.. function:: mean(input, dim, keepdim=False, out=None) -> Tensor
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 fewer dimension.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool, optional): whether the output tensor has :attr:`dim` retained or not
out (Tensor): the output tensor
Example::
>>> a = torch.randn(4, 4)
>>> a
-1.2738 -0.3058 0.1230 -1.9615
0.8771 -0.5430 -0.9233 0.9879
1.4107 0.0317 -0.6823 0.2255
-1.3854 0.4953 -0.2160 0.2435
[torch.FloatTensor of size 4x4]
>>> torch.mean(a, 1)
-0.8545
0.0997
0.2464
-0.2157
[torch.FloatTensor of size 4]
>>> torch.mean(a, 1, True)
-0.8545
0.0997
0.2464
-0.2157
[torch.FloatTensor of size 4x1]
""")
add_docstr(torch._C.median,
r"""
.. function:: median(input) -> float
Returns the median value of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
0.4729 -0.2266 -0.2085
[torch.FloatTensor of size 1x3]
>>> torch.median(a)
-0.2085
.. function:: median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
Returns the median value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. Also returns the index location of the median value
as a `LongTensor`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensors have :attr:`dim` retained or not
values (Tensor, optional): the output tensor
indices (Tensor, optional): the output index tensor
Example::
>>> a = torch.randn(4, 5)
>>> a
0.4056 -0.3372 1.0973 -2.4884 0.4334
2.1336 0.3841 0.1404 -0.1821 -0.7646
-0.2403 1.3975 -2.0068 0.1298 0.0212
-1.5371 -0.7257 -0.4871 -0.2359 -1.1724
[torch.FloatTensor of size 4x5]
>>> torch.median(a, 1)
(
0.4056
0.1404
0.0212
-0.7257
[torch.FloatTensor of size 4]
,
0
2
4
1
[torch.LongTensor of size 4]
)
""")
add_docstr(torch._C.min,
r"""
.. function:: min(input) -> float
Returns the minimum value of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
0.4729 -0.2266 -0.2085
[torch.FloatTensor of size 1x3]
>>> torch.min(a)
-0.22663167119026184
.. function:: min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns the minimum value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. The second return value is the index location of each
minimum value found (argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensors have :attr:`dim` retained or not
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
0.0692 0.3142 1.2513 -0.5428
0.9288 0.8552 -0.2073 0.6409
1.0695 -0.0101 -2.4507 -1.2230
0.7426 -0.7666 0.4862 -0.6628
[torch.FloatTensor of size 4x4]
>>> torch.min(a, 1)
(
-0.5428
-0.2073
-2.4507
-0.7666
[torch.FloatTensor of size 4]
,
3
2
2
1
[torch.LongTensor of size 4]
)
.. function:: min(input, other, out=None) -> Tensor
Each element of the tensor :attr:`input` is compared with the corresponding
element of the tensor :attr:`other` and an element-wise `min` is taken.
The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` don't need to match,
but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. math::
out_i = \min(input_i, other_i)
.. note:: When the shapes do not match, the shape of the returned output tensor
follows the :ref:`broadcasting rules <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor
other (Tensor): the second input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.3869
0.3912
-0.8634
-0.5468
[torch.FloatTensor of size 4]
>>> b = torch.randn(4)
>>> b
1.0067
-0.8010
0.6258
0.3627
[torch.FloatTensor of size 4]
>>> torch.min(a, b)
1.0067
-0.8010
-0.8634
-0.5468
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.mm,
r"""
mm(mat1, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.mm(mat1, mat2)
0.0519 -0.3304 1.2232
4.3910 -5.1498 2.7571
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.mode,
r"""
mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
Returns the mode value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. Also returns the index location of the mode value
as a `LongTensor`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensors have :attr:`dim` retained or not
values (Tensor, optional): the output tensor
indices (Tensor, optional): the output index tensor
Example::
>>> a = torch.randn(4, 5)
>>> a
0.4056 -0.3372 1.0973 -2.4884 0.4334
2.1336 0.3841 0.1404 -0.1821 -0.7646
-0.2403 1.3975 -2.0068 0.1298 0.0212
-1.5371 -0.7257 -0.4871 -0.2359 -1.1724
[torch.FloatTensor of size 4x5]
>>> torch.mode(a, 1)
(
-2.4884
-0.7646
-2.0068
-1.5371
[torch.FloatTensor of size 4]
,
3
4
2
0
[torch.LongTensor of size 4]
)
""")
add_docstr(torch._C.mul,
r"""
.. function:: mul(input, value, out=None)
Multiplies each element of the input :attr:`input` with the scalar
:attr:`value` and returns a new resulting tensor.
.. math::
out_i = value \times input_i
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer
Args:
input (Tensor): the input tensor
value (Number): the number to multiply each element of :attr:`input` by
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(3)
>>> a
-0.9374
-0.5254
-0.6069
[torch.FloatTensor of size 3]
>>> torch.mul(a, 100)
-93.7411
-52.5374
-60.6908
[torch.FloatTensor of size 3]
.. function:: mul(input, other, out=None)
Each element of the tensor :attr:`input` is multiplied by each element of the
Tensor :attr:`other`. The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
.. math::
out_i = input_i \times other_i
Args:
input (Tensor): the first multiplicand tensor
other (Tensor): the second multiplicand tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4,4)
>>> a
-0.7280 0.0598 -1.4327 -0.5825
-0.1427 -0.0690 0.0821 -0.3270
-0.9241 0.5110 0.4070 -1.1188
-0.8308 0.7426 -0.6240 -1.1582
[torch.FloatTensor of size 4x4]
>>> b = torch.randn(2, 8)
>>> b
0.0430 -1.0775 0.6015 1.1647 -0.6549 0.0308 -0.1670 1.0742
-1.2593 0.0292 -0.0849 0.4530 1.2404 -0.4659 -0.1840 0.5974
[torch.FloatTensor of size 2x8]
>>> torch.mul(a, b)
-0.0313 -0.0645 -0.8618 -0.6784
0.0934 -0.0021 -0.0137 -0.3513
1.1638 0.0149 -0.0346 -0.5068
-1.0304 -0.3460 0.1148 -0.6919
[torch.FloatTensor of size 4x4]
""")
add_docstr(torch._C.multinomial,
u"""
multinomial(input, num_samples, replacement=False, out=None) -> LongTensor
Returns a tensor where each row
contains :attr:`num_samples` indices sampled from the multinomial probability
distribution located in the corresponding row of tensor :attr:`input`.
.. note::
The rows of :attr:`input` do not need to sum to one (in which case we use
the values as weights), but must be non-negative and have a non-zero sum.
Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).
If :attr:`input` is a vector, :attr:`out` is a vector of size `num_samples`.
If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
`m \u00D7 num_samples`.
If replacement is ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
This implies the constraint that :attr:`num_samples` must be lower than
:attr:`input` length (or number of columns of :attr:`input` if it is a matrix).
Args:
input (Tensor): the input tensor containing probabilities
num_samples (int): number of samples to draw
replacement (bool, optional): whether to draw with replacement or not
out (Tensor, optional): the output tensor
Example::
>>> weights = torch.Tensor([0, 10, 3, 0]) # create a tensor of weights
>>> torch.multinomial(weights, 4)
1
2
0
0
[torch.LongTensor of size 4]
>>> torch.multinomial(weights, 4, replacement=True)
1
2
1
2
[torch.LongTensor of size 4]
""")
add_docstr(torch._C.mv,
r"""
mv(mat, vec, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and the vector
:attr:`vec`.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, :attr:`out` will be 1-D of size `n`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
mat (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
out (Tensor, optional): the output tensor
Example::
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.mv(mat, vec)
-2.0939
-2.2950
[torch.FloatTensor of size 2]
""")
add_docstr(torch._C.ne,
r"""
ne(input, other, out=None) -> Tensor
Computes `input != other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `ByteTensor` or the same type as `input`
Returns:
Tensor: A ``torch.ByteTensor`` containing a 1 at each location where comparison is true.
Example::
>>> torch.ne(torch.Tensor([[1, 2], [3, 4]]), torch.Tensor([[1, 1], [4, 4]]))
0 1
1 0
[torch.ByteTensor of size 2x2]
""")
add_docstr(torch._C.neg,
r"""
neg(input, out=None) -> Tensor
Returns a new tensor with the negative of the elements of :attr:`input`.
.. math::
out = -1 \times input
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(5)
>>> a
-0.4430
1.1690
-0.8836
-0.4565
0.2968
[torch.FloatTensor of size 5]
>>> torch.neg(a)
0.4430
-1.1690
0.8836
0.4565
-0.2968
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.nonzero,
r"""
nonzero(input, out=None) -> LongTensor
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`.
If :attr:`input` has `n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
out (LongTensor, optional): the output tensor containing indices
Example::
>>> torch.nonzero(torch.Tensor([1, 1, 1, 0, 1]))
0
1
2
4
[torch.LongTensor of size 4x1]
>>> torch.nonzero(torch.Tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
0 0
1 1
2 2
3 3
[torch.LongTensor of size 4x2]
""")
add_docstr(torch._C.norm,
r"""
.. function:: norm(input, p=2) -> float
Returns the p-norm of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
p (float, optional): the exponent value in the norm formulation
Example::
>>> a = torch.randn(1, 3)
>>> a
-0.4376 -0.5328 0.9547
[torch.FloatTensor of size 1x3]
>>> torch.norm(a, 3)
1.0338925067372466
.. function:: norm(input, p, dim, keepdim=False, out=None) -> Tensor
Returns the p-norm of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size as
:attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
p (float): the exponent value in the norm formulation
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 2)
>>> a
-0.6891 -0.6662
0.2697 0.7412
0.5254 -0.7402
0.5528 -0.2399
[torch.FloatTensor of size 4x2]
>>> torch.norm(a, 2, 1)
0.9585
0.7888
0.9077
0.6026
[torch.FloatTensor of size 4]
>>> torch.norm(a, 0, 1, True)
2
2
2
2
[torch.FloatTensor of size 4x1]
""")
add_docstr(torch._C.normal,
r"""
.. function:: normal(means, std, out=None)
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`means` is a tensor with the mean of
each output element's normal distribution.
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution.
The shapes of :attr:`means` and :attr:`std` don't need to match.
The total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`means`
is used as the shape for the returned output tensor
Args:
means (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(means=torch.arange(1, 11), std=torch.arange(1, 0, -0.1))
1.5104
1.6955
2.4895
4.9185
4.9895
6.9155
7.3683
8.1836
8.7164
9.8916
[torch.FloatTensor of size 10]
.. function:: normal(mean=0.0, std, out=None)
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1, 6))
0.5723
0.0871
-0.3783
-2.5689
10.7893
[torch.FloatTensor of size 5]
.. function:: normal(means, std=1.0, out=None)
Similar to the function above, but the standard-deviations are shared among
all drawn elements.
Args:
means (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(means=torch.arange(1, 6))
1.1681
2.8884
3.7718
2.5616
4.2500
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.numel,
r"""
numel(input) -> int
Returns the total number of elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1,2,3,4,5)
>>> torch.numel(a)
120
>>> a = torch.zeros(4,4)
>>> torch.numel(a)
16
""")
add_docstr(torch._C.ones,
r"""
ones(*sizes, out=None) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the varargs :attr:`sizes`.
Args:
sizes (int...): a set of integers defining the shape of the output tensor
out (Tensor, optional): the output tensor
Example::
>>> torch.ones(2, 3)
1 1 1
1 1 1
[torch.FloatTensor of size 2x3]
>>> torch.ones(5)
1
1
1
1
1
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.ones_like,
r"""
ones_like(input, out=None) -> Tensor
Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`.
Args:
input (Tensor): the size of :attr:`input` will determine the size of the output tensor
out (Tensor, optional): the output tensor
Example::
>>> input = torch.FloatTensor(2, 3)
>>> torch.ones_like(input)
1 1 1
1 1 1
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.orgqr,
r"""
orgqr(a, tau) -> Tensor
Computes the orthogonal matrix `Q` of a QR factorization, from the `(a, tau)`
tuple returned by :func:`torch.geqrf`.
This directly calls the underlying LAPACK function `?orgqr`.
See `?orgqr LAPACK documentation`_ for further details.
Args:
a (Tensor): the `a` from :func:`torch.geqrf`.
tau (Tensor): the `tau` from `torch.geqrf`.
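Example (an illustrative sketch; up to floating point error the recovered `Q`
should be orthogonal)::
>>> a, tau = torch.geqrf(torch.randn(4, 4))
>>> q = torch.orgqr(a, tau)
>>> torch.dist(torch.mm(q.t(), q), torch.eye(4))  # ~0 for an orthogonal q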
.. _?orgqr LAPACK documentation:
https://software.intel.com/en-us/mkl-developer-reference-c-orgqr
""")
add_docstr(torch._C.ormqr,
r"""
ormqr(a, tau, mat, left=True, transpose=False) -> (Tensor, Tensor)
Multiplies `mat` by the orthogonal `Q` matrix of the QR factorization
formed by :func:`torch.geqrf` that is represented by `(a, tau)`.
This directly calls the underlying LAPACK function `?ormqr`.
See `?ormqr LAPACK documentation`_ for further details.
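Example (an illustrative sketch using the defaults `left=True`, `transpose=False`;
the result should match multiplying by the explicit `Q` from :func:`torch.orgqr`)::
>>> a, tau = torch.geqrf(torch.randn(4, 4))
>>> mat = torch.randn(4, 2)
>>> torch.ormqr(a, tau, mat)  # Q applied to mat without forming Q explicitly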
.. _?ormqr LAPACK documentation:
https://software.intel.com/en-us/mkl-developer-reference-c-ormqr
""")
add_docstr(torch._C.potrf,
r"""
potrf(a, out=None)
potrf(a, upper, out=None)
Computes the Cholesky decomposition of a positive semidefinite
matrix :attr:`a`: returns matrix `u`
If `upper` is ``True`` or not provided, `u` is upper triangular
such that :math:`a = u^T u`.
If `upper` is ``False``, `u` is lower triangular
such that :math:`a = u u^T`.
Args:
a (Tensor): the input 2-D tensor, a symmetric positive semidefinite matrix
upper (bool, optional): whether to return an upper (default) or lower triangular matrix
out (Tensor, optional): the output tensor for `u`
Example::
>>> a = torch.randn(3,3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.potrf(a)
>>> a
2.3563 3.2318 -0.9406
3.2318 4.9557 -2.1618
-0.9406 -2.1618 2.2443
[torch.FloatTensor of size 3x3]
>>> u
1.5350 2.1054 -0.6127
0.0000 0.7233 -1.2053
0.0000 0.0000 0.6451
[torch.FloatTensor of size 3x3]
>>> torch.mm(u.t(),u)
2.3563 3.2318 -0.9406
3.2318 4.9557 -2.1618
-0.9406 -2.1618 2.2443
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.potri,
r"""
potri(u, out=None)
potri(u, upper, out=None)
Computes the inverse of a positive semidefinite matrix given its
Cholesky factor :attr:`u`: returns matrix `inv`
If `upper` is ``True`` or not provided, `u` is upper triangular
such that :math:`inv = (u^T u)^{-1}`.
If `upper` is ``False``, `u` is lower triangular
such that :math:`inv = (u u^T)^{-1}`.
Args:
u (Tensor): the input 2-D tensor, an upper or lower triangular
Cholesky factor
upper (bool, optional): whether to return an upper (default) or lower triangular matrix
out (Tensor, optional): the output tensor for `inv`
Example::
>>> a = torch.randn(3,3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.potrf(a)
>>> a
2.3563 3.2318 -0.9406
3.2318 4.9557 -2.1618
-0.9406 -2.1618 2.2443
[torch.FloatTensor of size 3x3]
>>> torch.potri(u)
12.5724 -10.1765 -4.5333
-10.1765 8.5852 4.0047
-4.5333 4.0047 2.4031
[torch.FloatTensor of size 3x3]
>>> a.inverse()
12.5723 -10.1765 -4.5333
-10.1765 8.5852 4.0047
-4.5333 4.0047 2.4031
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.potrs,
r"""
potrs(b, u, out=None)
potrs(b, u, upper, out=None)
Solves a linear system of equations with a positive semidefinite
matrix to be inverted given its Cholesky factor
matrix :attr:`u`: returns matrix `c`
If `upper` is ``True`` or not provided, `u` is an upper triangular matrix
such that :math:`c = (u^T u)^{-1} b`.
If `upper` is ``False``, `u` is a lower triangular matrix
such that :math:`c = (u u^T)^{-1} b`.
.. note:: `b` is always a 2-D tensor, use `b.unsqueeze(1)` to convert a vector.
Args:
b (Tensor): the right hand side 2-D tensor
u (Tensor): the input 2-D tensor, an upper or lower triangular Cholesky factor
upper (bool, optional): whether to return an upper (default) or lower triangular matrix
out (Tensor, optional): the output tensor for `c`
Example::
>>> a = torch.randn(3,3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.potrf(a)
>>> a
2.3563 3.2318 -0.9406
3.2318 4.9557 -2.1618
-0.9406 -2.1618 2.2443
[torch.FloatTensor of size 3x3]
>>> b = torch.randn(3,2)
>>> b
-0.3119 -1.8224
-0.2798 0.1789
-0.3735 1.7451
[torch.FloatTensor of size 3x2]
>>> torch.potrs(b,u)
0.6187 -32.6438
-0.7234 27.0703
-0.6039 13.1717
[torch.FloatTensor of size 3x2]
>>> torch.mm(a.inverse(),b)
0.6187 -32.6436
-0.7234 27.0702
-0.6039 13.1717
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.pow,
r"""
.. function:: pow(input, exponent, out=None)
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
out_i = x_i ^ {exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
out_i = x_i ^ {exponent_i}
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the input tensor
exponent (float or tensor): the exponent value
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.5274
-0.8232
-2.1128
1.7558
[torch.FloatTensor of size 4]
>>> torch.pow(a, 2)
0.2781
0.6776
4.4640
3.0829
[torch.FloatTensor of size 4]
>>> exp = torch.arange(1, 5)
>>> a = torch.arange(1, 5)
>>> a
1
2
3
4
[torch.FloatTensor of size 4]
>>> exp
1
2
3
4
[torch.FloatTensor of size 4]
>>> torch.pow(a, exp)
1
4
27
256
[torch.FloatTensor of size 4]
.. function:: pow(base, input, out=None)
:attr:`base` is a scalar ``float`` value, and :attr:`input` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`input`
The operation applied is:
.. math::
out_i = base ^ {input_i}
Args:
base (float): the scalar base value for the power operation
input (Tensor): the exponent tensor
out (Tensor, optional): the output tensor
Example::
>>> exp = torch.arange(1, 5)
>>> base = 2
>>> torch.pow(base, exp)
2
4
8
16
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.prod,
r"""
.. function:: prod(input) -> float
Returns the product of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
0.6170 0.3546 0.0253
[torch.FloatTensor of size 1x3]
>>> torch.prod(a)
0.005537458061418483
.. function:: prod(input, dim, keepdim=False, out=None) -> Tensor
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size as
:attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 2)
>>> a
0.1598 -0.6884
-0.1831 -0.4412
-0.9925 -0.6244
-0.2416 -0.8080
[torch.FloatTensor of size 4x2]
>>> torch.prod(a, 1)
-0.1100
0.0808
0.6197
0.1952
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.pstrf,
r"""
pstrf(a, out=None)
pstrf(a, upper, out=None)
Computes the pivoted Cholesky decomposition of a positive semidefinite
matrix :attr:`a`: returns matrices `u` and `piv`.
If `upper` is ``True`` or not provided, `u` is an upper triangular matrix
such that :math:`a = p^T u^T u p`, with `p` the permutation given by `piv`.
If `upper` is ``False``, `u` is a lower triangular matrix
such that :math:`a = p^T u u^T p`.
Args:
a (Tensor): the input 2-D tensor
upper (bool, optional): whether to return an upper (default) or lower triangular matrix
out (tuple, optional): tuple of `u` and `piv` tensors
Example::
>>> a = torch.randn(3,3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> a
5.4417 -2.5280 1.3643
-2.5280 2.9689 -2.1368
1.3643 -2.1368 4.6116
[torch.FloatTensor of size 3x3]
>>> u,piv = torch.pstrf(a)
>>> u
2.3328 0.5848 -1.0837
0.0000 2.0663 -0.7274
0.0000 0.0000 1.1249
[torch.FloatTensor of size 3x3]
>>> piv
0
2
1
[torch.IntTensor of size 3]
>>> p = torch.eye(3).index_select(0,piv.long()).index_select(0,piv.long()).t() # make pivot permutation
>>> torch.mm(torch.mm(p.t(),torch.mm(u.t(),u)),p) # reconstruct
5.4417 1.3643 -2.5280
1.3643 4.6116 -2.1368
-2.5280 -2.1368 2.9689
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.qr,
r"""
qr(input, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix :attr:`input`: returns matrices
`q` and `r` such that :math:`x = q r`, with `q` being an orthogonal matrix
and `r` being an upper triangular matrix.
This returns the thin (reduced) QR factorization.
.. note:: precision may be lost if the magnitudes of the elements of `input`
are large
.. note:: while it should always give you a valid decomposition, it may not
give you the same one across platforms - it will depend on your
LAPACK implementation.
.. note:: Irrespective of the original strides, the returned matrix `q` will be
transposed, i.e. with strides `(1, m)` instead of `(m, 1)`.
Args:
input (Tensor): the input 2-D tensor
out (tuple, optional): tuple of `Q` and `R` tensors
Example::
>>> a = torch.Tensor([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
-0.8571 0.3943 0.3314
-0.4286 -0.9029 -0.0343
0.2857 -0.1714 0.9429
[torch.FloatTensor of size 3x3]
>>> r
-14.0000 -21.0000 14.0000
0.0000 -175.0000 70.0000
0.0000 0.0000 -35.0000
[torch.FloatTensor of size 3x3]
>>> torch.mm(q, r).round()
12 -51 4
6 167 -68
-4 24 -41
[torch.FloatTensor of size 3x3]
>>> torch.mm(q.t(), q).round()
1 -0 0
-0 1 0
0 0 1
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.rand,
r"""
rand(*sizes, out=None) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the varargs :attr:`sizes`.
Args:
sizes (int...): a set of ints defining the shape of the output tensor.
out (Tensor, optional): the output tensor
Example::
>>> torch.rand(4)
0.9193
0.3347
0.3232
0.7715
[torch.FloatTensor of size 4]
>>> torch.rand(2, 3)
0.5010 0.5140 0.0719
0.1435 0.5636 0.0538
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.randn,
r"""
randn(*sizes, out=None) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with zero mean and variance of one.
The shape of the tensor is defined by the varargs :attr:`sizes`.
Args:
sizes (int...): a set of ints defining the shape of the output tensor.
out (Tensor, optional): the output tensor
Example::
>>> torch.randn(4)
-0.1145
0.0094
-1.1717
0.9846
[torch.FloatTensor of size 4]
>>> torch.randn(2, 3)
1.4339 0.3351 -1.0999
1.5458 -0.9643 -0.3558
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.randperm,
r"""
randperm(n, out=None) -> LongTensor
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Example::
>>> torch.randperm(4)
2
1
3
0
[torch.LongTensor of size 4]
""")
add_docstr(torch._C.range,
r"""
range(start, end, step=1, out=None) -> Tensor
Returns a 1-D tensor of size :math:`\lfloor \frac{end - start}{step} \rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor. :math:`x_{i+1} = x_i + step`.
Warning:
This function is deprecated in favor of :func:`torch.arange`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points
out (Tensor, optional): the output tensor
Example::
>>> torch.range(1, 4)
1
2
3
4
[torch.FloatTensor of size 4]
>>> torch.range(1, 4, 0.5)
1.0000
1.5000
2.0000
2.5000
3.0000
3.5000
4.0000
[torch.FloatTensor of size 7]
""")
add_docstr(torch._C.arange,
r"""
arange(start=0, end, step=1, out=None) -> Tensor
Returns a 1-D tensor of size :math:`\lfloor \frac{end - start}{step} \rfloor`
with values from the interval ``[start, end)`` taken with step :attr:`step`
starting from `start`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points
out (Tensor, optional): the output tensor
Example::
>>> torch.arange(5)
0
1
2
3
4
[torch.FloatTensor of size 5]
>>> torch.arange(1, 4)
1
2
3
[torch.FloatTensor of size 3]
>>> torch.arange(1, 2.5, 0.5)
1.0000
1.5000
2.0000
[torch.FloatTensor of size 3]
""")
add_docstr(torch._C.remainder,
r"""
remainder(input, divisor, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both integer and floating point numbers.
The remainder has the same sign as the divisor.
When :attr:`divisor` is a tensor, the shapes of :attr:`input` and
:attr:`divisor` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the dividend
divisor (Tensor or float): the divisor that may be either a number or a
Tensor of the same shape as the dividend
out (Tensor, optional): the output tensor
Example::
>>> torch.remainder(torch.Tensor([-3, -2, -1, 1, 2, 3]), 2)
torch.FloatTensor([1, 0, 1, 1, 0, 1])
>>> torch.remainder(torch.Tensor([1, 2, 3, 4, 5]), 1.5)
torch.FloatTensor([1.0, 0.5, 0.0, 1.0, 0.5])
.. seealso::
:func:`torch.fmod`, which computes the element-wise remainder of
division equivalently to the C library function ``fmod()``
""")
add_docstr(torch._C.renorm,
r"""
renorm(input, p, dim, maxnorm, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
input (Tensor): the input tensor
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
out (Tensor, optional): the output tensor
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
>>> x[2].fill_(3)
>>> x
1 1 1
2 2 2
3 3 3
[torch.FloatTensor of size 3x3]
>>> torch.renorm(x, 1, 0, 5)
1.0000 1.0000 1.0000
1.6667 1.6667 1.6667
1.6667 1.6667 1.6667
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.round,
r"""
round(input, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input` rounded
to the closest integer.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.2290
1.3409
-0.5662
-0.0899
[torch.FloatTensor of size 4]
>>> torch.round(a)
1
1
-1
-0
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.rsqrt,
r"""
rsqrt(input, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.2290
1.3409
-0.5662
-0.0899
[torch.FloatTensor of size 4]
>>> torch.rsqrt(a)
0.9020
0.8636
nan
nan
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.set_num_threads,
r"""
set_num_threads(int)
Sets the number of OpenMP threads used for parallelizing CPU operations
""")
add_docstr(torch._C.sigmoid,
r"""
sigmoid(input, out=None) -> Tensor
Returns a new tensor with the sigmoid of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.4972
1.3512
0.1056
-0.2650
[torch.FloatTensor of size 4]
>>> torch.sigmoid(a)
0.3782
0.7943
0.5264
0.4341
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.sign,
r"""
sign(input, out=None) -> Tensor
Returns a new tensor with the sign of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.sign(a)
-1
1
1
1
""")
add_docstr(torch._C.sin,
r"""
sin(input, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.sin(a)
-0.5944
0.2684
0.4322
0.9667
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.sinh,
r"""
sinh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.sinh(a)
-0.6804
0.2751
0.4619
1.7225
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.sort,
r"""
sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
A tuple of (sorted_tensor, sorted_indices) is returned, where the
sorted_indices are the indices of the elements in the original `input` tensor.
Args:
input (Tensor): the input tensor
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
-1.6747 0.0610 0.1190 1.4137
-1.4782 0.7159 1.0341 1.3678
-0.3324 -0.0782 0.3518 0.4763
[torch.FloatTensor of size 3x4]
>>> indices
0 1 3 2
2 1 0 3
3 1 0 2
[torch.LongTensor of size 3x4]
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
-1.6747 -0.0782 -1.4782 -0.3324
0.3518 0.0610 0.4763 0.1190
1.0341 0.7159 1.4137 1.3678
[torch.FloatTensor of size 3x4]
>>> indices
0 2 1 2
2 0 2 0
1 1 0 1
[torch.LongTensor of size 3x4]
""")
add_docstr(torch._C.sqrt,
r"""
sqrt(input, out=None) -> Tensor
Returns a new tensor with the square-root of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
1.2290
1.3409
-0.5662
-0.0899
[torch.FloatTensor of size 4]
>>> torch.sqrt(a)
1.1086
1.1580
nan
nan
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.squeeze,
"""
squeeze(input, dim=None, out=None)
Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
`squeeze(input, 0)` leaves the tensor unchanged, but `squeeze(input, 1)` will
squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: As an exception to the above, a 1-dimensional tensor of size 1 will
not have its dimensions changed.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
Args:
input (Tensor): the input tensor
dim (int, optional): if given, the input will be squeezed only in
this dimension
out (Tensor, optional): the output tensor
Example::
>>> x = torch.zeros(2,1,2,1,2)
>>> x.size()
(2L, 1L, 2L, 1L, 2L)
>>> y = torch.squeeze(x)
>>> y.size()
(2L, 2L, 2L)
>>> y = torch.squeeze(x, 0)
>>> y.size()
(2L, 1L, 2L, 1L, 2L)
>>> y = torch.squeeze(x, 1)
>>> y.size()
(2L, 2L, 1L, 2L)
""")
add_docstr(torch._C.std,
r"""
.. function:: std(input, unbiased=True) -> float
Returns the standard-deviation of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
input (Tensor): the input tensor
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
-1.3063 1.4182 -0.3061
[torch.FloatTensor of size 1x3]
>>> torch.std(a)
1.3782334731508061
.. function:: std(input, dim, keepdim=False, unbiased=True, out=None) -> Tensor
Returns the standard-deviation of each row of the :attr:`input` tensor in the
given dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size as
:attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensor having 1 fewer dimension than :attr:`input`.
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
unbiased (bool): whether to use the unbiased estimation or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 4)
>>> a
0.1889 -2.4856 0.0043 1.8169
-0.7701 -0.4682 -2.2410 0.4098
0.1919 -1.1856 -1.0361 0.9085
0.0173 1.0662 0.2143 -0.5576
[torch.FloatTensor of size 4x4]
>>> torch.std(a, dim=1)
1.7756
1.1025
1.0045
0.6725
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.sum,
r"""
.. function:: sum(input) -> float
Returns the sum of all elements in the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
Example::
>>> a = torch.randn(1, 3)
>>> a
0.6170 0.3546 0.0253
[torch.FloatTensor of size 1x3]
>>> torch.sum(a)
0.9969287421554327
.. function:: sum(input, dim, keepdim=False, out=None) -> Tensor
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 4)
>>> a
-0.4640 0.0609 0.1122 0.4784
-1.3063 1.6443 0.4714 -0.7396
-1.3561 -0.1959 1.0609 -1.9855
2.6833 0.5746 -0.5709 -0.4430
[torch.FloatTensor of size 4x4]
>>> torch.sum(a, 1)
0.1874
0.0698
-2.4767
2.2440
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.svd,
r"""
svd(input, some=True, out=None) -> (Tensor, Tensor, Tensor)
`U, S, V = torch.svd(A)` returns the singular value decomposition of a
real matrix `A` of size `(n x m)` such that :math:`A = USV^T`.
`U` is of shape :math:`(n \times n)`.
`S` is a diagonal matrix of shape :math:`(n \times m)`, represented as a vector
of size :math:`\min(n, m)` containing the diagonal entries.
`V` is of shape :math:`(m \times m)`.
If :attr:`some` is ``True`` (default), the returned `U` and `V` matrices will
contain only :math:`min(n, m)` orthonormal columns.
.. note:: Irrespective of the original strides, the returned matrix `U`
will be transposed, i.e. with strides `(1, n)` instead of `(n, 1)`.
Args:
input (Tensor): the input 2-D tensor
some (bool, optional): controls the shape of returned `U` and `V`
out (tuple, optional): the output tuple of tensors
Example::
>>> a = torch.Tensor([[8.79, 6.11, -9.15, 9.57, -3.49, 9.84],
... [9.93, 6.91, -7.93, 1.64, 4.02, 0.15],
... [9.83, 5.04, 4.86, 8.83, 9.80, -8.99],
... [5.45, -0.27, 4.85, 0.74, 10.00, -6.02],
... [3.16, 7.98, 3.01, 5.80, 4.27, -5.31]]).t()
>>> a
8.7900 9.9300 9.8300 5.4500 3.1600
6.1100 6.9100 5.0400 -0.2700 7.9800
-9.1500 -7.9300 4.8600 4.8500 3.0100
9.5700 1.6400 8.8300 0.7400 5.8000
-3.4900 4.0200 9.8000 10.0000 4.2700
9.8400 0.1500 -8.9900 -6.0200 -5.3100
[torch.FloatTensor of size 6x5]
>>> u, s, v = torch.svd(a)
>>> u
-0.5911 0.2632 0.3554 0.3143 0.2299
-0.3976 0.2438 -0.2224 -0.7535 -0.3636
-0.0335 -0.6003 -0.4508 0.2334 -0.3055
-0.4297 0.2362 -0.6859 0.3319 0.1649
-0.4697 -0.3509 0.3874 0.1587 -0.5183
0.2934 0.5763 -0.0209 0.3791 -0.6526
[torch.FloatTensor of size 6x5]
>>> s
27.4687
22.6432
8.5584
5.9857
2.0149
[torch.FloatTensor of size 5]
>>> v
-0.2514 0.8148 -0.2606 0.3967 -0.2180
-0.3968 0.3587 0.7008 -0.4507 0.1402
-0.6922 -0.2489 -0.2208 0.2513 0.5891
-0.3662 -0.3686 0.3859 0.4342 -0.6265
-0.4076 -0.0980 -0.4932 -0.6227 -0.4396
[torch.FloatTensor of size 5x5]
>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
8.934150226306685e-06
""")
add_docstr(torch._C.symeig,
r"""
symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)
`e, V = torch.symeig(input)` returns eigenvalues and eigenvectors
of a real symmetric matrix :attr:`input`.
`input` and `V` are :math:`(m \times m)` matrices and `e` is a `m` dimensional
vector.
This function calculates all eigenvalues (and vectors) of `input`
such that :math:`input = V diag(e) V^T`.
The boolean argument :attr:`eigenvectors` defines computation of
eigenvectors or eigenvalues only.
If it is ``False``, only eigenvalues are computed. If it is ``True``,
both eigenvalues and eigenvectors are computed.
Since the input matrix `input` is supposed to be symmetric,
only the upper triangular portion is used by default.
If :attr:`upper` is ``False``, then lower triangular portion is used.
.. note:: Irrespective of the original strides, the returned matrix `V` will
be transposed, i.e. with strides `(1, m)` instead of `(m, 1)`.
Args:
input (Tensor): the input symmetric matrix
eigenvectors(boolean, optional): controls whether eigenvectors have to be computed
upper(boolean, optional): controls whether to consider upper-triangular or lower-triangular region
out (tuple, optional): the output tuple of (Tensor, Tensor)
Examples::
>>> a = torch.Tensor([[ 1.96, 0.00, 0.00, 0.00, 0.00],
... [-6.49, 3.80, 0.00, 0.00, 0.00],
... [-0.47, -6.39, 4.17, 0.00, 0.00],
... [-7.20, 1.50, -1.51, 5.70, 0.00],
... [-0.65, -6.34, 2.67, 1.80, -7.10]]).t()
>>> e, v = torch.symeig(a, eigenvectors=True)
>>> e
-11.0656
-6.2287
0.8640
8.8655
16.0948
[torch.FloatTensor of size 5]
>>> v
-0.2981 -0.6075 0.4026 -0.3745 0.4896
-0.5078 -0.2880 -0.4066 -0.3572 -0.6053
-0.0816 -0.3843 -0.6600 0.5008 0.3991
-0.0036 -0.4467 0.4553 0.6204 -0.4564
-0.8041 0.4480 0.1725 0.3108 0.1622
[torch.FloatTensor of size 5x5]
""")
add_docstr(torch._C.t,
r"""
t(input, out=None) -> Tensor
Expects :attr:`input` to be a matrix (2-D tensor) and transposes dimensions 0 and
1.
Can be seen as a short-hand function for `transpose(input, 0, 1)`
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> x = torch.randn(2, 3)
>>> x
0.4834 0.6907 1.3417
-0.1300 0.5295 0.2321
[torch.FloatTensor of size 2x3]
>>> torch.t(x)
0.4834 -0.1300
0.6907 0.5295
1.3417 0.2321
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.take, r"""\
take(input, indices) -> Tensor
Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.
Args:
input (Tensor): the input tensor
indices (LongTensor): the indices into tensor
Example::
>>> src = torch.Tensor([[4, 3, 5],
... [6, 7, 8]])
>>> torch.take(src, torch.LongTensor([0, 2, 5]))
4
5
8
[torch.FloatTensor of size 3]
""")
add_docstr(torch._C.tan,
r"""
tan(input, out=None) -> Tensor
Returns a new tensor with the tangent of the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.tan(a)
-0.7392
0.2786
0.4792
3.7801
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.tanh,
r"""
tanh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.6366
0.2718
0.4469
1.3122
[torch.FloatTensor of size 4]
>>> torch.tanh(a)
-0.5625
0.2653
0.4193
0.8648
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.topk,
r"""
topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
A tuple of `(values, indices)` is returned, where the `indices` are the indices
of the elements in the original `input` tensor.
If the boolean option :attr:`sorted` is ``True``, the returned `k` elements
are themselves sorted
Args:
input (Tensor): the input tensor
k (int): the k in "top-k"
dim (int, optional): the dimension to sort along
largest (bool, optional): controls whether to return largest or
smallest elements
sorted (bool, optional): controls whether to return the elements
in sorted order
out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1, 6)
>>> x
1
2
3
4
5
[torch.FloatTensor of size 5]
>>> torch.topk(x, 3)
(
5
4
3
[torch.FloatTensor of size 3]
,
4
3
2
[torch.LongTensor of size 3]
)
>>> torch.topk(x, 3, 0, largest=False)
(
1
2
3
[torch.FloatTensor of size 3]
,
0
1
2
[torch.LongTensor of size 3]
)
""")
add_docstr(torch._C.trace,
r"""
trace(input) -> float
Returns the sum of the elements of the diagonal of the input 2-D matrix.
Example::
>>> x = torch.arange(1, 10).view(3, 3)
>>> x
1 2 3
4 5 6
7 8 9
[torch.FloatTensor of size 3x3]
>>> torch.trace(x)
15.0
""")
add_docstr(torch._C.transpose,
r"""
transpose(input, dim0, dim1, out=None) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
The resulting :attr:`out` tensor shares its underlying storage with the
:attr:`input` tensor, so changing the content of one would change the content
of the other.
Args:
input (Tensor): the input tensor
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
0.5983 -0.0341 2.4918
1.5981 -0.5265 -0.8735
[torch.FloatTensor of size 2x3]
>>> torch.transpose(x, 0, 1)
0.5983 1.5981
-0.0341 -0.5265
2.4918 -0.8735
[torch.FloatTensor of size 3x2]
""")
add_docstr(torch._C.tril,
r"""
tril(input, diagonal=0, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) :attr:`input`,
the other elements of the result tensor :attr:`out` are set to 0.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor
diagonal (int, optional): the diagonal to consider
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(3,3)
>>> a
1.3225 1.7304 1.4573
-0.3052 -0.3111 -0.1809
1.2469 0.0064 -1.6250
[torch.FloatTensor of size 3x3]
>>> torch.tril(a)
1.3225 0.0000 0.0000
-0.3052 -0.3111 0.0000
1.2469 0.0064 -1.6250
[torch.FloatTensor of size 3x3]
>>> torch.tril(a, diagonal=1)
1.3225 1.7304 0.0000
-0.3052 -0.3111 -0.1809
1.2469 0.0064 -1.6250
[torch.FloatTensor of size 3x3]
>>> torch.tril(a, diagonal=-1)
0.0000 0.0000 0.0000
-0.3052 0.0000 0.0000
1.2469 0.0064 0.0000
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.triu,
r"""
triu(input, diagonal=0, out=None) -> Tensor
Returns the upper triangular part of the matrix (2-D tensor) :attr:`input`,
the other elements of the result tensor :attr:`out` are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
input (Tensor): the input tensor
diagonal (int, optional): the diagonal to consider
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(3,3)
>>> a
1.3225 1.7304 1.4573
-0.3052 -0.3111 -0.1809
1.2469 0.0064 -1.6250
[torch.FloatTensor of size 3x3]
>>> torch.triu(a)
1.3225 1.7304 1.4573
0.0000 -0.3111 -0.1809
0.0000 0.0000 -1.6250
[torch.FloatTensor of size 3x3]
>>> torch.triu(a, diagonal=1)
0.0000 1.7304 1.4573
0.0000 0.0000 -0.1809
0.0000 0.0000 0.0000
[torch.FloatTensor of size 3x3]
>>> torch.triu(a, diagonal=-1)
1.3225 1.7304 1.4573
-0.3052 -0.3111 -0.1809
0.0000 0.0064 -1.6250
[torch.FloatTensor of size 3x3]
""")
add_docstr(torch._C.trtrs,
r"""
trtrs(b, A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
Solves a system of equations with a triangular coefficient matrix `A`
and multiple right-hand sides `b`.
In particular, solves :math:`AX = b` and assumes `A` is upper-triangular
with the default keyword arguments.
This method is NOT implemented for CUDA tensors.
Args:
A (Tensor): the input triangular coefficient matrix
b (Tensor): multiple right-hand sides. Each column of `b` is a
right-hand side for the system of equations.
upper (bool, optional): whether to solve the upper-triangular system
of equations (default) or the lower-triangular system of equations. Default: True.
transpose (bool, optional): whether `A` should be transposed before
being sent into the solver. Default: False.
unitriangular (bool, optional): whether `A` is unit triangular.
If True, the diagonal elements of `A` are assumed to be
1 and not referenced from `A`. Default: False.
Returns:
A tuple (X, M) where `M` is a clone of `A` and `X` is the solution to
`AX = b` (or whatever variant of the system of equations, depending on
the keyword arguments.)
Shape:
- A: :math:`(N, N)`
- b: :math:`(N, C)`
- output[0]: :math:`(N, C)`
- output[1]: :math:`(N, N)`
Examples::
>>> A = torch.randn(2,2).triu()
>>> A
-1.8793 0.1567
0.0000 -2.1972
[torch.FloatTensor of size 2x2]
>>> b = torch.randn(2,3)
>>> b
1.8776 -0.0759 1.6590
-0.5676 0.4771 0.7477
[torch.FloatTensor of size 2x3]
>>> torch.trtrs(b, A)
(
-0.9775 0.0223 -0.9112
0.2583 -0.2172 -0.3403
[torch.FloatTensor of size 2x3],
-1.8793 0.1567
0.0000 -2.1972
[torch.FloatTensor of size 2x2])
""")
add_docstr(torch._C.trunc,
r"""
trunc(input, out=None) -> Tensor
Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.
Args:
input (Tensor): the input tensor
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4)
>>> a
-0.4972
1.3512
0.1056
-0.2650
[torch.FloatTensor of size 4]
>>> torch.trunc(a)
-0
1
0
-0
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.unsqueeze,
r"""
unsqueeze(input, dim, out=None)
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A negative dim value can be used and will correspond to
:math:`dim + input.dim() + 1`
Args:
input (Tensor): the input tensor
dim (int): the index at which to insert the singleton dimension
out (Tensor, optional): the output tensor
Example::
>>> x = torch.Tensor([1, 2, 3, 4])
>>> torch.unsqueeze(x, 0)
1 2 3 4
[torch.FloatTensor of size 1x4]
>>> torch.unsqueeze(x, 1)
1
2
3
4
[torch.FloatTensor of size 4x1]
""")
add_docstr(torch._C.var,
r"""
.. function:: var(input, unbiased=True) -> float
Returns the variance of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
input (Tensor): the input tensor
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
-1.3063 1.4182 -0.3061
[torch.FloatTensor of size 1x3]
>>> torch.var(a)
1.899527506513334
.. function:: var(input, dim, keepdim=False, unbiased=True, out=None) -> Tensor
Returns the variance of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the outputs tensor having 1 fewer dimension than :attr:`input`.
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
unbiased (bool): whether to use the unbiased estimation or not
out (Tensor, optional): the output tensor
Example::
>>> a = torch.randn(4, 4)
>>> a
-1.2738 -0.3058 0.1230 -1.9615
0.8771 -0.5430 -0.9233 0.9879
1.4107 0.0317 -0.6823 0.2255
-1.3854 0.4953 -0.2160 0.2435
[torch.FloatTensor of size 4x4]
>>> torch.var(a, 1)
0.8859
0.9509
0.7548
0.6949
[torch.FloatTensor of size 4]
""")
add_docstr(torch._C.zeros,
r"""
zeros(*sizes, out=None) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the varargs :attr:`sizes`.
Args:
sizes (int...): a set of integers defining the shape of the output tensor
out (Tensor, optional): the output tensor
Example::
>>> torch.zeros(2, 3)
0 0 0
0 0 0
[torch.FloatTensor of size 2x3]
>>> torch.zeros(5)
0
0
0
0
0
[torch.FloatTensor of size 5]
""")
add_docstr(torch._C.zeros_like,
r"""
zeros_like(input, out=None) -> Tensor
Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`.
Args:
input (Tensor): the size of the input will determine the size of the output.
out (Tensor, optional): the output tensor
Example::
>>> input = torch.FloatTensor(2, 3)
>>> torch.zeros_like(input)
0 0 0
0 0 0
[torch.FloatTensor of size 2x3]
""")
add_docstr(torch._C.btrifact,
r"""
btrifact(A, info=None, pivot=True) -> (Tensor, IntTensor)
Batch LU factorization.
Returns a tuple containing the LU factorization and pivots.
The optional argument `info` provides information if the
factorization succeeded for each minibatch example.
The info values are from dgetrf and a non-zero value indicates that an error
occurred. The specific values come from CUBLAS if CUDA is being used, otherwise
from LAPACK. Pivoting is done if :attr:`pivot` is set.
Arguments:
A (Tensor): the tensor to factor
Example::
>>> A = torch.randn(2, 3, 3)
>>> A_LU = A.btrifact()
""")
add_docstr(torch._C.btrisolve,
r"""
btrisolve(b, LU_data, LU_pivots) -> Tensor
Batch LU solve.
Returns the LU solve of the linear system :math:`Ax = b`.
Arguments:
b (Tensor): the RHS tensor
LU_data (Tensor): the pivoted LU factorization of A from :meth:`btrifact`.
LU_pivots (IntTensor): the pivots of the LU factorization
Example::
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(2, 3)
>>> A_LU = torch.btrifact(A)
>>> x = b.btrisolve(*A_LU)
>>> torch.norm(A.bmm(x.unsqueeze(2)) - b)
6.664001874625056e-08
""")
| 24.123715
| 129
| 0.622032
|
c071b66bd691c5570255d730ff26a600cdb6ff5a
| 32,839
|
py
|
Python
|
unitorch/models/detectron2/meta_arch/yolo5.py
|
fuliucansheng/UniTorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 2
|
2022-02-05T08:52:00.000Z
|
2022-03-27T07:01:34.000Z
|
unitorch/models/detectron2/meta_arch/yolo5.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | null | null | null |
unitorch/models/detectron2/meta_arch/yolo5.py
|
Lixin-Qian/unitorch
|
47038321593ce4e7eabda555bd58c0cf89482146
|
[
"MIT"
] | 1
|
2022-03-27T07:01:13.000Z
|
2022-03-27T07:01:13.000Z
|
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
import logging
import math
import time
import numpy as np
import torch
import torchvision
from torch import nn, Tensor
from typing import List, Dict, Tuple
from detectron2.config import configurable
from detectron2.layers import ShapeSpec, Conv2d
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.structures import ImageList, Instances, Boxes
from detectron2.utils.events import get_event_storage
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling import build_backbone
class YoloV5Head(nn.Module):
@configurable
def __init__(
self,
*,
input_shape: List[ShapeSpec],
nc,
anchors,
):
super().__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
assert self.nl == len(input_shape)
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer("anchors", a) # shape(nl,na,2)
self.register_buffer("anchor_grid", a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
ch = [x.channels for x in input_shape]
self.m = nn.ModuleList(Conv2d(x, self.no * self.na, 1) for x in ch)
@classmethod
def from_config(cls, cfg, input_shape: List[ShapeSpec]):
nc = cfg.MODEL.YOLOV5.NUM_CLASSES
anchors = cfg.MODEL.YOLOV5.ANCHORS
return {
"input_shape": input_shape,
"nc": nc,
"anchors": anchors,
}
def forward(self, x: List[Tensor]):
"""
Arguments:
features (list[Tensor]): FPN feature map tensors in high to low resolution.
Each tensor in the list corresponds to a different feature level.
Returns:
x (list[Tensor]): #nl tensors,
each having shape [N, na, Hi, Wi, nc + 5]
z (Tensor) : [N, nl*na*(sum of grid sizes) , no] indicating
1. Box position z[..., 0:2]
2. Box width and height z[..., 2:4]
3. Objectness z[..., 4]
4. Class probabilities z[..., 5:]
"""
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
return x
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
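# Illustrative note (not part of the original file): _make_grid produces a
# (1, 1, ny, nx, 2) tensor of integer cell coordinates that is later added to
# the sigmoid-decoded xy offsets during inference. For a tiny 2x3 grid:
# >>> YoloV5Head._make_grid(nx=3, ny=2).shape
# torch.Size([1, 1, 2, 3, 2])
# >>> YoloV5Head._make_grid(nx=3, ny=2)[0, 0, 1, 2]
# tensor([2., 1.])  # cell in row y=1, column x=2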
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
for mi, s in zip(self.m, self.stride): # from
b = mi.bias.view(self.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (self.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
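# Minimal usage sketch (box values are assumptions, not taken from the repo):
# a center-format box [x=10, y=10, w=4, h=6] becomes corner format [8, 7, 12, 13].
# >>> xywh2xyxy(torch.tensor([[10., 10., 4., 6.]]))
# tensor([[ 8.,  7., 12., 13.]])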
def non_max_suppression(
prediction,
conf_thres=0.25,
iou_thres=0.45,
classes=None,
agnostic=False,
multi_label=False,
labels=(),
max_det=300,
):
"""Runs Non-Maximum Suppression (NMS) on inference results
conf_thresh - 0.1 (for yolov4)
iou_thresh - 0.6 (for yolov4)
multi_label - not in yolov4
merge = False - in yolov4 not in yolov3
Labesl = () not in yolov4
Returns:
list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
# For the batched prediction tensor produced by this model, both of the
# following forms give the same value (prediction has shape [bs, boxes, 5 + nc]):
nc = prediction.shape[2] - 5 # number of classes (yolov3 / yolov5 layout)
nc = prediction[0].shape[1] - 5 # number of classes (per-image view, equivalent here)
xc = prediction[..., 4] > conf_thres # candidates
# Checks
assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
# Settings
# (pixels) minimum and maximum box width and height
_, max_wh = 2, 4096
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
for xi, x in enumerate(prediction): # image index, image inference
# Apply constraints
# x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
x = x[xc[xi]] # confidence
# Cat apriori labels if autolabelling - Not used in the YOLOV4
if labels and len(labels[xi]):
l_ = labels[xi]
v = torch.zeros((len(l_), nc + 5), device=x.device)
v[:, :4] = l_[:, 1:5] # box
v[:, 4] = 1.0 # conf
v[range(len(l_)), l_[:, 0].long() + 5] = 1.0 # cls
x = torch.cat((x, v), 0)
#################################################################
# If none remain process next image
if not x.shape[0]:
continue
# Compute conf
x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
# Box (center x, center y, width, height) to (x1, y1, x2, y2)
box = xywh2xyxy(x[:, :4])
# Detections matrix nx6 (xyxy, conf, cls)
if multi_label:
i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
else: # best class only
conf, j = x[:, 5:].max(1, keepdim=True)
x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
# Filter by class
if classes is not None:
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
# Apply finite constraint
# if not torch.isfinite(x).all():
# x = x[torch.isfinite(x).all(1)]
# Check shape
n = x.shape[0] # number of boxes
if not n: # no boxes
continue
# #### Not in Yolov4 ######################
elif n > max_nms: # excess boxes
# sort by confidence
x = x[x[:, 4].argsort(descending=True)[:max_nms]]
###############################################
# Batched NMS
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
# boxes (offset by class), scores
boxes, scores = x[:, :4] + c, x[:, 4]
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
if i.shape[0] > max_det: # limit detections
i = i[:max_det]
if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean)
# update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
weights = iou * scores[None] # box weights
x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
if redundant:
i = i[iou.sum(1) > 1] # require redundancy
output[xi] = x[i]
if (time.time() - t) > time_limit:
print(f"WARNING: NMS time limit {time_limit}s exceeded")
break # time limit exceeded
return output
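# Hedged usage sketch (shapes and thresholds below are assumptions): `prediction`
# is the decoded, concatenated head output of shape (batch, boxes, 5 + nc) with
# channels [x, y, w, h, obj, cls...]; each returned entry is an (n, 6) tensor.
# >>> pred = torch.rand(1, 100, 85)  # e.g. 80 classes -> 85 channels
# >>> dets = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
# >>> dets[0].shape[1]  # [x1, y1, x2, y2, conf, cls]
# 6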
def bbox_iou(
box1,
box2,
x1y1x2y2=True,
GIoU=False,
DIoU=False,
CIoU=False,
EIoU=False,
ECIoU=False,
eps=1e-7,
):
# eps default value used in yolov4 is 1e-9
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (
torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)
).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU or EIoU or ECIoU:
# convex (smallest enclosing box) width
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU or EIoU or ECIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = (
(b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2
) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
# ################### Function from Yolov4 ###########################################
elif EIoU: # Efficient IoU https://arxiv.org/abs/2101.08158
rho3 = (w1 - w2) ** 2
c3 = cw ** 2 + eps
rho4 = (h1 - h2) ** 2
c4 = ch ** 2 + eps
return iou - rho2 / c2 - rho3 / c3 - rho4 / c4 # EIoU
elif ECIoU:
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
rho3 = (w1 - w2) ** 2
c3 = cw ** 2 + eps
rho4 = (h1 - h2) ** 2
c4 = ch ** 2 + eps
return iou - v * alpha - rho2 / c2 - rho3 / c3 - rho4 / c4 # ECIoU
############################################################################################
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
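# Worked example (illustrative only): identical unit squares give IoU 1, and
# CIoU reduces to plain IoU here because the center distance and the aspect
# ratio penalty are both zero (up to the eps regularizer).
# >>> b = torch.tensor([0., 0., 1., 1.])
# >>> bbox_iou(b, b.unsqueeze(0), x1y1x2y2=True, CIoU=True)
# tensor([1.0000])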
def smooth_BCE(
eps=0.1,
): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
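# Worked example (illustrative): with the default eps=0.1 the positive/negative
# BCE targets become 0.95 / 0.05 instead of the hard 1.0 / 0.0 labels.
# >>> smooth_BCE(eps=0.1)
# (0.95, 0.05)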
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria =
# FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = "none" # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power
# for gradient stability
# TF implementation
# https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == "mean":
return loss.mean()
elif self.reduction == "sum":
return loss.sum()
else: # 'none'
return loss
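# Hedged usage sketch (the hyperparameters are illustrative, not repo defaults):
# FocalLoss wraps an element-wise BCEWithLogitsLoss and rescales every element
# by alpha_factor * (1 - p_t) ** gamma before applying the wrapped reduction.
# >>> criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
# >>> logits, targets = torch.randn(4, 3), torch.randint(0, 2, (4, 3)).float()
# >>> loss = criterion(logits, targets)  # scalar, since the wrapped reduction is "mean"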
class ComputeLoss(object):
# Compute losses
@configurable
def __init__(
self,
*,
focal_loss_gamma,
box_loss_gain,
cls_loss_gain,
cls_positive_weight,
obj_loss_gain,
obj_positive_weight,
label_smoothing=0.0,
gr,
na,
nc,
nl,
anchors,
anchor_t,
autobalance=False,
):
super().__init__()
self.sort_obj_iou = False
self.na = na
self.nc = nc
self.nl = nl
self.anchors = anchors
self.box_loss_gain = box_loss_gain
self.cls_loss_gain = cls_loss_gain
self.obj_loss_gain = obj_loss_gain
self.anchor_t = anchor_t
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([cls_positive_weight]))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([obj_positive_weight]))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
# positive, negative BCE targets
self.cp, self.cn = smooth_BCE(eps=label_smoothing)
# Focal loss
if focal_loss_gamma > 0:
BCEcls = FocalLoss(BCEcls, focal_loss_gamma)
BCEobj = FocalLoss(BCEobj, focal_loss_gamma)
# Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
self.ssi = 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.autobalance = (
BCEcls,
BCEobj,
gr,
autobalance,
)
@classmethod
def from_config(cls, cfg, head):
return {
"focal_loss_gamma": cfg.MODEL.YOLOV5.FOCAL_LOSS_GAMMA,
"box_loss_gain": cfg.MODEL.YOLOV5.BOX_LOSS_GAIN,
"cls_loss_gain": cfg.MODEL.YOLOV5.CLS_LOSS_GAIN,
"cls_positive_weight": cfg.MODEL.YOLOV5.CLS_POSITIVE_WEIGHT,
"obj_loss_gain": cfg.MODEL.YOLOV5.OBJ_LOSS_GAIN,
"obj_positive_weight": cfg.MODEL.YOLOV5.OBJ_POSITIVE_WEIGHT,
"label_smoothing": cfg.MODEL.YOLOV5.LABEL_SMOOTHING,
"gr": 1.0,
"na": head.na,
"nc": head.nc,
"nl": head.nl,
"anchors": head.anchors,
"anchor_t": cfg.MODEL.YOLOV5.ANCHOR_T,
"autobalance": False,
}
def _initialize_ssi(self, stride):
if self.autobalance:
self.ssi = list(stride).index(16)
def __call__(self, p, instances): # predictions, targets, model is ignored
device = instances[0].gt_boxes.device
self.to(device)
lcls, lbox, lobj = (
torch.zeros(1, device=device),
torch.zeros(1, device=device),
torch.zeros(1, device=device),
)
tcls, tbox, indices, anchors = self.build_targets(p, instances) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
# prediction subset corresponding to targets
ps = pi[b, a, gj, gi]
# Regression
pxy = ps[:, :2].sigmoid() * 2.0 - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
# iou(prediction, target)
iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)
lbox += (1.0 - iou).mean() # iou loss
# Objectness
score_iou = iou.detach().clamp(0).type(tobj.dtype)
if self.sort_obj_iou:
sort_id = torch.argsort(score_iou)
b, a, gj, gi, score_iou = (
b[sort_id],
a[sort_id],
gj[sort_id],
gi[sort_id],
score_iou[sort_id],
)
tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio
# Classification
if self.nc > 1: # cls loss (only if multiple classes)
t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets
t[range(n), tcls[i]] = self.cp
lcls += self.BCEcls(ps[:, 5:], t) # BCE
# Append targets to text file
# with open('targets.txt', 'a') as file:
# [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
obji = self.BCEobj(pi[..., 4], tobj)
lobj += obji * self.balance[i] # obj loss
if self.autobalance:
self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
if self.autobalance:
self.balance = [x / self.balance[self.ssi] for x in self.balance]
lbox *= self.box_loss_gain
lobj *= self.obj_loss_gain
lcls *= self.cls_loss_gain
# bs = tobj.shape[0] # batch size
# loss = lbox + lobj + lcls
# return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
return {
"loss_box": lbox,
"loss_obj": lobj,
"loss_cls": lcls,
}
def build_targets(self, p, gt_instances):
"""
Args:
p (list[Tensors]): A list of #feature level predictions
gt_instances (list[Instances]): a list of N `Instances`s. The i-th
`Instances` contains the ground-truth per-instance annotations
for the i-th input image.
"""
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
targets = []
for i, gt_per_image in enumerate(gt_instances):
# Convert the boxes to target format of shape [sum(nL per image), 6]
# where each target entry is [img_index, class, x, y, w, h],
# x, y, w, h - relative and x, y are centers
if len(gt_per_image) > 0:
boxes = gt_per_image.gt_boxes.tensor.clone()
h, w = gt_per_image.image_size
boxes[:, 0:2] = (boxes[:, 0:2] + boxes[:, 2:4]) / 2
boxes[:, 2:4] = (boxes[:, 2:4] - boxes[:, 0:2]) * 2
boxes[:, ::2] /= float(w)
boxes[:, 1::2] /= float(h)
classes = torch.unsqueeze(gt_per_image.gt_classes.clone(), dim=1)
t = torch.cat([torch.ones_like(classes) * i, classes, boxes], dim=1)
targets.append(t)
targets = torch.cat(targets, 0)
na, nt = self.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
# normalized to gridspace gain
gain = torch.ones(7, device=targets.device)
ai = (
torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)
) # same as .repeat_interleave(nt)
# append anchor indices
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)
g = 0.5 # bias
off = (
torch.tensor(
[
[0, 0],
[1, 0],
[0, 1],
[-1, 0],
[0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
],
device=targets.device,
).float()
* g
) # offsets
for i in range(self.nl):
anchors = self.anchors[i]
gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1.0 / r).max(2)[0] < self.anchor_t # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] #
# iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1.0 < g) & (gxy > 1.0)).T
l, m = ((gxi % 1.0 < g) & (gxi > 1.0)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
# image, anchor, grid indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
def to(self, device):
self.anchors = self.anchors.to(device)
self.BCEcls.pos_weight = self.BCEcls.pos_weight.to(device)
self.BCEobj.pos_weight = self.BCEobj.pos_weight.to(device)
@META_ARCH_REGISTRY.register()
class YoloV5(nn.Module):
"""
Implement YoloV5
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
head: nn.Module,
loss,
num_classes,
conf_thres,
iou_thres,
pixel_mean,
pixel_std,
vis_period=0,
input_format="BGR",
):
super().__init__()
self.backbone = backbone
self.head = head
self.num_classes = num_classes
self.single_cls = num_classes == 1
# Inference Parameters
self.conf_thres = conf_thres
self.iou_thres = iou_thres
# Vis parameters
self.vis_period = vis_period
self.input_format = input_format
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
"""
In Detectron1, loss is normalized by number of foreground samples in the batch.
When batch size is 1 per GPU, #foreground has a large variance and
using it leads to lower performance. Here we maintain an EMA of #foreground to
stabilize the normalizer.
"""
self.loss = loss
# self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small
# self.loss_normalizer_momentum = 0.9
self.init_stride()
self.apply(self._init_weights)
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.BatchNorm2d):
module.eps = 1e-3
module.momentum = 0.03
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
feature_shapes = list(backbone_shape.values())
head = YoloV5Head(cfg, feature_shapes)
loss = ComputeLoss(cfg, head)
return {
"backbone": backbone,
"head": head,
"loss": loss,
"num_classes": head.nc,
"conf_thres": cfg.MODEL.YOLOV5.CONF_THRESH,
"iou_thres": cfg.MODEL.YOLOV5.IOU_THRES,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"vis_period": cfg.VIS_PERIOD,
"input_format": cfg.INPUT.FORMAT,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, results):
"""
A function used to visualize ground truth images and final network predictions.
It shows ground truth bounding boxes on the original image and up to 20
predicted object bounding boxes on the original image.
Args:
batched_inputs (list): a list that contains input to the model.
results (List[Instances]): a list of #images elements.
"""
from detectron2.utils.visualizer import Visualizer
assert len(batched_inputs) == len(results), "Cannot visualize inputs and results of different sizes"
storage = get_event_storage()
max_boxes = 20
image_index = 0 # only visualize a single image
img = batched_inputs[image_index]["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes)
anno_img = v_gt.get_image()
processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])
predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])
prop_img = v_pred.get_image()
vis_img = np.vstack((anno_img, prop_img))
vis_img = vis_img.transpose(2, 0, 1)
vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results"
storage.put_image(vis_name, vis_img)
def init_stride(self):
s = 256 # 2x min stride
dummy_input = torch.zeros(1, len(self.pixel_mean), s, s)
features = self.backbone(dummy_input)
features = list(features.values())
pred = self.head(features)
self.head.stride = torch.tensor([s / x.shape[-2] for x in pred]) # forward
self.head.anchors /= self.head.stride.view(-1, 1, 1)
self.stride = self.head.stride
self.head._initialize_biases() # only run once
self.loss._initialize_ssi(self.stride)
def forward(self, batched_inputs: Tuple[Dict[str, Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances: Instances
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the
loss. Used during training only. In inference, the standard output format, described
in :doc:`/tutorials/models`.
"""
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
features = list(features.values())
pred = self.head(features)
if self.training:
assert not torch.jit.is_scripting(), "Not supported"
assert "instances" in batched_inputs[0], "Instance annotations are missing in training!"
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
losses = self.loss(pred, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
results = self.inference(pred, images.image_sizes)
self.visualize_training(batched_inputs, results)
return losses
else:
results = self.inference(pred, images.image_sizes)
if torch.jit.is_scripting():
return results
processed_results = []
for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
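# Hedged inference sketch (the dict layout mirrors Detectron2's DatasetMapper
# output; the image size and key values are assumptions):
# >>> model = YoloV5(cfg)  # normally built through META_ARCH_REGISTRY / build_model
# >>> model.eval()
# >>> inputs = [{"image": torch.zeros(3, 640, 640), "height": 480, "width": 640}]
# >>> with torch.no_grad():
# ...     outputs = model(inputs)  # list of {"instances": Instances}
# >>> outputs[0]["instances"].pred_boxes  # Boxes rescaled to the requested output size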
def inference(self, x, image_sizes):
"""
Returns:
z (Tensor) : [N, nl*na*(sum of grid sizes) , no] indicating
1. Box position z[..., 0:2]
2. Box width and height z[..., 2:4]
3. Objectness z[..., 4]
4. Class probabilities z[..., 5:]
"""
z = []
for i in range(self.head.nl):
# x(bs,na,ny,nx,no)
bs, _, ny, nx, _ = x[i].shape
if self.head.grid[i].shape[2:4] != x[i].shape[2:4]:
self.head.grid[i] = self.head._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
# if self.head.inplace:
y[..., 0:2] = (y[..., 0:2] * 2.0 - 0.5 + self.head.grid[i]) * self.head.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.head.anchor_grid[i] # wh
# else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953
# xy = (y[..., 0:2] * 2. - 0.5 + self.head.grid[i]) * self.head.stride[i] # xy
# wh = (y[..., 2:4] * 2) ** 2 * self.head.anchor_grid[i].view(1, self.head.na, 1, 1, 2) # wh
# y = torch.cat((xy, wh, y[..., 4:]), -1)
z.append(y.view(bs, -1, self.head.no))
return self.process_inference(torch.cat(z, 1), image_sizes)
def process_inference(self, out, image_sizes):
out = non_max_suppression(
out,
self.conf_thres,
self.iou_thres,
multi_label=True,
agnostic=self.single_cls,
)
assert len(out) == len(image_sizes)
results_all: List[Instances] = []
# Statistics per image
for si, (pred, img_size) in enumerate(zip(out, image_sizes)):
if len(pred) == 0:
result = Instances(img_size)
result.pred_boxes = Boxes(torch.tensor([]))
result.scores = torch.tensor([])
result.pred_classes = torch.tensor([])
else:
# Predictions
if self.single_cls:
pred[:, 5] = 0
predn = pred.clone()
# Predn shape [ndets, 6] of format [xyxy, conf, cls] relative to the input image size
result = Instances(img_size)
result.pred_boxes = Boxes(predn[:, :4]) # TODO: Check if resizing needed
result.scores = predn[:, 4]
result.pred_classes = predn[:, 5].int() # TODO: Check the classes
results_all.append(result)
return results_all
def preprocess_image(self, batched_inputs: Tuple[Dict[str, Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
| 40.047561
| 115
| 0.537471
|
d721a69ff8ef95e9d41900ee9f05894f8e1a9fb7
| 1,666
|
py
|
Python
|
app/__init__.py
|
RamtinHaf/Secure-Social-media
|
1958f190a36f3618fec12e51546383817d05d75c
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
RamtinHaf/Secure-Social-media
|
1958f190a36f3618fec12e51546383817d05d75c
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
RamtinHaf/Secure-Social-media
|
1958f190a36f3618fec12e51546383817d05d75c
|
[
"MIT"
] | null | null | null |
from flask import Flask, g
from config import Config
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, current_user
import sqlite3
import os
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from flask_wtf.csrf import CSRFProtect
# create and configure app
app = Flask(__name__)
csrf = CSRFProtect(app)
csrf.init_app(app)
Bootstrap(app)
app.config.from_object(Config)
login = LoginManager(app)
login.login_view = 'index' #requiring user login
# get an instance of the db
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(app.config['DATABASE'])
db.row_factory = sqlite3.Row
return db
# initialize db for the first time
def init_db():
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# perform generic query, not very secure yet
def query_db(query, args ,one=False):
db = get_db()
cursor = db.execute(query, args)
rv = cursor.fetchall()
cursor.close()
db.commit()
return (rv[0] if rv else None) if one else rv
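# Illustrative usage (the table and column names are assumptions, not from this app):
# user = query_db('SELECT * FROM users WHERE username = ?', (username,), one=True)
# Passing values through `args` keeps the statement parameterized rather than
# string-formatted, which avoids the most common SQL injection mistakes.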
# automatically called when application is closed, and closes db connection
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
# initialize db if it does not exist
if not os.path.exists(app.config['DATABASE']):
init_db()
if not os.path.exists(app.config['UPLOAD_PATH']):
os.mkdir(app.config['UPLOAD_PATH'])
from app import routes, models
| 26.03125
| 75
| 0.711885
|
f0d7a204ea0a6ea10e682179c7c520c956ff5776
| 22,474
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/metrics/text_detection.py
|
apankratovantonp/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 5
|
2020-03-09T07:39:04.000Z
|
2021-08-16T07:17:28.000Z
|
tools/accuracy_checker/accuracy_checker/metrics/text_detection.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/accuracy_checker/metrics/text_detection.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 3
|
2020-07-06T08:45:26.000Z
|
2020-11-12T10:14:45.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from collections import namedtuple
import numpy as np
from .metric import PerImageEvaluationMetric
from ..config import BoolField, NumberField
from ..representation import TextDetectionPrediction, TextDetectionAnnotation
from ..utils import polygon_from_points
def calculate_recall_precision_matrix(gt_rects, prediction_rects):
num_gt = len(gt_rects)
num_det = len(prediction_rects)
output_shape = [num_gt, num_det]
recall_mat = np.empty(output_shape)
precision_mat = np.empty(output_shape)
for gt_id, gt_rect in enumerate(gt_rects):
for pred_id, pred_rect in enumerate(prediction_rects):
intersected_area = rect_area(gt_rect, pred_rect)
rg_dimensions = (gt_rect.xmax - gt_rect.xmin + 1) * (gt_rect.ymax - gt_rect.ymin + 1)
rd_dimensions = (pred_rect.xmax - pred_rect.xmin + 1) * (pred_rect.ymax - pred_rect.ymin + 1)
recall_mat[gt_id, pred_id] = 0 if rg_dimensions == 0 else intersected_area / rg_dimensions
precision_mat[gt_id, pred_id] = 0 if rd_dimensions == 0 else intersected_area / rd_dimensions
return recall_mat, precision_mat
def get_union(detection_polygon, annotation_polygon):
area_prediction = detection_polygon.area
area_annotation = annotation_polygon.area
return area_prediction + area_annotation - get_intersection_area(detection_polygon, annotation_polygon)
def get_intersection_over_union(detection_polygon, annotation_polygon):
union = get_union(detection_polygon, annotation_polygon)
intersection = get_intersection_area(detection_polygon, annotation_polygon)
return intersection / union if union != 0 else 0.0
def get_intersection_area(detection_polygon, annotation_polygon):
return detection_polygon.intersection(annotation_polygon).area
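# Minimal sketch of the polygon IoU helpers above, using shapely boxes as
# stand-in polygons (the helper name and the concrete coordinates are
# illustrative assumptions, not test data).
def _iou_example():
    from shapely.geometry import box
    a = box(0, 0, 2, 2)  # 2x2 square at the origin, area 4
    b = box(1, 1, 3, 3)  # 2x2 square shifted by (1, 1), area 4
    # the boxes overlap in a 1x1 square, so union = 4 + 4 - 1 = 7 and IoU = 1/7
    return get_intersection_over_union(a, b)  # ~0.143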
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
Point = namedtuple('Point', 'x y')
def rect_center(r):
x = float(r.xmin) + float(r.xmax - r.xmin + 1) / 2.
y = float(r.ymin) + float(r.ymax - r.ymin + 1) / 2.
return Point(x, y)
def rect_point_distance(r1, r2):
distx = math.fabs(r1.x - r2.x)
disty = math.fabs(r1.y - r2.y)
return math.sqrt(distx * distx + disty * disty)
def rect_center_distance(r1, r2):
return rect_point_distance(rect_center(r1), rect_center(r2))
def rect_diag(r):
w = (r.xmax - r.xmin + 1)
h = (r.ymax - r.ymin + 1)
return math.sqrt(h * h + w * w)
def rect_area(a, b):
dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin) + 1
dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin) + 1
if (dx >= 0) and (dy >= 0):
return dx*dy
return 0.
def rect_from_points(points):
return Rectangle(*points)
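# Minimal sketch of the rectangle-based recall/precision matrix (the boxes
# below are illustrative assumptions): a 10x10 ground truth and a prediction
# covering its left half.
def _rect_matrix_example():
    gt = Rectangle(xmin=0, ymin=0, xmax=9, ymax=9)    # area 100
    pred = Rectangle(xmin=0, ymin=0, xmax=4, ymax=9)  # area 50, fully inside gt
    recall_mat, precision_mat = calculate_recall_precision_matrix([gt], [pred])
    # the intersection equals the prediction, so recall = 50 / 100 = 0.5
    # and precision = 50 / 50 = 1.0
    return recall_mat[0, 0], precision_mat[0, 0]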
class FocusedTextLocalizationMetric(PerImageEvaluationMetric):
annotation_types = (TextDetectionAnnotation, )
prediction_types = (TextDetectionPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'area_recall_constrain': NumberField(
min_value=0, max_value=1, optional=True, default=0.5,
description="Minimal value for recall that allows to make decision "
"that prediction polygon matched with annotation."
),
'ignore_difficult': BoolField(
optional=True, default=True,
description="Allows to ignore difficult ground truth text polygons in metric calculation."
),
'area_precision_constrain': NumberField(
min_value=0, max_value=1, optional=True, default=0.5,
description="Minimal value for precision that allows to make decision "
"that prediction polygon matched with annotation."
),
'center_diff_threshold': NumberField(min_value=0, optional=True, default=1),
'one_to_one_match_score': NumberField(
min_value=0, optional=True, max_value=1, default=1,
description='weight for one to one matching results',
),
'one_to_many_match_score': NumberField(
min_value=0, optional=True, max_value=1, default=0.8,
description='weight for one to many matching results',
),
'many_to_one_match_score': NumberField(
min_value=0, optional=True, max_value=1, default=1,
description='weight for many to one matching results',
)
})
return parameters
def configure(self):
self.area_recall_constrain = self.get_value_from_config('area_recall_constrain')
self.area_precision_constrain = self.get_value_from_config('area_precision_constrain')
self.ignore_difficult = self.get_value_from_config('ignore_difficult')
self.center_diff_threshold = self.get_value_from_config('center_diff_threshold')
self.one_to_one_match_score = self.get_value_from_config('one_to_one_match_score')
self.one_to_many_match_score = self.get_value_from_config('one_to_many_match_score')
self.many_to_one_match_score = self.get_value_from_config('many_to_one_match_score')
self.num_valid_gt = 0
self.num_valid_detections = 0
self.precision_sum = 0
self.recall_sum = 0
def update(self, annotation, prediction):
gt_rects = list(map(rect_from_points, annotation.boxes))
prediction_rects = list(map(rect_from_points, prediction.boxes))
num_gt = len(gt_rects)
num_det = len(prediction_rects)
gt_difficult_mask = np.full(num_gt, False)
prediction_difficult_mask = np.full(num_det, False)
if self.ignore_difficult:
gt_difficult_inds = annotation.metadata.get('difficult_boxes', [])
prediction_difficult_inds = prediction.metadata.get('difficult_boxes', [])
gt_difficult_mask[gt_difficult_inds] = True
prediction_difficult_mask[prediction_difficult_inds] = True
prediction_difficult_mask = self._update_difficult_prediction_mask(
gt_difficult_inds, prediction_difficult_mask, gt_rects, prediction_rects
)
num_ignored_gt = np.sum(gt_difficult_mask)
num_ignored_pred = np.sum(prediction_difficult_mask)
num_valid_gt = num_gt - num_ignored_gt
num_valid_pred = num_det - num_ignored_pred
self.num_valid_detections += num_valid_pred
self.num_valid_gt += num_valid_gt
if num_gt == 0:
recall = 1
precision = 0 if num_det > 0 else 1
self.precision_sum += precision
self.recall_sum += recall
return
recall_accum = 0
precision_accum = 0
if num_det > 0:
gt_rect_mat = np.zeros(num_gt, np.int8)
det_rect_mat = np.zeros(num_det, np.int8)
            recall_mat, precision_mat = calculate_recall_precision_matrix(gt_rects, prediction_rects)
            one_to_one_recall, one_to_one_precision, det_rect_mat, gt_rect_mat = self._one_to_one_match(
gt_rects, prediction_rects,
gt_difficult_mask, prediction_difficult_mask,
gt_rect_mat, det_rect_mat,
recall_mat, precision_mat
)
recall_accum += one_to_one_recall
            precision_accum += one_to_one_precision
one_to_many_recall, one_to_many_precision, det_rect_mat, gt_rect_mat = self._one_to_many_match(
gt_rects, gt_difficult_mask, prediction_difficult_mask, gt_rect_mat, det_rect_mat,
recall_mat, precision_mat
)
recall_accum += one_to_many_recall
precision_accum += one_to_many_precision
many_to_one_recall, many_to_one_precision, det_rect_mat, gt_rect_mat = self._many_to_one_match(
prediction_rects, prediction_difficult_mask, gt_difficult_mask, gt_rect_mat, det_rect_mat,
recall_mat, precision_mat,
)
recall_accum += many_to_one_recall
precision_accum += many_to_one_precision
if num_valid_gt == 0:
recall = float(1)
precision = float(0) if num_valid_pred > 0 else float(1)
else:
recall = float(recall_accum)
precision = float(0) if num_valid_pred == 0 else float(precision_accum)
self.recall_sum += recall
self.precision_sum += precision
def evaluate(self, annotations, predictions):
raise NotImplementedError()
def _update_difficult_prediction_mask(self, gt_difficult_inds, dt_difficult_mask, gt_rects, dt_rects):
for det_id, detection_rect in enumerate(dt_rects):
for gt_difficult_id in gt_difficult_inds:
gt_difficult_rect = gt_rects[gt_difficult_id]
intersected_area = rect_area(gt_difficult_rect, detection_rect)
width = detection_rect.xmax - detection_rect.xmin + 1
height = detection_rect.ymax - detection_rect.ymin + 1
rd_dimensions = width * height
if rd_dimensions == 0:
precision = 0
else:
precision = intersected_area / rd_dimensions
if precision > self.area_precision_constrain:
dt_difficult_mask[det_id] = True
return dt_difficult_mask
def _one_to_one_match(
self, gt_rects, prediction_rects, gt_difficult_mask, prediction_difficult_mask, gt_rect_mat, det_rect_mat,
recall_mat, precision_mat
):
def match_rects(row, col, recall_mat, precision_mat):
cont = 0
for j in range(len(recall_mat[0])):
recall_constrain_pass = recall_mat[row, j] >= self.area_recall_constrain
precision_constrain_pass = precision_mat[row, j] >= self.area_precision_constrain
if recall_constrain_pass and precision_constrain_pass:
cont += 1
if cont != 1:
return False
cont = 0
for i in range(len(recall_mat)):
recall_constrain_pass = recall_mat[i, col] >= self.area_recall_constrain
precision_constrain_pass = precision_mat[i, col] >= self.area_precision_constrain
if recall_constrain_pass and precision_constrain_pass:
cont += 1
if cont != 1:
return False
recall_constrain_pass = recall_mat[row, col] >= self.area_recall_constrain
precision_constrain_pass = precision_mat[row, col] >= self.area_precision_constrain
if recall_constrain_pass and precision_constrain_pass:
return True
return False
recall_accum = 0
precision_accum = 0
for gt_id, gt_rect in enumerate(gt_rects):
for pred_id, pred_rect in enumerate(prediction_rects):
both_not_matched = not gt_rect_mat[gt_id] and not det_rect_mat[pred_id]
difficult = gt_difficult_mask[gt_id] and prediction_difficult_mask[pred_id]
if both_not_matched and not difficult:
match = match_rects(gt_id, pred_id, recall_mat, precision_mat)
if match:
norm_distance = rect_center_distance(gt_rect, pred_rect)
norm_distance /= rect_diag(gt_rect) + rect_diag(pred_rect)
norm_distance *= 2.0
if norm_distance < self.center_diff_threshold:
gt_rect_mat[gt_id] = self.one_to_one_match_score
det_rect_mat[pred_id] = 1
recall_accum += 1
precision_accum += 1
return recall_accum, precision_accum, det_rect_mat, gt_rect_mat
def _one_to_many_match(
self, gt_rects, gt_difficult_mask, pred_difficult_mask, gt_rect_mat, det_rect_mat, recall_mat, precision_mat
):
def match_rects(gt_id, recall_mat, precision_mat, gt_rect_mat, det_rect_mat, pred_difficult_mask):
many_sum = 0
det_rects = []
for det_num in range(len(recall_mat[0])):
                if gt_rect_mat[gt_id] == 0 and det_rect_mat[det_num] == 0 and not pred_difficult_mask[det_num]:
if precision_mat[gt_id, det_num] >= self.area_precision_constrain:
many_sum += recall_mat[gt_id, det_num]
det_rects.append(det_num)
if many_sum >= self.area_recall_constrain:
return True, det_rects
return False, []
recall_accum = 0
precision_accum = 0
for gt_id, _ in enumerate(gt_rects):
if not gt_difficult_mask[gt_id]:
match, matches_det = match_rects(
gt_id, recall_mat, precision_mat, gt_rect_mat, det_rect_mat, pred_difficult_mask
)
if match:
gt_rect_mat[gt_id] = 1
recall_accum += self.one_to_many_match_score
precision_accum += self.one_to_many_match_score * len(matches_det)
for det_id in matches_det:
det_rect_mat[det_id] = 1
return recall_accum, precision_accum, det_rect_mat, gt_rect_mat
def _many_to_one_match(
self, prediction_rects, prediction_difficult_mask, gt_difficult_mask, gt_rect_mat, det_rect_mat,
recall_mat, precision_mat
):
def match_rects(det_id, recall_mat, precision_mat, gt_rect_mat, det_rect_mat, gt_difficult_mask):
many_sum = 0
gt_rects = []
for gt_id in range(len(recall_mat)):
if gt_rect_mat[gt_id] == 0 and det_rect_mat[det_id] == 0 and not gt_difficult_mask[gt_id]:
if recall_mat[gt_id, det_id] >= self.area_recall_constrain:
many_sum += precision_mat[gt_id, det_id]
gt_rects.append(gt_id)
if many_sum >= self.area_precision_constrain:
return True, gt_rects
return False, []
recall_accum = 0
precision_accum = 0
for pred_id, _ in enumerate(prediction_rects):
if not prediction_difficult_mask[pred_id]:
match, matches_gt = match_rects(
pred_id, recall_mat, precision_mat, gt_rect_mat, det_rect_mat, gt_difficult_mask
)
if match:
det_rect_mat[pred_id] = 1
recall_accum += self.many_to_one_match_score * len(matches_gt)
precision_accum += self.many_to_one_match_score
for gt_id in matches_gt:
gt_rect_mat[gt_id] = 1
return recall_accum, precision_accum, det_rect_mat, gt_rect_mat
def reset(self):
self.num_valid_gt = 0
self.num_valid_detections = 0
self.precision_sum = 0
self.recall_sum = 0
class FocusedTextLocalizationPrecision(FocusedTextLocalizationMetric):
__provider__ = 'focused_text_precision'
def evaluate(self, annotations, predictions):
return self.precision_sum / self.num_valid_detections if self.num_valid_detections != 0 else 0
class FocusedTextLocalizationRecall(FocusedTextLocalizationMetric):
__provider__ = 'focused_text_recall'
def evaluate(self, annotations, predictions):
return self.recall_sum / self.num_valid_gt if self.num_valid_gt != 0 else 0
class FocusedTextLocalizationHMean(FocusedTextLocalizationMetric):
__provider__ = 'focused_text_hmean'
def evaluate(self, annotations, predictions):
recall = self.recall_sum / self.num_valid_gt if self.num_valid_gt != 0 else 0
precision = self.precision_sum / self.num_valid_detections if self.num_valid_detections != 0 else 0
return 2 * recall * precision / (recall + precision) if recall + precision != 0 else 0
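# Worked example for the harmonic mean above (illustrative numbers, not taken
# from any dataset): with recall = 0.8 and precision = 0.6,
# hmean = 2 * 0.8 * 0.6 / (0.8 + 0.6) = 0.96 / 1.4 ~= 0.686,
# i.e. the combined score stays close to the smaller of the two values.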
class IncidentalSceneTextLocalizationMetric(PerImageEvaluationMetric):
annotation_types = (TextDetectionAnnotation, )
prediction_types = (TextDetectionPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'iou_constrain': NumberField(
min_value=0, max_value=1, optional=True, default=0.5,
description="Minimal value for intersection over union that allows to make decision "
"that prediction polygon is true positive."
),
'ignore_difficult': BoolField(
optional=True, default=True,
description="Allows to ignore difficult ground truth text polygons in metric calculation."
),
'area_precision_constrain': NumberField(
min_value=0, max_value=1, optional=True, default=0.5,
description="Minimal value for intersection over union that allows to make decision "
"that prediction polygon matched with ignored annotation."
)
})
return parameters
def configure(self):
self.iou_constrain = self.get_value_from_config('iou_constrain')
self.area_precision_constrain = self.get_value_from_config('area_precision_constrain')
self.ignore_difficult = self.get_value_from_config('ignore_difficult')
self.number_matched_detections = 0
self.number_valid_annotations = 0
self.number_valid_detections = 0
def update(self, annotation, prediction):
gt_polygons = list(map(polygon_from_points, annotation.points))
prediction_polygons = list(map(polygon_from_points, prediction.points))
num_gt = len(gt_polygons)
num_det = len(prediction_polygons)
gt_difficult_mask = np.full(num_gt, False)
prediction_difficult_mask = np.full(num_det, False)
num_det_matched = 0
if self.ignore_difficult:
gt_difficult_inds = annotation.metadata.get('difficult_boxes', [])
prediction_difficult_inds = prediction.metadata.get('difficult_boxes', [])
gt_difficult_mask[gt_difficult_inds] = True
prediction_difficult_mask[prediction_difficult_inds] = True
for det_id, detection_polygon in enumerate(prediction_polygons):
for gt_difficult_id in gt_difficult_inds:
gt_difficult_polygon = gt_polygons[gt_difficult_id]
intersected_area = get_intersection_area(gt_difficult_polygon, detection_polygon)
pd_dimensions = detection_polygon.area
precision = 0 if pd_dimensions == 0 else intersected_area / pd_dimensions
if precision >= self.area_precision_constrain:
prediction_difficult_mask[det_id] = True
break
if num_gt > 0 and num_det > 0:
iou_matrix = np.empty((num_gt, num_det))
gt_matched = np.zeros(num_gt, np.int8)
det_matched = np.zeros(num_det, np.int8)
for gt_id, gt_polygon in enumerate(gt_polygons):
for pred_id, pred_polygon in enumerate(prediction_polygons):
iou_matrix[gt_id, pred_id] = get_intersection_over_union(pred_polygon, gt_polygon)
not_matched_before = gt_matched[gt_id] == 0 and det_matched[pred_id] == 0
not_difficult = not gt_difficult_mask[gt_id] and not prediction_difficult_mask[pred_id]
if not_matched_before and not_difficult:
if iou_matrix[gt_id, pred_id] >= self.iou_constrain:
gt_matched[gt_id] = 1
det_matched[pred_id] = 1
num_det_matched += 1
num_ignored_gt = np.sum(gt_difficult_mask)
num_ignored_pred = np.sum(prediction_difficult_mask)
num_valid_gt = num_gt - num_ignored_gt
num_valid_pred = num_det - num_ignored_pred
self.number_matched_detections += num_det_matched
self.number_valid_annotations += num_valid_gt
self.number_valid_detections += num_valid_pred
def evaluate(self, annotations, predictions):
raise NotImplementedError()
def reset(self):
self.number_matched_detections = 0
self.number_valid_annotations = 0
self.number_valid_detections = 0
class IncidentalSceneTextLocalizationPrecision(IncidentalSceneTextLocalizationMetric):
__provider__ = 'incidental_text_precision'
def evaluate(self, annotations, predictions):
precision = (
0 if self.number_valid_detections == 0
else float(self.number_matched_detections) / self.number_valid_detections
)
return precision
class IncidentalSceneTextLocalizationRecall(IncidentalSceneTextLocalizationMetric):
__provider__ = 'incidental_text_recall'
def evaluate(self, annotations, predictions):
recall = (
0 if self.number_valid_annotations == 0
else float(self.number_matched_detections) / self.number_valid_annotations
)
return recall
class IncidentalSceneTextLocalizationHMean(IncidentalSceneTextLocalizationMetric):
__provider__ = 'incidental_text_hmean'
def evaluate(self, annotations, predictions):
recall = (
0 if self.number_valid_annotations == 0
else float(self.number_matched_detections) / self.number_valid_annotations
)
precision = (
0 if self.number_valid_detections == 0
else float(self.number_matched_detections) / self.number_valid_detections
)
return 0 if recall + precision == 0 else 2 * recall * precision / (recall + precision)
| 43.638835
| 120
| 0.651775
|
77c1f2ce5210aedc3becfd36aa3e42b1d7725182
| 1,026
|
py
|
Python
|
ExamplesFromChapters/Chapter3/ClusteringWithGaussians.py
|
pyarnold/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
d91ec0e751f2016b5a02457de205c7c5ad9880c2
|
[
"MIT"
] | 1
|
2020-12-18T01:07:45.000Z
|
2020-12-18T01:07:45.000Z
|
ExamplesFromChapters/Chapter3/ClusteringWithGaussians.py
|
pyarnold/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
d91ec0e751f2016b5a02457de205c7c5ad9880c2
|
[
"MIT"
] | null | null | null |
ExamplesFromChapters/Chapter3/ClusteringWithGaussians.py
|
pyarnold/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
d91ec0e751f2016b5a02457de205c7c5ad9880c2
|
[
"MIT"
] | null | null | null |
import numpy as np
import pymc as pm
data = np.loadtxt("../../Chapter3_MCMC/data/mixture_data.csv", delimiter=",")
p = pm.Uniform("p", 0, 1)
assignment = pm.Categorical("assignment", [p, 1 - p], size=data.shape[0])
taus = 1.0 / pm.Uniform("stds", 0, 100, size=2) ** 2 # notice the size!
centers = pm.Normal("centers", [150, 150], [0.001, 0.001], size=2)
"""
The deterministic functions below map an assignment, in this case 0 or 1,
to a set of parameters, located in the (1, 2) arrays `taus` and `centers`.
"""
@pm.deterministic
def center_i(assignment=assignment, centers=centers):
return centers[assignment]
@pm.deterministic
def tau_i(assignment=assignment, taus=taus):
return taus[assignment]
# and to combine it with the observations:
observations = pm.Normal("obs", center_i, tau_i,
value=data, observed=True)
# below we create a model class
model = pm.Model([p, assignment, taus, centers])
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(100000, 50000)
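# A minimal post-sampling sketch (not part of the original example): inspect the
# kept (post burn-in) samples of the cluster centers from the MCMC trace.
center_trace = mcmc.trace("centers")[:]
print("posterior mean of the cluster centers:", center_trace.mean(axis=0))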
| 24.428571
| 78
| 0.680312
|
77e33a71b5ce05b596f44a592aedaa3ca84bb8b2
| 4,591
|
py
|
Python
|
davis2017/evaluation.py
|
caganselim/davisforall
|
e6937544763ae62efdea3fc0575a854411ea2fef
|
[
"BSD-3-Clause"
] | null | null | null |
davis2017/evaluation.py
|
caganselim/davisforall
|
e6937544763ae62efdea3fc0575a854411ea2fef
|
[
"BSD-3-Clause"
] | null | null | null |
davis2017/evaluation.py
|
caganselim/davisforall
|
e6937544763ae62efdea3fc0575a854411ea2fef
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
from davis2017.davis import VOSDataset
from davis2017.metrics import db_eval_boundary, db_eval_iou
from davis2017 import utils
from davis2017.results import Results
from scipy.optimize import linear_sum_assignment
class DAVISEvaluation(object):
def __init__(self, dataset_root, img_folder, mask_folder, imagesets_path):
"""
        :param dataset_root: Path to the VOS-style folder that contains the JPEGImages, Annotations, etc. folders.
        :param img_folder: Name of the folder that contains the input image frames.
        :param mask_folder: Name of the folder that contains the ground-truth masks.
        :param imagesets_path: Path to the image set file listing the sequences to evaluate.
"""
self.davis_root = dataset_root
self.dataset = VOSDataset(root=dataset_root, img_folder=img_folder,
mask_folder=mask_folder, imagesets_path=imagesets_path)
@staticmethod
def _evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric, max_n_proposals=20):
if all_res_masks.shape[0] > max_n_proposals:
sys.stdout.write(f"\nIn your PNG files there is an index higher than the maximum number ({max_n_proposals}) of proposals allowed!")
sys.exit()
elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0], *all_res_masks.shape[1:]))
all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
j_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
f_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0], all_gt_masks.shape[1]))
for ii in range(all_gt_masks.shape[0]):
for jj in range(all_res_masks.shape[0]):
if 'J' in metric:
j_metrics_res[jj, ii, :] = db_eval_iou(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'F' in metric:
f_metrics_res[jj, ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
if 'J' in metric and 'F' in metric:
all_metrics = (np.mean(j_metrics_res, axis=2) + np.mean(f_metrics_res, axis=2)) / 2
else:
all_metrics = np.mean(j_metrics_res, axis=2) if 'J' in metric else np.mean(f_metrics_res, axis=2)
row_ind, col_ind = linear_sum_assignment(-all_metrics)
return j_metrics_res[row_ind, col_ind, :], f_metrics_res[row_ind, col_ind, :]
def evaluate(self, res_path, metric=('J', 'F'), debug=False):
metric = metric if isinstance(metric, tuple) or isinstance(metric, list) else [metric]
if 'T' in metric:
raise ValueError('Temporal metric not supported!')
if 'J' not in metric and 'F' not in metric:
raise ValueError('Metric possible values are J for IoU or F for Boundary')
# Containers
metrics_res = {}
if 'J' in metric:
metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
if 'F' in metric:
metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
# Sweep all sequences
results = Results(root_dir=res_path)
for seq in tqdm(list(self.dataset.get_sequences())):
all_gt_masks, all_void_masks, all_masks_id = self.dataset.get_all_masks(seq, True)
all_res_masks = results.read_masks(seq, all_masks_id)
j_metrics_res, f_metrics_res = self._evaluate_unsupervised(all_gt_masks, all_res_masks, all_void_masks, metric)
for ii in range(all_gt_masks.shape[0]):
seq_name = f'{seq}_{ii+1}'
if 'J' in metric:
[JM, JR, JD] = utils.db_statistics(j_metrics_res[ii])
metrics_res['J']["M"].append(JM)
metrics_res['J']["R"].append(JR)
metrics_res['J']["D"].append(JD)
metrics_res['J']["M_per_object"][seq_name] = JM
if 'F' in metric:
[FM, FR, FD] = utils.db_statistics(f_metrics_res[ii])
metrics_res['F']["M"].append(FM)
metrics_res['F']["R"].append(FR)
metrics_res['F']["D"].append(FD)
metrics_res['F']["M_per_object"][seq_name] = FM
# Show progress
if debug:
sys.stdout.write(seq + '\n')
sys.stdout.flush()
return metrics_res
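# Minimal illustration of the assignment step used in _evaluate_unsupervised
# (the score matrix below is made up): linear_sum_assignment minimises cost,
# so negating the J/F score matrix yields the proposal-to-object matching
# with the highest mean score.
if __name__ == '__main__':
    toy_scores = np.array([[0.9, 0.2],
                           [0.1, 0.8]])
    row_ind, col_ind = linear_sum_assignment(-toy_scores)
    print('matched (proposal, object) pairs:', list(zip(row_ind, col_ind)))  # -> (0, 0) and (1, 1)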
| 49.365591
| 143
| 0.615334
|
9d25753d6827c6061abb70e5faa551ac57395864
| 6,762
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/streptomycesrapamycinicus.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/streptomycesrapamycinicus.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/streptomycesrapamycinicus.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Streptomyces rapamycinicus.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:01:42.803620
The undirected graph Streptomyces rapamycinicus has 9866 nodes and 1703877
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03501 and has 51 connected components, where the component
with most nodes has 9740 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 278, the mean node degree is 345.40,
and the node degree mode is 2. The top 5 most central nodes are 1343740.M271_15200
(degree 4955), 1343740.M271_37050 (degree 3914), 1343740.M271_05855 (degree
2986), 1343740.M271_23980 (degree 2893) and 1343740.M271_36415 (degree
2885).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesRapamycinicus
# Then load the graph
graph = StreptomycesRapamycinicus()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def StreptomycesRapamycinicus(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Streptomyces rapamycinicus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Streptomyces rapamycinicus graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:01:42.803620
The undirected graph Streptomyces rapamycinicus has 9866 nodes and 1703877
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03501 and has 51 connected components, where the component
with most nodes has 9740 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 278, the mean node degree is 345.40,
and the node degree mode is 2. The top 5 most central nodes are 1343740.M271_15200
(degree 4955), 1343740.M271_37050 (degree 3914), 1343740.M271_05855 (degree
2986), 1343740.M271_23980 (degree 2893) and 1343740.M271_36415 (degree
2885).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesRapamycinicus
# Then load the graph
graph = StreptomycesRapamycinicus()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesRapamycinicus",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.403141
| 223
| 0.706891
|
d763a026077b711e262925f14bb0dda337817bc5
| 2,609
|
py
|
Python
|
python/tink/mac/_mac_key_templates.py
|
ekmixon/tink
|
9753ffddd4d04aa56e0605ff4a0db46f2fb80529
|
[
"Apache-2.0"
] | null | null | null |
python/tink/mac/_mac_key_templates.py
|
ekmixon/tink
|
9753ffddd4d04aa56e0605ff4a0db46f2fb80529
|
[
"Apache-2.0"
] | null | null | null |
python/tink/mac/_mac_key_templates.py
|
ekmixon/tink
|
9753ffddd4d04aa56e0605ff4a0db46f2fb80529
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-generated KeyTemplate for Mac.
One can use these templates to generate a new tink_pb2.Keyset with
tink_pb2.KeysetHandle. To generate a new keyset that contains a single
hmac_pb2.HmacKey, one can do:
handle = keyset_handle.KeysetHandle(mac_key_templates.HMAC_SHA256_128BITTAG).
"""
from tink.proto import aes_cmac_pb2
from tink.proto import common_pb2
from tink.proto import hmac_pb2
from tink.proto import tink_pb2
def create_hmac_key_template(
key_size: int, tag_size: int,
hash_type: common_pb2.HashType) -> tink_pb2.KeyTemplate:
"""Creates a HMAC KeyTemplate, and fills in its values."""
key_format = hmac_pb2.HmacKeyFormat()
key_format.params.hash = hash_type
key_format.params.tag_size = tag_size
key_format.key_size = key_size
key_template = tink_pb2.KeyTemplate()
key_template.value = key_format.SerializeToString()
key_template.type_url = 'type.googleapis.com/google.crypto.tink.HmacKey'
key_template.output_prefix_type = tink_pb2.TINK
return key_template
def create_aes_cmac_key_template(
key_size: int, tag_size: int) -> tink_pb2.KeyTemplate:
""""Creates an AES-CMAC KeyTemplate, and fills in its values."""
key_format = aes_cmac_pb2.AesCmacKeyFormat()
key_format.key_size = key_size
key_format.params.tag_size = tag_size
key_template = tink_pb2.KeyTemplate()
key_template.value = key_format.SerializeToString()
key_template.type_url = 'type.googleapis.com/google.crypto.tink.AesCmacKey'
key_template.output_prefix_type = tink_pb2.TINK
return key_template
AES_CMAC = create_aes_cmac_key_template(key_size=32, tag_size=16)
HMAC_SHA256_128BITTAG = create_hmac_key_template(
key_size=32, tag_size=16, hash_type=common_pb2.SHA256)
HMAC_SHA256_256BITTAG = create_hmac_key_template(
key_size=32, tag_size=32, hash_type=common_pb2.SHA256)
HMAC_SHA512_256BITTAG = create_hmac_key_template(
key_size=64, tag_size=32, hash_type=common_pb2.SHA512)
HMAC_SHA512_512BITTAG = create_hmac_key_template(
key_size=64, tag_size=64, hash_type=common_pb2.SHA512)
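# A minimal usage sketch (the 128-bit key below is an illustrative choice, not
# one of the pre-generated templates): custom templates can be built with the
# same helper used for the constants above.
CUSTOM_HMAC_SHA256_128BITKEY_128BITTAG = create_hmac_key_template(
    key_size=16, tag_size=16, hash_type=common_pb2.SHA256)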
| 40.138462
| 77
| 0.791491
|
a3b8d79f0ff3edf89b82633d47f102ec2d0ae59c
| 3,508
|
py
|
Python
|
examples/plot_letters.py
|
LemonLison/pystruct
|
5606e643d1a0a3701b93b848a2a02c49e83c4f1e
|
[
"BSD-2-Clause"
] | 501
|
2015-01-06T16:24:04.000Z
|
2022-03-22T03:53:03.000Z
|
examples/plot_letters.py
|
kingjr/pystruct
|
23c6d8f6ab34a88b63386a595debbfdfa13345fe
|
[
"BSD-2-Clause"
] | 104
|
2015-01-02T19:05:04.000Z
|
2022-02-13T20:18:38.000Z
|
examples/plot_letters.py
|
kingjr/pystruct
|
23c6d8f6ab34a88b63386a595debbfdfa13345fe
|
[
"BSD-2-Clause"
] | 145
|
2015-02-04T03:42:52.000Z
|
2022-03-04T13:16:37.000Z
|
"""
===============================
OCR Letter sequence recognition
===============================
This example illustrates the use of a chain CRF for optical character
recognition. The example is taken from Taskar et al "Max-margin markov random
fields".
Each example consists of a handwritten word that was presegmented into
characters. Each character is represented as a 16x8 binary image. The task is
to classify the image into one of the 26 characters a-z. The first letter of
every word was ommited as it was capitalized and the task does only consider
small caps letters.
We compare classification using a standard linear SVM that classifies
each letter individually with a chain CRF that can exploit correlations
between neighboring letters (the correlation is particularly strong
as the same words are used during training and testsing).
The first figures shows the segmented letters of four words from the test set.
In set are the ground truth (green), the prediction using SVM (blue) and the
prediction using a chain CRF (red).
The second figure shows the pairwise potentials learned by the chain CRF.
The strongest patterns are "y after l" and "n after i".
There are obvious extensions that both methods could benefit from, such as
window features or non-linear kernels. This example is more meant to give a
demonstration of the CRF than to show its superiority.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from pystruct.datasets import load_letters
from pystruct.models import ChainCRF
from pystruct.learners import FrankWolfeSSVM
abc = "abcdefghijklmnopqrstuvwxyz"
letters = load_letters()
X, y, folds = letters['data'], letters['labels'], letters['folds']
# we convert the lists to object arrays, as that makes slicing much more
# convenient
X, y = np.array(X), np.array(y)
X_train, X_test = X[folds == 1], X[folds != 1]
y_train, y_test = y[folds == 1], y[folds != 1]
# Train linear SVM
svm = LinearSVC(dual=False, C=.1)
# flatten input
svm.fit(np.vstack(X_train), np.hstack(y_train))
# Train linear chain CRF
model = ChainCRF()
ssvm = FrankWolfeSSVM(model=model, C=.1, max_iter=11)
ssvm.fit(X_train, y_train)
print("Test score with chain CRF: %f" % ssvm.score(X_test, y_test))
print("Test score with linear SVM: %f" % svm.score(np.vstack(X_test),
np.hstack(y_test)))
# plot some word sequences
n_words = 4
rnd = np.random.RandomState(1)
selected = rnd.randint(len(y_test), size=n_words)
max_word_len = max([len(y_) for y_ in y_test[selected]])
fig, axes = plt.subplots(n_words, max_word_len, figsize=(10, 10))
fig.subplots_adjust(wspace=0)
for ind, axes_row in zip(selected, axes):
y_pred_svm = svm.predict(X_test[ind])
y_pred_chain = ssvm.predict([X_test[ind]])[0]
for i, (a, image, y_true, y_svm, y_chain) in enumerate(
zip(axes_row, X_test[ind], y_test[ind], y_pred_svm, y_pred_chain)):
a.matshow(image.reshape(16, 8), cmap=plt.cm.Greys)
a.text(0, 3, abc[y_true], color="#00AA00", size=25)
a.text(0, 14, abc[y_svm], color="#5555FF", size=25)
a.text(5, 14, abc[y_chain], color="#FF5555", size=25)
a.set_xticks(())
a.set_yticks(())
for ii in range(i + 1, max_word_len):
axes_row[ii].set_visible(False)
plt.matshow(ssvm.w[26 * 8 * 16:].reshape(26, 26))
plt.colorbar()
plt.title("Transition parameters of the chain CRF.")
plt.xticks(np.arange(25), abc)
plt.yticks(np.arange(25), abc)
plt.show()
| 38.130435
| 79
| 0.711231
|
0ff9d03a02693d17b0189cec877fdc301ee1014a
| 4,179
|
py
|
Python
|
run_train_and_test.py
|
tipt0p/periodic_behavior_bn_wd
|
8d77bfc21cd011cd3d705042c2b51735ec2b8b7d
|
[
"Apache-2.0"
] | 1
|
2021-12-21T21:19:43.000Z
|
2021-12-21T21:19:43.000Z
|
run_train_and_test.py
|
tipt0p/periodic_behavior_bn_wd
|
8d77bfc21cd011cd3d705042c2b51735ec2b8b7d
|
[
"Apache-2.0"
] | null | null | null |
run_train_and_test.py
|
tipt0p/periodic_behavior_bn_wd
|
8d77bfc21cd011cd3d705042c2b51735ec2b8b7d
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
import os
import subprocess
import argparse
import datetime
import time
parser = argparse.ArgumentParser(description='Managing experiments')
parser.add_argument('--test', action='store_true',
help='print (test) or os.system (run)')
args = parser.parse_args()
if args.test:
action = print
else:
action = os.system
ENVIRONMENT = 'CycleBNNet_env'
data = 'CIFAR10'#'CIFAR10''CIFAR100'
network = 'ConvNetSI'#'ResNet18SI''ConvNetSI''ResNet18SIAf''ConvNetSIAf'
fix_noninvlr = 0.0
save_path = './Experiments/{}_{}/'.format(network,data)
if fix_noninvlr >=0:
save_path = './Experiments/{}_{}_noninvlr_{}/'.format(network,data,fix_noninvlr)
if not os.path.exists('./Experiments'):
os.mkdir('./Experiments')
if not os.path.exists(save_path):
os.mkdir(save_path)
params = {'dataset' : data,
'model': network,
'noninvlr':fix_noninvlr,
'momentum': 0.0,
'num_channels':32,
'depth':3,# work only for ConvNet
'epochs': 1001,
'corrupt_train': 0.0,
'save_freq': 1,
'eval_freq':1000,
'use_data_size':50000,
'dir': save_path + 'checkpoints',
'init_scale':10.,
'fix_si_pnorm_value':-1,
'gpu':0
}
lrs = [0.01,]
wds = [0.001,]
add_params = '--use_test --no_schedule --no_aug'#--fbgd --fix_si_pnorm
params_test = {'dataset' : data,
'model': network,
'num_channels': params['num_channels'],
'depth': params['depth'],
'init_scale':params['init_scale'],
'save_path': save_path + 'info',
'models_dir': save_path + 'checkpoints',
'use_data_size':params['use_data_size'],
'gpu':params['gpu']
}
log_path = save_path + 'logs/'
if not os.path.exists(save_path):
os.mkdir(save_path)
if not os.path.exists(log_path):
os.mkdir(log_path)
info_path = save_path + 'info/'
if not os.path.exists(info_path):
os.mkdir(info_path)
commands = []
for ind in range(len(lrs)):
p = params.copy()
p['lr_init'] = lrs[ind]
p['wd'] = wds[ind]
p_test = params_test.copy()
exp_name = 'c{}_d{}_ds{}_lr{}_wd{}_mom{}_corr{}_epoch{}'.format(p['num_channels'],p['depth'],p['use_data_size'],p['lr_init'],p['wd'],p['momentum'],p['corrupt_train'],p['epochs'])
if 'no_schedule' in add_params:
exp_name = exp_name + '_nosch'
if p['init_scale'] >0:
exp_name = exp_name + 'initscale{}'.format(p['init_scale'])
if 'no_aug' in add_params:
exp_name = exp_name + '_noaug'
if 'fbgd' in add_params:
exp_name = exp_name + '_fbgd'
if 'fix_si_pnorm' in add_params:
exp_name = exp_name + '_fix_si_pnorm{}'.format(p['fix_si_pnorm_value'])
p['dir'] = params['dir'] + '/' + exp_name
exp_log_path = log_path + exp_name
p_test['models_dir'] = params_test['models_dir'] + '/' + exp_name + '/trial_0'
# train
command = 'train.py {} >> {}'.format(' '.join(["--{} {}".format(k,v) for (k, v) in p.items()])+' ' +add_params, exp_log_path+'.out')
commands.append(command)
#train metrics
p_test['save_path'] = params_test['save_path'] + '/' + exp_name + '/train-tm.npz'
commands.append('get_info.py {} --corrupt_train {} --train_mode --eval_model --all_pnorm'.format(' '.join(["--{} {}".format(k,v) for (k, v) in p_test.items()]), p['corrupt_train']))
commands.append('get_info.py {} --corrupt_train {} --train_mode --update --calc_grad_norms'.format(' '.join(["--{} {}".format(k,v) for (k, v) in p_test.items()]), p['corrupt_train']))
#test metrics
p_test['save_path'] = params_test['save_path'] + '/' + exp_name + '/test-em.npz'
commands.append('get_info.py {} --use_test --eval_model'.format(' '.join(["--{} {}".format(k,v) for (k, v) in p_test.items()])))
if ENVIRONMENT:
tmp_str = ' && ~/anaconda3/envs/{}/bin/python '.format(ENVIRONMENT)
final_command = "bash -c '. activate {} {} {}'".format(ENVIRONMENT,tmp_str,tmp_str.join(commands))
else:
    final_command = 'python ' + ' && python '.join(commands)
action(final_command)
| 33.701613
| 188
| 0.602058
|
49ff137e970c10d318e30ab3324bd5ce6bcaeda5
| 1,370
|
py
|
Python
|
gcpdiag/queries/kms_stub.py
|
GoogleCloudPlatform/gcpdiag
|
1fb20974c80b54c145cb4281d8b254a0ad59667d
|
[
"Apache-2.0"
] | 63
|
2021-09-28T16:29:19.000Z
|
2022-03-30T02:01:15.000Z
|
gcpdiag/queries/kms_stub.py
|
GoogleCloudPlatform/gcpdiag
|
1fb20974c80b54c145cb4281d8b254a0ad59667d
|
[
"Apache-2.0"
] | 10
|
2021-10-06T11:59:44.000Z
|
2022-03-24T16:41:38.000Z
|
gcpdiag/queries/kms_stub.py
|
GoogleCloudPlatform/gcpdiag
|
1fb20974c80b54c145cb4281d8b254a0ad59667d
|
[
"Apache-2.0"
] | 20
|
2021-09-28T18:38:29.000Z
|
2022-03-24T10:19:56.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Stub API calls used in kms.py for testing.
Instead of doing real API calls, we return test JSON data.
"""
# pylint: disable=unused-argument
# pylint: disable=invalid-name
import pathlib
from gcpdiag import utils
from gcpdiag.queries import apis_stub
PREFIX_GKE1 = pathlib.Path(__file__).parents[2] / 'test-data/gke1/json-dumps'
class KmsApiStub:
"""Mock object to simulate container api calls."""
def projects(self):
return self
def locations(self):
return self
def keyRings(self):
return self
def cryptoKeys(self):
return self
def get(self, name=None):
project_id = utils.get_project_by_res_name(name)
basename = utils.extract_value_from_res_name(name, 'cryptoKeys')
return apis_stub.RestCallStub(project_id, basename)
| 26.862745
| 77
| 0.745985
|
8b88fd71cace5bf633a7f1bcb48b0013d126861e
| 691
|
py
|
Python
|
manage.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
manage.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
manage.py
|
michaldomino/Voice-interface-optimization-server
|
fff59d4c5db599e35d4b5f3915bbb272d2000a26
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Voice_interface_optimization_server.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 30.043478
| 99
| 0.691751
|
2f8b57f11d6c3fbb6d7a5ed563fecd35e13866e9
| 39,109
|
py
|
Python
|
virtual/lib/python3.8/site-packages/mypy/checkmember.py
|
RobbinGIT/Awards
|
fb214ee6762a4f21be0b9fc247b939b3703daa8c
|
[
"MIT"
] | null | null | null |
virtual/lib/python3.8/site-packages/mypy/checkmember.py
|
RobbinGIT/Awards
|
fb214ee6762a4f21be0b9fc247b939b3703daa8c
|
[
"MIT"
] | 2
|
2021-06-08T20:56:42.000Z
|
2021-06-10T22:35:15.000Z
|
virtual/lib/python3.6/site-packages/mypy/checkmember.py
|
catherine244/Reviews
|
30138f5ad09a39c1b6866c8bacf3fd0c89abbd00
|
[
"MIT"
] | null | null | null |
"""Type checking of attribute access"""
from typing import cast, Callable, Optional, Union
from typing_extensions import TYPE_CHECKING
from mypy.types import (
Type, Instance, AnyType, TupleType, TypedDictType, CallableType, FunctionLike, TypeVarDef,
Overloaded, TypeVarType, UnionType, PartialType, TypeOfAny, LiteralType,
DeletedType, NoneType, TypeType, function_type, get_type_vars, get_proper_type, ProperType
)
from mypy.nodes import (
TypeInfo, FuncBase, Var, FuncDef, SymbolNode, Context, MypyFile, TypeVarExpr,
ARG_POS, ARG_STAR, ARG_STAR2, Decorator, OverloadedFuncDef, TypeAlias, TempNode,
is_final_node, SYMBOL_FUNCBASE_TYPES,
)
from mypy.messages import MessageBuilder
from mypy.maptype import map_instance_to_supertype
from mypy.expandtype import expand_type_by_instance, freshen_function_type_vars
from mypy.erasetype import erase_typevars
from mypy.plugin import AttributeContext
from mypy.typeanal import set_any_tvars
from mypy import message_registry
from mypy import subtypes
from mypy import meet
from mypy.typeops import (
tuple_fallback, bind_self, erase_to_bound, class_callable, type_object_type_from_function,
make_simplified_union,
)
if TYPE_CHECKING: # import for forward declaration only
import mypy.checker
from mypy import state
class MemberContext:
"""Information and objects needed to type check attribute access.
Look at the docstring of analyze_member_access for more information.
"""
def __init__(self,
is_lvalue: bool,
is_super: bool,
is_operator: bool,
original_type: Type,
context: Context,
msg: MessageBuilder,
chk: 'mypy.checker.TypeChecker',
self_type: Optional[Type]) -> None:
self.is_lvalue = is_lvalue
self.is_super = is_super
self.is_operator = is_operator
self.original_type = original_type
self.self_type = self_type or original_type
self.context = context # Error context
self.msg = msg
self.chk = chk
def builtin_type(self, name: str) -> Instance:
return self.chk.named_type(name)
def not_ready_callback(self, name: str, context: Context) -> None:
self.chk.handle_cannot_determine_type(name, context)
def copy_modified(self, *, messages: Optional[MessageBuilder] = None,
self_type: Optional[Type] = None) -> 'MemberContext':
mx = MemberContext(self.is_lvalue, self.is_super, self.is_operator,
self.original_type, self.context, self.msg, self.chk,
self.self_type)
if messages is not None:
mx.msg = messages
if self_type is not None:
mx.self_type = self_type
return mx
def analyze_member_access(name: str,
typ: Type,
context: Context,
is_lvalue: bool,
is_super: bool,
is_operator: bool,
msg: MessageBuilder, *,
original_type: Type,
chk: 'mypy.checker.TypeChecker',
override_info: Optional[TypeInfo] = None,
in_literal_context: bool = False,
self_type: Optional[Type] = None) -> Type:
"""Return the type of attribute 'name' of 'typ'.
The actual implementation is in '_analyze_member_access' and this docstring
also applies to it.
This is a general operation that supports various different variations:
1. lvalue or non-lvalue access (setter or getter access)
2. supertype access when using super() (is_super == True and
'override_info' should refer to the supertype)
'original_type' is the most precise inferred or declared type of the base object
that we have available. When looking for an attribute of 'typ', we may perform
recursive calls targeting the fallback type, and 'typ' may become some supertype
of 'original_type'. 'original_type' is always preserved as the 'typ' type used in
the initial, non-recursive call. The 'self_type' is a component of 'original_type'
to which generic self should be bound (a narrower type that has a fallback to instance).
Currently this is used only for union types.
"""
mx = MemberContext(is_lvalue,
is_super,
is_operator,
original_type,
context,
msg,
chk=chk,
self_type=self_type)
result = _analyze_member_access(name, typ, mx, override_info)
possible_literal = get_proper_type(result)
if (in_literal_context and isinstance(possible_literal, Instance) and
possible_literal.last_known_value is not None):
return possible_literal.last_known_value
else:
return result
def _analyze_member_access(name: str,
typ: Type,
mx: MemberContext,
override_info: Optional[TypeInfo] = None) -> Type:
# TODO: This and following functions share some logic with subtypes.find_member;
# consider refactoring.
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return analyze_instance_member_access(name, typ, mx, override_info)
elif isinstance(typ, AnyType):
# The base object has dynamic type.
return AnyType(TypeOfAny.from_another_any, source_any=typ)
elif isinstance(typ, UnionType):
return analyze_union_member_access(name, typ, mx)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
return analyze_type_callable_member_access(name, typ, mx)
elif isinstance(typ, TypeType):
return analyze_type_type_member_access(name, typ, mx, override_info)
elif isinstance(typ, TupleType):
# Actually look up from the fallback instance type.
return _analyze_member_access(name, tuple_fallback(typ), mx, override_info)
elif isinstance(typ, (TypedDictType, LiteralType, FunctionLike)):
# Actually look up from the fallback instance type.
return _analyze_member_access(name, typ.fallback, mx, override_info)
elif isinstance(typ, NoneType):
return analyze_none_member_access(name, typ, mx)
elif isinstance(typ, TypeVarType):
return _analyze_member_access(name, typ.upper_bound, mx, override_info)
elif isinstance(typ, DeletedType):
mx.msg.deleted_as_rvalue(typ, mx.context)
return AnyType(TypeOfAny.from_error)
if mx.chk.should_suppress_optional_error([typ]):
return AnyType(TypeOfAny.from_error)
return mx.msg.has_no_attr(mx.original_type, typ, name, mx.context)
# The several functions that follow implement analyze_member_access for various
# types and aren't documented individually.
def analyze_instance_member_access(name: str,
typ: Instance,
mx: MemberContext,
override_info: Optional[TypeInfo]) -> Type:
if name == '__init__' and not mx.is_super:
# Accessing __init__ in statically typed code would compromise
# type safety unless used via super().
mx.msg.fail(message_registry.CANNOT_ACCESS_INIT, mx.context)
return AnyType(TypeOfAny.from_error)
# The base object has an instance type.
info = typ.type
if override_info:
info = override_info
if (state.find_occurrences and
info.name() == state.find_occurrences[0] and
name == state.find_occurrences[1]):
mx.msg.note("Occurrence of '{}.{}'".format(*state.find_occurrences), mx.context)
# Look up the member. First look up the method dictionary.
method = info.get_method(name)
if method:
if method.is_property:
assert isinstance(method, OverloadedFuncDef)
first_item = cast(Decorator, method.items[0])
return analyze_var(name, first_item.var, typ, info, mx)
if mx.is_lvalue:
mx.msg.cant_assign_to_method(mx.context)
signature = function_type(method, mx.builtin_type('builtins.function'))
signature = freshen_function_type_vars(signature)
if name == '__new__':
# __new__ is special and behaves like a static method -- don't strip
# the first argument.
pass
else:
signature = bind_self(signature, mx.self_type)
typ = map_instance_to_supertype(typ, method.info)
member_type = expand_type_by_instance(signature, typ)
freeze_type_vars(member_type)
return member_type
else:
# Not a method.
return analyze_member_var_access(name, typ, info, mx)
def analyze_type_callable_member_access(name: str,
typ: FunctionLike,
mx: MemberContext) -> Type:
# Class attribute.
# TODO super?
ret_type = typ.items()[0].ret_type
assert isinstance(ret_type, ProperType)
if isinstance(ret_type, TupleType):
ret_type = tuple_fallback(ret_type)
if isinstance(ret_type, Instance):
if not mx.is_operator:
# When Python sees an operator (eg `3 == 4`), it automatically translates that
# into something like `int.__eq__(3, 4)` instead of `(3).__eq__(4)` as an
# optimization.
#
            # While it normally doesn't matter which of the two versions is used, it
# does cause inconsistencies when working with classes. For example, translating
# `int == int` to `int.__eq__(int)` would not work since `int.__eq__` is meant to
# compare two int _instances_. What we really want is `type(int).__eq__`, which
# is meant to compare two types or classes.
#
# This check makes sure that when we encounter an operator, we skip looking up
# the corresponding method in the current instance to avoid this edge case.
# See https://github.com/python/mypy/pull/1787 for more info.
result = analyze_class_attribute_access(ret_type, name, mx)
if result:
return result
# Look up from the 'type' type.
return _analyze_member_access(name, typ.fallback, mx)
else:
assert False, 'Unexpected type {}'.format(repr(ret_type))
def analyze_type_type_member_access(name: str,
typ: TypeType,
mx: MemberContext,
override_info: Optional[TypeInfo]) -> Type:
# Similar to analyze_type_callable_attribute_access.
item = None
fallback = mx.builtin_type('builtins.type')
ignore_messages = mx.msg.copy()
ignore_messages.disable_errors()
if isinstance(typ.item, Instance):
item = typ.item
elif isinstance(typ.item, AnyType):
mx = mx.copy_modified(messages=ignore_messages)
return _analyze_member_access(name, fallback, mx, override_info)
elif isinstance(typ.item, TypeVarType):
upper_bound = get_proper_type(typ.item.upper_bound)
if isinstance(upper_bound, Instance):
item = upper_bound
elif isinstance(upper_bound, TupleType):
item = tuple_fallback(upper_bound)
elif isinstance(typ.item, TupleType):
item = tuple_fallback(typ.item)
elif isinstance(typ.item, FunctionLike) and typ.item.is_type_obj():
item = typ.item.fallback
elif isinstance(typ.item, TypeType):
# Access member on metaclass object via Type[Type[C]]
if isinstance(typ.item.item, Instance):
item = typ.item.item.type.metaclass_type
if item and not mx.is_operator:
# See comment above for why operators are skipped
result = analyze_class_attribute_access(item, name, mx, override_info)
if result:
if not (isinstance(get_proper_type(result), AnyType) and item.type.fallback_to_any):
return result
else:
# We don't want errors on metaclass lookup for classes with Any fallback
mx = mx.copy_modified(messages=ignore_messages)
if item is not None:
fallback = item.type.metaclass_type or fallback
return _analyze_member_access(name, fallback, mx, override_info)
def analyze_union_member_access(name: str, typ: UnionType, mx: MemberContext) -> Type:
mx.msg.disable_type_names += 1
results = []
for subtype in typ.relevant_items():
# Self types should be bound to every individual item of a union.
item_mx = mx.copy_modified(self_type=subtype)
results.append(_analyze_member_access(name, subtype, item_mx))
mx.msg.disable_type_names -= 1
return make_simplified_union(results)
def analyze_none_member_access(name: str, typ: NoneType, mx: MemberContext) -> Type:
if mx.chk.should_suppress_optional_error([typ]):
return AnyType(TypeOfAny.from_error)
is_python_3 = mx.chk.options.python_version[0] >= 3
# In Python 2 "None" has exactly the same attributes as "object". Python 3 adds a single
# extra attribute, "__bool__".
if is_python_3 and name == '__bool__':
return CallableType(arg_types=[],
arg_kinds=[],
arg_names=[],
ret_type=mx.builtin_type('builtins.bool'),
fallback=mx.builtin_type('builtins.function'))
else:
return _analyze_member_access(name, mx.builtin_type('builtins.object'), mx)
def analyze_member_var_access(name: str,
itype: Instance,
info: TypeInfo,
mx: MemberContext) -> Type:
"""Analyse attribute access that does not target a method.
This is logically part of analyze_member_access and the arguments are similar.
original_type is the type of E in the expression E.var
"""
# It was not a method. Try looking up a variable.
v = lookup_member_var_or_accessor(info, name, mx.is_lvalue)
vv = v
if isinstance(vv, Decorator):
# The associated Var node of a decorator contains the type.
v = vv.var
if isinstance(vv, TypeInfo):
# If the associated variable is a TypeInfo synthesize a Var node for
# the purposes of type checking. This enables us to type check things
# like accessing class attributes on an inner class.
v = Var(name, type=type_object_type(vv, mx.builtin_type))
v.info = info
if isinstance(vv, TypeAlias) and isinstance(get_proper_type(vv.target), Instance):
# Similar to the above TypeInfo case, we allow using
# qualified type aliases in runtime context if it refers to an
# instance type. For example:
# class C:
# A = List[int]
# x = C.A() <- this is OK
typ = instance_alias_type(vv, mx.builtin_type)
v = Var(name, type=typ)
v.info = info
if isinstance(v, Var):
implicit = info[name].implicit
# An assignment to final attribute is always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
return analyze_var(name, v, itype, info, mx, implicit=implicit)
elif isinstance(v, FuncDef):
assert False, "Did not expect a function"
elif (not v and name not in ['__getattr__', '__setattr__', '__getattribute__'] and
not mx.is_operator):
if not mx.is_lvalue:
for method_name in ('__getattribute__', '__getattr__'):
method = info.get_method(method_name)
# __getattribute__ is defined on builtins.object and returns Any, so without
# the guard this search will always find object.__getattribute__ and conclude
# that the attribute exists
if method and method.info.fullname() != 'builtins.object':
function = function_type(method, mx.builtin_type('builtins.function'))
bound_method = bind_self(function, mx.self_type)
typ = map_instance_to_supertype(itype, method.info)
getattr_type = expand_type_by_instance(bound_method, typ)
if isinstance(getattr_type, CallableType):
result = getattr_type.ret_type
# Call the attribute hook before returning.
fullname = '{}.{}'.format(method.info.fullname(), name)
hook = mx.chk.plugin.get_attribute_hook(fullname)
if hook:
result = hook(AttributeContext(get_proper_type(mx.original_type),
result, mx.context, mx.chk))
return result
else:
setattr_meth = info.get_method('__setattr__')
if setattr_meth and setattr_meth.info.fullname() != 'builtins.object':
setattr_func = function_type(setattr_meth, mx.builtin_type('builtins.function'))
bound_type = bind_self(setattr_func, mx.self_type)
typ = map_instance_to_supertype(itype, setattr_meth.info)
setattr_type = expand_type_by_instance(bound_type, typ)
if isinstance(setattr_type, CallableType) and len(setattr_type.arg_types) > 0:
return setattr_type.arg_types[-1]
if itype.type.fallback_to_any:
return AnyType(TypeOfAny.special_form)
# Could not find the member.
if mx.is_super:
mx.msg.undefined_in_superclass(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
if mx.chk and mx.chk.should_suppress_optional_error([itype]):
return AnyType(TypeOfAny.from_error)
return mx.msg.has_no_attr(mx.original_type, itype, name, mx.context)
def check_final_member(name: str, info: TypeInfo, msg: MessageBuilder, ctx: Context) -> None:
"""Give an error if the name being assigned was declared as final."""
for base in info.mro:
sym = base.names.get(name)
if sym and is_final_node(sym.node):
msg.cant_assign_to_final(name, attr_assign=True, ctx=ctx)
def analyze_descriptor_access(instance_type: Type,
descriptor_type: Type,
builtin_type: Callable[[str], Instance],
msg: MessageBuilder,
context: Context, *,
chk: 'mypy.checker.TypeChecker') -> Type:
"""Type check descriptor access.
Arguments:
instance_type: The type of the instance on which the descriptor
attribute is being accessed (the type of ``a`` in ``a.f`` when
``f`` is a descriptor).
descriptor_type: The type of the descriptor attribute being accessed
(the type of ``f`` in ``a.f`` when ``f`` is a descriptor).
context: The node defining the context of this inference.
Return:
The return type of the appropriate ``__get__`` overload for the descriptor.
"""
instance_type = get_proper_type(instance_type)
descriptor_type = get_proper_type(descriptor_type)
if isinstance(descriptor_type, UnionType):
# Map the access over union types
return make_simplified_union([
analyze_descriptor_access(instance_type, typ, builtin_type,
msg, context, chk=chk)
for typ in descriptor_type.items
])
elif not isinstance(descriptor_type, Instance):
return descriptor_type
if not descriptor_type.type.has_readable_member('__get__'):
return descriptor_type
dunder_get = descriptor_type.type.get_method('__get__')
if dunder_get is None:
msg.fail(message_registry.DESCRIPTOR_GET_NOT_CALLABLE.format(descriptor_type), context)
return AnyType(TypeOfAny.from_error)
function = function_type(dunder_get, builtin_type('builtins.function'))
bound_method = bind_self(function, descriptor_type)
typ = map_instance_to_supertype(descriptor_type, dunder_get.info)
dunder_get_type = expand_type_by_instance(bound_method, typ)
if isinstance(instance_type, FunctionLike) and instance_type.is_type_obj():
owner_type = instance_type.items()[0].ret_type
instance_type = NoneType()
elif isinstance(instance_type, TypeType):
owner_type = instance_type.item
instance_type = NoneType()
else:
owner_type = instance_type
_, inferred_dunder_get_type = chk.expr_checker.check_call(
dunder_get_type,
[TempNode(instance_type, context=context),
TempNode(TypeType.make_normalized(owner_type), context=context)],
[ARG_POS, ARG_POS], context)
inferred_dunder_get_type = get_proper_type(inferred_dunder_get_type)
if isinstance(inferred_dunder_get_type, AnyType):
# check_call failed, and will have reported an error
return inferred_dunder_get_type
if not isinstance(inferred_dunder_get_type, CallableType):
msg.fail(message_registry.DESCRIPTOR_GET_NOT_CALLABLE.format(descriptor_type), context)
return AnyType(TypeOfAny.from_error)
return inferred_dunder_get_type.ret_type
def instance_alias_type(alias: TypeAlias,
builtin_type: Callable[[str], Instance]) -> Type:
"""Type of a type alias node targeting an instance, when appears in runtime context.
As usual, we first erase any unbound type variables to Any.
"""
target = get_proper_type(alias.target)
assert isinstance(target, Instance), "Must be called only with aliases to classes"
target = set_any_tvars(target, alias.alias_tvars, alias.line, alias.column)
assert isinstance(target, Instance)
tp = type_object_type(target.type, builtin_type)
return expand_type_by_instance(tp, target)
def analyze_var(name: str,
var: Var,
itype: Instance,
info: TypeInfo,
mx: MemberContext, *,
implicit: bool = False) -> Type:
"""Analyze access to an attribute via a Var node.
This is conceptually part of analyze_member_access and the arguments are similar.
itype is the class object in which var is defined
original_type is the type of E in the expression E.var
if implicit is True, the original Var was created as an assignment to self
"""
# Found a member variable.
itype = map_instance_to_supertype(itype, var.info)
typ = var.type
if typ:
if isinstance(typ, PartialType):
return mx.chk.handle_partial_var_type(typ, mx.is_lvalue, var, mx.context)
t = expand_type_by_instance(typ, itype)
if mx.is_lvalue and var.is_property and not var.is_settable_property:
# TODO allow setting attributes in subclass (although it is probably an error)
mx.msg.read_only_property(name, itype.type, mx.context)
if mx.is_lvalue and var.is_classvar:
mx.msg.cant_assign_to_classvar(name, mx.context)
result = t # type: Type
if var.is_initialized_in_class and isinstance(t, FunctionLike) and not t.is_type_obj():
if mx.is_lvalue:
if var.is_property:
if not var.is_settable_property:
mx.msg.read_only_property(name, itype.type, mx.context)
else:
mx.msg.cant_assign_to_method(mx.context)
if not var.is_staticmethod:
# Class-level function objects and classmethods become bound methods:
# the former to the instance, the latter to the class.
functype = t
# Use meet to narrow original_type to the dispatched type.
# For example, assume
# * A.f: Callable[[A1], None] where A1 <: A (maybe A1 == A)
# * B.f: Callable[[B1], None] where B1 <: B (maybe B1 == B)
# * x: Union[A1, B1]
# In `x.f`, when checking `x` against A1 we assume x is compatible with A
                # and similarly for B1 when checking against B
dispatched_type = meet.meet_types(mx.original_type, itype)
check_self_arg(functype, dispatched_type, var.is_classmethod, mx.context, name,
mx.msg)
signature = bind_self(functype, mx.self_type, var.is_classmethod)
if var.is_property:
# A property cannot have an overloaded type => the cast is fine.
assert isinstance(signature, CallableType)
result = signature.ret_type
else:
result = signature
else:
if not var.is_ready:
mx.not_ready_callback(var.name(), mx.context)
# Implicit 'Any' type.
result = AnyType(TypeOfAny.special_form)
fullname = '{}.{}'.format(var.info.fullname(), name)
hook = mx.chk.plugin.get_attribute_hook(fullname)
if result and not mx.is_lvalue and not implicit:
result = analyze_descriptor_access(mx.original_type, result, mx.builtin_type,
mx.msg, mx.context, chk=mx.chk)
if hook:
result = hook(AttributeContext(get_proper_type(mx.original_type),
result, mx.context, mx.chk))
return result
def freeze_type_vars(member_type: ProperType) -> None:
if isinstance(member_type, CallableType):
for v in member_type.variables:
v.id.meta_level = 0
if isinstance(member_type, Overloaded):
for it in member_type.items():
for v in it.variables:
v.id.meta_level = 0
def lookup_member_var_or_accessor(info: TypeInfo, name: str,
is_lvalue: bool) -> Optional[SymbolNode]:
"""Find the attribute/accessor node that refers to a member of a type."""
# TODO handle lvalues
node = info.get(name)
if node:
return node.node
else:
return None
def check_self_arg(functype: FunctionLike,
dispatched_arg_type: Type,
is_classmethod: bool,
context: Context, name: str,
msg: MessageBuilder) -> None:
"""For x.f where A.f: A1 -> T, check that meet(type(x), A) <: A1 for each overload.
dispatched_arg_type is meet(B, A) in the following example
def g(x: B): x.f
class A:
f: Callable[[A1], None]
"""
# TODO: this is too strict. We can return filtered overloads for matching definitions
for item in functype.items():
if not item.arg_types or item.arg_kinds[0] not in (ARG_POS, ARG_STAR):
# No positional first (self) argument (*args is okay).
msg.no_formal_self(name, item, context)
else:
selfarg = item.arg_types[0]
if is_classmethod:
dispatched_arg_type = TypeType.make_normalized(dispatched_arg_type)
if not subtypes.is_subtype(dispatched_arg_type, erase_to_bound(selfarg)):
msg.incompatible_self_argument(name, dispatched_arg_type, item,
is_classmethod, context)
def analyze_class_attribute_access(itype: Instance,
name: str,
mx: MemberContext,
override_info: Optional[TypeInfo] = None) -> Optional[Type]:
"""original_type is the type of E in the expression E.var"""
info = itype.type
if override_info:
info = override_info
node = info.get(name)
if not node:
if info.fallback_to_any:
return AnyType(TypeOfAny.special_form)
return None
is_decorated = isinstance(node.node, Decorator)
is_method = is_decorated or isinstance(node.node, FuncBase)
if mx.is_lvalue:
if is_method:
mx.msg.cant_assign_to_method(mx.context)
if isinstance(node.node, TypeInfo):
mx.msg.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, mx.context)
# If a final attribute was declared on `self` in `__init__`, then it
# can't be accessed on the class object.
if node.implicit and isinstance(node.node, Var) and node.node.is_final:
mx.msg.fail(message_registry.CANNOT_ACCESS_FINAL_INSTANCE_ATTR
.format(node.node.name()), mx.context)
# An assignment to final attribute on class object is also always an error,
# independently of types.
if mx.is_lvalue and not mx.chk.get_final_context():
check_final_member(name, info, mx.msg, mx.context)
if info.is_enum and not (mx.is_lvalue or is_decorated or is_method):
enum_literal = LiteralType(name, fallback=itype)
return itype.copy_modified(last_known_value=enum_literal)
t = node.type
if t:
if isinstance(t, PartialType):
symnode = node.node
assert isinstance(symnode, Var)
return mx.chk.handle_partial_var_type(t, mx.is_lvalue, symnode, mx.context)
# Find the class where method/variable was defined.
if isinstance(node.node, Decorator):
super_info = node.node.var.info # type: Optional[TypeInfo]
elif isinstance(node.node, (Var, SYMBOL_FUNCBASE_TYPES)):
super_info = node.node.info
else:
super_info = None
# Map the type to how it would look as a defining class. For example:
# class C(Generic[T]): ...
# class D(C[Tuple[T, S]]): ...
# D[int, str].method()
# Here itype is D[int, str], isuper is C[Tuple[int, str]].
if not super_info:
isuper = None
else:
isuper = map_instance_to_supertype(itype, super_info)
if isinstance(node.node, Var):
assert isuper is not None
# Check if original variable type has type variables. For example:
# class C(Generic[T]):
# x: T
# C.x # Error, ambiguous access
# C[int].x # Also an error, since C[int] is same as C at runtime
if isinstance(t, TypeVarType) or get_type_vars(t):
# Exception: access on Type[...], including first argument of class methods is OK.
if not isinstance(get_proper_type(mx.original_type), TypeType):
mx.msg.fail(message_registry.GENERIC_INSTANCE_VAR_CLASS_ACCESS, mx.context)
# Erase non-mapped variables, but keep mapped ones, even if there is an error.
# In the above example this means that we infer following types:
# C.x -> Any
# C[int].x -> int
t = erase_typevars(expand_type_by_instance(t, isuper))
is_classmethod = ((is_decorated and cast(Decorator, node.node).func.is_class)
or (isinstance(node.node, FuncBase) and node.node.is_class))
result = add_class_tvars(get_proper_type(t), itype, isuper, is_classmethod,
mx.builtin_type, mx.original_type)
if not mx.is_lvalue:
result = analyze_descriptor_access(mx.original_type, result, mx.builtin_type,
mx.msg, mx.context, chk=mx.chk)
return result
elif isinstance(node.node, Var):
mx.not_ready_callback(name, mx.context)
return AnyType(TypeOfAny.special_form)
if isinstance(node.node, TypeVarExpr):
mx.msg.fail(message_registry.CANNOT_USE_TYPEVAR_AS_EXPRESSION.format(
info.name(), name), mx.context)
return AnyType(TypeOfAny.from_error)
if isinstance(node.node, TypeInfo):
return type_object_type(node.node, mx.builtin_type)
if isinstance(node.node, MypyFile):
# Reference to a module object.
return mx.builtin_type('types.ModuleType')
if (isinstance(node.node, TypeAlias) and
isinstance(get_proper_type(node.node.target), Instance)):
return instance_alias_type(node.node, mx.builtin_type)
if is_decorated:
assert isinstance(node.node, Decorator)
if node.node.type:
return node.node.type
else:
mx.not_ready_callback(name, mx.context)
return AnyType(TypeOfAny.from_error)
else:
return function_type(cast(FuncBase, node.node), mx.builtin_type('builtins.function'))
def add_class_tvars(t: ProperType, itype: Instance, isuper: Optional[Instance],
is_classmethod: bool,
builtin_type: Callable[[str], Instance],
original_type: Type) -> Type:
"""Instantiate type variables during analyze_class_attribute_access,
    e.g. T and Q in the following:
class A(Generic[T]):
@classmethod
def foo(cls: Type[Q]) -> Tuple[T, Q]: ...
class B(A[str]): pass
B.foo()
original_type is the value of the type B in the expression B.foo()
"""
# TODO: verify consistency between Q and T
info = itype.type # type: TypeInfo
if is_classmethod:
assert isuper is not None
t = expand_type_by_instance(t, isuper)
# We add class type variables if the class method is accessed on class object
# without applied type arguments, this matches the behavior of __init__().
# For example (continuing the example in docstring):
# A # The type of callable is def [T] () -> A[T], _not_ def () -> A[Any]
# A[int] # The type of callable is def () -> A[int]
# and
# A.foo # The type is generic def [T] () -> Tuple[T, A[T]]
# A[int].foo # The type is non-generic def () -> Tuple[int, A[int]]
#
# This behaviour is useful for defining alternative constructors for generic classes.
# To achieve such behaviour, we add the class type variables that are still free
# (i.e. appear in the return type of the class object on which the method was accessed).
free_ids = {t.id for t in itype.args if isinstance(t, TypeVarType)}
if isinstance(t, CallableType):
# NOTE: in practice either all or none of the variables are free, since
# visit_type_application() will detect any type argument count mismatch and apply
# a correct number of Anys.
tvars = [TypeVarDef(n, n, i + 1, [], builtin_type('builtins.object'), tv.variance)
for (i, n), tv in zip(enumerate(info.type_vars), info.defn.type_vars)
# use 'is' to avoid id clashes with unrelated variables
if any(tv.id is id for id in free_ids)]
if is_classmethod:
t = bind_self(t, original_type, is_classmethod=True)
return t.copy_modified(variables=tvars + t.variables)
elif isinstance(t, Overloaded):
return Overloaded([cast(CallableType, add_class_tvars(item, itype, isuper, is_classmethod,
builtin_type, original_type))
for item in t.items()])
return t
def type_object_type(info: TypeInfo, builtin_type: Callable[[str], Instance]) -> ProperType:
"""Return the type of a type object.
For a generic type G with type variables T and S the type is generally of form
Callable[..., G[T, S]]
where ... are argument types for the __init__/__new__ method (without the self
argument). Also, the fallback type will be 'type' instead of 'function'.
"""
# We take the type from whichever of __init__ and __new__ is first
# in the MRO, preferring __init__ if there is a tie.
init_method = info.get('__init__')
new_method = info.get('__new__')
if not init_method or not is_valid_constructor(init_method.node):
# Must be an invalid class definition.
return AnyType(TypeOfAny.from_error)
# There *should* always be a __new__ method except the test stubs
# lack it, so just copy init_method in that situation
new_method = new_method or init_method
if not is_valid_constructor(new_method.node):
# Must be an invalid class definition.
return AnyType(TypeOfAny.from_error)
# The two is_valid_constructor() checks ensure this.
assert isinstance(new_method.node, (SYMBOL_FUNCBASE_TYPES, Decorator))
assert isinstance(init_method.node, (SYMBOL_FUNCBASE_TYPES, Decorator))
init_index = info.mro.index(init_method.node.info)
new_index = info.mro.index(new_method.node.info)
fallback = info.metaclass_type or builtin_type('builtins.type')
if init_index < new_index:
method = init_method.node # type: Union[FuncBase, Decorator]
is_new = False
elif init_index > new_index:
method = new_method.node
is_new = True
else:
if init_method.node.info.fullname() == 'builtins.object':
# Both are defined by object. But if we've got a bogus
# base class, we can't know for sure, so check for that.
if info.fallback_to_any:
# Construct a universal callable as the prototype.
any_type = AnyType(TypeOfAny.special_form)
sig = CallableType(arg_types=[any_type, any_type],
arg_kinds=[ARG_STAR, ARG_STAR2],
arg_names=["_args", "_kwds"],
ret_type=any_type,
fallback=builtin_type('builtins.function'))
return class_callable(sig, info, fallback, None, is_new=False)
# Otherwise prefer __init__ in a tie. It isn't clear that this
# is the right thing, but __new__ caused problems with
# typeshed (#5647).
method = init_method.node
is_new = False
# Construct callable type based on signature of __init__. Adjust
# return type and insert type arguments.
if isinstance(method, FuncBase):
t = function_type(method, fallback)
else:
assert isinstance(method.type, ProperType)
assert isinstance(method.type, FunctionLike) # is_valid_constructor() ensures this
t = method.type
return type_object_type_from_function(t, info, method.info, fallback, is_new)
def is_valid_constructor(n: Optional[SymbolNode]) -> bool:
"""Does this node represents a valid constructor method?
This includes normal functions, overloaded functions, and decorators
that return a callable type.
"""
if isinstance(n, FuncBase):
return True
if isinstance(n, Decorator):
return isinstance(get_proper_type(n.type), FunctionLike)
return False
| 44.644977
| 98
| 0.633563
|
8984587f3821a057a398fd1afa0c880daf9142b2
| 6,462
|
py
|
Python
|
pbesa/kernel/agent/Agent.py
|
akenfactory/pbesa
|
2ebc731a680f14e0fba85ffef0248eb9ea4bd944
|
[
"MIT"
] | 2
|
2020-10-22T22:23:40.000Z
|
2021-09-14T01:18:01.000Z
|
pbesa/kernel/agent/Agent.py
|
akenfactory/pbesa
|
2ebc731a680f14e0fba85ffef0248eb9ea4bd944
|
[
"MIT"
] | 2
|
2020-05-27T13:59:42.000Z
|
2022-03-02T14:58:12.000Z
|
pbesa/kernel/agent/Agent.py
|
akenfactory/pbesa
|
2ebc731a680f14e0fba85ffef0248eb9ea4bd944
|
[
"MIT"
] | 1
|
2020-05-27T13:50:40.000Z
|
2020-05-27T13:50:40.000Z
|
# -*- coding: utf-8 -*-
"""
----------------------------------------------------------
------------------------- PBESA --------------------------
----------------------------------------------------------
@autor AKEN & SIDRE
@version 3.0.1
@date 27/07/20
"""
# --------------------------------------------------------
# Define resources
# --------------------------------------------------------
import logging
from abc import ABC, abstractmethod
from ...kernel.system.Adm import Adm
from ...kernel.util.Queue import Queue
from ...kernel.agent.Channel import Channel
from ...kernel.agent.BehaviorExe import BehaviorExe
from ...kernel.agent.exceptions import AgentException
# --------------------------------------------------------
# Define component
# --------------------------------------------------------
class Agent(ABC):
""" Represents a system agent """
def __init__(self, agentID):
"""
Agent constructor method.
        @param agentID Unique agent ID
"""
if agentID and isinstance(agentID, str):
self.id = agentID
self.state = {}
self.__eventsTable = {}
self.__channelsTable = {}
self.__workerList = []
self.__channelList = []
self.__behaviors = {}
self._social = False
self.log = None
self.__buildAgent()
Adm().addAgent(self)
super().__init__()
else:
raise AgentException('[Fatal, __init__]: The agent ID must be a str')
def __buildAgent(self):
""" Build the agent structure """
self.setUp()
if len(self.__behaviors) > 0:
for key, beh in self.__behaviors.items():
queue = Queue(100)
channel = Channel(queue)
worker = BehaviorExe(queue)
self.__channelsTable[key] = {'channel' : channel, 'worker': worker}
self.__workerList.append(worker)
self.__channelList.append(channel)
for evts in beh:
try:
evts['action'].setAgent(self)
self.__eventsTable[evts['event']] = {'behavior' : key, 'action': evts['action']}
                    except Exception:
raise AgentException('[Fatal, buildAgent]: The action must be instantiated: %s' % str(evts['action']))
else:
raise AgentException('[Fatal, buildAgent]: Agent behaviors must be defined')
@abstractmethod
def setUp(self):
""" Method to create and initialize the agent structure """
pass
@abstractmethod
def shutdown(self):
""" Method to free up the resources taken by the agent """
pass
def sendEvent(self, event, data):
"""
        Sends an event to the agent.
        @param event Event
        @param data Event data
@exceptions AgentException
"""
if event in self.__eventsTable:
behavior = self.__eventsTable[event]
channel = self.__channelsTable[behavior['behavior']]
evt = {'event': event, 'data': data, 'action': behavior['action']}
channel['channel'].sendEvent(evt)
else:
raise AgentException('[Warn, sendEvent]: The agent has not registered the event %s' % event)
def start(self):
for w in self.__workerList:
w.setLet(True)
w.start()
def wait(self):
for w in self.__workerList:
w.setLet(False)
def finalize(self):
for w in self.__workerList:
w.setAlive(False)
w.finalize()
def kill(self):
""" Remove the agent from the system """
if 'persistence' in Adm().conf:
self.persist()
self.shutdown()
self.id = None
self.log = None
self.state = None
self.__eventsTable = None
self.__channelsTable = None
self.finalize()
self.__workerList = None
self.__channelList = None
self.__behaviors = None
def toDTO(self):
dto = {
'command': 'MOVE',
'class': self.__class__.__name__,
'path': self.__module__,
'id': self.id,
'state': self.state
}
rtn = str(dto)
rtn = rtn.replace("'", "\"")
return rtn
def addBehavior(self, behavior):
"""
        Add a new behavior to the agent.
@param behavior New behavior
"""
self.__behaviors[behavior] = []
def bindAction(self, behavior, event, action):
"""
Link behavior to event with action.
@param behavior Behavior
        @param event Event linked to the behavior
        @param action Action linked to the event
@exceptions AgentException
"""
if behavior in self.__behaviors:
self.__behaviors[behavior].append({
'event': event,
'action': action
})
else:
raise AgentException('[Fatal, bindAction]: The behavior "%s" is not associated with the agent. Must be added before behavior' % behavior)
def setUpLogger(self, loggerName, loggerFile, level):
"""
        Initializes an application logging component.
        @param loggerName Logger name
        @param loggerFile Log file path
        @param level Logging level
"""
l = logging.getLogger(loggerName)
formatter = logging.Formatter('[PBESA]: %(asctime)s %(name)-12s %(lineno)d %(levelname)-8s %(message)s')
fileHandler = logging.FileHandler(loggerFile, 'w', 'utf-8')
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def activeLogger(self, logger, level=logging.INFO):
if not level:
level = logging.INFO
self.setUpLogger(logger, '%s.log' % logger, level)
self.log = logging.getLogger(logger)
def suscribeLogger(self, logger):
self.log = logging.getLogger(logger)
def persist(self):
db = Adm().getDBConnection()
db[self.id].delete_many({})
db[self.id].insert_one(self.state)
def isSocial(self):
return self._social
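# Minimal usage sketch (added illustration, not part of the original module).
# `GreetAction` is a hypothetical action object; PBESA actions are expected to
# expose at least setAgent() (called in __buildAgent above) plus whatever
# execute/handler method the framework dispatches to, which is assumed here.
#
#     class GreeterAgent(Agent):
#         def setUp(self):
#             self.addBehavior('greeting')
#             self.bindAction('greeting', 'greet', GreetAction())
#
#         def shutdown(self):
#             pass
#
#     agent = GreeterAgent('greeter-1')
#     agent.start()
#     agent.sendEvent('greet', {'name': 'world'})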
| 33.65625
| 149
| 0.52476
|
db36cd1e731854737d25d8a57ab8cdd072473ff9
| 2,737
|
py
|
Python
|
keg/configfile.py
|
0xf4b1/keg
|
3c8b63420c2f91381f06ecb744122b16c5fb65a0
|
[
"MIT"
] | 16
|
2018-08-18T13:33:07.000Z
|
2022-03-08T10:11:08.000Z
|
keg/configfile.py
|
MrMoonKr/keg-doc
|
d17aaba70fd7e8fd56960212b82323b40bfc580b
|
[
"MIT"
] | 9
|
2018-08-22T17:09:23.000Z
|
2018-08-30T00:02:58.000Z
|
keg/configfile.py
|
MrMoonKr/keg-doc
|
d17aaba70fd7e8fd56960212b82323b40bfc580b
|
[
"MIT"
] | 4
|
2016-04-28T03:20:08.000Z
|
2022-02-08T20:47:14.000Z
|
from collections import namedtuple
from typing import Dict, Iterable, Type, TypeVar
from . import blizini
from .patch import PatchEntry
# A content/encoding key pair
KeyPair = namedtuple("KeyPair", ["content_key", "encoding_key"])
def parse_key_pair(value: str) -> KeyPair:
"""
    Parse a string that contains two or fewer hashes into a KeyPair
"""
pair = value.split()
if len(pair) > 2:
raise ValueError(f"Invalid KeyPair: {repr(pair)}")
elif len(pair) == 2:
content_key, encoding_key = pair
elif len(pair) == 1:
content_key, encoding_key = pair[0], ""
elif not pair:
content_key, encoding_key = "", ""
return KeyPair(content_key=content_key, encoding_key=encoding_key)
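# Added sketch (not part of the original module): spot-checks the three input
# shapes accepted by parse_key_pair; the hex strings are made-up placeholders.
def _example_parse_key_pair() -> None:
    assert parse_key_pair("c0ffee deadbeef") == KeyPair("c0ffee", "deadbeef")
    assert parse_key_pair("c0ffee") == KeyPair("c0ffee", "")
    assert parse_key_pair("") == KeyPair("", "")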
ConfigFile = TypeVar("ConfigFile", bound="BaseConfig")
class BaseConfig:
@classmethod
def from_bytes(cls: Type[ConfigFile], data: bytes, verify: bool = False) -> ConfigFile:
return cls(blizini.load(data.decode()))
def __init__(self, _values: Dict[str, str]) -> None:
self._values = _values
def __repr__(self):
return f"<{self.__class__.__name__}: {self._values}>"
class BuildConfig(BaseConfig):
def __init__(self, _values):
super().__init__(_values)
self.root = self._values.get("root", "")
self.install = parse_key_pair(self._values.get("install", ""))
self.download = parse_key_pair(self._values.get("download", ""))
self.size = parse_key_pair(self._values.get("size", "")) # Size file
self.encoding = parse_key_pair(self._values.get("encoding", ""))
self.patch = self._values.get("patch", "")
self.patch_config = self._values.get("patch-config", "")
self.build_name = self._values.get("build-name", "")
self.build_product = self._values.get("build-product", "")
self.build_uid = self._values.get("build-uid", "")
self.build_signature_file = self._values.get("build-signature-file", "")
class CDNConfig(BaseConfig):
def __init__(self, _values) -> None:
super().__init__(_values)
self.archive_group = self._values.get("archive-group", "")
self.patch_archive_group = self._values.get("patch-archive-group", "")
self.file_index = self._values.get("file-index", "")
self.patch_file_index = self._values.get("patch-file-index", "")
@property
def archives(self):
return self._values.get("archives", "").split()
@property
def patch_archives(self):
return self._values.get("patch-archives", "").split()
class PatchConfig(BaseConfig):
def __init__(self, _values) -> None:
super().__init__(_values)
self.patch = self._values.get("patch", "")
@property
def patch_entries(self) -> Iterable[PatchEntry]:
for entry in self._values.get("patch-entry", "").splitlines():
yield PatchEntry(entry)
@property
def patch_size(self):
return int(self._values.get("patch-size", "0"))
| 30.411111
| 88
| 0.707344
|
4ba5c7e8ec1d21a008fbc8c79baa3017aaaf10bd
| 728
|
py
|
Python
|
src/nth_root.py
|
baggakunal/learning-python
|
8b510f5b4bbcf4aa595b636eb61dc92947abdffb
|
[
"MIT"
] | null | null | null |
src/nth_root.py
|
baggakunal/learning-python
|
8b510f5b4bbcf4aa595b636eb61dc92947abdffb
|
[
"MIT"
] | null | null | null |
src/nth_root.py
|
baggakunal/learning-python
|
8b510f5b4bbcf4aa595b636eb61dc92947abdffb
|
[
"MIT"
] | null | null | null |
def nth_root(radicand: int, n: int) -> float:
return radicand ** (1/n)
def ordinal_suffix(value: int) -> str:
s = str(value)
if s.endswith('11'):
return 'th'
elif s.endswith('12'):
return 'th'
elif s.endswith('13'):
return 'th'
elif s.endswith('1'):
return 'st'
elif s.endswith('2'):
return 'nd'
elif s.endswith('3'):
return 'rd'
else:
return 'th'
def ordinal(value: int) -> str:
return str(value) + ordinal_suffix(value)
def display_nth_root(radicand: int, n: int) -> None:
root: float = nth_root(radicand, n)
message: str = "The " + ordinal(n) + " root of " + str(radicand) + " is " + str(root)
print(message)
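# Added sketch (not part of the original file): spot-checks the suffix rules,
# including the 11/12/13 special cases that take precedence over 1/2/3 endings.
def _example_usage() -> None:
    assert ordinal(1) == "1st"
    assert ordinal(2) == "2nd"
    assert ordinal(3) == "3rd"
    assert ordinal(11) == "11th"
    assert ordinal(112) == "112th"
    assert ordinal(23) == "23rd"
    assert abs(nth_root(27, 3) - 3.0) < 1e-9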
| 22.060606
| 89
| 0.557692
|
1ca6491b4ed0fdc9b86e4e9964ef7e6075f7b41b
| 9,456
|
py
|
Python
|
stepwise_mkt_param/model_inference.py
|
harriliu/marketing_mix_modeling
|
2a80d229d9a8652111664dc2c010720d87d07d6b
|
[
"MIT"
] | null | null | null |
stepwise_mkt_param/model_inference.py
|
harriliu/marketing_mix_modeling
|
2a80d229d9a8652111664dc2c010720d87d07d6b
|
[
"MIT"
] | null | null | null |
stepwise_mkt_param/model_inference.py
|
harriliu/marketing_mix_modeling
|
2a80d229d9a8652111664dc2c010720d87d07d6b
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import math
class driver_analysis:
def __init__(self, beta):
self.beta = beta*(10**10)
def get_sat_lvl(self, data, ds, alpha, media_var):
'''
        Returns an indexed response curve with four saturation levels:
        1. Breakthrough
        2. Optimal
        3. Saturation Begins
        4. Full Saturation
        The saturation levels are located using the 1st, 2nd and 3rd derivatives
        of the curve function.
        Note: saturation levels are computed at the weekly level by default.
'''
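        # Added explanatory note (the formulas below are unchanged): the fitted
        # response curve is a Gompertz-style S-curve,
        #     response(index) = beta1 ** (alpha ** index),
        # where beta1 is self.beta rescaled by 1e-10 and `index` is weekly spend
        # expressed as a percentage of the historical maximum. The saturation
        # points are read off the curve's derivatives: breakthrough at the
        # maximum of the 2nd derivative, optimal at the maximum of the 1st,
        # "saturation begins" at the maximum of the 3rd, and full saturation
        # where the response first reaches ~0.992.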
data[ds] = pd.to_datetime(data[ds])
data['week'] = data[ds].map(lambda x:x - timedelta(days=x.isoweekday() % 7))
data = data[['week', media_var]].groupby("week").sum().reset_index()
df_curve= pd.DataFrame()
index=((np.mean(data[media_var])/10)*100)/np.max(data[media_var])
df_curve['Index']=np.arange(0,300,index)
df_curve['var_volume']=df_curve['Index']*np.max(data[media_var])/100
def s_curve_chart (data, column_name, alpha, beta):
media_input_index = data['Index']
beta1 = np.float(beta*(10**-10))
column_name1 = str(column_name)+'_alpha_'+str(alpha).replace('.','')
data[column_name1] = round(beta1**(alpha**media_input_index),8)
return column_name1
df_curve['var_curve'] = s_curve_chart(df_curve,'var_volume',alpha, self.beta)
df_curve['max_var'] = np.max(data[media_var])
df_curve['mean_var'] = np.mean(data[media_var])
df_curve.drop('var_curve',axis = 1,inplace = True)
df_curve.sort_values(by = 'var_volume',inplace = True)
########################################################################
##########Calculate optimal point 1st derivative of the curve###########
########################################################################
def deri_1st(data,var_column,index_column):
data['deri_1st']=alpha**(data[index_column])*data[var_column]*np.log(alpha)*np.log(np.float(self.beta*(10**-10)))
deri_1st(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index')
self.opt_x=df_curve[df_curve['deri_1st']==df_curve['deri_1st'].max()]['var_volume']
self.opt_y=df_curve[df_curve['deri_1st']==df_curve['deri_1st'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]
df_curve['opt_x'] = self.opt_x
df_curve['opt_y'] = self.opt_y
############################################################
#######Calculate breakthrough point 2nd derivative #########
############################################################
def deri_2nd(data,var_column,index_column,frist_column):
data['deri_2nd']=data[frist_column]*np.log(alpha)+\
alpha**(2*data[index_column])*data[var_column]*\
np.log(alpha)*np.log(alpha)*np.log(np.float(self.beta*(10**-10)))*np.log(np.float(self.beta*(10**-10)))
deri_2nd(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index','deri_1st')
self.bt_x=df_curve[df_curve['deri_2nd']==df_curve['deri_2nd'].max()]['var_volume']
self.bt_y=df_curve[df_curve['deri_2nd']==df_curve['deri_2nd'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]
df_curve['bt_x']=self.bt_x
df_curve['bt_y']=self.bt_y
##################################################################
#########Calculate saturation begins point 3rd derivative#########
##################################################################
def deri_3rd(data,var_column,index_column,frist_column):
data['deri_3rd']=data[frist_column]*(alpha**(2*data[index_column])*np.log(np.float(self.beta*(10**-10))**2)+\
3*alpha**data[index_column]*np.log(np.float(self.beta*(10**-10)))+1)
deri_3rd(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index','deri_1st')
self.sb_x=df_curve[df_curve['deri_3rd']==df_curve['deri_3rd'].max()]['var_volume']
self.sb_y=df_curve[df_curve['deri_3rd']==df_curve['deri_3rd'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]
df_curve['sb_x']=self.sb_x
df_curve['sb_y']=self.sb_y
#################################################
#########Calculate full saturation point#########
#################################################
self.fs_x=df_curve[df_curve['var_volume_alpha_'+str(alpha).replace('.','')]>=0.992]['var_volume'][0:1]
self.fs_y=df_curve[df_curve['var_volume_alpha_'+str(alpha).replace('.','')]>=0.992]['var_volume_alpha_'+str(alpha).replace('.','')][0:1]
df_curve['fs_x']=self.fs_x
df_curve['fs_y']=self.fs_y
return df_curve
def readable_number(self, n):
mill_lst = ['',' Thousand',' Million',' Billion',' Trillion']
n = float(n)
millidx = max(0,min(len(mill_lst)-1, int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
return '{:.1f}{}'.format(n / 10**(3 * millidx), mill_lst[millidx])
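    # Illustrative spot-checks for readable_number (added note, not original code):
    #     self.readable_number(950)       -> '950.0'
    #     self.readable_number(1500)      -> '1.5 Thousand'
    #     self.readable_number(2300000)   -> '2.3 Million'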
def plot_sat_lvl(self, df_curve, model_df, ds, var):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 10))
plt.style.use('ggplot')
#plot curve line
lm = sns.lineplot(x='var_volume', y = [col for col in df_curve.columns if "alpha" in col][0],
data = df_curve, color = '#37536d', ax = ax1)
# formatting number into readable format
y_ticks = lm.get_yticks()
x_ticks = lm.get_xticks()
lm.set_yticklabels(['{:,.0%}'.format(i) for i in y_ticks])
lm.set_xticklabels([self.readable_number(i) for i in x_ticks])
# plot saturation levels
ax1.plot(df_curve['bt_x'], df_curve['bt_y'],'ro',label="Break Through",marker='o', markersize=10,color='m')
ax1.plot(df_curve['opt_x'], df_curve['opt_y'], 'ro',label="Optimal",marker='o', markersize=10,color='g')
        ax1.plot(df_curve['sb_x'], df_curve['sb_y'], 'ro',label="Saturation Begins",marker='o', markersize=10,color='r')
        ax1.plot(df_curve['fs_x'], df_curve['fs_y'], 'ro',label="Full Saturation",marker='o', markersize=10,color='c')
# # Set plot options and show plot
ax1.set_xlabel('Variable Volumes',fontsize=20)
ax1.set_ylabel('Response Index',fontsize=20)
ax1.set_title(var +' Response Curve',fontsize=20)
ax1.legend(loc='center right', fancybox=False, framealpha=0)
# creating dataframe for plotting volume against saturation level plot
df_volume = pd.DataFrame()
df_volume['period'] = pd.to_datetime(pd.to_datetime(model_df[ds]).map(lambda x:x.strftime("%Y-%m-%d")))
df_volume['week'] = df_volume['period'].map(lambda x:x - timedelta(days=x.isoweekday() % 7))
df_volume['week'] = pd.to_datetime(df_volume['week']).map(lambda x:x.strftime("%Y-%m-%d"))
df_volume['var_volume'] = model_df[var]
df_volume = df_volume[['week', 'var_volume']].groupby("week").sum().reset_index()
max_x=df_volume['var_volume'].max()
df_volume['Optimal']=int(df_curve['opt_x'].unique()[1])
df_volume['Break Through']=int(df_curve['bt_x'].unique()[1])
        df_volume['Saturation Begins']=int(df_curve['sb_x'].unique()[1])
try:
            df_volume['Full Saturation']=int(df_curve['fs_x'].unique()[1])
except:
print('out of range')
fs_x=0
pass
df_volume['Max'] = max_x
df_volume['var_name'] = var
# plot volume against saturation level
textstr = '\n'.join((
r'Breakthrough: ${}'.format(self.readable_number(int(df_volume['Break Through'].unique()[0])), ),
r'Optimal: ${}'.format(self.readable_number(int(df_volume['Optimal'].unique()[0])), ),
            r'Saturation Begins: ${}'.format(self.readable_number(int(df_volume['Saturation Begins'].unique()[0])),),
            r'Full Saturation: ${}'.format(self.readable_number(int(df_volume['Full Saturation'].unique()[0])),),
))
ax2 = sns.barplot(x=df_volume['week'], y = df_volume['var_volume'], color = '#37536d', ax = ax2)
y_ticks2 = ax2.get_yticks()
ax2.set_yticklabels([self.readable_number(i) for i in y_ticks2])
ax2.plot('week','Break Through',data=df_volume, color='m', linewidth=5,linestyle='dashed')
ax2.plot('week','Optimal', data=df_volume, color='g', linewidth=5,linestyle='dashed')
        ax2.plot('week','Saturation Begins', data=df_volume, color='r', linewidth=5,linestyle='dashed')
        ax2.plot('week','Full Saturation', data=df_volume, color='c', linewidth=5,linestyle='dashed')
ax2.set_title(var +' Volume Against Weekly Saturation Levels',fontsize=20)
ax2.set_xlabel("Week",fontsize=20)
ax2.set_xticks(df_volume['week'])
ax2.set_xticklabels(df_volume['week'], rotation=40, ha='right')
ax2.set_ylabel("Volume",fontsize=20)
ax2.set_yticks(y_ticks2)
props = dict(boxstyle='round', alpha=0.5)
ax2.text(0.6, 0.95, textstr, transform=ax2.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax2.legend(loc='upper right', fancybox=True, framealpha=5, bbox_to_anchor=(1, 0.95))
plt.tight_layout(pad=5)
plt.show()
| 50.031746
| 144
| 0.580795
|
c6de2ff9d27cea3e5ca2f998767e207866e21de4
| 705
|
py
|
Python
|
api/generator/free_ride_token_generator.py
|
seunkoko/tms_backend
|
75a7e3bf9ddd2c4723728b3b5aed742f5976460e
|
[
"MIT"
] | null | null | null |
api/generator/free_ride_token_generator.py
|
seunkoko/tms_backend
|
75a7e3bf9ddd2c4723728b3b5aed742f5976460e
|
[
"MIT"
] | null | null | null |
api/generator/free_ride_token_generator.py
|
seunkoko/tms_backend
|
75a7e3bf9ddd2c4723728b3b5aed742f5976460e
|
[
"MIT"
] | null | null | null |
import uuid
from datetime import datetime, timedelta
try:
from ..models import FreeRide
except ImportError:
from moov_backend.api.models import FreeRide
# free-ride token generator
def generate_free_ride_token(user_email):
free_ride_token = None
count = 1
# runs until a unique token is generated
while not free_ride_token:
payload = "{0} {1} {2}".format(user_email, str(datetime.now()), count)
generated_token = uuid.uuid5(uuid.NAMESPACE_DNS, payload)
_token_found = FreeRide.query.filter(FreeRide.token==str(generated_token)).first()
count += 1
if not _token_found:
free_ride_token = str(generated_token)
return free_ride_token
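# Note (added): uuid.uuid5 is deterministic for a given payload, so the loop
# varies the payload through the current timestamp and the retry counter and
# re-checks each candidate against FreeRide.token before returning it. The
# FreeRide.query lookup presumably requires an active application/database
# context (Flask-SQLAlchemy style), which is assumed here.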
| 27.115385
| 90
| 0.699291
|
af955b0374c9c1499aadfb94e998514168b3c475
| 688
|
py
|
Python
|
djaveNav/nav_with_settings.py
|
dasmith2/djaveNav
|
8a99e310e0524a19cb4ef1b0edfe1d1845437dbc
|
[
"MIT"
] | null | null | null |
djaveNav/nav_with_settings.py
|
dasmith2/djaveNav
|
8a99e310e0524a19cb4ef1b0edfe1d1845437dbc
|
[
"MIT"
] | null | null | null |
djaveNav/nav_with_settings.py
|
dasmith2/djaveNav
|
8a99e310e0524a19cb4ef1b0edfe1d1845437dbc
|
[
"MIT"
] | null | null | null |
""" The difference between nav and nav_with_settings is nav_with_settings puts
account stuff to the right of the navigation. So you'll probably want to use
this for your top level navigation. """
from djaveNav.nav import Nav, NavItem
class NavWithAccount(Nav):
def __init__(self, nav_list, settings_view_name, current_view_name):
settings_nav_item = NavItem(settings_view_name, 'Settings')
super().__init__(nav_list + [settings_nav_item], current_view_name)
def template(self):
return 'nav_with_settings.html'
def context(self):
return {
'primary_nav_items': self.nav_items_current_or_not[:-1],
'settings_nav': self.nav_items_current_or_not[-1]}
| 36.210526
| 78
| 0.757267
|
d795479685400fb44e4df69fdad0f2bd829d78c9
| 418
|
py
|
Python
|
hackerrank_problems/Angry_Professor.py
|
sreshtha10/CC
|
77147ce863dab64ecb2f76dd09560d963c763fa0
|
[
"MIT"
] | 2
|
2021-11-26T13:50:59.000Z
|
2021-11-26T14:00:16.000Z
|
hackerrank_problems/Angry_Professor.py
|
sreshthamehrotra00/CC
|
77147ce863dab64ecb2f76dd09560d963c763fa0
|
[
"MIT"
] | null | null | null |
hackerrank_problems/Angry_Professor.py
|
sreshthamehrotra00/CC
|
77147ce863dab64ecb2f76dd09560d963c763fa0
|
[
"MIT"
] | null | null | null |
test_cases = int(input())
outcomes = []
for test in range(test_cases):
arr = list(map(int,input().split()))
student_timings = list(map(int,input().split()))
onTime = 0
for student_timing in student_timings:
if student_timing <= 0:
onTime += 1
if onTime < arr[1]:
outcomes.append('YES')
else:
outcomes.append('NO')
for outcome in outcomes:
print(outcome)
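# Worked example (added illustration, input made up): for the input
#     1
#     4 3
#     -1 -3 4 2
# only two arrival times are <= 0, which is below the threshold of 3, so the
# class is cancelled and the program prints "YES".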
| 26.125
| 52
| 0.607656
|
71471635a72b959d0f4a4d8d6b4b3cd27fd7246f
| 8,609
|
py
|
Python
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_03_01/operations/_alert_rule_incidents_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_03_01/operations/_alert_rule_incidents_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_03_01/operations/_alert_rule_incidents_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AlertRuleIncidentsOperations(object):
"""AlertRuleIncidentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2016_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
rule_name, # type: str
incident_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.Incident"
"""Gets an incident associated to an alert rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:param incident_name: The name of the incident to retrieve.
:type incident_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Incident, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2016_03_01.models.Incident
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Incident"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'incidentName': self._serialize.url("incident_name", incident_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Incident', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents/{incidentName}'} # type: ignore
def list_by_alert_rule(
self,
resource_group_name, # type: str
rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.IncidentListResult"]
"""Gets a list of incidents associated to an alert rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param rule_name: The name of the rule.
:type rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IncidentListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2016_03_01.models.IncidentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.IncidentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_alert_rule.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IncidentListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_alert_rule.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents'} # type: ignore
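# Typical use (added sketch; the client construction and attribute name are
# assumptions, not shown in this file): the operations class is reached through
# the generated management client rather than instantiated directly, e.g.
#
#     incidents = monitor_client.alert_rule_incidents.list_by_alert_rule(
#         resource_group_name="my-rg", rule_name="my-rule")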
| 46.284946
| 188
| 0.655477
|
c9a29219dbeb3ca0bc44425f804f7176ec73879f
| 328
|
py
|
Python
|
threepy/geometry/QuadGeometry.py
|
district10/three.py
|
e46a41267d210b8daecc2a0cc81350493132c8da
|
[
"MIT"
] | 2
|
2019-04-09T15:57:17.000Z
|
2019-04-10T04:15:53.000Z
|
threepy/geometry/QuadGeometry.py
|
district10/three.py
|
e46a41267d210b8daecc2a0cc81350493132c8da
|
[
"MIT"
] | null | null | null |
threepy/geometry/QuadGeometry.py
|
district10/three.py
|
e46a41267d210b8daecc2a0cc81350493132c8da
|
[
"MIT"
] | null | null | null |
from threepy.geometry import *
class QuadGeometry(SurfaceGeometry):
def __init__(self, width=2, height=2, widthResolution=4,
heightResolution=4):
super().__init__(-width / 2, width / 2, widthResolution, -height / 2,
height / 2, heightResolution, lambda u, v: [u, v, 0])
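# Added note: e.g. QuadGeometry(width=2, height=1, widthResolution=8,
# heightResolution=4) builds a flat quad in the z = 0 plane, spanning
# [-1, 1] x [-0.5, 0.5] with the given sampling resolutions.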
| 29.818182
| 78
| 0.606707
|
8597334d3e22e0b72f5583ba6460ae95769036f1
| 913
|
py
|
Python
|
src/config.py
|
Scapogo/space-whiskey
|
be699586357515d87f904934ddd665588b7584b7
|
[
"BSD-3-Clause"
] | null | null | null |
src/config.py
|
Scapogo/space-whiskey
|
be699586357515d87f904934ddd665588b7584b7
|
[
"BSD-3-Clause"
] | 17
|
2018-09-12T01:01:02.000Z
|
2018-10-07T20:02:26.000Z
|
src/config.py
|
Scapogo/space-whiskey
|
be699586357515d87f904934ddd665588b7584b7
|
[
"BSD-3-Clause"
] | 3
|
2018-09-15T15:39:29.000Z
|
2018-10-05T17:50:26.000Z
|
# -*- coding: utf-8 -*-
"""
space-whiskey.config
~~~~~~~~~~~~~~
:copyright: © 2018 by Phil Royer.
:license: BSD, see LICENSE for more details.
"""
import os
import utils
import json
import logging
class Config:
def __init__(self):
self.file = 'config.json'
self.fullscreen = True
self.logfile = 'error.log'
if self.hasConfig():
self.readConfig()
self.setupLogging()
def hasConfig(self):
return os.path.isfile(self.file)
def readConfig(self):
        with open(self.file) as f:
data = json.load(f)
self.fullscreen = data['fullscreen']
self.logfile = data['logfile']
def setupLogging(self):
        # clear the log file every time the application opens
with open(self.logfile, 'w'):
pass
logging.basicConfig(filename=self.logfile, level=logging.DEBUG)
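# Example config.json read by Config.readConfig() (added illustration; the
# values are arbitrary):
#
#     {
#         "fullscreen": false,
#         "logfile": "error.log"
#     }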
| 24.675676
| 71
| 0.58379
|
93be0ab6a3db9db9bdfbc679c235fdb8a4e27c13
| 7,576
|
py
|
Python
|
regionmask/test/test_Regions.py
|
COVID-Weather/regionmask
|
54f2bebe5f99bd73da1341eec7d6b1d569bf5436
|
[
"MIT"
] | null | null | null |
regionmask/test/test_Regions.py
|
COVID-Weather/regionmask
|
54f2bebe5f99bd73da1341eec7d6b1d569bf5436
|
[
"MIT"
] | null | null | null |
regionmask/test/test_Regions.py
|
COVID-Weather/regionmask
|
54f2bebe5f99bd73da1341eec7d6b1d569bf5436
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
import six
from shapely.geometry import MultiPolygon, Polygon
from regionmask import Regions, _OneRegion
# =============================================================================
# set up the testing regions
name = "Example"
numbers1 = [0, 1]
names = ["Unit Square1", "Unit Square2"]
abbrevs = ["uSq1", "uSq2"]
outl1 = ((0, 0), (0, 1), (1, 1.0), (1, 0))
outl2 = ((0, 1), (0, 2), (1, 2.0), (1, 1))
outlines = [outl1, outl2]
test_regions1 = Regions(outlines, numbers1, names, abbrevs, name=name)
numbers2 = [1, 2]
names_dict = {1: "Unit Square1", 2: "Unit Square2"}
abbrevs_dict = {1: "uSq1", 2: "uSq2"}
poly1 = Polygon(outl1)
poly2 = Polygon(outl2)
poly = {1: poly1, 2: poly2}
test_regions2 = Regions(poly, numbers2, names_dict, abbrevs_dict, name=name)
# numbers as array
numbers3 = [2, 3]
test_regions3 = Regions(outlines, np.array(numbers3), names, abbrevs, name=name)
# =============================================================================
all_test_regions = (test_regions1, test_regions2, test_regions3)
all_numbers = (numbers1, numbers2, numbers3)
all_first_numbers = (0, 1, 2)
# =============================================================================
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_len(test_regions):
assert len(test_regions) == 2
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_name(test_regions):
assert test_regions.name == name
@pytest.mark.parametrize("test_regions, numbers", zip(all_test_regions, all_numbers))
def test_numbers(test_regions, numbers):
assert np.allclose(test_regions.numbers, numbers)
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_names(test_regions):
assert test_regions.names == ["Unit Square1", "Unit Square2"]
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_abbrevs(test_regions):
assert test_regions.abbrevs == ["uSq1", "uSq2"]
def test_coords():
# passing numpy coords does not automatically close the coords
assert np.allclose(test_regions1.coords, [outl1, outl2])
# the polygon automatically closes the outline
out1 = np.vstack([outl1, outl1[0]])
out2 = np.vstack([outl2, outl2[0]])
assert np.allclose(test_regions2.coords, [out1, out2])
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_bounds(test_regions):
expected = [(0, 0, 1, 1), (0, 1, 1, 2)]
assert np.allclose(test_regions.bounds, expected)
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_bounds_global(test_regions):
expected = [0, 0, 1, 2]
assert np.allclose(test_regions.bounds_global, expected)
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_polygon(test_regions):
assert isinstance(test_regions.polygons, list)
assert len(test_regions.polygons) == 2
assert test_regions.polygons[0].equals(poly1)
assert test_regions.polygons[1].equals(poly2)
@pytest.mark.parametrize("test_regions", all_test_regions)
def test_centroid(test_regions):
assert np.allclose(test_regions.centroids, [[0.5, 0.5], [0.5, 1.5]])
def test_centroid_multipolygon():
multipoly_equal = [MultiPolygon([poly1, poly2])]
test_regions_multipoly_equal = Regions(multipoly_equal)
# two equally sized polygons: uses the centroid of the first one
assert np.allclose(test_regions_multipoly_equal.centroids, [[0.5, 0.5]])
# two un-equally sized polygons: uses the centroid of the larger one
outl2_unequal = ((0, 1), (0, 2), (2, 2.0), (2, 1))
poly2_unequal = Polygon(outl2_unequal)
multipoly_unequal = [MultiPolygon([poly1, poly2_unequal])]
test_regions_multipoly_unequal = Regions(multipoly_unequal)
assert np.allclose(test_regions_multipoly_unequal.centroids, [[1.0, 1.5]])
@pytest.mark.parametrize(
"test_regions, number", zip(all_test_regions, all_first_numbers)
)
def test_map_keys_one(test_regions, number):
pytest.raises(KeyError, test_regions1.__getitem__, "")
expected = number
assert test_regions.map_keys(number) == expected
assert test_regions.map_keys("uSq1") == expected
assert test_regions.map_keys("Unit Square1") == expected
def test_map_keys_np_integer():
key = np.array([2, 2])[0]
assert test_regions3.map_keys(key) == 2
@pytest.mark.parametrize("test_regions, numbers", zip(all_test_regions, all_numbers))
def test_map_keys_several(test_regions, numbers):
assert test_regions.map_keys(numbers) == numbers
assert test_regions.map_keys(("uSq1", "uSq2")) == numbers
assert test_regions.map_keys(("Unit Square1", "Unit Square2")) == numbers
def test_map_keys_mixed():
assert test_regions1.map_keys([0, "uSq2"]) == [0, 1]
def test_map_keys_unique():
assert test_regions1.map_keys([0, 0, 0]) == [0]
assert test_regions1.map_keys([0, 0, 0, 1]) == [0, 1]
@pytest.mark.parametrize(
"test_regions, number", zip(all_test_regions, all_first_numbers)
)
def test_subset_to_Region(test_regions, number):
s1 = test_regions[number]
assert isinstance(s1, _OneRegion)
assert s1.number == number
assert s1.abbrev == "uSq1"
s1 = test_regions["uSq1"]
assert isinstance(s1, _OneRegion)
assert s1.number == number
assert s1.abbrev == "uSq1"
s1 = test_regions["Unit Square1"]
assert isinstance(s1, _OneRegion)
assert s1.number == number
assert s1.abbrev == "uSq1"
@pytest.mark.parametrize(
"test_regions, number", zip(all_test_regions, all_first_numbers)
)
def test_subset_to_Regions(test_regions, number):
s1 = test_regions[[number]]
assert isinstance(s1, Regions)
assert s1.numbers == [number]
assert s1.abbrevs == ["uSq1"]
@pytest.mark.parametrize("numbers", [None, [1, 2]])
@pytest.mark.parametrize("names", [None, "names", names])
@pytest.mark.parametrize("abbrevs", [None, "abbrevs", abbrevs])
@pytest.mark.parametrize("name", [None, "name"])
def test_optional_arguments(numbers, names, abbrevs, name):
if name is None:
result = Regions(outlines, numbers, names, abbrevs)
else:
result = Regions(outlines, numbers, names, abbrevs, name)
if numbers is None:
numbers = [0, 1]
if names is None:
names = _create_expected_str_list(numbers, "Region")
elif isinstance(names, six.string_types):
names = _create_expected_str_list(numbers, names)
if abbrevs is None:
abbrevs = _create_expected_str_list(numbers, "r")
elif isinstance(abbrevs, six.string_types):
abbrevs = _create_expected_str_list(numbers, abbrevs)
expected_centroids = [[0.5, 0.5], [0.5, 1.5]]
if name is None:
name = "unnamed"
assert result.numbers == numbers
assert result.names == names
assert result.abbrevs == abbrevs
assert np.allclose(result.centroids, expected_centroids)
assert result.name == name
def _create_expected_str_list(numbers, string):
return [string + str(number) for number in numbers]
def test_lon_extent():
assert test_regions1.lon_180
assert not test_regions1.lon_360
outl_ = ((0, 0), (0, 1), (360, 1.0), (360, 0))
test_regions_ = Regions([outl_])
assert not test_regions_.lon_180
assert test_regions_.lon_360
outl_ = ((-1, 0), (-1, 1), (360, 1.0), (360, 0))
test_regions_ = Regions([outl_])
with pytest.raises(ValueError, match="lon has both data that is larger than 180 "):
test_regions_.lon_180
with pytest.raises(ValueError, match="lon has both data that is larger than 180 "):
test_regions_.lon_360
| 28.916031
| 87
| 0.684398
|
6a3b642ec41430e18d4d889d74acb69254ae5482
| 4,670
|
py
|
Python
|
pinakes/main/approval/tests/services/test_process_root_request.py
|
mkanoor/pinakes
|
cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3
|
[
"Apache-2.0"
] | null | null | null |
pinakes/main/approval/tests/services/test_process_root_request.py
|
mkanoor/pinakes
|
cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3
|
[
"Apache-2.0"
] | null | null | null |
pinakes/main/approval/tests/services/test_process_root_request.py
|
mkanoor/pinakes
|
cfcc6e8e12e9c68d7930f41075b5e4e0dfee51c3
|
[
"Apache-2.0"
] | null | null | null |
""" Module to test processing root requests """
from unittest.mock import Mock, call
import pytest
from pinakes.main.approval.tests.factories import (
WorkflowFactory,
RequestFactory,
)
from pinakes.main.approval.services.process_root_request import (
ProcessRootRequest,
)
from pinakes.main.approval.tasks import start_request_task
from pinakes.main.catalog.services.handle_approval_events import (
HandleApprovalEvents,
)
@pytest.mark.django_db
def test_process_request_no_workflow(mocker):
"""Test to create a new request with no workflow"""
service = _prepare_service(mocker, [])
request = service.process().request
_assert_request(request, state="completed", decision="approved")
@pytest.mark.django_db
def test_process_request_one_workflow(mocker):
"""Test to create a new request with one workflow but no group"""
workflow = WorkflowFactory()
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(
request, state="notified", group_name="<NO_GROUP>", workflow=workflow
)
@pytest.mark.django_db
def test_process_request_one_workflow_one_group(mocker):
"""Test to create a new request with one workflow and one group"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
validations = mocker.patch(
"pinakes.main.approval.validations.runtime_validate_group",
return_value=True,
)
workflow = WorkflowFactory(group_refs=({"name": "n1", "uuid": "u1"},))
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(
request, state="notified", group_name="n1", workflow=workflow
)
assert add_permissions.call_count == 1
assert validations.call_count == 1
@pytest.mark.django_db
def test_process_request_one_workflow_groups(mocker):
"""Test to create a new request with one workflow multiple groups"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
enqueue = mocker.patch("django_rq.enqueue", return_value=Mock(id=123))
workflow = WorkflowFactory(
group_refs=({"name": "n1", "uuid": "u1"}, {"name": "n2", "uuid": "u2"})
)
service = _prepare_service(mocker, [workflow.id])
request = service.process().request
_assert_request(request, num_children=2, group_name="n1,n2")
_assert_request(request.requests[0], group_name="n1", workflow=workflow)
_assert_request(request.requests[1], group_name="n2", workflow=workflow)
enqueue.assert_has_calls(
[
call(start_request_task, request.requests[0].id),
call(start_request_task, request.requests[1].id),
]
)
assert add_permissions.call_count == 2
@pytest.mark.django_db
def test_process_request_workflows_groups(mocker):
"""Test to create a new request with workflows and groups"""
add_permissions = mocker.patch(
"pinakes.main.common.tasks.add_group_permissions",
return_value=None,
)
mocker.patch(
"pinakes.main.approval.validations.runtime_validate_group",
return_value=True,
)
workflow1 = WorkflowFactory(group_refs=({"name": "n1", "uuid": "u1"},))
workflow2 = WorkflowFactory()
service = _prepare_service(mocker, [workflow1.id, workflow2.id])
request = service.process().request
request.refresh_from_db()
_assert_request(
request, state="notified", num_children=2, group_name="n1,<NO_GROUP>"
)
_assert_request(
request.requests[0],
state="notified",
group_name="n1",
workflow=workflow1,
)
_assert_request(
request.requests[1],
state="pending",
group_name="<NO_GROUP>",
workflow=workflow2,
)
assert add_permissions.call_count == 1
def _prepare_service(mocker, workflow_ids):
request = RequestFactory(
name="test", description="description", workflow=None
)
mocker.patch.object(HandleApprovalEvents, "process", return_value=None)
service = ProcessRootRequest(request.id, workflow_ids)
return service
def _assert_request(
request,
state="pending",
decision="undecided",
num_children=0,
group_name="",
workflow=None,
):
assert request.name == "test"
assert request.description == "description"
assert request.state == state
assert request.decision == decision
assert request.number_of_children == num_children
assert request.workflow == workflow
assert request.group_name == group_name
| 31.554054
| 79
| 0.698073
|
5fbe2769664ca14075fd4a94ec343e417b91e17b
| 1,259
|
py
|
Python
|
src/dagster_pipeline/train_pipeline.py
|
hectorLop/Spanish-LaLiga-Prediction
|
ac7b81622a391e7b77734528b9d106190e6b8827
|
[
"MIT"
] | null | null | null |
src/dagster_pipeline/train_pipeline.py
|
hectorLop/Spanish-LaLiga-Prediction
|
ac7b81622a391e7b77734528b9d106190e6b8827
|
[
"MIT"
] | null | null | null |
src/dagster_pipeline/train_pipeline.py
|
hectorLop/Spanish-LaLiga-Prediction
|
ac7b81622a391e7b77734528b9d106190e6b8827
|
[
"MIT"
] | 1
|
2021-12-15T11:06:10.000Z
|
2021-12-15T11:06:10.000Z
|
from pathlib import Path
from src.config.config import DATA_DIR, CODE_DIR
from src.config.logger_config import logger
from src.training.utils import load_data, train_model
from dagster import solid, pipeline, Output, OutputDefinition, execute_pipeline
import pandas as pd
@solid(
output_defs=[
OutputDefinition(name='X_train', is_required=True),
OutputDefinition(name='X_test', is_required=True),
OutputDefinition(name='y_train', is_required=True),
OutputDefinition(name='y_test', is_required=True),
]
)
def get_data(context):
logger.info('TRAINING: Loading the data...')
X_train, X_test, y_train, y_test = load_data()
yield Output(X_train, 'X_train')
yield Output(X_test, 'X_test')
yield Output(y_train, 'y_train')
yield Output(y_test, 'y_test')
@solid
def model_training(context, X_train, X_test, y_train, y_test):
logger.info('TRAINING: Training the model')
config_path = Path(CODE_DIR, 'training/model_config.yaml')
train_model(config_path, X_train, X_test, y_train, y_test)
@pipeline
def train_pipeline():
X_train, X_test, y_train, y_test = get_data()
model_training(X_train, X_test, y_train, y_test)
if __name__ == '__main__':
execute_pipeline(train_pipeline)
| 33.131579
| 79
| 0.733916
|
27ae28c20ab97a2098e447c023981f244aa66e5e
| 8,867
|
py
|
Python
|
nnvm/tests/python/frontend/coreml/test_forward.py
|
dendisuhubdy/tvm
|
7cb85d81968cd69576d923852d812590b93cc26d
|
[
"Apache-2.0"
] | null | null | null |
nnvm/tests/python/frontend/coreml/test_forward.py
|
dendisuhubdy/tvm
|
7cb85d81968cd69576d923852d812590b93cc26d
|
[
"Apache-2.0"
] | null | null | null |
nnvm/tests/python/frontend/coreml/test_forward.py
|
dendisuhubdy/tvm
|
7cb85d81968cd69576d923852d812590b93cc26d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import datatypes
import tvm
from tvm.contrib import graph_runtime
import topi
import topi.testing
import nnvm.symbol as sym
import nnvm.compiler
from nnvm.testing.config import ctx_list
from nnvm import frontend
import coremltools as cm
import model_zoo
def get_tvm_output(symbol, x, params, target, ctx,
out_shape=(1000,), input_name='image', dtype='float32'):
shape_dict = {input_name : x.shape}
with nnvm.compiler.build_config(opt_level=3):
graph, lib, params = nnvm.compiler.build(symbol, target, shape_dict, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input(input_name, tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
def test_model_checkonly(model_file, model_name=''):
model = cm.models.MLModel(model_file)
sym, params = nnvm.frontend.from_coreml(model)
x = model_zoo.get_cat_image()
for target, ctx in ctx_list():
tvm_output = get_tvm_output(sym, x, params, target, ctx)
print(target, ctx, model_name, 'prediction id: ', np.argmax(tvm_output.flat))
def test_mobilenet_checkonly():
model_file = model_zoo.get_mobilenet()
test_model_checkonly(model_file, 'mobilenet')
def test_resnet50_checkonly():
model_file = model_zoo.get_resnet50()
test_model_checkonly(model_file, 'resnet50')
def run_tvm_graph(graph_def, input_data, input_name, output_shape, output_dtype='float32'):
""" Generic function to compile on nnvm and execute on tvm """
sym, params = nnvm.frontend.from_coreml(graph_def)
target = 'llvm'
if isinstance(input_data, list):
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_name):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype
else:
shape_dict = {input_name: input_data.shape}
dtype_dict = {input_name: input_data.dtype}
graph, lib, params = nnvm.compiler.build(sym, target, shape_dict,
dtype=dtype_dict, params=params)
ctx = tvm.cpu(0)
from tvm.contrib import graph_runtime
m = graph_runtime.create(graph, lib, ctx)
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_name):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
else:
m.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, s in enumerate(output_shape):
tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
tvm_output_list.append(tvm_output.asnumpy())
return tvm_output_list
else:
tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
return tvm_output.asnumpy()
def verify_AddLayerParams(input_dim, alpha=2):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.add(a_np1, a_np2) + alpha
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Add',
alpha=alpha,
input_names=['input1', 'input2'],
output_name='output',
mode='ADD')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model,
[a_np1, a_np2],
['input1', 'input2'],
b_np.shape,
dtype)
np.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_AddLayerParams():
verify_AddLayerParams((1, 2, 2), 0)
verify_AddLayerParams((1, 2, 2), 1)
verify_AddLayerParams((1, 3, 3), 2)
def verify_MultiplyLayerParams(input_dim, alpha):
dtype = 'float32'
a_np1 = np.random.uniform(size=input_dim).astype(dtype)
a_np2 = np.random.uniform(size=input_dim).astype(dtype)
b_np = np.multiply(a_np1, a_np2) * alpha
inputs = [('input1', datatypes.Array(*input_dim)),
('input2', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Mul',
alpha=alpha,
input_names=['input1', 'input2'],
output_name='output',
mode='MULTIPLY')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model,
[a_np1, a_np2],
['input1', 'input2'],
b_np.shape,
dtype)
np.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_MultiplyLayerParams():
verify_MultiplyLayerParams((1, 2, 2), 0)
verify_MultiplyLayerParams((1, 2, 2), 1)
verify_MultiplyLayerParams((1, 3, 3), 2)
def verify_ConcatLayerParams(input1_dim, input2_dim):
dtype = 'float32'
a_np1 = np.random.uniform(size=input1_dim).astype(dtype)
a_np2 = np.random.uniform(size=input2_dim).astype(dtype)
b_np = np.concatenate((a_np1, a_np2), axis=1)
inputs = [('input1', datatypes.Array(*input1_dim)),
('input2', datatypes.Array(*input2_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(inputs, output)
builder.add_elementwise(name='Concate',
input_names=['input1', 'input2'],
output_name='output',
mode='CONCAT')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model,
[a_np1, a_np2],
['input1', 'input2'],
b_np.shape,
dtype)
np.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_ConcatLayerParams():
verify_ConcatLayerParams((1, 1, 2, 2), (1, 2, 2, 2))
verify_ConcatLayerParams((1, 2, 4, 4), (1, 3, 4, 4))
def verify_UpsampleLayerParams(input_dim, scale, mode):
dtype = "float32"
a_np = np.full(input_dim, 1, dtype=dtype)
if mode == 'NN':
b_np = topi.testing.upsampling_python(a_np, scale)
else:
new_h = input_dim[2] * scale
new_w = input_dim[3] * scale
b_np = topi.testing.bilinear_resize_python(a_np, (new_h, new_w), 'NCHW')
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(input, output)
builder.add_upsample(name='Upsample',
scaling_factor_h=scale,
scaling_factor_w=scale,
mode=mode,
input_name='input',
output_name='output')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, a_np, 'input', b_np.shape, dtype)
np.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_UpsampleLayerParams():
verify_UpsampleLayerParams((1, 16, 32, 32), 2, 'NN')
verify_UpsampleLayerParams((1, 4, 6, 6), 3, 'BILINEAR')
def verify_l2_normalize(input_dim, eps):
dtype = "float32"
a_np = np.random.uniform(size=input_dim).astype(dtype)
b_np = topi.testing.l2_normalize_python(a_np, eps, 1)
input = [('input', datatypes.Array(*input_dim))]
output = [('output', datatypes.Array(*b_np.shape))]
builder = NeuralNetworkBuilder(input, output)
builder.add_l2_normalize(name='L2', epsilon=eps, input_name='input', output_name='output')
model = cm.models.MLModel(builder.spec)
for target, ctx in ctx_list():
out = run_tvm_graph(model, a_np, 'input', b_np.shape, dtype)
np.testing.assert_allclose(out, b_np, rtol=1e-5)
def test_forward_l2_normalize():
verify_l2_normalize((1, 3, 20, 20), 0.001)
if __name__ == '__main__':
test_mobilenet_checkonly()
test_resnet50_checkonly()
test_forward_AddLayerParams()
test_forward_ConcatLayerParams()
test_forward_MultiplyLayerParams()
test_forward_UpsampleLayerParams()
test_forward_l2_normalize()
| 37.893162
| 94
| 0.624789
|
c764718d35a02c29f16fb223b693543034dc2b6b
| 3,622
|
py
|
Python
|
scalability/experiments/run_response_payload_size_experiment.py
|
Deland-Labs/ic
|
047172b01e0afc0e61448669d4ec98b2425c6853
|
[
"Apache-2.0"
] | 1
|
2021-12-01T03:48:42.000Z
|
2021-12-01T03:48:42.000Z
|
scalability/experiments/run_response_payload_size_experiment.py
|
Deland-Labs/ic
|
047172b01e0afc0e61448669d4ec98b2425c6853
|
[
"Apache-2.0"
] | null | null | null |
scalability/experiments/run_response_payload_size_experiment.py
|
Deland-Labs/ic
|
047172b01e0afc0e61448669d4ec98b2425c6853
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""
In this experiment, we incrementally increase the size of the response payload and observe the
latency from the perspective of the client.
"""
import codecs
import json
import os
import sys
import gflags
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import common.misc as misc # noqa
import common.workload_experiment as workload_experiment # noqa
CANISTER = "response-payload-test-canister.wasm"
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("iter_duration", 300, "Duration in seconds for which to execute workload in each round.")
gflags.DEFINE_integer("rps", 10, "Requests per second the workload generator should execute.")
gflags.DEFINE_integer("initial_response_size_kb", 250, "Initial response payload size in kb.")
gflags.DEFINE_integer("response_size_increment_kb", 250, "Increment of response payload size in kb per iteration.")
gflags.DEFINE_integer("max_size_increment_kb", 2 * 1024 * 1024, "Maximum response payload size to test.")
class ResponsePayloadExperiment(workload_experiment.WorkloadExperiment):
"""Logic for experiment with changing response payload size."""
def __init__(self):
"""Install canister."""
super().__init__(1)
def init_experiment(self):
"""Install canister."""
super().init_experiment()
self.install_canister(
self.target_nodes[0], canister=os.path.join(self.artifacts_path, f"../canisters/{CANISTER}")
)
def run_experiment_internal(self, config):
"""Run workload generator with the load specified in config."""
return self.run_workload_generator(
self.machines,
self.target_nodes,
FLAGS.rps,
outdir=self.iter_outdir,
payload=codecs.encode(
json.dumps({"response_size": config["response_payload_size"]}).encode("utf-8"), "hex"
),
call_method="query",
method="Query",
duration=FLAGS.iter_duration,
)
def run_iterations(self, datapoints=None):
"""Run heavy memory experiment in defined iterations."""
self.start_experiment()
print(f"🚀 running with {datapoints}kb sized response messages")
evaluated_summaries = {}
for datapoint in datapoints:
summary = self.run_experiment(
{"response_payload_size": datapoint, "load_total": datapoint} # for labels of iteration headings
)
evaluated_summaries[datapoint] = summary
print(f"{datapoint} -> {summary.percentiles[95]} -> {summary.t_median}")
results = []
for datapoint, summary in evaluated_summaries.items():
print(f"{datapoint} -> {summary.percentiles[95]} -> {summary.t_median}")
results.append(summary.t_median[0])
self.write_summary_file(
"run_response_payload_size_experiment",
{
"rps": results,
},
datapoints,
"response payload size [kb]",
rtype="update" if self.use_updates else "query",
state="done",
)
        self.end_experiment()
return None
if __name__ == "__main__":
misc.parse_command_line_args()
exp = ResponsePayloadExperiment()
exp.init()
exp.init_experiment()
def KB(x):
return x * 1024
curr = FLAGS.initial_response_size_kb
datapoints = []
while curr <= FLAGS.max_size_increment_kb:
datapoints.append(KB(curr))
curr += FLAGS.response_size_increment_kb
res = exp.run_iterations(datapoints)
| 34.169811
| 115
| 0.655163
|
82ad9fad47c0397a566aa8e032a0e698d06eafa5
| 257
|
py
|
Python
|
dc_plc/dc_documents/doctype/dc_doc_meta/dc_doc_meta.py
|
igrekus/dc_plc
|
76fbb6b1c98ff9d0de46f7979b76cd775834be79
|
[
"MIT"
] | 3
|
2020-09-06T11:34:42.000Z
|
2022-03-12T04:52:58.000Z
|
dc_plc/dc_documents/doctype/dc_doc_meta/dc_doc_meta.py
|
igrekus/dc_plc
|
76fbb6b1c98ff9d0de46f7979b76cd775834be79
|
[
"MIT"
] | null | null | null |
dc_plc/dc_documents/doctype/dc_doc_meta/dc_doc_meta.py
|
igrekus/dc_plc
|
76fbb6b1c98ff9d0de46f7979b76cd775834be79
|
[
"MIT"
] | 5
|
2020-06-18T07:47:14.000Z
|
2022-01-13T06:33:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, igrekus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class DC_Doc_Meta(Document):
pass
| 23.363636
| 49
| 0.774319
|
20ee7c281dc4e5651547ce57cb308e369913d661
| 15,495
|
py
|
Python
|
tests/test_api.py
|
plimy/pyhelics
|
b7bbde50956533496502ccc5f8b333426cfaf0c5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_api.py
|
plimy/pyhelics
|
b7bbde50956533496502ccc5f8b333426cfaf0c5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_api.py
|
plimy/pyhelics
|
b7bbde50956533496502ccc5f8b333426cfaf0c5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
CURRENT_DIRECTORY = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(CURRENT_DIRECTORY)
sys.path.append(os.path.dirname(CURRENT_DIRECTORY))
import time
import pytest
import pytest as pt
import helics as h
from test_init import createBroker, createValueFederate, destroyFederate, destroyBroker
def test_misc_functions_api():
print(h.helicsGetBuildFlags())
assert len(h.helicsGetBuildFlags()) > 0
assert len(h.helicsGetCompilerVersion()) > 0
with pytest.raises(h.HelicsException):
h.helicsCreateCore("something random", "here", "not an init string")
def test_broker_api():
assert h.helicsIsCoreTypeAvailable("zmq") == 1
broker1 = h.helicsCreateBroker("zmq", "broker1", "--federates 3 --loglevel=warning")
broker2 = h.helicsBrokerClone(broker1)
address_string = h.helicsBrokerGetAddress(broker1)
assert "tcp://127.0.0.1:23404" in address_string
assert "broker1" in h.helicsBrokerGetIdentifier(broker1)
err = h.helicsErrorInitialize()
h.helicsErrorClear(err)
assert err.error_code == 0
assert h.ffi.string(err.message).decode() == ""
assert h.helicsBrokerIsValid(broker1) == 1
assert h.helicsBrokerIsConnected(broker1) == 1
h.helicsBrokerDisconnect(broker1)
assert h.helicsBrokerIsConnected(broker1) == 0
h.helicsBrokerDisconnect(broker2)
# h.helicsBrokerFree(broker1)
# h.helicsBrokerFree(broker2)
h.helicsCloseLibrary()
def test_core_api():
core1 = h.helicsCreateCore("inproc", "core1", "--autobroker")
assert h.helicsCoreIsValid(core1) is True
core2 = h.helicsCoreClone(core1)
assert "core1" in h.helicsCoreGetIdentifier(core1)
assert h.helicsCoreIsConnected(core1) == 0
sourceFilter1 = h.helicsCoreRegisterFilter(core1, h.HELICS_FILTER_TYPE_DELAY, "core1SourceFilter")
h.helicsFilterAddSourceTarget(sourceFilter1, "ep1")
destinationFilter1 = h.helicsCoreRegisterFilter(core1, h.HELICS_FILTER_TYPE_DELAY, "core1DestinationFilter")
h.helicsFilterAddDestinationTarget(destinationFilter1, "ep2")
cloningFilter1 = h.helicsCoreRegisterCloningFilter(core1, "ep3")
h.helicsFilterRemoveDeliveryEndpoint(cloningFilter1, "ep3")
h.helicsCoreSetReadyToInit(core1)
h.helicsCoreDisconnect(core1)
h.helicsCoreDisconnect(core2)
# h.helicsCoreFree(core1)
# h.helicsCoreFree(core2)
h.helicsCloseLibrary()
class UserData(object):
def __init__(self, x):
self.x = x
@h.ffi.callback("void logger(int loglevel, const char* identifier, const char* message, void* userData)")
def logger(loglevel: int, identifier: str, message: str, userData: object):
userData = h.ffi.from_handle(userData)
print(f"{loglevel}, {h.ffi.string(identifier).decode()}, {h.ffi.string(message).decode()}, {userData}")
userData.x += 1
def test_logging_api():
fi = h.helicsCreateFederateInfo()
broker = h.helicsCreateBroker("zmq", "broker", "--federates 1")
h.helicsFederateInfoSetCoreInitString(fi, "--federates 1")
h.helicsFederateInfoSetIntegerProperty(fi, h.HELICS_PROPERTY_INT_LOG_LEVEL, h.HELICS_LOG_LEVEL_TIMING)
fed = h.helicsCreateValueFederate("test1", fi)
userdata = UserData(5)
handle = h.ffi.new_handle(userdata)
h.helicsFederateSetLoggingCallback(fed, logger, handle)
h.helicsFederateEnterExecutingMode(fed)
h.helicsFederateLogInfoMessage(fed, "test MEXAGE")
h.helicsFederateRequestNextStep(fed)
h.helicsFederateLogInfoMessage(fed, "test MEXAGE")
h.helicsFederateRequestNextStep(fed)
h.helicsFederateLogInfoMessage(fed, "test MEXAGE")
h.helicsFederateRequestNextStep(fed)
h.helicsFederateLogInfoMessage(fed, "test MEXAGE")
h.helicsFederateRequestNextStep(fed)
h.helicsFederateFinalize(fed)
try:
assert userdata.x == 19
except:
assert userdata.x == 9
# h.helicsFederateFree(fed)
# h.helicsFederateInfoFree(fi)
h.helicsBrokerDisconnect(broker)
# h.helicsBrokerFree(broker)
h.helicsCleanupLibrary()
h.helicsCloseLibrary()
@pt.mark.skip()
def test_misc_api():
fedInfo1 = h.helicsCreateFederateInfo()
h.helicsFederateInfoSetCoreInitString(fedInfo1, "-f 1")
h.helicsFederateInfoSetCoreName(fedInfo1, "core3")
h.helicsFederateInfoSetCoreType(fedInfo1, 3)
h.helicsFederateInfoSetCoreTypeFromString(fedInfo1, "zmq")
h.helicsFederateInfoSetFlagOption(fedInfo1, 1, True)
h.helicsFederateInfoSetTimeProperty(fedInfo1, h.HELICS_PROPERTY_TIME_INPUT_DELAY, 1.0)
h.helicsFederateInfoSetIntegerProperty(fedInfo1, h.HELICS_PROPERTY_INT_LOG_LEVEL, 1)
h.helicsFederateInfoSetIntegerProperty(fedInfo1, h.HELICS_PROPERTY_INT_MAX_ITERATIONS, 100)
h.helicsFederateInfoSetTimeProperty(fedInfo1, h.HELICS_PROPERTY_TIME_OUTPUT_DELAY, 1.0)
h.helicsFederateInfoSetTimeProperty(fedInfo1, h.HELICS_PROPERTY_TIME_PERIOD, 1.0)
h.helicsFederateInfoSetTimeProperty(fedInfo1, h.HELICS_PROPERTY_TIME_DELTA, 1.0)
h.helicsFederateInfoSetTimeProperty(fedInfo1, h.HELICS_PROPERTY_TIME_OFFSET, 0.1)
# h.helicsFederateInfoFree(fedInfo1)
broker3 = h.helicsCreateBroker("zmq", "broker3", "--federates 1")
fedInfo2 = h.helicsCreateFederateInfo()
coreInitString = "--federates 1"
h.helicsFederateInfoSetCoreInitString(fedInfo2, coreInitString)
h.helicsFederateInfoSetCoreTypeFromString(fedInfo2, "zmq")
h.helicsFederateInfoSetIntegerProperty(fedInfo2, h.HELICS_PROPERTY_INT_LOG_LEVEL, h.HELICS_LOG_LEVEL_WARNING)
h.helicsFederateInfoSetTimeProperty(fedInfo2, h.HELICS_PROPERTY_TIME_DELTA, 1.0)
fed1 = h.helicsCreateCombinationFederate("fed1", fedInfo2)
fed2 = h.helicsFederateClone(fed1)
_ = h.helicsGetFederateByName("fed1")
h.helicsFederateSetFlagOption(fed2, 1, False)
h.helicsFederateSetTimeProperty(fed2, h.HELICS_PROPERTY_TIME_INPUT_DELAY, 1.0)
h.helicsFederateSetIntegerProperty(fed1, h.HELICS_PROPERTY_INT_LOG_LEVEL, h.HELICS_LOG_LEVEL_WARNING)
h.helicsFederateSetIntegerProperty(fed2, h.HELICS_PROPERTY_INT_MAX_ITERATIONS, 100)
h.helicsFederateSetTimeProperty(fed2, h.HELICS_PROPERTY_TIME_OUTPUT_DELAY, 1.0)
h.helicsFederateSetTimeProperty(fed2, h.HELICS_PROPERTY_TIME_PERIOD, 0.0)
h.helicsFederateSetTimeProperty(fed2, h.HELICS_PROPERTY_TIME_DELTA, 1.0)
_ = h.helicsFederateRegisterCloningFilter(fed1, "fed1/Ep1")
fed1DestinationFilter = h.helicsFederateRegisterFilter(fed1, h.HELICS_FILTER_TYPE_DELAY, "fed1DestinationFilter")
h.helicsFilterAddDestinationTarget(fed1DestinationFilter, "Ep2")
ep1 = h.helicsFederateRegisterEndpoint(fed1, "Ep1", "string")
ep2 = h.helicsFederateRegisterGlobalEndpoint(fed1, "Ep2", "string")
pub1 = h.helicsFederateRegisterGlobalPublication(fed1, "pub1", h.HELICS_DATA_TYPE_DOUBLE, "")
pub2 = h.helicsFederateRegisterGlobalTypePublication(fed1, "pub2", "complex", "")
sub1 = h.helicsFederateRegisterSubscription(fed1, "pub1")
sub2 = h.helicsFederateRegisterSubscription(fed1, "pub2")
pub3 = h.helicsFederateRegisterPublication(fed1, "pub3", h.HELICS_DATA_TYPE_STRING, "")
pub1KeyString = h.helicsPublicationGetKey(pub1)
pub1TypeString = h.helicsPublicationGetType(pub1)
pub1UnitsString = h.helicsPublicationGetUnits(pub1)
sub1KeyString = h.helicsSubscriptionGetKey(sub1)
sub1UnitsString = h.helicsInputGetUnits(sub1)
assert "pub1" == pub1KeyString
assert "double" == pub1TypeString
assert "" == pub1UnitsString
assert "pub1" == sub1KeyString
assert "" == sub1UnitsString
fed1SourceFilter = h.helicsFederateRegisterFilter(fed1, h.HELICS_FILTER_TYPE_DELAY, "fed1SourceFilter")
h.helicsFilterAddSourceTarget(fed1SourceFilter, "Ep2")
h.helicsFilterAddDestinationTarget(fed1SourceFilter, "fed1/Ep1")
h.helicsFilterRemoveTarget(fed1SourceFilter, "fed1/Ep1")
h.helicsFilterAddSourceTarget(fed1SourceFilter, "Ep2")
h.helicsFilterRemoveTarget(fed1SourceFilter, "Ep2")
fed1SourceFilterNameString = h.helicsFilterGetName(fed1SourceFilter)
assert fed1SourceFilterNameString == "fed1/fed1SourceFilter"
sub3 = h.helicsFederateRegisterSubscription(fed1, "fed1/pub3", "")
pub4 = h.helicsFederateRegisterTypePublication(fed1, "pub4", "int", "")
sub4 = h.helicsFederateRegisterSubscription(fed1, "fed1/pub4", "")
pub5 = h.helicsFederateRegisterGlobalTypePublication(fed1, "pub5", "boolean", "")
sub5 = h.helicsFederateRegisterSubscription(fed1, "pub5", "")
pub6 = h.helicsFederateRegisterGlobalPublication(fed1, "pub6", h.HELICS_DATA_TYPE_VECTOR, "")
sub6 = h.helicsFederateRegisterSubscription(fed1, "pub6", "")
pub7 = h.helicsFederateRegisterGlobalPublication(fed1, "pub7", h.HELICS_DATA_TYPE_NAMED_POINT, "")
sub7 = h.helicsFederateRegisterSubscription(fed1, "pub7", "")
assert """helics.HelicsPublication(name = "pub1", type = "double", units = "", info = "")""" in repr(pub1)
assert """helics.HelicsPublication(name = "pub2", type = "complex", units = "", info = "")""" in repr(pub2)
assert """helics.HelicsPublication(name = "fed1/pub3", type = "string", units = "", info = "")""" in repr(pub3)
assert """helics.HelicsPublication(name = "fed1/pub4", type = "int", units = "", info = "")""" in repr(pub4)
assert """helics.HelicsPublication(name = "pub5", type = "boolean", units = "", info = "")""" in repr(pub5)
assert """helics.HelicsPublication(name = "pub6", type = "double_vector", units = "", info = "")""" in repr(pub6)
assert """helics.HelicsPublication(name = "pub7", type = "named_point", units = "", info = "")""" in repr(pub7)
assert (
"""helics.HelicsInput(name = "_input_18", units = "", injection_units = "", publication_type = "", type = "", target = "pub7", info = "")"""
in repr(sub7)
)
h.helicsInputSetDefaultBoolean(sub5, False)
h.helicsInputSetDefaultComplex(sub2, -9.9 + 2.5j)
h.helicsInputSetDefaultDouble(sub1, 3.4)
h.helicsInputSetDefaultInteger(sub4, 6)
h.helicsInputSetDefaultNamedPoint(sub7, "hollow", 20.0)
h.helicsInputSetDefaultString(sub3, "default")
sub6Default = [3.4, 90.9, 4.5]
h.helicsInputSetDefaultVector(sub6, sub6Default)
h.helicsEndpointSubscribe(ep2, "fed1/pub3")
h.helicsFederateEnterInitializingModeAsync(fed1)
rs = h.helicsFederateIsAsyncOperationCompleted(fed1)
if rs == 0:
time.sleep(0.500)
rs = h.helicsFederateIsAsyncOperationCompleted(fed1)
if rs == 0:
time.sleep(0.500)
rs = h.helicsFederateIsAsyncOperationCompleted(fed1)
if rs == 0:
assert True is False
h.helicsFederateEnterInitializingModeComplete(fed1)
h.helicsFederateEnterExecutingModeAsync(fed1)
h.helicsFederateEnterExecutingModeComplete(fed1)
assert (
"""helics.HelicsInput(name = "_input_18", units = "", injection_units = "", publication_type = "named_point", type = "", target = "pub7", info = "")"""
in repr(sub7)
)
mesg1 = h.helicsFederateCreateMessage(fed1)
h.helicsMessageSetString(mesg1, "Hello")
h.helicsMessageSetSource(mesg1, "fed1/Ep1")
h.helicsMessageSetOriginalSource(mesg1, "fed1/Ep1")
h.helicsMessageSetDestination(mesg1, "Ep2")
h.helicsMessageSetOriginalDestination(mesg1, "Ep2")
h.helicsEndpointSendMessage(ep1, mesg1)
mesg1 = h.helicsFederateCreateMessage(fed1)
h.helicsMessageSetString(mesg1, "There")
h.helicsMessageSetSource(mesg1, "fed1/Ep1")
h.helicsMessageSetOriginalSource(mesg1, "fed1/Ep1")
h.helicsMessageSetDestination(mesg1, "Ep2")
h.helicsMessageSetOriginalDestination(mesg1, "Ep2")
h.helicsEndpointSendMessage(ep1, mesg1)
h.helicsEndpointSetDefaultDestination(ep2, "fed1/Ep1")
ep1NameString = h.helicsEndpointGetName(ep1)
ep1TypeString = h.helicsEndpointGetType(ep1)
assert ep1NameString == "fed1/Ep1"
assert ep1TypeString == "string"
_ = h.helicsFederateGetCoreObject(fed1)
fed1Time = h.helicsFederateGetCurrentTime(fed1)
assert fed1Time == 0.0
fed1EndpointCount = h.helicsFederateGetEndpointCount(fed1)
assert fed1EndpointCount == 2
fed1NameString = h.helicsFederateGetName(fed1)
assert fed1NameString == "fed1"
fed1State = h.helicsFederateGetState(fed1)
assert fed1State == 2
fed1PubCount = h.helicsFederateGetPublicationCount(fed1)
assert fed1PubCount == 7
fed1SubCount = h.helicsFederateGetInputCount(fed1)
assert fed1SubCount == 7
h.helicsPublicationPublishBoolean(pub5, True)
h.helicsPublicationPublishComplex(pub2, 5.6 + -0.67j)
h.helicsPublicationPublishDouble(pub1, 457.234)
h.helicsPublicationPublishInteger(pub4, 1)
h.helicsPublicationPublishNamedPoint(pub7, "Blah Blah", 20.0)
h.helicsPublicationPublishString(pub3, "Mayhem")
pub6Vector = [4.5, 56.5]
h.helicsPublicationPublishVector(pub6, pub6Vector)
time.sleep(0.500)
h.helicsFederateRequestTimeAsync(fed1, 1.0)
returnTime = h.helicsFederateRequestTimeComplete(fed1)
assert returnTime == 1.0
ep2MsgCount = h.helicsEndpointPendingMessages(ep2)
assert ep2MsgCount == 0
ep2HasMsg = h.helicsEndpointHasMessage(ep2)
assert ep2HasMsg == 0
ep2MsgCount = h.helicsEndpointPendingMessageCount(ep2)
assert ep2MsgCount == 0
returnTime = h.helicsFederateRequestTime(fed1, 3.0)
assert returnTime == 3.0
ep2MsgCount = h.helicsEndpointPendingMessageCount(ep2)
try:
assert ep2MsgCount == 2
except:
assert ep2MsgCount == 3
msg2 = h.helicsEndpointGetMessage(ep2)
assert h.helicsMessageGetTime(msg2) == 1.0
assert h.helicsMessageGetString(msg2) == "Hello"
assert h.helicsMessageGetOriginalSource(msg2) == "fed1/Ep1"
assert h.helicsMessageGetSource(msg2) == "fed1/Ep1"
assert h.helicsMessageGetDestination(msg2) == "Ep2"
assert h.helicsMessageGetOriginalDestination(msg2) == "Ep2"
fed1MsgCount = h.helicsFederatePendingMessages(fed1)
assert fed1MsgCount == 1
assert h.helicsFederateHasMessage(fed1) == 1
msg3 = h.helicsFederateGetMessage(fed1)
assert h.helicsMessageGetTime(msg3) == 1.0
assert h.helicsMessageGetString(msg3) == "There"
assert h.helicsMessageGetOriginalSource(msg3) == "fed1/Ep1"
assert h.helicsMessageGetSource(msg3) == "fed1/Ep1"
assert h.helicsMessageGetDestination(msg3) == "Ep2"
assert h.helicsMessageGetOriginalDestination(msg3) == "Ep2"
sub1Updated = h.helicsInputIsUpdated(sub1)
assert sub1Updated is True
assert h.helicsInputLastUpdateTime(sub2) == 3.0
assert h.helicsInputGetComplex(sub2) == (5.6, -0.67)
assert h.helicsInputGetDouble(sub1) == 457.234
assert h.helicsInputGetInteger(sub4) == 1
sub7PointString, sub7DoubleValue = h.helicsInputGetNamedPoint(sub7)
assert sub7PointString == "Blah Blah"
assert sub7DoubleValue == 20.0
assert h.helicsInputGetBoolean(sub5) == True
assert h.helicsInputGetString(sub3) == "Mayhem"
# TODO: this test is failing in HELICS3
# sub3ValueSize = h.helicsInputGetRawValueSize(sub3)
# assert sub3ValueSize == 6
assert h.helicsInputGetVector(sub6) == [4.5, 56.5]
h.helicsFederateFinalize(fed1)
h.helicsFederateFinalize(fed2)
# h.helicsFederateFree(fed1)
h.helicsFederateFinalize(fed2)
# h.helicsFederateFree(fed2)
# h.helicsFederateInfoFree(fedInfo2)
h.helicsBrokerDisconnect(broker3)
# h.helicsBrokerFree(broker3)
h.helicsCleanupLibrary()
h.helicsCloseLibrary()
| 41.878378
| 159
| 0.73656
|
0f5bdcadce94d6788623b2a781705dc810eaff1c
| 31,292
|
py
|
Python
|
predict/macbert2-f.py
|
DataArk/CHIP2021-Task1-Top1
|
e352198d96d31c60541e4a271f20cc23b3ab6b92
|
[
"Apache-2.0"
] | 15
|
2021-12-18T06:08:55.000Z
|
2022-03-30T00:41:45.000Z
|
predict/macbert2-f.py
|
confstantine/nlp-task
|
cb152e885bc6f6f1243a12ad90b1c715eb548736
|
[
"Apache-2.0"
] | 1
|
2021-12-20T05:57:37.000Z
|
2021-12-20T13:43:07.000Z
|
predict/macbert2-f.py
|
DataArk/CHIP2021-Task1-Top1
|
e352198d96d31c60541e4a271f20cc23b3ab6b92
|
[
"Apache-2.0"
] | 1
|
2021-12-27T04:49:35.000Z
|
2021-12-27T04:49:35.000Z
|
import os
import jieba
import torch
import pickle
import torch.nn as nn
import torch.optim as optim
from ark_nlp.dataset import TMDataset
from ark_nlp.processor.vocab import CharVocab
from ark_nlp.processor.tokenizer.tm import TransfomerTokenizer
from ark_nlp.nn import Bert
from ark_nlp.dataset import BaseDataset
import pandas as pd
import codecs
import json
import os
import random
import torch
import random
import numpy as np
def set_seed(seed):
"""
设置随机种子
:param seed:
:return:
"""
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
set_seed(2021)
import copy
import torch
import pandas as pd
from functools import lru_cache
from torch.utils.data import Dataset
from ark_nlp.dataset import PairSentenceClassificationDataset
class TMDataset(PairSentenceClassificationDataset):
def __init__(self, *args, **kwargs):
super(TMDataset, self).__init__(*args, **kwargs)
self.categories_b = sorted(list(set([data['label_b'] for data in self.dataset])))
self.cat2id_b = dict(zip(self.categories_b, range(len(self.categories_b))))
self.id2cat_b = dict(zip(range(len(self.categories_b)), self.categories_b))
def _convert_to_transfomer_ids(self, bert_tokenizer):
features = []
for (index_, row_) in enumerate(self.dataset):
input_ids = bert_tokenizer.sequence_to_ids(row_['text_a'], row_['text_b'])
input_ids, input_mask, segment_ids, speaker_ids, e1_mask = input_ids
input_a_length = self._get_input_length(row_['text_a'], bert_tokenizer)
input_b_length = self._get_input_length(row_['text_b'], bert_tokenizer)
feature = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'speaker_ids': speaker_ids,
'e1_mask': e1_mask
}
if not self.is_test:
label_ids = self.cat2id[row_['label']]
label_ids_b = self.cat2id_b[row_['label_b']]
feature['label_ids'] = label_ids
feature['label_ids_b'] = label_ids_b
features.append(feature)
return features
import numpy as np
import pandas as pd
import copy
# from utils import get_entity_bios
from ark_nlp.dataset import BaseDataset
def get_task_data(data_path):
with codecs.open(data_path, mode='r', encoding='utf8') as f:
        reader = f.readlines()
data_list = []
for dialogue_ in reader:
dialogue_ = json.loads(dialogue_)
_dialog_id = dialogue_['dialog_id']
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _, term_ in enumerate(terms_):
entity_ = dict()
entity_['dialogue'] = dialogue_
_text = dialogue_['dialog_info'][content_idx_]['text']
_text_list = list(_text)
_text_list.insert(term_['range'][0], '[unused1]')
_text_list.insert(term_['range'][1]+1, '[unused2]')
_text = ''.join(_text_list)
if content_idx_ - 1 >= 0 and len(dialogue_['dialog_info'][content_idx_-1]) < 40:
forward_text = dialogue_['dialog_info'][content_idx_-1]['sender'] + ':' + dialogue_['dialog_info'][content_idx_-1]['text'] + ';'
else:
forward_text = ''
if contents_['sender'] == '医生':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
if dialogue_['dialog_info'][temp_index]['sender'] == '患者':
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
elif contents_['sender'] == '患者':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
if term_['name'] == 'undefined':
add_text = '|没有标准化'
else:
add_text = '|标准化为' + term_['name']
entity_['text_b'] = term_['mention'] + add_text
entity_['text_b_copy'] = term_['mention']
entity_['start_idx'] = term_['range'][0]
entity_['end_idx'] = term_['range'][1] - 1
try:
entity_['label_b'] = term_['name']
except:
print(contents_)
print(term_)
entity_['label'] = term_['attr']
entity_['dialog_id'] = _dialog_id
idx_ += 1
if entity_['label'] == '':
continue
if len(entity_) == 0:
continue
data_list.append(entity_)
data_df = pd.DataFrame(data_list)
data_df = data_df.loc[:,['dialog_id', 'text_b_copy', 'text_a', 'text_b', 'start_idx', 'end_idx', 'label_b', 'label', 'dialogue']]
return data_df
import re
import copy
data_df = get_task_data('../data/source_datasets/fliter_train_result2.txt')
tm_dataset = TMDataset(data_df)
import transformers
from transformers import AutoTokenizer
bert_vocab = transformers.AutoTokenizer.from_pretrained('hfl/chinese-macbert-large')
import unicodedata
import abc
import torch
import random
import transformers
import numpy as np
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from copy import deepcopy
from transformers import AutoTokenizer
from torch.utils.data import Dataset
from ark_nlp.processor.tokenizer._tokenizer import BaseTokenizer
class TransfomerTokenizer(BaseTokenizer):
"""
Transfomer文本编码器,用于对文本进行分词、ID化、填充等操作
:param max_seq_len: (int) 预设的文本最大长度
:param tokenizer: (object) 编码器,用于实现文本分词和ID化
"""
def __init__(self, vocab, max_seq_len):
if isinstance(vocab, str):
            # TODO: make this configurable through a user-supplied vocabulary
vocab = transformers.AutoTokenizer.from_pretrained(vocab)
self.vocab = vocab
self.max_seq_len = max_seq_len
self.additional_special_tokens = set()
self.tokenizer_type = 'transfomer'
@staticmethod
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
@staticmethod
def _is_special(ch):
"""判断是不是有特殊含义的符号
"""
return bool(ch) and (ch[0] == '[') and (ch[-1] == ']')
@staticmethod
def recover_bert_token(token):
"""获取token的“词干”(如果是##开头,则自动去掉##)
"""
if token[:2] == '##':
return token[2:]
else:
return token
def get_token_mapping(self, text, tokens, is_mapping_index=True):
"""给出原始的text和tokenize后的tokens的映射关系
"""
raw_text = deepcopy(text)
text = text.lower()
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
ch = unicodedata.normalize('NFD', ch)
ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn'])
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or self._is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in tokens:
token = token.lower()
if token == '[unk]' or token in self.additional_special_tokens:
if is_mapping_index:
token_mapping.append(char_mapping[offset:offset+1])
else:
token_mapping.append(raw_text[offset:offset+1])
offset = offset + 1
elif self._is_special(token):
                token_mapping.append([])  # tokens like [CLS] or [SEP] have no corresponding mapping
else:
token = self.recover_bert_token(token)
start = text[offset:].index(token) + offset
end = start + len(token)
if is_mapping_index:
token_mapping.append(char_mapping[start:end])
else:
token_mapping.append(raw_text[start:end])
offset = end
return token_mapping
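    # Worked example (illustrative, not from the original script): for text "ABc de" tokenized
    # as ["ab", "##c", "de"], get_token_mapping returns [[0, 1], [2], [4, 5]] when
    # is_mapping_index=True, i.e. each token is mapped back to the character positions it
    # covers in the raw text.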
def sequence_to_ids(self, sequence_a, sequence_b=None):
if sequence_b is None:
return self.sentence_to_ids(sequence_a)
else:
return self.pair_to_ids(sequence_a, sequence_b)
def sentence_to_ids(self, sequence, return_sequence_length=False):
if type(sequence) == str:
sequence = self.tokenize(sequence)
if return_sequence_length:
sequence_length = len(sequence)
        # truncate over-long sequences
if len(sequence) > self.max_seq_len - 2:
sequence = sequence[0:(self.max_seq_len - 2)]
speaker_ids = []
id_ = 0
for idx_, term_ in enumerate(sequence):
if term_ == '医' and sequence[idx_+1] == '生':
id_ = 1
if term_ == '患' and sequence[idx_+1] == '者':
id_ = 2
speaker_ids.append(id_)
        # add the special tokens at the start and the end
sequence = ['[CLS]'] + sequence + ['[SEP]']
speaker_ids = [0] + speaker_ids + [0]
segment_ids = [0] * len(sequence)
e11_p = sequence.index("<") + 1
e12_p = sequence.index(">") - 1
e1_mask = [0] * len(sequence)
for _i in range(e11_p, e12_p+1):
e1_mask[_i] = 1
break
        # convert tokens to ids
sequence = self.vocab.convert_tokens_to_ids(sequence)
        # build the padding needed to reach max_seq_len
padding = [0] * (self.max_seq_len - len(sequence))
        # build seq_mask (attention mask)
sequence_mask = [1] * len(sequence) + padding
        # build seq_segment (segment ids)
segment_ids = segment_ids + padding
        # append the padding to the sequence
sequence += padding
e1_mask += padding
sequence = np.asarray(sequence, dtype='int64')
sequence_mask = np.asarray(sequence_mask, dtype='int64')
segment_ids = np.asarray(segment_ids, dtype='int64')
e1_mask = np.asarray(e1_mask, dtype='int64')
if return_sequence_length:
return (sequence, sequence_mask, segment_ids, e1_mask, sequence_length)
return (sequence, sequence_mask, segment_ids, e1_mask)
def pair_to_ids(self, sequence_a, sequence_b, return_sequence_length=False):
raw_sequence_a = copy.deepcopy(sequence_a)
if type(sequence_a) == str:
sequence_a = self.tokenize(sequence_a)
if type(sequence_b) == str:
sequence_b = self.tokenize(sequence_b)
if return_sequence_length:
sequence_length = (len(sequence_a), len(sequence_b))
        # truncate over-long sequences
start_idx = 0
end_idx = self.max_seq_len - len(sequence_b) - 3
entity_end_idx = sequence_a.index('[unused2]')
end_idx = entity_end_idx + 20
if end_idx < (self.max_seq_len - len(sequence_b)):
sequence_a = sequence_a[0:(self.max_seq_len - len(sequence_b))- 3]
else:
end_idx = end_idx - 20 + (self.max_seq_len - len(sequence_b))/ 2
start_idx = end_idx - (self.max_seq_len - len(sequence_b)) + 3
if start_idx < 0:
start_idx = 0
sequence_a = sequence_a[int(start_idx):int(end_idx)]
# sequence_a = sequence_a[0:(self.max_seq_len - len(sequence_b))]
# if len(sequence_a) > ((self.max_seq_len - 3)//2):
# sequence_a = sequence_a[0:(self.max_seq_len - 3)//2]
# if len(sequence_b) > ((self.max_seq_len - 3)//2):
# sequence_b = sequence_b[0:(self.max_seq_len - 3)//2]
speaker_ids = [0]
id_ = 0
for idx_, term_ in enumerate(sequence_a):
try:
if term_ == '医' and idx_ < len(sequence_a) - 1 and sequence_a[idx_+1] == '生':
id_ = 1
if term_ == '患' and idx_ < len(sequence_a) - 1 and sequence_a[idx_+1] == '者':
id_ = 2
except:
print(sequence_a)
print(idx_)
speaker_ids.append(id_)
speaker_ids.append(0)
for idx_, term_ in enumerate(sequence_b):
speaker_ids.append(3)
speaker_ids.append(0)
        # add the special tokens at the start and the end
sequence = ['[CLS]'] + sequence_a + ['[SEP]'] + sequence_b + ['[SEP]']
segment_ids = [0] * (len(sequence_a) + 2) + [1] * (len(sequence_b) + 1)
try:
e11_p = sequence.index("[unused1]") + 1
e12_p = sequence.index("[unused2]") - 1
except:
print(raw_sequence_a)
print(sequence_a)
e1_mask = [0] * len(sequence)
for _i in range(e11_p, e12_p+1):
e1_mask[_i] = 1
        # convert tokens to ids
sequence = self.vocab.convert_tokens_to_ids(sequence)
        # build the padding needed to reach max_seq_len
padding = [0] * (self.max_seq_len - len(sequence))
        # build seq_mask (attention mask)
sequence_mask = [1] * len(sequence) + padding
        # build seq_segment (segment ids)
segment_ids = segment_ids + padding
        # append the padding to the sequence
sequence += padding
speaker_ids += padding
e1_mask += padding
sequence = np.asarray(sequence, dtype='int64')
sequence_mask = np.asarray(sequence_mask, dtype='int64')
segment_ids = np.asarray(segment_ids, dtype='int64')
speaker_ids = np.asarray(speaker_ids, dtype='int64')
e1_mask = np.asarray(e1_mask, dtype='int64')
# if len(sequence) > 150:
# print('sequence', raw_sequence_a)
# if len(sequence_mask) > 150:
# print(len(sequence_mask))
# print(len(sequence))
# print('sequence_mask', raw_sequence_a)
# if len(segment_ids) > 150:
# print('segment_ids', raw_sequence_a)
# if len(speaker_ids) > 150:
# print('speaker_ids', raw_sequence_a)
# if len(e1_mask) > 150:
# print('e1_mask', raw_sequence_a)
if return_sequence_length:
return (sequence, sequence_mask, segment_ids, speaker_ids, e1_mask, sequence_length)
return (sequence, sequence_mask, segment_ids, speaker_ids, e1_mask)
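# Hedged usage sketch (illustrative only, not executed by this script): after loading a vocabulary
# via transformers.AutoTokenizer and registering the [unused1]/[unused2] markers, the encoder above
# is used roughly as
#     tokenizer = TransfomerTokenizer(bert_vocab, max_seq_len)
#     ids, mask, segments, speakers, e1_mask = tokenizer.sequence_to_ids(text_a, text_b)
# where text_a is the dialogue context with the mention wrapped in [unused1]/[unused2] and text_b
# is "mention|normalization"; all returned arrays are padded or truncated to max_seq_len.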
bert_vocab.add_special_tokens({'additional_special_tokens':["[unused1]", "[unused2]", "|"]})
max_seq_length=200
tokenizer = TransfomerTokenizer(bert_vocab, max_seq_length)
import time
import torch
import math
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from ark_nlp.nn import BasicModule
from transformers import BertModel
from transformers import BertPreTrainedModel
from torch.nn import CrossEntropyLoss
from ark_nlp.nn.layer.crf_block import CRF
class Bert(BertPreTrainedModel):
"""
原始的BERT模型
:param config: (obejct) 模型的配置对象
:param bert_trained: (bool) bert参数是否可训练,默认可训练
:returns:
Reference:
[1] BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
"""
def __init__(
self,
config,
encoder_trained=True,
pooling='cls'
):
super(Bert, self).__init__(config)
self.bert = BertModel(config)
self.pooling = pooling
for param in self.bert.parameters():
param.requires_grad = encoder_trained
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# self.classifier = nn.Linear(config.hidden_size, self.num_labels)
self.classifier = nn.Linear(config.hidden_size+10, self.num_labels)
self.relative_pos_embedding = nn.Embedding(4, 10)
self.init_weights()
def mask_pooling(self, x: Tensor, attention_mask=None):
if attention_mask is None:
return torch.mean(x, dim=1)
return torch.sum(x * attention_mask.unsqueeze(2), dim=1) / torch.sum(attention_mask, dim=1, keepdim=True)
def sequence_pooling(self, sequence_feature, attention_mask):
if self.pooling == 'first_last_avg':
sequence_feature = sequence_feature[-1] + sequence_feature[1]
elif self.pooling == 'last_avg':
sequence_feature = sequence_feature[-1]
elif self.pooling == 'last_2_avg':
sequence_feature = sequence_feature[-1] + sequence_feature[-2]
elif self.pooling == 'cls':
return sequence_feature[-1][:, 0, :]
else:
raise Exception("unknown pooling {}".format(self.pooling))
return self.mask_pooling(sequence_feature, attention_mask)
def get_encoder_feature(self, encoder_output, attention_mask):
if self.task == 'SequenceLevel':
return self.sequence_pooling(encoder_output, attention_mask)
elif self.task == 'TokenLevel':
return encoder_output[-1]
else:
return encoder_output[-1][:, 0, :]
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
speaker_ids=None,
**kwargs
):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
return_dict=True,
output_hidden_states=True
).hidden_states
# encoder_feature = self.get_encoder_feature(outputs, attention_mask)
speaker_feature = self.relative_pos_embedding(speaker_ids)
# encoder_feature = outputs[-1] + speaker_feature
encoder_feature = torch.cat([outputs[-1], speaker_feature], dim=-1)
encoder_feature = self.mask_pooling(encoder_feature, attention_mask)
encoder_feature = self.dropout(encoder_feature)
out = self.classifier(encoder_feature)
return out
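# Hedged shape-flow sketch for the forward pass above (illustrative only): input_ids
# [batch, seq_len] -> BERT last hidden state [batch, seq_len, hidden]; speaker_ids
# [batch, seq_len] -> speaker embedding [batch, seq_len, 10]; both are concatenated to
# [batch, seq_len, hidden + 10], mask-pooled over the sequence to [batch, hidden + 10],
# then passed through dropout and the linear classifier to logits [batch, num_labels].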
from transformers import BertConfig
bert_config = BertConfig.from_pretrained('hfl/chinese-macbert-large',
num_labels=len(tm_dataset.cat2id))
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable, grad
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import tqdm
from tqdm import tqdm
import sklearn.metrics as sklearn_metrics
from collections import Counter
class TMPredictor(object):
def __init__(
self,
modules,
tokernizer,
cat2id
):
self.modules = modules
self.cat2id = cat2id
self.tokenizer = tokernizer
self.device = list(self.modules[0].parameters())[0].device
self.id2cat = {}
for cat_, idx_ in self.cat2id.items():
self.id2cat[idx_] = cat_
def _convert_to_transfomer_ids(
self,
text_a,
text_b
):
input_ids = self.tokenizer.sequence_to_ids(text_a, text_b)
input_ids, input_mask, segment_ids, speaker_ids, e1_mask = input_ids
features = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'speaker_ids': speaker_ids
}
return features
def _convert_to_vanilla_ids(
self,
text_a,
text_b
):
input_ids = self.tokenizer.sequence_to_ids(text_a, text_b)
features = {
'input_ids': input_ids
}
return features
def _get_input_ids(
self,
text_a,
text_b
):
if self.tokenizer.tokenizer_type == 'vanilla':
return self._convert_to_vanilla_ids(text_a, text_b)
elif self.tokenizer.tokenizer_type == 'transfomer':
return self._convert_to_transfomer_ids(text_a, text_b)
elif self.tokenizer.tokenizer_type == 'customized':
features = self._convert_to_customized_ids(text_a, text_b)
else:
raise ValueError("The tokenizer type does not exist")
def _get_module_one_sample_inputs(
self,
features
):
return {col: torch.Tensor(features[col]).type(torch.long).unsqueeze(0).to(self.device) for col in features}
def predict_one_sample(
self,
text,
topk=None,
return_label_name=True,
return_proba=False
):
if topk == None:
topk = len(self.cat2id) if len(self.cat2id) >2 else 1
text_a, text_b = text
features = self._get_input_ids(text_a, text_b)
# self.module.eval()
preds = []
probas = []
vote_label_idx = []
with torch.no_grad():
inputs = self._get_module_one_sample_inputs(features)
logits = 0
weight_sum = 0
for idx, module in enumerate(self.modules):
logit = self.modules[idx](**inputs) * 1
logit = torch.nn.functional.softmax(logit, dim=1)
probs, indices = logit.topk(topk, dim=1, sorted=True)
preds.append(indices.cpu().numpy()[0][0])
rank = indices.cpu().numpy()[0]
rank_dict = {_index: _index for _index, _index in enumerate(rank)}
probas.append([rank_dict[_index] for _index in range(len(rank))])
            most_ = Counter(preds).most_common(35)
            # print(most_)
            max_vote_num = most_[0][1]
            most_ = [m for m in most_ if m[1] != 1]  # drop labels that received only a single vote
            most_ = [m for m in most_ if m[1] == max_vote_num]  # keep only labels tied at the maximum vote count
            if len(most_) == 0:  # every label received exactly one vote
                vote_label_idx.append(Counter(preds).most_common(1)[0][0])
            elif len(most_) == 1:
                vote_label_idx.append(most_[0][0])
            else:
                prob_list_np = np.array(probas)
                select_rank = 10000
                select_m = 10000
                for m, num in most_:
                    # sum column m of the scores (every model's score for label m)
                    prob_m = prob_list_np[:, m]
                    if sum(prob_m) < select_rank:
                        select_m = m
                        select_rank = sum(prob_m)
                vote_label_idx.append(select_m)
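                # Hedged illustration of the voting above (not from the original script): with five
                # models predicting [2, 2, 3, 3, 1], label 1 is dropped (single vote) and labels 2
                # and 3 tie at the maximum of two votes, so the else-branch tie-break over the
                # summed column scores decides between them.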
# preds = []
# probas = []
# for pred_, proba_ in zip(indices.cpu().numpy()[0], probs.cpu().numpy()[0].tolist()):
# if return_label_name:
# pred_ = self.id2cat[pred_]
# preds.append(pred_)
# if return_proba:
# probas.append(proba_)
# if return_proba:
# return list(zip(preds, probas))
if vote_label_idx[0] == -1:
print(most_)
print(probas)
return self.id2cat[vote_label_idx[0]]
def prob_avg_rank_in_list(prob, prob_list_np):  # rank each value within its row of a 2D array, then average the ranks
rank_list = []
for i, element in enumerate(prob_list_np):
rank = 0
for p in element:
            if prob[i] < p:  # larger probabilities are ranked earlier
rank += 1
rank_list.append(rank)
return np.array(rank_list).mean()
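# Hedged worked example (illustrative only): with prob = [0.2, 0.9] and
# prob_list_np = [[0.1, 0.2, 0.3], [0.5, 0.7, 0.8]], the per-row ranks are 1 and 0,
# so prob_avg_rank_in_list returns 0.5.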
ensemble_dl_modules = []
for file_name_ in os.listdir('../checkpoint/macbert2-f/'):
if file_name_.startswith('.'):
continue
ensemble_dl_module = Bert(config=bert_config)
ensemble_dl_module.load_state_dict(torch.load('../checkpoint/macbert2-f/' + file_name_))
ensemble_dl_module.eval()
ensemble_dl_module.to('cuda:0')
ensemble_dl_modules.append(ensemble_dl_module)
tm_predictor_instance = TMPredictor(ensemble_dl_modules, tokenizer, tm_dataset.cat2id)
from tqdm import tqdm
submit_result = []
with codecs.open('../data/source_datasets/testb.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_ in tqdm(reader):
dialogue_ = json.loads(dialogue_)
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _ner_idx, term_ in enumerate(terms_):
entity_ = dict()
entity_['dialogue'] = dialogue_
_text = dialogue_['dialog_info'][content_idx_]['text']
_text_list = list(_text)
_text_list.insert(term_['range'][0], '[unused1]')
_text_list.insert(term_['range'][1]+1, '[unused2]')
_text = ''.join(_text_list)
if content_idx_ - 1 >= 0 and len(dialogue_['dialog_info'][content_idx_-1]) < 40:
forward_text = dialogue_['dialog_info'][content_idx_-1]['sender'] + ':' + dialogue_['dialog_info'][content_idx_-1]['text'] + ';'
else:
forward_text = ''
if contents_['sender'] == '医生':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
if dialogue_['dialog_info'][temp_index]['sender'] == '患者':
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
elif contents_['sender'] == '患者':
if content_idx_ + 1 >= len(dialogue_['dialog_info']):
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text + ';'
temp_index = copy.deepcopy(content_idx_) + 1
speaker_flag = False
sen_counter = 0
while True:
sen_counter += 1
speaker_flag = True
entity_['text_a'] += dialogue_['dialog_info'][temp_index]['sender'] + ':' + dialogue_['dialog_info'][temp_index]['text'] + ';'
if sen_counter > 3:
break
temp_index += 1
if temp_index >= len(dialogue_['dialog_info']):
break
else:
entity_['text_a'] = forward_text + dialogue_['dialog_info'][content_idx_]['sender'] + ':' + _text
if term_['name'] == 'undefined':
add_text = '|没有标准化'
else:
add_text = '|标准化为' + term_['name']
entity_['text_b'] = term_['mention'] + add_text
entity_['start_idx'] = term_['range'][0]
entity_['end_idx'] = term_['range'][1] - 1
entity_['label'] = term_['attr']
idx_ += 1
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = tm_predictor_instance.predict_one_sample([entity_['text_a'], entity_['text_b']])
submit_result.append(dialogue_)
with open('./macbert2-f.txt', 'w') as output_data:
for json_content in submit_result:
output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
| 34.7303
| 162
| 0.539052
|
84f1d9b36486efc3600689e1d704186740fd137a
| 1,331
|
py
|
Python
|
zorg/jenkins/jobs/delete_old_jobs.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 27
|
2019-01-15T03:03:58.000Z
|
2022-03-22T23:31:36.000Z
|
zorg/jenkins/jobs/delete_old_jobs.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 21
|
2020-05-29T01:12:26.000Z
|
2022-03-29T20:06:22.000Z
|
zorg/jenkins/jobs/delete_old_jobs.py
|
antiagainst/llvm-zorg
|
a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918
|
[
"Apache-2.0"
] | 38
|
2019-02-10T02:46:33.000Z
|
2022-03-26T10:27:29.000Z
|
#!/usr/bin/env python
import glob
import os
import subprocess
import sys
import time
import xml.etree.ElementTree as ET
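# Compare the freshly generated job configs under build/jenkins/job/ against the jobs that
# currently exist on the Jenkins server, then delete server-side jobs that are no longer generated.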
new_jobs = set()
for g in glob.glob('build/jenkins/job/*'):
new_jobs.add(os.path.basename(g))
if len(new_jobs) == 0:
print "No new jobs?!?"
sys.exit(1)
query = subprocess.check_output(['util/query.sh', 'api/xml?tree=jobs[name,description]'], )
existing_jobs = set()
tree = ET.fromstring(query)
for job in tree.findall('.//job'):
name = job.find('name').text
description_e = job.find('description')
if description_e is None:
continue
description = description_e.text
if description is None:
continue
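    # jobs created by this tooling are identified by a "$$job generated from" marker in their description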
if '$$job generated from ' in description:
existing_jobs.add(name.strip())
if len(existing_jobs) == 0:
print "No existing jobs?!?"
sys.exit(1)
# We should have already uploaded all the new jobs
missing = new_jobs - existing_jobs
if len(missing) > 0:
print "Missing jobs?!?"
sys.exit(1)
to_delete = existing_jobs - new_jobs
if len(to_delete) > 0:
print ""
print ""
print "Will delete the following jobs:"
for jobname in to_delete:
print " %s" % jobname
print "You have 5 seconds to abort"
time.sleep(5)
for jobname in to_delete:
subprocess.check_call(['util/delete_job.sh', jobname])
| 25.596154
| 91
| 0.666416
|
d60c321b37e47339a7e30c9d07fd5fd703eba751
| 2,128
|
py
|
Python
|
apps/journal/migrations/0001_initial.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/journal/migrations/0001_initial.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/journal/migrations/0001_initial.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2018-05-15 13:51
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Journal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day', models.BooleanField(default=True)),
('week', models.BooleanField(default=True)),
('month', models.BooleanField(default=True)),
('quarter', models.BooleanField(default=False)),
('year', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Profile')),
],
),
migrations.CreateModel(
name='JournalEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('content', models.TextField()),
('entry_type', models.CharField(max_length=20)),
('created_on', models.DateField(default=datetime.datetime.utcnow)),
('journal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='journal.Journal')),
],
),
migrations.CreateModel(
name='JournalTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('template_type', models.CharField(choices=[('0', 'day'), ('1', 'week'), ('2', 'month'), ('3', 'quarter'), ('4', 'year')], max_length=20)),
('content', models.TextField()),
('journal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='templates', to='journal.Journal')),
],
),
]
| 42.56
| 155
| 0.575188
|
8a7b47f7b4eeba0b0c7e1146037bacb7b0df3f1a
| 604
|
py
|
Python
|
bubblesort.py
|
amitdubey/DataStructures_In_Python
|
766cdab85c8619f7d28d4a0fa1a17288e2b13aa8
|
[
"BSD-2-Clause"
] | 1
|
2021-01-29T05:18:43.000Z
|
2021-01-29T05:18:43.000Z
|
bubblesort.py
|
amitdubey/DataStructures_In_Python
|
766cdab85c8619f7d28d4a0fa1a17288e2b13aa8
|
[
"BSD-2-Clause"
] | null | null | null |
bubblesort.py
|
amitdubey/DataStructures_In_Python
|
766cdab85c8619f7d28d4a0fa1a17288e2b13aa8
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import List
def bubble_sort(our_list: List[int]) -> List[int]:
has_swapped = True
num_of_iterations = 0
    while has_swapped:
has_swapped = False
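        # after each full pass the largest remaining element has bubbled to the end,
        # so the inner loop can skip the already-sorted tail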
for i in range(len(our_list) - num_of_iterations - 1):
if our_list[i] > our_list[i+1]:
# Swap
our_list[i], our_list[i+1] = our_list[i+1], our_list[i]
has_swapped = True
num_of_iterations += 1
return our_list
def main():
    array = [3, 2, 1, 5, 7, 9, 11, 2, 4, 16]
print(bubble_sort(array))
if __name__ == '__main__':
main()
| 24.16
| 75
| 0.536424
|
122cd27bd44dad37ac51fe67fe39d0d69e54f773
| 182
|
py
|
Python
|
config.py
|
longnow/plexmark
|
71fe71bddbc18a413fcd11e0882bab8c1523e9a6
|
[
"MIT"
] | 3
|
2021-01-07T21:50:16.000Z
|
2021-03-10T02:24:46.000Z
|
config.py
|
longnow/plexmark
|
71fe71bddbc18a413fcd11e0882bab8c1523e9a6
|
[
"MIT"
] | null | null | null |
config.py
|
longnow/plexmark
|
71fe71bddbc18a413fcd11e0882bab8c1523e9a6
|
[
"MIT"
] | null | null | null |
PORT = 3004
REQUEST_TIMEOUT = 300
DB_NAME = "plx"
DB_USER = "yang"
DB_POOL_MIN = 1
DB_POOL_MAX = 3
DATA_DIR = "/opt/local/plexmark-data"
MAX_WORKERS = 5
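# presumably the maximum age of generated data before cleanup, in seconds (604800 s = 7 days)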
CLEANUP_MAX_AGE = 604800
| 13
| 37
| 0.725275
|
198946886bfb63847ef912d8636d88ddaa0771fc
| 2,167
|
py
|
Python
|
parcels/kernels/diffusion.py
|
jelletreep/parcels
|
7ba3e08de7de046474373f6a5fe02835fc99a8dc
|
[
"MIT"
] | 1
|
2020-10-01T02:27:00.000Z
|
2020-10-01T02:27:00.000Z
|
parcels/kernels/diffusion.py
|
jelletreep/parcels
|
7ba3e08de7de046474373f6a5fe02835fc99a8dc
|
[
"MIT"
] | null | null | null |
parcels/kernels/diffusion.py
|
jelletreep/parcels
|
7ba3e08de7de046474373f6a5fe02835fc99a8dc
|
[
"MIT"
] | null | null | null |
"""Collection of pre-built diffusion kernels"""
import math
from parcels import rng as random
__all__ = ['BrownianMotion2D', 'SpatiallyVaryingBrownianMotion2D']
def BrownianMotion2D(particle, fieldset, time):
"""Kernel for simple Brownian particle diffusion in zonal and meridional direction.
Assumes that fieldset has fields Kh_zonal and Kh_meridional"""
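    # the variance of uniform(-1, 1) is 1/3, so dividing Kh by r gives steps with variance 2*Kh*|dt|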
r = 1/3.
kh_meridional = fieldset.Kh_meridional[time, particle.depth, particle.lat, particle.lon]
particle.lat += random.uniform(-1., 1.) * math.sqrt(2*math.fabs(particle.dt)*kh_meridional/r)
kh_zonal = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon]
particle.lon += random.uniform(-1., 1.) * math.sqrt(2*math.fabs(particle.dt)*kh_zonal/r)
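# A minimal usage sketch (assuming a ParticleSet `pset` over a FieldSet that defines Kh_zonal and
# Kh_meridional; the runtime/dt values below are illustrative):
#   pset.execute(pset.Kernel(BrownianMotion2D), runtime=timedelta(days=1), dt=timedelta(minutes=5))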
def SpatiallyVaryingBrownianMotion2D(particle, fieldset, time):
"""Diffusion equations for particles in non-uniform diffusivity fields
from Ross & Sharples (2004, doi:10.4319/lom.2004.2.289)
and Spagnol et al. (2002, doi:10.3354/meps235299)"""
# regular Brownian motion step
r = 1/3.
kh_meridional = fieldset.Kh_meridional[time, particle.depth, particle.lat, particle.lon]
Ry = random.uniform(-1., 1.) * math.sqrt(2*math.fabs(particle.dt)*kh_meridional/r)
kh_zonal = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon]
Rx = random.uniform(-1., 1.) * math.sqrt(2*math.fabs(particle.dt)*kh_zonal/r)
# Deterministic 'boost' out of areas of low diffusivity
dx = .01 # for spherical coords, dx is in degrees
Kyp1 = fieldset.Kh_meridional[time, particle.depth, particle.lat+dx, particle.lon]
Kym1 = fieldset.Kh_meridional[time, particle.depth, particle.lat-dx, particle.lon]
dKdy = (Kyp1-Kym1) / (2*dx)
Kxp1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon+dx]
Kxm1 = fieldset.Kh_zonal[time, particle.depth, particle.lat, particle.lon-dx]
dKdx = (Kxp1-Kxm1) / (2*dx)
CorrectionX = dKdx * math.fabs(particle.dt)
CorrectionY = dKdy * math.fabs(particle.dt)
# diffuse particle as sum of Brownian motion and deterministic 'boost'
particle.lon += Rx + CorrectionX
particle.lat += Ry + CorrectionY
| 45.145833
| 97
| 0.718505
|
dbd5f638ac42e78a05c3ad6f3e3b5d61cef493cd
| 1,603
|
py
|
Python
|
sos_trades_core/sos_processes/test/test_sellar_coupling_new_types/usecase.py
|
os-climate/sostrades-core
|
bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9
|
[
"Apache-2.0"
] | 8
|
2022-01-10T14:44:28.000Z
|
2022-03-31T08:57:14.000Z
|
sos_trades_core/sos_processes/test/test_sellar_coupling_new_types/usecase.py
|
os-climate/sostrades-core
|
bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9
|
[
"Apache-2.0"
] | null | null | null |
sos_trades_core/sos_processes/test/test_sellar_coupling_new_types/usecase.py
|
os-climate/sostrades-core
|
bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9
|
[
"Apache-2.0"
] | 1
|
2022-02-21T14:51:45.000Z
|
2022-02-21T14:51:45.000Z
|
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from sos_trades_core.study_manager.study_manager import StudyManager
import numpy as np
import pandas as pd
class Study(StudyManager):
def __init__(self, execution_engine=None):
super().__init__(__file__, execution_engine=execution_engine)
def setup_usecase(self):
ns = f'{self.study_name}'
coupling_name = "SellarCoupling"
df = pd.DataFrame({'years': np.arange(1, 5)})
df['value'] = 1.0
dict_x = {'years': np.arange(1, 5), 'value': np.ones(4)}
disc_dict = {}
# Sellar inputs
disc_dict[f'{ns}.{coupling_name}.x'] = dict_x
disc_dict[f'{ns}.{coupling_name}.y_1'] = df
disc_dict[f'{ns}.{coupling_name}.y_2'] = df
disc_dict[f'{ns}.{coupling_name}.z'] = np.array([1., 1.])
disc_dict[f'{ns}.{coupling_name}.Sellar_Problem.local_dv'] = 10.
return [disc_dict]
if '__main__' == __name__:
uc_cls = Study()
uc_cls.load_data()
uc_cls.execution_engine.display_treeview_nodes(display_variables=True)
uc_cls.run()
| 32.06
| 74
| 0.689956
|
b3b9827a1b7210c6bd848543c2e4969ec1d6e92a
| 17,142
|
py
|
Python
|
multiplayer_snake/server/server.py
|
sheepy0125/multiplayer-snake
|
07d28c54811ff41ab1ba5bffc3ec73247021bffd
|
[
"MIT"
] | null | null | null |
multiplayer_snake/server/server.py
|
sheepy0125/multiplayer-snake
|
07d28c54811ff41ab1ba5bffc3ec73247021bffd
|
[
"MIT"
] | 5
|
2021-11-15T14:55:43.000Z
|
2021-11-18T14:32:45.000Z
|
multiplayer_snake/server/server.py
|
sheepy0125/multiplayer-snake
|
07d28c54811ff41ab1ba5bffc3ec73247021bffd
|
[
"MIT"
] | null | null | null |
"""
Snake, but multiplayer
Created by sheepy0125
2021-11-14
Server side code!
"""
### Setup ###
import multiplayer_snake.constants as constants
from multiplayer_snake.shared.common import hisock, pygame, Logger
from multiplayer_snake.shared.tools import (
get_public_ip,
get_discriminator,
check_username,
)
from multiplayer_snake.shared.pygame_tools import (
GlobalPygame,
Text,
WrappedText,
Widget,
Button,
)
from multiplayer_snake.shared.config_parser import parse
from multiplayer_snake.shared.shared_game import BaseSnakePlayer, SharedGame
from time import time
from datetime import timedelta
from os import _exit as force_exit
from io import TextIOWrapper
import sys
CONFIG = parse()
GUI_CONFIG = CONFIG["gui"]
SERVER_CONFIG = CONFIG["server"]
# Setup pygame
pygame.init()
GlobalPygame.window = pygame.display.set_mode(GUI_CONFIG["window_size"])
pygame.display.set_caption(f"{constants.__name__} Server (GUI)")
# Setup hisock
server = hisock.server.ThreadedHiSockServer(
(hisock.utils.get_local_ip(), CONFIG["server"]["port"]),
max_connections=2,
)
### Classes ###
class GameAlreadyRunningError(Exception):
...
class SnakeGame:
"""Handles everything that will be sent to the clients"""
def __init__(self):
self.num_players = 2
self.players_online: list[ServerSnakePlayer] = []
self.round: int = 0
self.frames = 0
self.uptime: int = 0 # Seconds
self.uptime_changed: bool = False # For the GUI
self.running = False
self.start_time: int = 0 # Unix timestamp
self.last_update_time: float = 0.0 # Unix timestamp
self.time_next_tick: float = 0.0 # Milliseconds
self.default_spots = [
(0, round(((SharedGame.height / 2) - 1), ndigits=1)),
(SharedGame.width - 1, round(((SharedGame.height / 2) - 1), ndigits=1)),
]
def get_data(self) -> dict:
"""Get data for updating the clients"""
# Get the data
return {
"players": [player.get_data() for player in self.players_online],
"round": self.round,
"uptime": self.uptime,
"uptime_changed": self.uptime_changed,
}
def update(self):
"""Updates the game (must be started first)"""
original_uptime = self.uptime
self.uptime = int(time() - self.start_time)
        self.uptime_changed = original_uptime != self.uptime
# Is it time for the next tick?
time_next_tick = SERVER_CONFIG["time_until_update"] - (
time() - self.last_update_time
)
self.time_next_tick = time_next_tick
if time_next_tick > 0:
return
self.frames += 1
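        # advance by the fixed interval rather than resetting to time() so the tick rate doesn't drift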
self.last_update_time += SERVER_CONFIG["time_until_update"]
# Update players
for player in self.players_online:
player.update()
# Update clients
update_clients_with_data()
def run(self):
"""Run everything. Should be called every frame"""
if not self.running:
return
self.update()
def start(self):
"""Start the game"""
if self.running:
raise GameAlreadyRunningError
Logger.log("Starting game")
self.running = True
self.start_time = int(time())
self.last_update_time = time()
# Alert everyone that the game has started
server.send_all_clients("game_started")
def stop(self):
"""Stop the game"""
if not self.running:
return
Logger.log("Stopping game")
self.running = False
# Alert everyone that the game has stopped
server.send_all_clients("game_stopped")
self.__init__()
def add_player(self, ip_address: str, username: str) -> bool:
"""Adds a player to the game, returns if valid"""
# Too many players already
if len(self.players_online) >= self.num_players:
return False
# Username isn't good
if not check_username(username):
return False
# Everything seems fine, add the player
# Get the default position
default_pos = self.default_spots[len(self.players_online)]
self.players_online.append(
ServerSnakePlayer(
default_pos=default_pos,
default_length=1,
identifier=f"{username}#{get_discriminator()}",
ip_address=ip_address,
)
        )
        return True
    def snake_died(self, identifier: str, reason: str = "unknown"):
        # Game over
        server.send_all_clients("game_over", f"{identifier} died ({reason})")
snake_game = SnakeGame()
class ServerSnakePlayer(BaseSnakePlayer):
"""Server side snake player"""
def _reset(self, ip_address: str, *args, **kwargs):
"""BaseSnakePlayer reset, but it has more stuff"""
self.ip_address = ip_address
super()._reset(*args, **kwargs)
def get_data(self) -> dict:
"""Get data for updating the client"""
return {
"identifier": self.identifier,
"ip_address": self.ip_address,
"pos": self.pos,
"length": self.length,
"direction": self.direction,
}
def snake_died(self, reason: str = "unknown"):
super().snake_died(reason)
snake_game.snake_died(identifier=self.identifier, reason=reason)
### Server handlers ###
@server.on("join")
def on_client_join(client_data):
Logger.log(
f"{client_data.name} ({hisock.iptup_to_str(client_data.ip)})"
" connected to the server"
)
if not snake_game.add_player(client_data.ip, client_data.name):
# Failed to join, disconnect player
server.disconnect_client(client_data)
@server.on("leave")
def on_client_leave(client_data):
Logger.log(
f"{client_data.name} ({hisock.iptup_to_str(client_data.ip)})"
" disconnected from the server"
)
# Remove player
for player in snake_game.players_online:
        if player.identifier.split("#")[0] == client_data.name:
snake_game.players_online.remove(player)
break
@server.on("request_data")
def update_clients_with_data():
server.send_all_clients("update_data", snake_game.get_data())
### Widgets / GUI ###
class ServerWindow:
"""Handles all the widgets inside the window"""
def __init__(self):
"""No params as this will use CONFIG"""
self.widgets = self.create_widgets()
if CONFIG["verbose"]:
Logger.log("Server window created")
def create_widgets(self) -> list:
if CONFIG["verbose"]:
Logger.log("Created widgets")
widgets: list = [
PlayersListWidget(),
ServerInfoWidget(),
ServerStatusMesagesWidget(),
]
return widgets
def update(self):
"""Updates all the widgets"""
for widget in self.widgets:
widget.update()
def draw(self):
"""Draws all the widgets and the main window"""
# Draw background
GlobalPygame.window.fill(GUI_CONFIG["colors"]["background"])
# Draw widgets
for widget in self.widgets:
widget.draw()
### Widgets ###
class ServerWidget(Widget):
def __init__(self, *args, **kwargs):
# Colors and stuff
text_size = GUI_CONFIG["text_size"]
text_color = GUI_CONFIG["colors"]["widget"]["text"]
padding = GUI_CONFIG["widget_padding"]
widget_color = GUI_CONFIG["colors"]["widget"]["background"]
border_color = GUI_CONFIG["colors"]["widget"]["border"]
super().__init__(
text_size=text_size,
text_color=text_color,
padding=padding,
widget_color=widget_color,
border_color=border_color,
*args,
**kwargs,
)
if CONFIG["verbose"]:
Logger.log(f"Created {self.identifier} widget")
class PlayersListWidget(ServerWidget):
"""Widget for players online"""
def __init__(self):
super().__init__(
pos=(GUI_CONFIG["widget_padding"], GUI_CONFIG["widget_padding"]),
size=(
GUI_CONFIG["window_size"][0] // 4 * 1,
GUI_CONFIG["window_size"][1] // 2 - (GUI_CONFIG["widget_padding"] * 2),
),
identifier="players list",
)
self.text_widgets: dict = {
"immutable": [
self.create_text("Players online", offset=0, text_size=16),
],
"mutable": [],
}
self.update(do_check=False)
def update(self, do_check: bool = True):
if do_check and (
len(self.text_widgets["mutable"]) == len(snake_game.players_online) * 2
):
return
if CONFIG["verbose"]:
Logger.log(
f"Updating players (players online: {len(snake_game.players_online)})"
)
mutable_text_widgets: list[Text] = []
for num, player in enumerate(snake_game.players_online):
mutable_text_widgets.append(
self.create_text(
str(player.identifier),
offset=(num * 2 + 2), # 2 because of the title
)
)
mutable_text_widgets.append(
self.create_text(
hisock.utils.iptup_to_str(player.ip_address),
offset=(num * 2 + 2 + 1), # 2 because of the title
),
)
if CONFIG["verbose"]:
Logger.log(f"Created text widget for player snake {player.identifier}")
self.text_widgets["mutable"] = mutable_text_widgets
def draw(self):
super().draw()
for text_list in self.text_widgets.values():
for text in text_list:
text.draw()
class ServerInfoWidget(ServerWidget):
"""Widget for telling information about the server"""
def __init__(self):
super().__init__(
pos=(GUI_CONFIG["widget_padding"], GUI_CONFIG["window_size"][1] // 2),
size=(
GUI_CONFIG["window_size"][0] // 4 * 1,
GUI_CONFIG["window_size"][1] // 2 - GUI_CONFIG["widget_padding"],
),
identifier="server status",
)
self.text_widgets: dict = {
"immutable": [
self.create_text("Server status", offset=0, text_size=16),
self.create_text(
f"Server local IP: {hisock.utils.get_local_ip()}", offset=2
),
self.create_text(f"Server public IP: {get_public_ip()}", offset=3),
self.create_text(f"Server port: {CONFIG['server']['port']}", offset=4),
Text(
"Start/stop",
pos=(
self.pos[0] + (self.size[0] // 2),
self.pos[1] + self.size[1] - 40,
),
size=14,
color=self.text_color,
center=True,
),
],
"mutable": [self.create_text("")] * 3,
}
self.start_button = Button(
pos=(
self.pos[0] + (self.size[0] // 2),
self.pos[1] + self.size[1] - 40,
),
size=(self.size[0] // 4 * 3, 50),
color="orange",
)
self.update(do_check=False)
def update(self, do_check: bool = True):
if (not do_check) or snake_game.uptime_changed:
uptime_text_widget = self.create_text(
f"Uptime: {str(timedelta(seconds=snake_game.uptime))!s}", offset=5
)
self.text_widgets["mutable"][0] = uptime_text_widget
frame_count_widget = self.create_text(
f"Frames: {snake_game.frames}/"
+ str(
int(
(
snake_game.uptime
* (1 // CONFIG["server"]["time_until_update"])
)
)
),
offset=6,
)
self.text_widgets["mutable"][1] = frame_count_widget
if snake_game.running:
time_next_tick_text = self.create_text(
f"Time until next frame: {str(round(snake_game.time_next_tick, 1)).zfill(3)} ms",
offset=7,
)
self.text_widgets["mutable"][2] = time_next_tick_text
if self.start_button.check_pressed():
if not snake_game.running:
snake_game.start()
else:
snake_game.stop()
def draw(self):
super().draw()
self.start_button.draw()
for text_list in self.text_widgets.values():
for text in text_list:
text.draw()
class ServerStatusMesagesWidget(ServerWidget):
"""
Widget for showing status messages about the current game,
not just stats about the server.
"""
def __init__(self):
super().__init__(
pos=(
(GUI_CONFIG["widget_padding"] * 2)
+ (GUI_CONFIG["window_size"][0] // 4),
GUI_CONFIG["widget_padding"],
),
size=(
(GUI_CONFIG["window_size"][0] // 4 * 3)
- (GUI_CONFIG["widget_padding"] * 3),
GUI_CONFIG["window_size"][1] - (GUI_CONFIG["widget_padding"] * 2),
),
identifier="server status messages widget",
)
self.text_widgets: dict = {
"immutable": [self.create_text("Server logs", offset=0, text_size=16)],
"mutable": [],
}
self.update()
def update(self):
...
def add_text(self, text: str):
# Add wrapping text
if len(self.text_widgets["mutable"]) == 0:
y_offset = self.padding
else:
y_offset = (
self.text_widgets["mutable"][-1].ending_y_pos
- (self.padding * 2)
- len(self.text_widgets["mutable"])
)
text_wrapped = WrappedText(
text=text,
max_chars=90,
pos=(
self.pos[0] + self.padding,
self.pos[1] + (len(self.text_widgets["mutable"]) + self.text_size),
),
y_offset=y_offset,
text_size=self.text_size,
text_color=self.text_color,
center=False,
)
self.text_widgets["mutable"].append(text_wrapped)
def draw(self):
super().draw()
for text_list in self.text_widgets.values():
for text in text_list:
text.draw()
def scroll(self, scroll_by: int):
for text in self.text_widgets["mutable"]:
text.scroll(
min_y=self.text_widgets["immutable"][0].text_rect.bottom,
scroll_by=scroll_by,
)
@property
def needs_scroll(self):
"""Implies there is already a message"""
return self.text_widgets["mutable"][-1].ending_y_pos >= (
self.size[1] + self.pos[1]
)
@property
def scroll_by(self):
"""Implies there is already a message"""
return self.text_widgets["mutable"][-1].ending_y_pos - (
self.size[1] + self.pos[1]
)
### Override stdout ###
class StdOutOverride:
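    """Mirror everything written to stdout into the server-log widget, with ANSI colors stripped."""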
def __init__(self, _file: TextIOWrapper):
self.file = _file
def write(self, text: str):
self.file.write(text)
if text != "\n":
# Strip color
for ansi_color in Logger.colors.values():
text = text.replace(ansi_color, "")
self.log_to_widget(text)
def flush(self):
self.file.flush()
def log_to_widget(self, text: str):
server_win.widgets[2].add_text(text)
# Scrolling
if server_win.widgets[2].needs_scroll:
server_win.widgets[2].scroll(scroll_by=server_win.widgets[2].scroll_by)
sys.stdout = StdOutOverride(sys.stdout)
### Main ###
server_win = ServerWindow()
server.start()
def run_pygame_loop():
# Handle events
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
# Update
server_win.update()
# Draw
server_win.draw()
pygame.display.flip()
return True
def run():
while True:
try:
if not run_pygame_loop():
# Request to exit
return pygame.quit()
snake_game.run()
except KeyboardInterrupt:
print("\nExiting gracefully...")
pygame.quit()
return
except Exception as e:
Logger.log_error(e)
return
if __name__ != "__main__":
exit()
run()
del StdOutOverride
sys.stdout = sys.__stdout__
try:
server.disconnect_all_clients(force=False)
server.close()
except KeyboardInterrupt:
print("\nForcing!")
force_exit(1)
| 27.78282
| 97
| 0.558745
|
bf3c4a516ae16561cfa081672b77faa8adb3fd63
| 806
|
py
|
Python
|
merkle_tree/merktest.py
|
kevaundray/research
|
16f20848c614b580071fed3d2ff1dc69688fa4f4
|
[
"MIT"
] | 1,351
|
2015-09-22T08:17:10.000Z
|
2022-03-31T22:48:07.000Z
|
merkle_tree/merktest.py
|
kevaundray/research
|
16f20848c614b580071fed3d2ff1dc69688fa4f4
|
[
"MIT"
] | 42
|
2016-08-31T14:43:29.000Z
|
2021-12-05T23:10:31.000Z
|
merkle_tree/merktest.py
|
LaudateCorpus1/research
|
6e8b7b367e7f1b18b4b92151df01dfeaa0774a23
|
[
"MIT"
] | 334
|
2015-09-20T10:15:23.000Z
|
2022-03-28T17:46:57.000Z
|
from merk import merkle_tree, mk_multi_proof, verify_multi_proof
def test_multi_merkle_tree():
leaves = [i.to_bytes(32, 'big') for i in range(16)]
tree = merkle_tree(leaves)
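    # exhaustively check every subset of the 16 leaves (one bitmask per iteration, 2**16 in total)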
for i in range(65536):
indices = [j for j in range(16) if (i>>j)%2 == 1]
proof = mk_multi_proof(tree, indices)
assert verify_multi_proof(tree[1], indices, [leaves[i] for i in indices], 4, proof)
if i%1024 == 1023:
print("%d of 65536 16-element proofs checked" % (i+1))
assert not verify_multi_proof(tree[2], indices, [leaves[i] for i in indices], 4, proof)
assert not verify_multi_proof(tree[1], indices, [leaves[i][::-1] for i in indices], 4, proof)
print("Multi Merkle tree test passed")
if __name__ == '__main__':
test_multi_merkle_tree()
| 44.777778
| 105
| 0.645161
|
ed922ba966e4f1389ff39276c2ef5a41458b392a
| 7,609
|
py
|
Python
|
models/tensorflow/cnn_model_util/datasets.py
|
lynex/nnfusion
|
6332697c71b6614ca6f04c0dac8614636882630d
|
[
"MIT"
] | 639
|
2020-09-05T10:00:59.000Z
|
2022-03-30T08:42:39.000Z
|
models/tensorflow/cnn_model_util/datasets.py
|
QPC-database/nnfusion
|
99ada47c50f355ca278001f11bc752d1c7abcee2
|
[
"MIT"
] | 252
|
2020-09-09T05:35:36.000Z
|
2022-03-29T04:58:41.000Z
|
models/tensorflow/cnn_model_util/datasets.py
|
QPC-database/nnfusion
|
99ada47c50f355ca278001f11bc752d1c7abcee2
|
[
"MIT"
] | 104
|
2020-09-05T10:01:08.000Z
|
2022-03-23T10:59:13.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark dataset utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import os
import numpy as np
import six
from six.moves import cPickle
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.platform import gfile
from . import preprocessing
IMAGENET_NUM_TRAIN_IMAGES = 1281167
IMAGENET_NUM_VAL_IMAGES = 50000
COCO_NUM_TRAIN_IMAGES = 118287
COCO_NUM_VAL_IMAGES = 4952
class Dataset(object):
"""Abstract class for cnn benchmarks dataset."""
def __init__(self,
name,
data_dir=None,
queue_runner_required=False,
num_classes=None):
self.name = name
self.data_dir = data_dir
self._queue_runner_required = queue_runner_required
self._num_classes = num_classes
def tf_record_pattern(self, subset):
return os.path.join(self.data_dir, '%s-*-of-*' % subset)
def reader(self):
return tf.TFRecordReader()
@property
def num_classes(self):
return self._num_classes
@num_classes.setter
def num_classes(self, val):
self._num_classes = val
@abstractmethod
def num_examples_per_epoch(self, subset):
pass
def __str__(self):
return self.name
def get_input_preprocessor(self, input_preprocessor='default'):
assert not self.use_synthetic_gpu_inputs()
return _SUPPORTED_INPUT_PREPROCESSORS[self.name][input_preprocessor]
def queue_runner_required(self):
return self._queue_runner_required
def use_synthetic_gpu_inputs(self):
return not self.data_dir
class LibrispeechDataset(Dataset):
"""Configuration for LibriSpeech dataset."""
def __init__(self, data_dir=None):
super(LibrispeechDataset, self).__init__(
'librispeech', data_dir, num_classes=29)
def tf_record_pattern(self, subset):
if subset == 'train':
return os.path.join(self.data_dir, 'train-clean-*.tfrecords')
elif subset == 'validation':
return os.path.join(self.data_dir, 'test-clean.tfrecords')
else:
return ''
def num_examples_per_epoch(self, subset='train'):
del subset
return 2 # TODO(laigd): currently this is an arbitrary number.
class ImageDataset(Dataset):
"""Abstract class for image datasets."""
def __init__(self,
name,
height,
width,
depth=None,
data_dir=None,
queue_runner_required=False,
num_classes=1001):
super(ImageDataset, self).__init__(name, data_dir, queue_runner_required,
num_classes)
self.height = height
self.width = width
self.depth = depth or 3
class ImagenetDataset(ImageDataset):
"""Configuration for Imagenet dataset."""
def __init__(self, data_dir=None):
super(ImagenetDataset, self).__init__(
'imagenet', 300, 300, data_dir=data_dir)
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return IMAGENET_NUM_TRAIN_IMAGES
elif subset == 'validation':
return IMAGENET_NUM_VAL_IMAGES
else:
raise ValueError('Invalid data subset "%s"' % subset)
class Cifar10Dataset(ImageDataset):
"""Configuration for cifar 10 dataset.
It will mount all the input images to memory.
"""
def __init__(self, data_dir=None):
super(Cifar10Dataset, self).__init__(
'cifar10',
32,
32,
data_dir=data_dir,
queue_runner_required=True,
num_classes=11)
def read_data_files(self, subset='train'):
"""Reads from data file and returns images and labels in a numpy array."""
assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
'data')
if subset == 'train':
filenames = [
os.path.join(self.data_dir, 'data_batch_%d' % i)
for i in xrange(1, 6)
]
elif subset == 'validation':
filenames = [os.path.join(self.data_dir, 'test_batch')]
else:
raise ValueError('Invalid data subset "%s"' % subset)
inputs = []
for filename in filenames:
with gfile.Open(filename, 'rb') as f:
# python2 does not have the encoding parameter
encoding = {} if six.PY2 else {'encoding': 'bytes'}
inputs.append(cPickle.load(f, **encoding))
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
all_images = np.concatenate(
[each_input[b'data'] for each_input in inputs]).astype(np.float32)
all_labels = np.concatenate(
[each_input[b'labels'] for each_input in inputs])
return all_images, all_labels
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return 50000
elif subset == 'validation':
return 10000
else:
raise ValueError('Invalid data subset "%s"' % subset)
class COCODataset(ImageDataset):
"""COnfiguration for COCO dataset."""
def __init__(self, data_dir=None, image_size=300):
super(COCODataset, self).__init__(
'coco', image_size, image_size, data_dir=data_dir, num_classes=81)
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return COCO_NUM_TRAIN_IMAGES
elif subset == 'validation':
return COCO_NUM_VAL_IMAGES
else:
raise ValueError('Invalid data subset "%s"' % subset)
_SUPPORTED_DATASETS = {
'imagenet': ImagenetDataset,
'cifar10': Cifar10Dataset,
'librispeech': LibrispeechDataset,
'coco': COCODataset,
}
_SUPPORTED_INPUT_PREPROCESSORS = {
'imagenet': {
'default': preprocessing.RecordInputImagePreprocessor,
'official_models_imagenet': preprocessing.ImagenetPreprocessor,
},
'cifar10': {
'default': preprocessing.Cifar10ImagePreprocessor
},
'librispeech': {
'default': preprocessing.LibrispeechPreprocessor
},
'coco': {
'default': preprocessing.COCOPreprocessor
},
}
def create_dataset(data_dir, data_name):
"""Create a Dataset instance based on data_dir and data_name."""
if not data_dir and not data_name:
# When using synthetic data, use synthetic imagenet images by default.
data_name = 'imagenet'
  # Infer the dataset name from data_dir if data_name is not provided.
if data_name is None:
for supported_name in _SUPPORTED_DATASETS:
if supported_name in data_dir:
data_name = supported_name
break
else: # Failed to identify dataset name from data dir.
raise ValueError('Could not identify name of dataset. '
'Please specify with --data_name option.')
if data_name not in _SUPPORTED_DATASETS:
raise ValueError('Unknown dataset. Must be one of %s' % ', '.join(
[key for key in sorted(_SUPPORTED_DATASETS.keys())]))
return _SUPPORTED_DATASETS[data_name](data_dir)
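# A minimal usage sketch (hypothetical paths):
#   create_dataset('/data/imagenet', None)  # dataset name inferred from the directory
#   create_dataset(None, None)              # no data_dir: falls back to synthetic imagenet inputs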
| 30.194444
| 80
| 0.677093
|
51deb4fcaa6e73ef7bef2c193bc56f6178d2026e
| 57,601
|
py
|
Python
|
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
|
luobao-intel/incubator-mxnet
|
e9b138a54d41882267bc2955a3df6edd093679c7
|
[
"Apache-2.0"
] | 1
|
2019-01-20T13:56:45.000Z
|
2019-01-20T13:56:45.000Z
|
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
|
luobao-intel/incubator-mxnet
|
e9b138a54d41882267bc2955a3df6edd093679c7
|
[
"Apache-2.0"
] | 2
|
2021-12-10T01:51:15.000Z
|
2021-12-14T21:58:40.000Z
|
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
|
luobao-intel/incubator-mxnet
|
e9b138a54d41882267bc2955a3df6edd093679c7
|
[
"Apache-2.0"
] | 1
|
2019-06-11T05:30:18.000Z
|
2019-06-11T05:30:18.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Based on
# https://github.com/NVIDIA/mxnet_to_onnx/blob/master/mx2onnx_converter/
# mx2onnx_converter_functions.py
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# coding: utf-8
# pylint: disable=too-many-locals,no-else-return,too-many-lines
# pylint: disable=anomalous-backslash-in-string,eval-used
"""
Conversion Functions for common layers.
Add new functions here with a decorator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import logging
import numpy as np
from .export_onnx import MXNetGraph as mx_op
try:
import onnx
except ImportError:
onnx = None
def parse_helper(attrs, attrs_name, alt_value=None):
"""Helper function to parse operator attributes in required format."""
    tuple_re = re.compile(r'\([0-9L|,| ]+\)')
if not attrs:
return alt_value
attrs_str = None if attrs.get(attrs_name) is None else str(attrs.get(attrs_name))
if attrs_str is None:
return alt_value
attrs_match = tuple_re.search(attrs_str)
if attrs_match is not None:
if attrs_match.span() == (0, len(attrs_str)):
dims = eval(attrs_str)
return dims
else:
raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
return alt_value
def transform_padding(pad_width):
"""Helper function to convert padding format for pad operator.
"""
num_pad_values = len(pad_width)
onnx_pad_width = [0]*num_pad_values
start_index = 0
# num_pad_values will always be multiple of 2
end_index = int(num_pad_values/2)
for idx in range(0, num_pad_values):
if idx % 2 == 0:
onnx_pad_width[start_index] = pad_width[idx]
start_index += 1
else:
onnx_pad_width[end_index] = pad_width[idx]
end_index += 1
return onnx_pad_width
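# Example: an MXNet pad_width of (x1_begin, x1_end, x2_begin, x2_end) such as (0, 0, 1, 2)
# becomes the ONNX ordering (x1_begin, x2_begin, x1_end, x2_end), i.e. [0, 1, 0, 2].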
def convert_string_to_list(string_val):
"""Helper function to convert string to list.
Used to convert shape attribute string to list format.
"""
result_list = []
list_string = string_val.split(',')
for val in list_string:
val = str(val.strip())
val = val.replace("(", "")
val = val.replace(")", "")
val = val.replace("L", "")
val = val.replace("[", "")
val = val.replace("]", "")
if val not in ("", "None"):
result_list.append(int(val))
return result_list
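# Example: convert_string_to_list("(3, 4, 5)") -> [3, 4, 5]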
def get_boolean_attribute_value(attrs, attr_name):
""" Helper function to convert a string version
of Boolean attributes to integer for ONNX.
Takes attribute dictionary and attr_name as
parameters.
"""
return 1 if attrs.get(attr_name, 0) in ["True", "1"] else 0
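# Example: get_boolean_attribute_value({"no_bias": "True"}, "no_bias") -> 1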
def get_inputs(node, kwargs):
"""Helper function to get inputs"""
name = node["name"]
proc_nodes = kwargs["proc_nodes"]
index_lookup = kwargs["index_lookup"]
inputs = node["inputs"]
attrs = node.get("attrs", {})
input_nodes = []
for ip in inputs:
input_node_id = index_lookup[ip[0]]
input_nodes.append(proc_nodes[input_node_id].name)
return name, input_nodes, attrs
def create_basic_op_node(op_name, node, kwargs):
"""Helper function to create a basic operator
node that doesn't contain op specific attrs"""
name, input_nodes, _ = get_inputs(node, kwargs)
node = onnx.helper.make_node(
op_name,
input_nodes,
[name],
name=name
)
return [node]
@mx_op.register("null")
def convert_weights_and_inputs(node, **kwargs):
"""Helper function to convert weights and inputs.
"""
name, _, _ = get_inputs(node, kwargs)
if kwargs["is_input"] is False:
weights = kwargs["weights"]
initializer = kwargs["initializer"]
np_arr = weights[name]
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
dims = np.shape(np_arr)
tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=name,
data_type=data_type,
dims=dims,
vals=np_arr.flatten().tolist(),
raw=False,
)
)
return [tensor_node]
else:
tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
return [tval_node]
@mx_op.register("Convolution")
def convert_convolution(node, **kwargs):
"""Map MXNet's convolution operator attributes to onnx's Conv operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
kernel_dims = list(parse_helper(attrs, "kernel"))
stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
num_group = int(attrs.get("num_group", 1))
dilations = list(parse_helper(attrs, "dilate", [1, 1]))
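    # MXNet's "pad" gives one symmetric value per spatial axis; ONNX expects separate
    # begin and end pads, so the list is simply repeated.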
pad_dims = pad_dims + pad_dims
conv_node = onnx.helper.make_node(
"Conv",
inputs=input_nodes,
outputs=[name],
kernel_shape=kernel_dims,
strides=stride_dims,
dilations=dilations,
pads=pad_dims,
group=num_group,
name=name
)
return [conv_node]
@mx_op.register("FullyConnected")
def convert_fully_connected(node, **kwargs):
"""Map MXNet's FullyConnected operator attributes to onnx's Gemm operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
initializer = kwargs["initializer"]
no_bias = get_boolean_attribute_value(attrs, "no_bias")
fcnode = []
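    # ONNX Gemm works on 2-D inputs, so flatten the data tensor before the matrix multiply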
op_name = "flatten_" + str(kwargs["idx"])
flatten_node = onnx.helper.make_node(
'Flatten',
inputs=[input_nodes[0]],
outputs=[op_name],
name=op_name
)
input_nodes[0] = op_name
fcnode.append(flatten_node)
if no_bias:
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
bias_name = "bias" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
initializer.append(
onnx.helper.make_tensor(
name=bias_name,
data_type=data_type,
dims=(1,),
vals=[0],
raw=False,
)
)
input_nodes.append(bias_name)
fcnode.append(tensor_node)
node = onnx.helper.make_node(
"Gemm",
input_nodes, # input (A, B, C) - C can be in place
[name], # output
alpha=1.0,
beta=1.0,
transA=False,
transB=True,
name=name
)
fcnode.append(node)
return fcnode
@mx_op.register("BatchNorm")
def convert_batchnorm(node, **kwargs):
"""Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))
bn_node = onnx.helper.make_node(
"BatchNormalization",
input_nodes,
[name],
name=name,
epsilon=eps,
momentum=momentum,
# MXNet computes mean and variance per feature for batchnorm
# Default for onnx is across all spatial features. So disabling the parameter.
spatial=0
)
return [bn_node]
@mx_op.register("tanh")
def convert_tanh(node, **kwargs):
"""Map MXNet's tanh operator attributes to onnx's Tanh operator
and return the created node.
"""
return create_basic_op_node('Tanh', node, kwargs)
@mx_op.register("cos")
def convert_cos(node, **kwargs):
"""Map MXNet's cos operator attributes to onnx's Cos operator
and return the created node.
"""
return create_basic_op_node('Cos', node, kwargs)
@mx_op.register("sin")
def convert_sin(node, **kwargs):
"""Map MXNet's sin operator attributes to onnx's Sin operator
and return the created node.
"""
return create_basic_op_node('Sin', node, kwargs)
@mx_op.register("tan")
def convert_tan(node, **kwargs):
"""Map MXNet's tan operator attributes to onnx's tan operator
and return the created node.
"""
return create_basic_op_node('Tan', node, kwargs)
@mx_op.register("arccos")
def convert_acos(node, **kwargs):
"""Map MXNet's acos operator attributes to onnx's acos operator
and return the created node.
"""
return create_basic_op_node('Acos', node, kwargs)
@mx_op.register("arcsin")
def convert_asin(node, **kwargs):
"""Map MXNet's asin operator attributes to onnx's asin operator
and return the created node.
"""
return create_basic_op_node('Asin', node, kwargs)
@mx_op.register("arctan")
def convert_atan(node, **kwargs):
"""Map MXNet's atan operator attributes to onnx's atan operator
and return the created node.
"""
return create_basic_op_node('Atan', node, kwargs)
#Basic neural network functions
@mx_op.register("sigmoid")
def convert_sigmoid(node, **kwargs):
"""Map MXNet's sigmoid operator attributes to onnx's Sigmoid operator
and return the created node.
"""
return create_basic_op_node('Sigmoid', node, kwargs)
@mx_op.register("relu")
def convert_relu(node, **kwargs):
"""Map MXNet's relu operator attributes to onnx's Relu operator
and return the created node.
"""
return create_basic_op_node('Relu', node, kwargs)
@mx_op.register("Activation")
def convert_activation(node, **kwargs):
"""Map MXNet's Activation operator attributes to onnx's Tanh/Relu operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
act_type = attrs["act_type"]
    # Map MXNet activation names onto their ONNX operator names explicitly;
    # most of them simply follow a title-case pattern (mxnet_name.title()).
act_types = {
"tanh": "Tanh",
"relu": "Relu",
"sigmoid": "Sigmoid",
"softrelu": "Softplus",
"softsign": "Softsign"
}
act_name = act_types.get(act_type)
if act_name:
node = onnx.helper.make_node(
act_name,
input_nodes,
[name],
name=name
)
else:
raise AttributeError(
"Activation %s not implemented or recognized in the converter" % act_type
)
return [node]
@mx_op.register("Pad")
def convert_pad(node, **kwargs):
"""Map MXNet's pad operator attributes to onnx's Pad operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
onnx_pad_width = transform_padding(mxnet_pad_width)
pad_mode = attrs.get("mode")
if pad_mode == "constant":
pad_value = float(attrs.get("constant_value")) \
if "constant_value" in attrs else 0.0
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode='constant',
value=pad_value,
pads=onnx_pad_width,
name=name
)
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)
return [node]
def create_helper_trans_node(op_name, input_node, node_name):
"""create extra transpose node for dot operator"""
node_name = op_name + "_" + node_name
trans_node = onnx.helper.make_node(
'Transpose',
inputs=[input_node],
outputs=[node_name],
name=node_name
)
return trans_node
@mx_op.register("dot")
def convert_dot(node, **kwargs):
"""Map MXNet's dot operator attributes to onnx's
MatMul and Transpose operators based on the values set for
transpose_a, transpose_b attributes."""
name, input_nodes, attrs = get_inputs(node, kwargs)
input_node_a = input_nodes[0]
input_node_b = input_nodes[1]
trans_a_node = None
trans_b_node = None
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")
op_name = "transpose" + str(kwargs["idx"])
if trans_a:
trans_a_node = create_helper_trans_node(op_name, input_nodes[0], 'a')
input_node_a = op_name+"_a"
if trans_b:
trans_b_node = create_helper_trans_node(op_name, input_nodes[1], 'b')
input_node_b = op_name+"_b"
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[input_node_a, input_node_b],
outputs=[name],
name=name
)
if not trans_a and not trans_b:
return [matmul_node]
elif trans_a and not trans_b:
return [trans_a_node, matmul_node]
elif trans_b and not trans_a:
return [trans_b_node, matmul_node]
else:
return [trans_a_node, trans_b_node, matmul_node]
@mx_op.register("_linalg_gemm2")
def convert_linalg_gemm2(node, **kwargs):
"""Map MXNet's _linalg_gemm2 operator attributes to onnx's
MatMul and Transpose operators based on the values set for
transpose_a, transpose_b attributes.
Return multiple nodes created.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Getting the attributes and assigning default values.
alpha = float(attrs.get("alpha", 1.0))
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")
op_name = "transpose" + str(kwargs["idx"])
if alpha == 1.0 and trans_a == 0 and trans_b == 0:
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=input_nodes,
outputs=[name],
name=name
)
return [matmul_node]
elif trans_a == 1 and trans_b == 0:
op_name = "transpose" + str(kwargs["idx"])
node_name = op_name+"_a"
trans_a_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[0]],
outputs=[op_name+"_a"],
name=node_name
)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[node_name, input_nodes[1]],
outputs=[name],
name=name
)
return [trans_a_node, matmul_node]
elif trans_a == 0 and trans_b == 1:
node_name = op_name + "_b"
trans_b_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[1]],
outputs=[op_name+"_b"],
name=node_name
)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=[input_nodes[0], node_name],
outputs=[name],
name=name
)
return [trans_b_node, matmul_node]
else:
node_name_a = op_name+"_a"
trans_a_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[0]],
outputs=[op_name+"_a"],
name=node_name_a
)
node_name_b = op_name + "_b"
trans_b_node = onnx.helper.make_node(
'Transpose',
inputs=[input_nodes[1]],
outputs=[op_name+"_b"],
name=node_name_b
)
matmul_node = onnx.helper.make_node(
'MatMul',
inputs=input_nodes,
outputs=[name],
name=name
)
return [trans_a_node, trans_b_node, matmul_node]
@mx_op.register("Pooling")
def convert_pooling(node, **kwargs):
"""Map MXNet's Pooling operator attributes to onnx's
MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators
based on the input node's attributes and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
kernel = eval(attrs["kernel"])
pool_type = attrs["pool_type"]
stride = eval(attrs["stride"]) if attrs.get("stride") else None
global_pool = get_boolean_attribute_value(attrs, "global_pool")
p_value = attrs.get('p_value', 'None')
pooling_convention = attrs.get('pooling_convention', 'valid')
if pooling_convention == 'full':
pooling_warning = "Pooling: ONNX currently doesn't support pooling_convention. " \
"This might lead to shape or accuracy issues. " \
"https://github.com/onnx/onnx/issues/549"
logging.warning(pooling_warning)
pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
pad_dims = pad_dims + pad_dims
pool_types = {"max": "MaxPool", "avg": "AveragePool", "lp": "LpPool"}
global_pool_types = {"max": "GlobalMaxPool", "avg": "GlobalAveragePool",
"lp": "GlobalLpPool"}
if pool_type == 'lp' and p_value == 'None':
raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool')
if global_pool:
if pool_type == 'lp':
node = onnx.helper.make_node(
global_pool_types[pool_type],
input_nodes, # input
[name],
p=int(p_value),
name=name
)
else:
node = onnx.helper.make_node(
global_pool_types[pool_type],
input_nodes, # input
[name],
name=name
)
else:
if pool_type == 'lp':
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
[name],
p=int(p_value),
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name
)
else:
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
[name],
kernel_shape=kernel,
pads=pad_dims,
strides=stride,
name=name
)
return [node]
@mx_op.register("exp")
def convert_exp(node, **kwargs):
"""Map MXNet's exp operator attributes to onnx's Exp operator
and return the created node.
"""
return create_basic_op_node('Exp', node, kwargs)
@mx_op.register("_copy")
def convert_copy(node, **kwargs):
"""Map MXNet's _copy operator attributes to onnx's Identity operator
and return the created node.
"""
return create_basic_op_node('Identity', node, kwargs)
@mx_op.register("identity")
def convert_identity(node, **kwargs):
"""Map MXNet's identity operator attributes to onnx's ConstantFill operator
and return the created node.
"""
return create_basic_op_node('ConstantFill', node, kwargs)
@mx_op.register("InstanceNorm")
def convert_instancenorm(node, **kwargs):
"""Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator
based on the input node's attributes and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
eps = float(attrs.get("eps", 0.001))
node = onnx.helper.make_node(
'InstanceNormalization',
inputs=input_nodes,
outputs=[name],
name=name,
epsilon=eps)
return [node]
@mx_op.register("LeakyReLU")
def convert_leakyrelu(node, **kwargs):
"""Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators
based on the input node's attributes and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
act_type = attrs.get("act_type", "leaky")
alpha = float(attrs.get("slope", 0.25))
act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu",
"selu": "Selu"}
if act_type == "prelu" or act_type == "selu":
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name)
else:
node = onnx.helper.make_node(
act_name[act_type],
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha)
return [node]
@mx_op.register("softmax")
def convert_softmax(node, **kwargs):
"""Map MXNet's softmax operator attributes to onnx's Softmax operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis", -1))
softmax_node = onnx.helper.make_node(
"Softmax",
input_nodes,
[name],
axis=axis,
name=name
)
return [softmax_node]
# There's also mx.sym.softmax(), which doesn't do cross-entropy loss,
# just softmax for inference - hence the name convert_softmax_output.
@mx_op.register("SoftmaxOutput")
def convert_softmax_output(node, **kwargs):
"""Map MXNet's SoftmaxOutput operator attributes to onnx's Softmax operator
and return the created node.
"""
name = node["name"]
input1_idx = kwargs["index_lookup"][node["inputs"][0][0]]
input1 = kwargs["proc_nodes"][input1_idx]
softmax_node = onnx.helper.make_node(
"Softmax",
[input1.name],
[name],
axis=1,
name=name
)
return [softmax_node]
@mx_op.register("LogisticRegressionOutput")
def convert_logistic_regression_output(node, **kwargs):
"""Map MXNet's SoftmaxOutput operator attributes to onnx's Softmax operator
and return the created node.
"""
name = node["name"]
input1_idx = kwargs["index_lookup"][node["inputs"][0][0]]
input1 = kwargs["proc_nodes"][input1_idx]
sigmoid_node = onnx.helper.make_node(
"Sigmoid",
[input1.name],
[name],
name=name
)
return [sigmoid_node]
@mx_op.register("BlockGrad")
def convert_blockgrad(node, **kwargs):
""" Skip operator """
return create_basic_op_node('ConstantFill', node, kwargs)
@mx_op.register("MakeLoss")
def convert_makeloss(node, **kwargs):
""" Skip operator """
return create_basic_op_node('ConstantFill', node, kwargs)
@mx_op.register("Concat")
def convert_concat(node, **kwargs):
"""Map MXNet's Concat operator attributes to onnx's Concat operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("dim", 1))
concat_node = onnx.helper.make_node(
"Concat",
input_nodes,
[name],
axis=axis,
name=name
)
return [concat_node]
@mx_op.register("transpose")
def convert_transpose(node, **kwargs):
"""Map MXNet's transpose operator attributes to onnx's Transpose operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axes = attrs.get("axes", ())
if axes:
axes = tuple(map(int, re.findall(r'\d+', axes)))
transpose_node = onnx.helper.make_node(
"Transpose",
input_nodes,
[name],
perm=axes,
name=name
)
else:
transpose_node = onnx.helper.make_node(
"Transpose",
input_nodes,
[name],
name=name
)
return [transpose_node]
@mx_op.register("LRN")
def convert_lrn(node, **kwargs):
"""Map MXNet's LRN operator attributes to onnx's LRN operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
alpha = float(attrs.get("alpha", 0.0001))
beta = float(attrs.get("beta", 0.75))
bias = float(attrs.get("knorm", 1.0))
size = int(attrs.get("nsize"))
lrn_node = onnx.helper.make_node(
"LRN",
inputs=input_nodes,
outputs=[name],
name=name,
alpha=alpha,
beta=beta,
bias=bias,
size=size
)
return [lrn_node]
@mx_op.register("L2Normalization")
def convert_l2normalization(node, **kwargs):
"""Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mode = attrs.get("mode", "instance")
if mode != "channel":
raise AttributeError("L2Normalization: ONNX currently supports channel mode only")
l2norm_node = onnx.helper.make_node(
"LpNormalization",
input_nodes,
[name],
axis=1, # channel only
name=name
)
return [l2norm_node]
@mx_op.register("Dropout")
def convert_dropout(node, **kwargs):
"""Map MXNet's Dropout operator attributes to onnx's Dropout operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
probability = float(attrs.get("p", 0.5))
dropout_node = onnx.helper.make_node(
"Dropout",
input_nodes,
[name],
ratio=probability,
name=name
)
return [dropout_node]
@mx_op.register("Flatten")
def convert_flatten(node, **kwargs):
"""Map MXNet's Flatten operator attributes to onnx's Flatten operator
and return the created node.
"""
return create_basic_op_node('Flatten', node, kwargs)
@mx_op.register("clip")
def convert_clip(node, **kwargs):
"""Map MXNet's Clip operator attributes to onnx's Clip operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))
clip_node = onnx.helper.make_node(
"Clip",
input_nodes,
[name],
name=name,
min=a_min,
max=a_max
)
return [clip_node]
def scalar_op_helper(node, op_name, **kwargs):
"""Helper function for scalar arithmetic operations"""
name, input_nodes, attrs = get_inputs(node, kwargs)
from onnx import numpy_helper
input_type = kwargs["in_type"]
scalar_value = np.array([attrs.get("scalar", 1)],
dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])
initializer = kwargs["initializer"]
flag = True
# If the input value is in initializer, just multiply with scalar input
# and create a new initializer
for i in initializer:
if i.name == input_nodes[0]:
if op_name == 'Mul':
new_initializer = numpy_helper.to_array(i) * scalar_value[0]
elif op_name == 'Sub':
if name.startswith("_rminusscalar"):
new_initializer = scalar_value[0] - numpy_helper.to_array(i)
else:
new_initializer = numpy_helper.to_array(i) - scalar_value[0]
elif op_name == 'Add':
new_initializer = numpy_helper.to_array(i) + scalar_value[0]
elif op_name == 'Div':
if name.startswith("_rdivscalar"):
new_initializer = scalar_value[0] / numpy_helper.to_array(i)
else:
new_initializer = numpy_helper.to_array(i) / scalar_value[0]
elif op_name == 'Pow':
new_initializer = numpy_helper.to_array(i) ** scalar_value[0]
flag = False
break
# else create a new tensor of the scalar value, add it in initializer
if flag is True:
dims = np.shape(scalar_value)
scalar_op_name = "scalar_op" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=scalar_op_name,
data_type=input_type,
dims=dims,
vals=scalar_value,
raw=False,
)
)
mul_node = onnx.helper.make_node(
op_name,
[input_nodes[0], scalar_op_name],
[name],
name=name
)
return [tensor_node, mul_node]
else:
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]
dims = np.shape(new_initializer)
new_a_node = input_nodes[0] + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=new_a_node,
data_type=data_type,
dims=dims,
vals=new_initializer,
raw=False,
)
)
return [tensor_node]
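# Hedged illustration (not part of the original converter): when the input is not an
# initializer, scalar_op_helper emits a one-element scalar tensor plus a binary ONNX node.
# The standalone sketch below builds the equivalent pair by hand for a hypothetical input
# named "data" multiplied by 2.0; all names are illustrative assumptions.
def _example_scalar_mul_sketch():
    scalar_value = np.array([2.0], dtype=np.float32)
    tensor_node = onnx.helper.make_tensor_value_info(
        "scalar_op_example", onnx.TensorProto.FLOAT, scalar_value.shape)
    scalar_tensor = onnx.helper.make_tensor(
        name="scalar_op_example",
        data_type=onnx.TensorProto.FLOAT,
        dims=scalar_value.shape,
        vals=scalar_value.tolist(),
        raw=False)
    mul_node = onnx.helper.make_node(
        "Mul", ["data", "scalar_op_example"], ["data_mul"], name="data_mul")
    return tensor_node, scalar_tensor, mul_node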
# Convert scalar value into node and pass it as input to mul_node
@mx_op.register("_mul_scalar")
def convert_mul_scalar(node, **kwargs):
"""Map MXNet's _mul_scalar operator attributes to onnx's Mul operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Mul', **kwargs)
# Convert scalar value into node and pass it as input to the Sub node
@mx_op.register("_minus_scalar")
def convert_minus_scalar(node, **kwargs):
"""Map MXNet's _minus_scalar operator attributes to onnx's Minus operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Sub', **kwargs)
@mx_op.register("_rminus_scalar")
def convert_rminus_scalar(node, **kwargs):
"""Map MXNet's _rminus_scalar operator attributes to onnx's Sub operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Sub', **kwargs)
# Convert scalar value into node and pass it as input to the Add node
@mx_op.register("_plus_scalar")
def convert_add_scalar(node, **kwargs):
"""Map MXNet's _plus_scalar operator attributes to onnx's Add operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Add', **kwargs)
# Convert scalar value into node and pass it as input to the Div node
@mx_op.register("_div_scalar")
def convert_div_scalar(node, **kwargs):
"""Map MXNet's _div_scalar operator attributes to onnx's Div operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Div', **kwargs)
@mx_op.register("_rdiv_scalar")
def convert_rdiv_scalar(node, **kwargs):
"""Map MXNet's _rdiv_scalar operator attributes to onnx's Div operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Div', **kwargs)
@mx_op.register("_power_scalar")
def convert_pow_scalar(node, **kwargs):
"""Map MXNet's _pow_scalar operator attributes to onnx's Pow operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Pow', **kwargs)
# Sorting and Searching
@mx_op.register("argmax")
def convert_argmax(node, **kwargs):
"""Map MXNet's argmax operator attributes to onnx's ArgMax operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
keepdims = get_boolean_attribute_value(attrs, "keepdims")
node = onnx.helper.make_node(
'ArgMax',
inputs=input_nodes,
axis=axis,
keepdims=keepdims,
outputs=[name],
name=name
)
return [node]
@mx_op.register("argmin")
def convert_argmin(node, **kwargs):
"""Map MXNet's argmin operator attributes to onnx's ArgMin operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
keepdims = get_boolean_attribute_value(attrs, "keepdims")
node = onnx.helper.make_node(
'ArgMin',
inputs=input_nodes,
axis=axis,
keepdims=keepdims,
outputs=[name],
name=name
)
return [node]
@mx_op.register("_maximum")
def convert_maximum(node, **kwargs):
"""Map MXNet's _maximum operator attributes to onnx's Max operator
and return the created node.
"""
return create_basic_op_node('Max', node, kwargs)
@mx_op.register("_minimum")
def convert_minimum(node, **kwargs):
"""Map MXNet's _minimum operator attributes to onnx's Min operator
and return the created node.
"""
return create_basic_op_node('Min', node, kwargs)
@mx_op.register("min")
def convert_min(node, **kwargs):
"""Map MXNet's min operator attributes to onnx's ReduceMin operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMin',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMin',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node]
@mx_op.register("max")
def convert_max(node, **kwargs):
"""Map MXNet's max operator attributes to onnx's ReduceMax operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMax',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMax',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node]
@mx_op.register("mean")
def convert_mean(node, **kwargs):
"""Map MXNet's mean operator attributes to onnx's ReduceMean operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceMean',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceMean',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node]
@mx_op.register("prod")
def convert_prod(node, **kwargs):
"""Map MXNet's prod operator attributes to onnx's ReduceProd operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes is not None:
node = onnx.helper.make_node(
'ReduceProd',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'ReduceProd',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node]
# Arithmetic Operations
@mx_op.register("elemwise_add")
def convert_elementwise_add(node, **kwargs):
"""Map MXNet's elemwise_add operator attributes to onnx's Add operator
and return the created node.
"""
return create_basic_op_node('Add', node, kwargs)
@mx_op.register("broadcast_add")
def covert_broadcast_add(node, **kwargs):
"""Map MXNet's broadcast_add operator attributes to onnx's Add operator
and return the created node.
"""
return create_basic_op_node('Add', node, kwargs)
@mx_op.register("elemwise_sub")
def convert_elementwise_sub(node, **kwargs):
"""Map MXNet's elemwise_sub operator attributes to onnx's Sub operator
and return the created node.
"""
return create_basic_op_node('Sub', node, kwargs)
@mx_op.register("broadcast_sub")
def covert_broadcast_sub(node, **kwargs):
"""Map MXNet's broadcast_sub operator attributes to onnx's Sub operator
and return the created node.
"""
return create_basic_op_node('Sub', node, kwargs)
@mx_op.register("elemwise_mul")
def convert_elemwise_mul(node, **kwargs):
"""Map MXNet's elemwise_mul operator attributes to onnx's Mul operator
and return the created node.
"""
return create_basic_op_node('Mul', node, kwargs)
@mx_op.register("broadcast_mul")
def convert_broadcast_mul(node, **kwargs):
"""Map MXNet's broadcast_mul operator attributes to onnx's Mul operator
and return the created node.
"""
return create_basic_op_node('Mul', node, kwargs)
@mx_op.register("elemwise_div")
def convert_elemwise_div(node, **kwargs):
"""Map MXNet's elemwise_div operator attributes to onnx's Div operator
and return the created node.
"""
return create_basic_op_node('Div', node, kwargs)
@mx_op.register("broadcast_div")
def convert_broadcast_div(node, **kwargs):
"""Map MXNet's broadcast_div operator attributes to onnx's Div operator
and return the created node.
"""
return create_basic_op_node('Div', node, kwargs)
@mx_op.register("negative")
def convert_negative(node, **kwargs):
"""Map MXNet's negative operator attributes to onnx's Neg operator
and return the created node.
"""
return create_basic_op_node('Neg', node, kwargs)
@mx_op.register("abs")
def convert_abs(node, **kwargs):
"""Map MXNet's abs operator attributes to onnx's Abs operator
and return the created node.
"""
return create_basic_op_node('Abs', node, kwargs)
@mx_op.register("add_n")
def convert_addn(node, **kwargs):
"""Map MXNet's add_n operator attributes to onnx's Sum operator
and return the created node.
"""
return create_basic_op_node('Sum', node, kwargs)
# Rounding
@mx_op.register("ceil")
def convert_ceil(node, **kwargs):
"""Map MXNet's ceil operator attributes to onnx's Ceil operator
and return the created node.
"""
return create_basic_op_node('Ceil', node, kwargs)
@mx_op.register("floor")
def convert_floor(node, **kwargs):
"""Map MXNet's floor operator attributes to onnx's Floor operator
and return the created node.
"""
return create_basic_op_node('Floor', node, kwargs)
# Changing shape and type.
@mx_op.register("Reshape")
def convert_reshape(node, **kwargs):
"""Map MXNet's Reshape operator attributes to onnx's Reshape operator.
Converts output shape attribute to output shape tensor
and return multiple created nodes.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
output_shape_list = convert_string_to_list(attrs["shape"])
initializer = kwargs["initializer"]
output_shape_np = np.array(output_shape_list, dtype='int64')
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
dims = np.shape(output_shape_np)
output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)
initializer.append(
onnx.helper.make_tensor(
name=output_shape_name,
data_type=data_type,
dims=dims,
vals=output_shape_list,
raw=False,
)
)
input_nodes.append(output_shape_name)
not_supported_shape = [-2, -3, -4]
for val in output_shape_list:
if val in not_supported_shape:
raise AttributeError("Reshape: Shape value not supported in ONNX", val)
reshape_node = onnx.helper.make_node(
"Reshape",
input_nodes,
[name],
name=name
)
return [tensor_node, reshape_node]
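# Hedged illustration (not part of the original converter): ONNX expects the target shape
# of Reshape as a second int64 tensor input rather than an attribute, which is why the
# function above appends an initializer. Minimal standalone sketch, reshaping a hypothetical
# input "data" to (1, -1); all names are illustrative assumptions.
def _example_reshape_sketch():
    shape_np = np.array([1, -1], dtype='int64')
    shape_name = "reshape_shape_example"
    tensor_node = onnx.helper.make_tensor_value_info(
        shape_name, onnx.TensorProto.INT64, shape_np.shape)
    shape_tensor = onnx.helper.make_tensor(
        name=shape_name,
        data_type=onnx.TensorProto.INT64,
        dims=shape_np.shape,
        vals=shape_np.tolist(),
        raw=False)
    reshape_node = onnx.helper.make_node(
        "Reshape", ["data", shape_name], ["data_reshaped"], name="data_reshaped")
    return tensor_node, shape_tensor, reshape_node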
@mx_op.register("Cast")
def convert_cast(node, **kwargs):
"""Map MXNet's Cast operator attributes to onnx's Cast operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
dtype = attrs["dtype"]
# dtype can be mapped only with types from TensorProto
# float32 is mapped to float and float64 to double in onnx
# following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py
if dtype == 'float32':
dtype = 'float'
elif dtype == 'float64':
dtype = 'double'
node = onnx.helper.make_node(
"Cast",
input_nodes,
[name],
to=getattr(onnx.TensorProto, dtype.upper()),
name=name,
)
return [node]
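# Hedged illustration (not part of the original converter): Cast's 'to' attribute is an
# integer TensorProto enum, which is why the float32/float64 strings are renamed before the
# getattr lookup above. A tiny standalone check of that mapping:
def _example_cast_dtype_lookup(dtype='float32'):
    if dtype == 'float32':
        dtype = 'float'
    elif dtype == 'float64':
        dtype = 'double'
    return getattr(onnx.TensorProto, dtype.upper())  # e.g. TensorProto.FLOAT for 'float32'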
@mx_op.register("slice_axis")
def convert_slice_axis(node, **kwargs):
"""Map MXNet's slice_axis operator attributes to onnx's Slice operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axes = int(attrs.get("axis"))
starts = int(attrs.get("begin"))
    ends = attrs.get("end", None)
    if ends is None or ends == 'None':
        raise ValueError("Slice: ONNX doesn't support 'None' in 'end' attribute")
    ends = int(ends)
node = onnx.helper.make_node(
"Slice",
input_nodes,
[name],
axes=[axes],
starts=[starts],
ends=[ends],
name=name,
)
return [node]
@mx_op.register("SliceChannel")
def convert_slice_channel(node, **kwargs):
"""Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split
operator based on squeeze_axis attribute
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
num_outputs = int(attrs.get("num_outputs"))
axis = int(attrs.get("axis", 1))
squeeze_axis = int(attrs.get("squeeze_axis", 0))
if squeeze_axis == 1 and num_outputs == 1:
node = onnx.helper.make_node(
"Squeeze",
input_nodes,
[name],
axes=[axis],
name=name,
)
return [node]
elif squeeze_axis == 0 and num_outputs > 1:
node = onnx.helper.make_node(
"Split",
input_nodes,
[name],
axis=axis,
split=[num_outputs],
name=name,
)
return [node]
else:
raise NotImplementedError("SliceChannel operator with num_outputs>1 and"
"squeeze_axis true is not implemented.")
@mx_op.register("expand_dims")
def convert_expand_dims(node, **kwargs):
"""Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = int(attrs.get("axis"))
node = onnx.helper.make_node(
"Unsqueeze",
input_nodes,
[name],
axes=[axis],
name=name,
)
return [node]
@mx_op.register("squeeze")
def convert_squeeze(node, **kwargs):
"""Map MXNet's squeeze operator attributes to onnx's squeeze operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
axis = attrs.get("axis", None)
if not axis:
raise AttributeError("Squeeze: Missing axis attribute: ONNX currently requires axis to "
"be specified for squeeze operator")
axis = convert_string_to_list(axis)
node = onnx.helper.make_node(
"Squeeze",
input_nodes,
[name],
axes=axis,
name=name,
)
return [node]
@mx_op.register("log")
def convert_log(node, **kwargs):
"""Map MXNet's log operator attributes to onnx's Log operator
and return the created node.
"""
return create_basic_op_node('Log', node, kwargs)
@mx_op.register("reciprocal")
def convert_reciprocal(node, **kwargs):
"""Map MXNet's reciprocal operator attributes to onnx's Reciprocal operator
and return the created node.
"""
return create_basic_op_node('Reciprocal', node, kwargs)
@mx_op.register("_power")
def convert_power(node, **kwargs):
"""Map MXNet's _power operator attributes to onnx's Pow operator
and return the created node.
"""
return create_basic_op_node('Pow', node, kwargs)
@mx_op.register("broadcast_power")
def convert_broadcast_power(node, **kwargs):
"""Map MXNet's _power operator attributes to onnx's Pow operator
and return the created node.
"""
return create_basic_op_node('Pow', node, kwargs)
@mx_op.register("sqrt")
def convert_sqrt(node, **kwargs):
"""Map MXNet's sqrt operator attributes to onnx's Sqrt operator
and return the created node.
"""
return create_basic_op_node('Sqrt', node, kwargs)
@mx_op.register("depth_to_space")
def convert_depthtospace(node, **kwargs):
"""Map MXNet's depth_to_space operator attributes to onnx's
DepthToSpace operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
blksize = int(attrs.get("block_size", 0))
node = onnx.helper.make_node(
"DepthToSpace",
input_nodes,
[name],
blocksize=blksize,
name=name,
)
return [node]
@mx_op.register("space_to_depth")
def convert_spacetodepth(node, **kwargs):
"""Map MXNet's space_to_depth operator attributes to onnx's
SpaceToDepth operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
blksize = int(attrs.get("block_size", 0))
node = onnx.helper.make_node(
"SpaceToDepth",
input_nodes,
[name],
blocksize=blksize,
name=name,
)
return [node]
@mx_op.register("square")
def convert_square(node, **kwargs):
"""Map MXNet's square operator attributes to onnx's Pow operator
and return the created node.
"""
name, input_nodes, _ = get_inputs(node, kwargs)
initializer = kwargs["initializer"]
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
power2_name = "square_tensor" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))
initializer.append(
onnx.helper.make_tensor(
name=power2_name,
data_type=data_type,
dims=(1,),
vals=[2],
raw=False,
)
)
input_nodes.append(power2_name)
node = onnx.helper.make_node(
"Pow",
input_nodes,
[name],
name=name
)
return [tensor_node, node]
@mx_op.register("sum")
def convert_sum(node, **kwargs):
"""Map MXNet's sum operator attributes to onnx's ReduceSum operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
if axes:
node = onnx.helper.make_node(
'ReduceSum',
inputs=input_nodes,
outputs=[name],
axes=axes,
keepdims=keepdims,
name=name
)
else:
node = onnx.helper.make_node(
'ReduceSum',
inputs=input_nodes,
outputs=[name],
keepdims=keepdims,
name=name
)
return [node]
@mx_op.register("shape_array")
def convert_shape(node, **kwargs):
"""Map MXNet's shape_array operator attributes to onnx's Shape operator
and return the created node.
"""
return create_basic_op_node('Shape', node, kwargs)
@mx_op.register("hard_sigmoid")
def convert_hardsigmoid(node, **kwargs):
"""Map MXNet's hard_sigmoid operator attributes to onnx's HardSigmoid operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to float32
alpha = float(attrs.get("alpha", 0.2))
beta = float(attrs.get("beta", 0.5))
node = onnx.helper.make_node(
'HardSigmoid',
input_nodes,
[name],
alpha=alpha,
beta=beta,
name=name
)
return [node]
@mx_op.register("broadcast_lesser")
def convert_broadcast_lesser(node, **kwargs):
"""Map MXNet's broadcast_lesser operator attributes to onnx's Less operator
and return the created node.
"""
return create_basic_op_node('Less', node, kwargs)
@mx_op.register("broadcast_greater")
def convert_broadcast_greater(node, **kwargs):
"""Map MXNet's broadcast_greater operator attributes to onnx's Greater operator
and return the created node.
"""
return create_basic_op_node('Greater', node, kwargs)
@mx_op.register("broadcast_equal")
def convert_broadcast_equal(node, **kwargs):
"""Map MXNet's broadcast_equal operator attributes to onnx's Equal operator
and return the created node.
"""
return create_basic_op_node('Equal', node, kwargs)
@mx_op.register("broadcast_logical_and")
def convert_broadcast_logical_and(node, **kwargs):
"""Map MXNet's broadcast logical and operator attributes to onnx's Add operator
and return the created node.
"""
return create_basic_op_node('And', node, kwargs)
@mx_op.register("broadcast_logical_or")
def convert_broadcast_logical_or(node, **kwargs):
"""Map MXNet's broadcast logical or operator attributes to onnx's Or operator
and return the created node.
"""
return create_basic_op_node('Or', node, kwargs)
@mx_op.register("broadcast_logical_xor")
def convert_broadcast_logical_xor(node, **kwargs):
"""Map MXNet's broadcast logical xor operator attributes to onnx's Xor operator
and return the created node.
"""
return create_basic_op_node('Xor', node, kwargs)
@mx_op.register("logical_not")
def convert_logical_not(node, **kwargs):
"""Map MXNet's logical not operator attributes to onnx's Not operator
and return the created node.
"""
return create_basic_op_node('Not', node, kwargs)
@mx_op.register("size_array")
def convert_size(node, **kwargs):
"""Map MXNet's size_array operator attributes to onnx's Size operator
and return the created node.
"""
return create_basic_op_node('Size', node, kwargs)
@mx_op.register("log_softmax")
def convert_logsoftmax(node, **kwargs):
"""Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to int
axis = int(attrs.get("axis", -1))
temp = attrs.get("temperature", 'None')
if temp != 'None':
raise AttributeError("LogSoftMax: ONNX supports only temperature=None")
node = onnx.helper.make_node(
'LogSoftmax',
input_nodes,
[name],
axis=axis,
name=name
)
return [node]
@mx_op.register("norm")
def convert_norm(node, **kwargs):
"""Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
mx_axis = attrs.get("axis", None)
axes = convert_string_to_list(str(mx_axis)) if mx_axis else None
keepdims = get_boolean_attribute_value(attrs, "keepdims")
    norm_ord = int(attrs.get("ord", 2))
    onnx_op_name = "ReduceL1" if norm_ord == 1 else "ReduceL2"
if axes:
reduce_node = onnx.helper.make_node(
onnx_op_name,
input_nodes,
[name],
axes=axes,
keepdims=keepdims,
name=name
)
return [reduce_node]
else:
reduce_node = onnx.helper.make_node(
onnx_op_name,
input_nodes,
[name],
keepdims=keepdims,
name=name
)
return [reduce_node]
@mx_op.register("_sample_multinomial")
def convert_multinomial(node, **kwargs):
"""Map MXNet's multinomial operator attributes to onnx's
Multinomial operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))]
sample_size = convert_string_to_list(attrs.get("shape", '1'))
if len(sample_size) < 2:
sample_size = sample_size[-1]
else:
raise AttributeError("ONNX currently supports integer sample_size only")
node = onnx.helper.make_node(
"Multinomial",
input_nodes,
[name],
dtype=dtype,
sample_size=sample_size,
name=name,
)
return [node]
@mx_op.register("_random_uniform")
def convert_random_uniform(node, **kwargs):
"""Map MXNet's random_uniform operator attributes to onnx's RandomUniform
operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to float32
low = float(attrs.get("low", 0))
high = float(attrs.get("high", 1.0))
shape = convert_string_to_list(attrs.get('shape', '[]'))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
node = onnx.helper.make_node(
'RandomUniform',
input_nodes,
[name],
low=low,
high=high,
dtype=dtype,
shape=shape,
name=name
)
return [node]
@mx_op.register("_random_normal")
def convert_random_normal(node, **kwargs):
"""Map MXNet's random_normal operator attributes to onnx's RandomNormal
operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
# Converting to float32
mean = float(attrs.get("loc", 0))
scale = float(attrs.get("scale", 1.0))
shape = convert_string_to_list(attrs.get('shape', '[]'))
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
node = onnx.helper.make_node(
'RandomNormal',
input_nodes,
[name],
mean=mean,
scale=scale,
dtype=dtype,
shape=shape,
name=name
)
return [node]
@mx_op.register("ROIPooling")
def convert_roipooling(node, **kwargs):
"""Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool
operator and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
pooled_shape = convert_string_to_list(attrs.get('pooled_size'))
scale = float(attrs.get("spatial_scale"))
node = onnx.helper.make_node(
'MaxRoiPool',
input_nodes,
[name],
pooled_shape=pooled_shape,
spatial_scale=scale,
name=name
)
return [node]
| 30.157592
| 99
| 0.638235
|
685e145e0515a89bbf1c9763fb3060dae457969c
| 8,078
|
py
|
Python
|
Code/PreProcessing/Task-C/PreProcessing_User_Posts_Test_TaskC.py
|
AshwinAmbal/CLPsych_Challenge_2019
|
cc20d3458d2e52717ca93c47268d2bcfabb0b2f7
|
[
"Apache-2.0"
] | null | null | null |
Code/PreProcessing/Task-C/PreProcessing_User_Posts_Test_TaskC.py
|
AshwinAmbal/CLPsych_Challenge_2019
|
cc20d3458d2e52717ca93c47268d2bcfabb0b2f7
|
[
"Apache-2.0"
] | null | null | null |
Code/PreProcessing/Task-C/PreProcessing_User_Posts_Test_TaskC.py
|
AshwinAmbal/CLPsych_Challenge_2019
|
cc20d3458d2e52717ca93c47268d2bcfabb0b2f7
|
[
"Apache-2.0"
] | null | null | null |
import csv
from datetime import datetime
from nltk.corpus import stopwords
import nltk
import re
import unidecode
import json
#import random
from collections import defaultdict
from nltk.sentiment.vader import SentimentIntensityAnalyzer
csv.field_size_limit(100000000)
vader = SentimentIntensityAnalyzer()
with open('C:\\CLPsych Challenge\\Dataset\\PreProcessing\\word_contractions_expansion.json') as f:
cList = json.load(f)
c_re = re.compile('(%s)' % '|'.join(cList.keys()))
sw = stopwords.words("english")
extra_stop_words = ["cannot", "could", "would", "us", "may", "might", "need", "ought", "shall", "alls", "n't", "'s", "'ve", "'t", "'m", "'d", "'ll", "t"]
sw.extend(extra_stop_words)
#sw = []
def expandContractions(text, c_re=c_re):
def replace(match):
return cList[match.group(0)]
return c_re.sub(replace, text)
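# Hedged usage sketch (illustrative only): which contractions get expanded depends entirely
# on word_contractions_expansion.json loaded above; assuming it maps "can't" to "cannot",
# the call below would turn "I can't go" into "I cannot go".
_example_expanded = expandContractions("I can't go")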
def humanize_unixtime(unix_time):
time = datetime.fromtimestamp(int(unix_time)).strftime('%d-%m-%Y %H.%M')
return time
def word_cleaner(word):
word = unidecode.unidecode(word)
if(word.lower() in sw):
word = " "
word = word.replace("_PERSON_", " ")
word = word.replace("_IP_", " ")
word = word.replace("_EMAIL_", " ")
word = word.replace("_URL_", " ")
word = word.replace("tldr", " ")
word = word.replace("<", " ")
# word = word.replace(".", " ")
p = re.compile('([A-Za-z]+)[.]')
word = p.sub(r'\1 ', word)
p = re.compile('[.]([A-Za-z]+)')
word = p.sub(r' \1', word)
word = word.replace("!", " ")
word = word.replace(",", " ")
word = word.replace("/", " ")
word = word.replace("~", " ")
# word = word.replace("-", " ")
word = word.replace("--", " ")
word = word.replace("-", " ")
word = word.replace("(", " ")
word = word.replace(")", " ")
word = word.replace("#", " ")
word = word.replace("?", " ")
word = word.replace("..", " ")
word = word.replace("...", " ")
word = word.replace("’", " ")
word = word.replace(":", " ")
word = word.replace("[", " ")
word = word.replace("]", " ")
word = word.replace("*", " ")
word = word.replace("\"", " ")
word = word.replace("&", " ")
word = word.replace("{", " ")
word = word.replace("}", " ")
word = word.replace("@", " ")
word = word.replace("↑", " ")
word = word.replace("$", " ")
word = word.replace("^", " ")
word = word.replace("\n", " ")
word = word.replace("\t", " ")
word = word.replace("\r", " ")
word = word.replace("`", " ")
word = word.replace("'", " ")
word = word.replace(";", " ")
#if(word == "." or word == " ." or word == " . " or word == ". "):
if(len(word) == 1 or word == "." or word == " ." or word == " . " or word == ". "):
word = " "
return word
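# Hedged usage sketch (illustrative only): word_cleaner works one token at a time, collapsing
# stop words and placeholder tokens such as "_URL_" to a single space, while a content word
# like "therapy." comes back as "therapy ".
_example_cleaned = word_cleaner("therapy.")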
path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_test_data\\combined_data_Task_C_Test.csv"
all_data = dict()
file = open(path, 'r', encoding = 'utf8')
reader_data = csv.reader(file)
for i, row in enumerate(reader_data):
if(i == 0):
continue
all_data[(row[0], row[1])] = row
#train_user_label_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_training_data\\trainUserIds_TaskA_Final.csv"
#file =open(train_user_label_path, 'r', encoding = 'utf8')
#reader_train = csv.reader(file, delimiter=',')
#train_user_id_label = dict()
#for row in reader_train:
# train_user_id_label[row[0]] = row[1]
#test_user_label_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_training_data\\testUserIds_TaskA_Final.csv"
#file =open(test_user_label_path, 'r', encoding = 'utf8')
#reader_test = csv.reader(file, delimiter=',')
#test_user_id_label = dict()
#for row in reader_test:
# test_user_id_label[row[0]] = row[1]
taskA_path ="C:\\CLPsych Challenge\\Dataset\\clpsych19_test_data\\task_C_test.posts.csv"
#all_train_posts_of_users_combined = list()
#all_train_posts_of_users_combined.append(["User ID", "Post", "Label"])
all_test_posts_of_users_combined = list()
all_test_posts_of_users_combined.append(["User ID", "Post"])
file =open(taskA_path, 'r', encoding = 'utf8')
reader_user = csv.reader(file, delimiter=',')
taskA_user_posts = defaultdict(list)
for i, row in enumerate(reader_user):
if(i == 0):
continue
taskA_user_posts[row[1]].append(row[0])
for user in taskA_user_posts:
user_posts = list()
for row in taskA_user_posts[user]:
user_posts.append(all_data[(row, user)])
posts_sorted_by_date = sorted(user_posts, key=lambda x : x[3], reverse=True)
# for row in sorted_by_date:
# row[2] = humanize_unixtime(row[2])
# sorted_by_date
user_post_combined = ""
for i, post in enumerate(posts_sorted_by_date):
user_id = post[1]
subreddit_name = post[2]
subreddit_name = expandContractions(subreddit_name)
subreddit_name =' '.join(subreddit_name.split('\t'))
subreddit_name ='.'.join(subreddit_name.split('\n'))
subreddit_name =' '.join(subreddit_name.split('|'))
subreddit_name =' '.join(subreddit_name.split('\r'))
post[4] = expandContractions(post[4])
post[4] =' '.join(post[4].split('\t'))
post[4] ='.'.join(post[4].split('\n'))
post[4] =' '.join(post[4].split('|'))
post[4] =' '.join(post[4].split('\r'))
post[5] = expandContractions(post[5])
post[5] =' '.join(post[5].split('\t'))
post[5] ='.'.join(post[5].split('\n'))
post[5] =' '.join(post[5].split('|'))
post[5] =' '.join(post[5].split('\r'))
#user_post_title = nltk.sent_tokenize(post[4])
#user_post = nltk.sent_tokenize(post[5])
#final_post_title_sentiment = ""
#final_post_sentiment = ""
#for sent in user_post_title:
# mydict = vader.polarity_scores(sent)
# if(mydict['compound'] <= -0.05 or mydict['compound'] >= 0.05):
# final_post_title_sentiment += sent
#for sent in user_post:
# mydict = vader.polarity_scores(sent)
# if(mydict['compound'] <= -0.05 or mydict['compound'] >= 0.05):
# final_post_sentiment += sent
word_tokenized_subreddit = nltk.word_tokenize(subreddit_name)
word_tokenized_title = nltk.word_tokenize(post[4])
word_tokenized_post = nltk.word_tokenize(post[5])
#word_tokenized_title = nltk.word_tokenize(final_post_title_sentiment)
#word_tokenized_post = nltk.word_tokenize(final_post_sentiment)
for word in word_tokenized_subreddit:
user_post_combined += word_cleaner(word) + " "
for word in word_tokenized_title:
user_post_combined += word_cleaner(word) + " "
for word in word_tokenized_post:
user_post_combined += word_cleaner(word) + " "
user_post_combined = re.sub(' +', ' ',user_post_combined)
#user_post_combined = ' '.join(user_post_combined.split(' '))
user_post_combined = user_post_combined.strip()
user_post_combined = user_post_combined.lower()
#print(user_post_combined)
#print("\n\n\n")
#label = random.randint(0,1)
#if user in train_user_id_label:
# label = train_user_id_label[user]
# all_train_posts_of_users_combined.append([user_id, user_post_combined, label])
#else:
# label = test_user_id_label[user]
# all_test_posts_of_users_combined.append([user_id, user_post_combined, label])
all_test_posts_of_users_combined.append([user_id, user_post_combined])
#with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Non-PreProcessed-Data\\User_Posts_Processed_Test_Final.tsv",'w', encoding = 'utf8', newline='') as outcsv:
with open("C:\\CLPsych Challenge\\Dataset\\PreProcessing\\Task C-BERT_FINE_TUNING\\OverSampled-Train-Test-Data\\Full_Test_Data.tsv",'w', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter='\t', quotechar = '"')
for row in all_test_posts_of_users_combined:
writer.writerow(row)
| 38.28436
| 181
| 0.613518
|
10e33b8ea6110bc2372d883bd75b1aa3ebbf1ffd
| 39,640
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20180701/express_route_circuit_peering.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180701/express_route_circuit_peering.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180701/express_route_circuit_peering.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ExpressRouteCircuitPeeringInitArgs', 'ExpressRouteCircuitPeering']
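# Hedged usage sketch (not part of the generated SDK): a Pulumi program would typically
# create this resource roughly as below, against an assumed existing circuit "my-circuit"
# in resource group "my-rg"; every name and value here is an illustrative assumption.
def _example_create_peering_sketch():
    return ExpressRouteCircuitPeering(
        "examplePrivatePeering",
        circuit_name="my-circuit",
        resource_group_name="my-rg",
        peering_name="AzurePrivatePeering",
        peering_type="AzurePrivatePeering",
        peer_asn=65001,
        vlan_id=200,
        primary_peer_address_prefix="192.168.1.0/30",
        secondary_peer_address_prefix="192.168.2.0/30")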
@pulumi.input_type
class ExpressRouteCircuitPeeringInitArgs:
def __init__(__self__, *,
circuit_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
azure_asn: Optional[pulumi.Input[int]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitConnectionArgs']]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input['Ipv6ExpressRouteCircuitPeeringConfigArgs']] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[float]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringType']]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input['RouteFilterArgs']] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringState']]] = None,
stats: Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']] = None,
vlan_id: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a ExpressRouteCircuitPeering resource.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[int] azure_asn: The Azure ASN.
:param pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitConnectionArgs']]] connections: The list of circuit connections associated with Azure Private Peering for this circuit.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['Ipv6ExpressRouteCircuitPeeringConfigArgs'] ipv6_peering_config: The IPv6 peering configuration.
:param pulumi.Input[str] last_modified_by: Gets whether the provider or the customer last modified the peering.
:param pulumi.Input['ExpressRouteCircuitPeeringConfigArgs'] microsoft_peering_config: The Microsoft peering configuration.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[float] peer_asn: The peer ASN.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[Union[str, 'ExpressRoutePeeringType']] peering_type: The peering type.
:param pulumi.Input[str] primary_azure_port: The primary port.
:param pulumi.Input[str] primary_peer_address_prefix: The primary address prefix.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input['RouteFilterArgs'] route_filter: The reference of the RouteFilter resource.
:param pulumi.Input[str] secondary_azure_port: The secondary port.
:param pulumi.Input[str] secondary_peer_address_prefix: The secondary address prefix.
:param pulumi.Input[str] shared_key: The shared key.
:param pulumi.Input[Union[str, 'ExpressRoutePeeringState']] state: The peering state.
:param pulumi.Input['ExpressRouteCircuitStatsArgs'] stats: Gets peering stats.
:param pulumi.Input[int] vlan_id: The VLAN ID.
"""
pulumi.set(__self__, "circuit_name", circuit_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if azure_asn is not None:
pulumi.set(__self__, "azure_asn", azure_asn)
if connections is not None:
pulumi.set(__self__, "connections", connections)
if gateway_manager_etag is not None:
pulumi.set(__self__, "gateway_manager_etag", gateway_manager_etag)
if id is not None:
pulumi.set(__self__, "id", id)
if ipv6_peering_config is not None:
pulumi.set(__self__, "ipv6_peering_config", ipv6_peering_config)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if microsoft_peering_config is not None:
pulumi.set(__self__, "microsoft_peering_config", microsoft_peering_config)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_asn is not None:
pulumi.set(__self__, "peer_asn", peer_asn)
if peering_name is not None:
pulumi.set(__self__, "peering_name", peering_name)
if peering_type is not None:
pulumi.set(__self__, "peering_type", peering_type)
if primary_azure_port is not None:
pulumi.set(__self__, "primary_azure_port", primary_azure_port)
if primary_peer_address_prefix is not None:
pulumi.set(__self__, "primary_peer_address_prefix", primary_peer_address_prefix)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if route_filter is not None:
pulumi.set(__self__, "route_filter", route_filter)
if secondary_azure_port is not None:
pulumi.set(__self__, "secondary_azure_port", secondary_azure_port)
if secondary_peer_address_prefix is not None:
pulumi.set(__self__, "secondary_peer_address_prefix", secondary_peer_address_prefix)
if shared_key is not None:
pulumi.set(__self__, "shared_key", shared_key)
if state is not None:
pulumi.set(__self__, "state", state)
if stats is not None:
pulumi.set(__self__, "stats", stats)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter(name="circuitName")
def circuit_name(self) -> pulumi.Input[str]:
"""
The name of the express route circuit.
"""
return pulumi.get(self, "circuit_name")
@circuit_name.setter
def circuit_name(self, value: pulumi.Input[str]):
pulumi.set(self, "circuit_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> Optional[pulumi.Input[int]]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@azure_asn.setter
def azure_asn(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "azure_asn", value)
@property
@pulumi.getter
def connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitConnectionArgs']]]]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@connections.setter
def connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ExpressRouteCircuitConnectionArgs']]]]):
pulumi.set(self, "connections", value)
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> Optional[pulumi.Input[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@gateway_manager_etag.setter
def gateway_manager_etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gateway_manager_etag", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> Optional[pulumi.Input['Ipv6ExpressRouteCircuitPeeringConfigArgs']]:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@ipv6_peering_config.setter
def ipv6_peering_config(self, value: Optional[pulumi.Input['Ipv6ExpressRouteCircuitPeeringConfigArgs']]):
pulumi.set(self, "ipv6_peering_config", value)
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[pulumi.Input[str]]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@last_modified_by.setter
def last_modified_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_modified_by", value)
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']]:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@microsoft_peering_config.setter
def microsoft_peering_config(self, value: Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']]):
pulumi.set(self, "microsoft_peering_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> Optional[pulumi.Input[float]]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@peer_asn.setter
def peer_asn(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "peer_asn", value)
@property
@pulumi.getter(name="peeringName")
def peering_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the peering.
"""
return pulumi.get(self, "peering_name")
@peering_name.setter
def peering_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peering_name", value)
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringType']]]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@peering_type.setter
def peering_type(self, value: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringType']]]):
pulumi.set(self, "peering_type", value)
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> Optional[pulumi.Input[str]]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@primary_azure_port.setter
def primary_azure_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_azure_port", value)
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@primary_peer_address_prefix.setter
def primary_peer_address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "primary_peer_address_prefix", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> Optional[pulumi.Input['RouteFilterArgs']]:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@route_filter.setter
def route_filter(self, value: Optional[pulumi.Input['RouteFilterArgs']]):
pulumi.set(self, "route_filter", value)
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> Optional[pulumi.Input[str]]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@secondary_azure_port.setter
def secondary_azure_port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_azure_port", value)
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@secondary_peer_address_prefix.setter
def secondary_peer_address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secondary_peer_address_prefix", value)
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> Optional[pulumi.Input[str]]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@shared_key.setter
def shared_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shared_key", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringState']]]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringState']]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter
def stats(self) -> Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']]:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@stats.setter
def stats(self, value: Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']]):
pulumi.set(self, "stats", value)
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[pulumi.Input[int]]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
@vlan_id.setter
def vlan_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "vlan_id", value)
class ExpressRouteCircuitPeering(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[float]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringType']]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['RouteFilterArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringState']]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Peering in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] azure_asn: The Azure ASN.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]] connections: The list of circuit connections associated with Azure Private Peering for this circuit.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']] ipv6_peering_config: The IPv6 peering configuration.
:param pulumi.Input[str] last_modified_by: Gets whether the provider or the customer last modified the peering.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']] microsoft_peering_config: The Microsoft peering configuration.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[float] peer_asn: The peer ASN.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[Union[str, 'ExpressRoutePeeringType']] peering_type: The peering type.
:param pulumi.Input[str] primary_azure_port: The primary port.
:param pulumi.Input[str] primary_peer_address_prefix: The primary address prefix.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['RouteFilterArgs']] route_filter: The reference of the RouteFilter resource.
:param pulumi.Input[str] secondary_azure_port: The secondary port.
:param pulumi.Input[str] secondary_peer_address_prefix: The secondary address prefix.
:param pulumi.Input[str] shared_key: The shared key.
:param pulumi.Input[Union[str, 'ExpressRoutePeeringState']] state: The peering state.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']] stats: Gets peering stats.
:param pulumi.Input[int] vlan_id: The VLAN ID.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitPeeringInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Peering in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitPeeringInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitPeeringInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ExpressRouteCircuitConnectionArgs']]]]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[float]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringType']]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['RouteFilterArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[Union[str, 'ExpressRoutePeeringState']]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitPeeringInitArgs.__new__(ExpressRouteCircuitPeeringInitArgs)
__props__.__dict__["azure_asn"] = azure_asn
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__.__dict__["circuit_name"] = circuit_name
__props__.__dict__["connections"] = connections
__props__.__dict__["gateway_manager_etag"] = gateway_manager_etag
__props__.__dict__["id"] = id
__props__.__dict__["ipv6_peering_config"] = ipv6_peering_config
__props__.__dict__["last_modified_by"] = last_modified_by
__props__.__dict__["microsoft_peering_config"] = microsoft_peering_config
__props__.__dict__["name"] = name
__props__.__dict__["peer_asn"] = peer_asn
__props__.__dict__["peering_name"] = peering_name
__props__.__dict__["peering_type"] = peering_type
__props__.__dict__["primary_azure_port"] = primary_azure_port
__props__.__dict__["primary_peer_address_prefix"] = primary_peer_address_prefix
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["route_filter"] = route_filter
__props__.__dict__["secondary_azure_port"] = secondary_azure_port
__props__.__dict__["secondary_peer_address_prefix"] = secondary_peer_address_prefix
__props__.__dict__["shared_key"] = shared_key
__props__.__dict__["state"] = state
__props__.__dict__["stats"] = stats
__props__.__dict__["vlan_id"] = vlan_id
__props__.__dict__["etag"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20180101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitPeering"), 
pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20201101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20210201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20210201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-native:network/v20210301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20210301:ExpressRouteCircuitPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure-native:network/v20180701:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitPeering':
"""
Get an existing ExpressRouteCircuitPeering resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExpressRouteCircuitPeeringInitArgs.__new__(ExpressRouteCircuitPeeringInitArgs)
__props__.__dict__["azure_asn"] = None
__props__.__dict__["connections"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["gateway_manager_etag"] = None
__props__.__dict__["ipv6_peering_config"] = None
__props__.__dict__["last_modified_by"] = None
__props__.__dict__["microsoft_peering_config"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer_asn"] = None
__props__.__dict__["peering_type"] = None
__props__.__dict__["primary_azure_port"] = None
__props__.__dict__["primary_peer_address_prefix"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["route_filter"] = None
__props__.__dict__["secondary_azure_port"] = None
__props__.__dict__["secondary_peer_address_prefix"] = None
__props__.__dict__["shared_key"] = None
__props__.__dict__["state"] = None
__props__.__dict__["stats"] = None
__props__.__dict__["vlan_id"] = None
return ExpressRouteCircuitPeering(resource_name, opts=opts, __props__=__props__)
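    # A minimal usage sketch (not part of the generated SDK; the resource ID
    # below is a hypothetical placeholder):
    #
    #     peering = ExpressRouteCircuitPeering.get(
    #         "existing-peering",
    #         id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/expressRouteCircuits/<circuit>/peerings/AzurePrivatePeering")
    #     pulumi.export("peeringState", peering.state)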
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> pulumi.Output[Optional[int]]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def connections(self) -> pulumi.Output[Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]]:
"""
The list of circuit connections associated with Azure Private Peering for this circuit.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> pulumi.Output[Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']]:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> pulumi.Output[Optional[str]]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']]:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> pulumi.Output[Optional[float]]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> pulumi.Output[Optional[str]]:
"""
The peering type.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> pulumi.Output[Optional['outputs.RouteFilterResponse']]:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[Optional[str]]:
"""
The peering state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitStatsResponse']]:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[Optional[int]]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
| 53.495277
| 6,323
| 0.691019
|
fb8dc259448bb787ad94a41117bf91663063a7ae
| 5,019
|
py
|
Python
|
battle_system.py
|
BrainiaK1911/Lochwynne-University
|
804c245460568357be0d6b1127e893bac757c0cd
|
[
"Apache-2.0"
] | null | null | null |
battle_system.py
|
BrainiaK1911/Lochwynne-University
|
804c245460568357be0d6b1127e893bac757c0cd
|
[
"Apache-2.0"
] | 7
|
2021-11-29T08:35:31.000Z
|
2021-11-30T09:11:02.000Z
|
battle_system.py
|
BrainiaK1911/Lochwynne-University
|
804c245460568357be0d6b1127e893bac757c0cd
|
[
"Apache-2.0"
] | null | null | null |
import time
import numpy as np
import sys
def delay_print(s):
# print one character at a time
for c in s:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.05)
# Create the class
class Student:
def __init__(self, name, type, abilities, level, power_ratings, health='==================='):
# save variables as attributes
self.name = name
self.type = type
self.abilities = abilities
self.lvl = level
self.attack = power_ratings['INT'] + power_ratings['STR'] + power_ratings['SPD'] + power_ratings['EGP'] + power_ratings['FHT']
self.defense = power_ratings['INT'] + power_ratings['SPD'] + power_ratings['DUR'] +power_ratings['FHT']
self.health = health
self.bars = 20 # Amount of health bars
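# Worked example of the stat formulas above (illustrative arithmetic only):
# with power_ratings {'INT':3, 'STR':3, 'SPD':3, 'DUR':4, 'EGP':3, 'FHT':5}
# (Raja, below), attack = INT+STR+SPD+EGP+FHT = 3+3+3+3+5 = 17 and
# defense = INT+SPD+DUR+FHT = 3+3+4+5 = 15.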
def fight(Student1, Student2):
# Allow two Students to fight each other
# Print fight information
print("-----Lochwynne University-----")
print(f"\n{Student1.name}")
print("TYPE:", Student1.type)
print("ATTACK:", Student1.attack)
print("DEFENSE:", Student1.defense)
print("LVL:", Student1.lvl)
print("\nVS")
print(f"\n{Student2.name}")
print("TYPE:", Student2.type)
print("ATTACK:", Student2.attack)
print("DEFENSE:", Student2.defense)
print("LVL:", Student2.lvl)
time.sleep(2)
    # Consider type advantages
    version = ['Cerebral','Phytogaia','Zoomor','Mantrador','Null']
    # Default to neutral messages so both strings exist even when no
    # match-up below assigns them
    string_1_attack = "\nIt's not very effective..."
    string_2_attack = "\nIt's not very effective..."
    for i,k in enumerate(version):
        if Student1.type == k:
            # Both are same type
            if Student2.type == k:
                string_1_attack = "\nIt's not very effective..."
                string_2_attack = "\nIt's not very effective..."
            # Student2 is STRONG
            if Student2.type == version[(i+1)%3]:
                Student2.attack *= 2
                Student2.defense *= 2
                Student1.attack /= 2
                Student1.defense /= 2
                string_1_attack = "\nIt's not very effective..."
                string_2_attack = "\nIt's super effective!"
            # Student2 is WEAK
            if Student2.type == version[(i+2)%3]:
                Student1.attack *= 2
                Student1.defense *= 2
                Student2.attack /= 2
                Student2.defense /= 2
                string_1_attack = "\nIt's super effective!"
                string_2_attack = "\nIt's not very effective..."
# Now for the actual fighting...
    # Continue while both Students still have health
while (Student1.bars > 0) and (Student2.bars > 0):
# Print the health of each Student
print(f"\n{Student1.name}\t\tHLTH\t{Student1.health}")
print(f"{Student2.name}\t\tHLTH\t{Student2.health}\n")
print(f"Go {Student1.name}!")
for i, x in enumerate(Student1.abilities):
print(f"{i+1}.", x)
index = int(input('Pick a move: '))
delay_print(f"\n{Student1.name} used {Student1.abilities[index-1]}!")
time.sleep(1)
delay_print(string_1_attack)
# Determine damage
Student2.bars -= Student1.attack
Student2.health = ""
# Add back bars plus defense boost
for j in range(int(Student2.bars+.1*Student2.defense)):
Student2.health += "="
time.sleep(1)
print(f"\n{Student1.name}\t\tHLTH\t{Student1.health}")
print(f"{Student2.name}\t\tHLTH\t{Student2.health}\n")
time.sleep(.5)
# Check to see if Student fainted
if Student2.bars <= 0:
delay_print("\n..." + Student2.name + ' fainted.')
break
        # Student2's turn
print(f"Go {Student2.name}!")
for i, x in enumerate(Student2.abilities):
print(f"{i+1}.", x)
index = int(input('Pick a move: '))
delay_print(f"\n{Student2.name} used {Student2.abilities[index-1]}!")
time.sleep(1)
delay_print(string_2_attack)
# Determine damage
Student1.bars -= Student2.attack
Student1.health = ""
# Add back bars plus defense boost
for j in range(int(Student1.bars+.1*Student1.defense)):
Student1.health += "="
time.sleep(1)
print(f"{Student1.name}\t\tHLTH\t{Student1.health}")
print(f"{Student2.name}\t\tHLTH\t{Student2.health}\n")
time.sleep(.5)
# Check to see if Student fainted
if Student1.bars <= 0:
delay_print("\n..." + Student1.name + ' fainted.')
break
# Rank increases
new_level = Student2.lvl + 1
delay_print(f"\nYour new level is {new_level}.\n")
if __name__ == '__main__':
    # Create Students
Raja = Student('Raja', 'Phytogaia', ['Water Whip', 'Bubble', 'Wave', 'Water Drill'], 3, {'INT':3, 'STR':3, 'SPD':3, 'DUR':4, 'EGP':3, 'FHT':5})
Gia = Student('Gia', 'Phytogaia', ['Earth Gauntlet', 'Rock Armor', 'Earthquake', 'Sand Spout'], 3, {'INT':3, 'STR':5, 'SPD':3, 'DUR':5, 'EGP':4, 'FHT':3})
fight(Raja, Gia) # Get them to fight
| 33.019737
| 158
| 0.569038
|
e63d0ecdd53002f980b86f943db17b9475ccccf8
| 2,318
|
py
|
Python
|
testapp/tests.py
|
tomi77/tastypie-sorl-thumbnail
|
b8fb5d412401c1922891b93e10df2a8fe7f00912
|
[
"MIT"
] | 1
|
2016-07-12T16:01:11.000Z
|
2016-07-12T16:01:11.000Z
|
testapp/tests.py
|
tomi77/tastypie-sorl-thumbnail
|
b8fb5d412401c1922891b93e10df2a8fe7f00912
|
[
"MIT"
] | null | null | null |
testapp/tests.py
|
tomi77/tastypie-sorl-thumbnail
|
b8fb5d412401c1922891b93e10df2a8fe7f00912
|
[
"MIT"
] | null | null | null |
import json
import six
from django.conf import settings
from sorl.thumbnail.images import ImageFile
try:
from unittest import mock
except ImportError:
from mock import mock
try:
from tastypie.test import ResourceTestCaseMixin
from django.test import TestCase
class ThumbnailFieldTestCaseBase(ResourceTestCaseMixin, TestCase):
pass
except ImportError:
from tastypie.test import ResourceTestCase
class ThumbnailFieldTestCaseBase(ResourceTestCase):
pass
class ThumbnailFieldTestCase(ThumbnailFieldTestCaseBase):
fixtures = ['photo.yaml']
@mock.patch('tastypie_sorl_thumbnail.fields.get_thumbnail')
def test_with_image(self, mock_get_thumbnail):
mock_get_thumbnail.side_effect = lambda path, *args, **kwargs: ImageFile('cache/%s' % path[len(settings.MEDIA_ROOT):])
response = self.api_client.get('/v1/photo/1/')
self.assertValidJSONResponse(response)
content = response.content.decode('utf-8') if six.PY3 else response.content
content = json.loads(content)
self.assertEqual(content, {
'id': 1,
'image': 'http://example.com/media/image.png',
'resource_uri': '/v1/photo/1/',
'thumbnail': 'http://example.com/media/cache/image.png'
})
def test_without_image(self):
response = self.api_client.get('/v1/photo/2/')
self.assertValidJSONResponse(response)
content = response.content.decode('utf-8') if six.PY3 else response.content
content = json.loads(content)
self.assertEqual(content, {
'id': 2,
'image': None,
'resource_uri': '/v1/photo/2/',
'thumbnail': None
})
@mock.patch('tastypie_sorl_thumbnail.fields.get_thumbnail')
def test_exception(self, mock_get_thumbnail):
mock_get_thumbnail.side_effect = Exception
response = self.api_client.get('/v1/photo/1/')
self.assertValidJSONResponse(response)
content = response.content.decode('utf-8') if six.PY3 else response.content
content = json.loads(content)
self.assertEqual(content, {
'id': 1,
'image': 'http://example.com/media/image.png',
'resource_uri': '/v1/photo/1/',
'thumbnail': None
})
| 33.114286
| 126
| 0.654443
|
6311cbce7dc2e90191ddc21590ebd2c4001f26cf
| 3,806
|
py
|
Python
|
data_poor_fl/coordinate_finalizers_test.py
|
lamylio/federated
|
3f79e71344016ae5e5ec550557af25e5c169a934
|
[
"Apache-2.0"
] | 1
|
2022-03-16T02:13:39.000Z
|
2022-03-16T02:13:39.000Z
|
data_poor_fl/coordinate_finalizers_test.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
data_poor_fl/coordinate_finalizers_test.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from data_poor_fl import coordinate_finalizers
MODEL_WEIGHTS_TYPE = tff.type_at_server(
tff.to_type(tff.learning.ModelWeights(tf.float32, ())))
class CoordinateFinalizersTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('num_coordinates1', 1),
('num_coordinates2', 2),
('num_coordinates3', 3),
('num_coordinates5', 5),
)
def test_build_with_expected_state_length(self, num_coordinates):
server_optimizer = tff.learning.optimizers.build_sgdm()
base_finalizer = tff.learning.templates.build_apply_optimizer_finalizer(
server_optimizer, MODEL_WEIGHTS_TYPE.member)
finalizer = coordinate_finalizers.build_coordinate_finalizer(
base_finalizer, num_coordinates=num_coordinates)
state = finalizer.initialize()
self.assertLen(state, num_coordinates)
def test_single_coordinate_matches_base_finalizer(self):
server_optimizer = tff.learning.optimizers.build_sgdm()
base_finalizer = tff.learning.templates.build_apply_optimizer_finalizer(
server_optimizer, MODEL_WEIGHTS_TYPE.member)
coordinate_finalizer = coordinate_finalizers.build_coordinate_finalizer(
base_finalizer, num_coordinates=1)
base_state = base_finalizer.initialize()
coordinate_state = coordinate_finalizer.initialize()
self.assertAllClose(base_state, coordinate_state[0])
weights = tff.learning.ModelWeights(1.0, ())
update = 0.1
base_output = base_finalizer.next(base_state, weights, update)
coordinate_output = coordinate_finalizer.next(coordinate_state, [weights],
[update])
self.assertAllClose(base_output.state, coordinate_output.state[0])
self.assertAllClose(base_output.result.trainable,
coordinate_output.result[0].trainable)
self.assertAllClose(base_output.measurements,
coordinate_output.measurements[0])
def test_coordinate_finalizer_with_three_coordinates(self):
server_optimizer = tff.learning.optimizers.build_sgdm()
base_finalizer = tff.learning.templates.build_apply_optimizer_finalizer(
server_optimizer, MODEL_WEIGHTS_TYPE.member)
coordinate_finalizer = coordinate_finalizers.build_coordinate_finalizer(
base_finalizer, num_coordinates=3)
weights = [
tff.learning.ModelWeights(1.0, ()),
tff.learning.ModelWeights(2.0, ()),
tff.learning.ModelWeights(3.0, ())
]
updates = [4.0, 5.0, 6.0]
coordinate_state = coordinate_finalizer.initialize()
coordinate_output = coordinate_finalizer.next(coordinate_state, weights,
updates)
actual_result = coordinate_output.result
base_state = base_finalizer.initialize()
list_of_base_state = [base_state, base_state, base_state]
expected_result = [
        base_finalizer.next(*a).result
for a in zip(list_of_base_state, weights, updates)
]
for a, b in zip(actual_result, expected_result):
self.assertAllClose(a.trainable, b.trainable)
if __name__ == '__main__':
tf.test.main()
| 40.489362
| 78
| 0.731477
|
7832111640f3fdd34bc58325abb4d35e731b4367
| 5,909
|
py
|
Python
|
arcade/examples/turn_and_move.py
|
yegarti/arcade
|
1862e61aab9a7dc646265005b0e808d953a9dfe3
|
[
"MIT"
] | null | null | null |
arcade/examples/turn_and_move.py
|
yegarti/arcade
|
1862e61aab9a7dc646265005b0e808d953a9dfe3
|
[
"MIT"
] | null | null | null |
arcade/examples/turn_and_move.py
|
yegarti/arcade
|
1862e61aab9a7dc646265005b0e808d953a9dfe3
|
[
"MIT"
] | null | null | null |
"""
Turn and Move Example.
Right-click to cause the tank to move to that point.
"""
import math
import arcade
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Turn and Move Example"
# Image might not be lined up right, set this to offset
IMAGE_ROTATION = 90
class Player(arcade.Sprite):
"""
Sprite that turns and moves
"""
def __init__(self):
super().__init__(":resources:images/topdown_tanks/tank_green.png")
# Destination point is where we are going
self._destination_point = None
# Max speed
self.speed = 5
# Max speed we can rotate
self.rot_speed = 5
@property
def destination_point(self):
return self._destination_point
@destination_point.setter
def destination_point(self, destination_point):
self._destination_point = destination_point
def on_update(self, delta_time: float = 1 / 60):
""" Update the player """
# If we have no destination, don't go anywhere.
if not self._destination_point:
self.change_x = 0
self.change_y = 0
return
# Position the start at our current location
start_x = self.center_x
start_y = self.center_y
# Get the destination location
dest_x = self._destination_point[0]
dest_y = self._destination_point[1]
# Do math to calculate how to get the sprite to the destination.
        # Calculate the angle in radians between the start point
        # and end point. This is the angle the player will travel.
x_diff = dest_x - start_x
y_diff = dest_y - start_y
target_angle_radians = math.atan2(y_diff, x_diff)
if target_angle_radians < 0:
target_angle_radians += 2 * math.pi
# What angle are we at now in radians?
actual_angle_radians = math.radians(self.angle - IMAGE_ROTATION)
# How fast can we rotate?
rot_speed_radians = math.radians(self.rot_speed)
# What is the difference between what we want, and where we are?
angle_diff_radians = target_angle_radians - actual_angle_radians
# Figure out if we rotate clockwise or counter-clockwise
if abs(angle_diff_radians) <= rot_speed_radians:
# Close enough, let's set our angle to the target
actual_angle_radians = target_angle_radians
clockwise = None
elif angle_diff_radians > 0 and abs(angle_diff_radians) < math.pi:
clockwise = False
elif angle_diff_radians > 0 and abs(angle_diff_radians) >= math.pi:
clockwise = True
elif angle_diff_radians < 0 and abs(angle_diff_radians) < math.pi:
clockwise = True
else:
clockwise = False
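        # Worked example (illustrative): with a target of 3*pi/2 (270 degrees)
        # and a current angle of 0, the difference is +3*pi/2 >= pi, so the
        # branch above picks clockwise, the shorter way around through 2*pi.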
# Rotate the proper direction if needed
if actual_angle_radians != target_angle_radians and clockwise:
actual_angle_radians -= rot_speed_radians
elif actual_angle_radians != target_angle_radians:
actual_angle_radians += rot_speed_radians
# Keep in a range of 0 to 2pi
if actual_angle_radians > 2 * math.pi:
actual_angle_radians -= 2 * math.pi
elif actual_angle_radians < 0:
actual_angle_radians += 2 * math.pi
# Convert back to degrees
self.angle = math.degrees(actual_angle_radians) + IMAGE_ROTATION
# Are we close to the correct angle? If so, move forward.
if abs(angle_diff_radians) < math.pi / 4:
self.change_x = math.cos(actual_angle_radians) * self.speed
self.change_y = math.sin(actual_angle_radians) * self.speed
# Fine-tune our change_x/change_y if we are really close to destination
# point and just need to set to that location.
traveling = False
if abs(self.center_x - dest_x) < abs(self.change_x):
self.center_x = dest_x
else:
self.center_x += self.change_x
traveling = True
if abs(self.center_y - dest_y) < abs(self.change_y):
self.center_y = dest_y
else:
self.center_y += self.change_y
traveling = True
# If we have arrived, then cancel our destination point
if not traveling:
self._destination_point = None
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE, resizable=True)
arcade.set_background_color(arcade.color.SAND)
self.player_sprite = None
# Sprite Lists
self.player_list = None
def setup(self):
""" Set up the game variables. Call to re-start the game. """
# Sprite Lists
self.player_list = arcade.SpriteList()
self.player_sprite = Player()
self.player_sprite.center_x = 300
self.player_sprite.center_y = 300
self.player_list.append(self.player_sprite)
def on_draw(self):
"""
Render the screen.
"""
# This command should happen before we start drawing. It will clear
# the screen to the background color, and erase what we drew last frame.
arcade.start_render()
# Call draw() on all your sprite lists below
self.player_list.draw()
def on_update(self, delta_time):
"""
All the logic to move, and the game logic goes here.
"""
self.player_list.on_update(delta_time)
def on_mouse_press(self, x, y, button, key_modifiers):
"""
Called when the user presses a mouse button.
"""
if button == arcade.MOUSE_BUTTON_RIGHT:
self.player_sprite.destination_point = x, y
def main():
""" Main function """
game = MyGame()
game.center_window()
game.setup()
arcade.run()
if __name__ == "__main__":
main()
| 30.61658
| 83
| 0.630733
|
a79fc2d1796c1e098408e2b7403c6198bd6b952d
| 3,886
|
py
|
Python
|
halici/settings.py
|
matua34/villageMarket
|
bdd183300f748afcd72ce3f87e3891bd2ed823a4
|
[
"BSL-1.0"
] | null | null | null |
halici/settings.py
|
matua34/villageMarket
|
bdd183300f748afcd72ce3f87e3891bd2ed823a4
|
[
"BSL-1.0"
] | null | null | null |
halici/settings.py
|
matua34/villageMarket
|
bdd183300f748afcd72ce3f87e3891bd2ed823a4
|
[
"BSL-1.0"
] | null | null | null |
"""
Django settings for halici project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import mysql.connector
# mydb = mysql.connector.connect(
# host = '127.0.0.1',
# user = 'metea',
# password = 'Metinalp79')
# mycursor = mydb.cursor()
# mycursor.execute('CREATE DATABASE VillageDb')
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kup#o#jma57ye9$g-7l3@zppm4m7vshi0r$aqdybij%%)@)n%1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'user',
'pages.apps.PagesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'halici.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'halici.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
#'ENGINE' : 'django.db.backends.mysql',
'NAME': BASE_DIR / 'villagedata',
# 'USER': 'metea',
# 'PASSWORD': 'Metinalp79',
# 'HOST': '27.0.0.1',
# 'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'tr'
TIME_ZONE = 'Europe/Istanbul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
'/var/www/static/',
]
# messages framework
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
| 26.256757
| 92
| 0.656202
|
661ea43a7d3a8569bf9d5f2f239393c2bb74d03c
| 940
|
py
|
Python
|
zerver/lib/camo.py
|
fearless0307/zulip
|
378d14af7ea73a9a83c7245706cd918bec5a37bf
|
[
"Apache-2.0"
] | 4
|
2019-06-04T09:06:53.000Z
|
2019-06-04T09:07:47.000Z
|
zerver/lib/camo.py
|
fearless0307/zulip
|
378d14af7ea73a9a83c7245706cd918bec5a37bf
|
[
"Apache-2.0"
] | 10
|
2019-02-26T11:10:42.000Z
|
2019-02-26T14:30:24.000Z
|
zerver/lib/camo.py
|
fearless0307/zulip
|
378d14af7ea73a9a83c7245706cd918bec5a37bf
|
[
"Apache-2.0"
] | 1
|
2020-01-07T15:49:54.000Z
|
2020-01-07T15:49:54.000Z
|
from django.conf import settings
import codecs
import hashlib
import hmac
def generate_camo_url(url: str) -> str:
encoded_url = url.encode("utf-8")
encoded_camo_key = settings.CAMO_KEY.encode("utf-8")
digest = hmac.new(encoded_camo_key, encoded_url, hashlib.sha1).hexdigest()
hex_encoded_url = codecs.encode(encoded_url, "hex") # type: ignore # https://github.com/python/typeshed/issues/300
return "%s/%s" % (digest, hex_encoded_url.decode("utf-8"))
# Encodes the provided URL using the same algorithm used by the camo
# caching https image proxy
def get_camo_url(url: str) -> str:
# Only encode the url if Camo is enabled
if settings.CAMO_URI == '':
return url
return "%s%s" % (settings.CAMO_URI, generate_camo_url(url))
def is_camo_url_valid(digest: str, url: str) -> bool:
camo_url = generate_camo_url(url)
camo_url_digest = camo_url.split('/')[0]
return camo_url_digest == digest
| 37.6
| 119
| 0.71383
|
2ed0879789e84c6dde7d1099384024558511cb21
| 15,171
|
py
|
Python
|
games/jyungo.py
|
s-en/muzero-general
|
b33af78155a24ae92c77021dc3ff3843c8d4bb85
|
[
"MIT"
] | null | null | null |
games/jyungo.py
|
s-en/muzero-general
|
b33af78155a24ae92c77021dc3ff3843c8d4bb85
|
[
"MIT"
] | null | null | null |
games/jyungo.py
|
s-en/muzero-general
|
b33af78155a24ae92c77021dc3ff3843c8d4bb85
|
[
"MIT"
] | null | null | null |
import datetime
import math
import os
import numpy
import torch
from .abstract_game import AbstractGame
class MuZeroConfig:
def __init__(self):
# More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization
self.seed = 0 # Seed for numpy, torch and the game
self.max_num_gpus = None # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPUs available
### Game
self.observation_shape = (3, 7, 7) # Dimensions of the game observation, must be 3 (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = list(range(-1, 7 * 7)) # Fixed list of all possible actions. You should only edit the length
self.players = list(range(2)) # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
# Evaluate
self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)
self.opponent = "random" # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, "random" or "expert" if implemented in the Game class
### Self-Play
self.num_workers = 5 # Number of simultaneous threads/workers self-playing to feed the replay buffer
self.selfplay_on_gpu = True
self.max_moves = 49 # Maximum number of moves if game is not finished before
self.num_simulations = 50 # Number of future moves self-simulated
self.discount = 1 # Chronological discount of the reward
self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time
# Root prior exploration noise
self.root_dirichlet_alpha = 0.2
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "resnet" # "resnet" / "fullyconnected"
self.support_size = 1 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))
# Residual Network
self.downsample = False # Downsample observations before representation network, False / "CNN" (lighter) / "resnet" (See paper appendix Network Architecture)
self.blocks = 3 # Number of blocks in the ResNet
self.channels = 64 # Number of channels in the ResNet
self.reduced_channels_reward = 8 # Number of channels in reward head
self.reduced_channels_value = 8 # Number of channels in value head
self.reduced_channels_policy = 8 # Number of channels in policy head
self.resnet_fc_reward_layers = [16] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [16] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [16] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 16
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network
self.fc_reward_layers = [16] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
### Training
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.save_model = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = 300000 # Total number of training steps (ie weights update according to a batch)
self.batch_size = 64 # Number of parts of games to train on at each training step
self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-5 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.003 # Initial learning rate
self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 10000
### Replay Buffer
self.replay_buffer_size = 1000 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 5 # Number of game moves to keep for every batch element
self.td_steps = 49 # Number of steps in the future to take into account for calculating the target value
self.PER = False # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network
self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
# Reanalyze (See paper appendix Reanalyse)
self.use_last_model_value = False # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
self.reanalyse_on_gpu = False
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = 1 # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
if trained_steps < 0.5 * self.training_steps:
return 1.0
elif trained_steps < 0.75 * self.training_steps:
return 0.5
else:
return 0.25
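        # For example, with the default training_steps of 300000 set above:
        # steps below 150000 use temperature 1.0, steps below 225000 use 0.5,
        # and later steps use 0.25, making move selection progressively greedier.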
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.env = Jyungo()
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward and a boolean if the game has ended.
"""
observation, reward, done = self.env.step(action)
return observation, reward, done
def to_play(self):
"""
Return the current player.
Returns:
The current player, it should be an element of the players list in the config.
"""
return self.env.to_play()
def legal_actions(self):
"""
Should return the legal actions at each turn, if it is not available, it can return
the whole action space. At each turn, the game have to be able to handle one of returned actions.
For complex game where calculating legal moves is too long, the idea is to define the legal actions
equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return self.env.legal_actions()
def reset(self):
"""
Reset the game for a new game.
Returns:
Initial observation of the game.
"""
return self.env.reset()
def close(self):
"""
Properly close the game.
"""
pass
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
def human_to_action(self):
"""
For multiplayer games, ask the user for a legal action
and return the corresponding action number.
Returns:
An integer from the action space.
"""
valid = False
while not valid:
valid, action = self.env.human_input_to_action()
return action
def action_to_string(self, action):
"""
Convert an action number to a string representing the action.
Args:
action_number: an integer from the action space.
Returns:
String representing the action.
"""
return self.env.action_to_human_input(action)
class Jyungo:
def __init__(self):
self.board_size = 7
self.board = numpy.zeros((self.board_size, self.board_size), dtype="int32")
self.prev_board = numpy.zeros((self.board_size, self.board_size), dtype="int32")
self.player = 1
self.passed = False
self.board_markers = [
chr(x) for x in range(ord("A"), ord("A") + self.board_size)
]
def to_play(self):
return 0 if self.player == 1 else 1
def reset(self):
self.board = numpy.zeros((self.board_size, self.board_size), dtype="int32")
self.player = 1
return self.get_observation()
def step_board(self, board, action):
x = math.floor(action / self.board_size)
y = action % self.board_size
board[x][y] = self.player
        # Life-and-death check: remove any stones captured by this move
check_board = numpy.copy(board)
        # Check the opponent's color
def other(vx, vy, color):
if color == 0:
return 1
if color == self.player * -1:
check_board[vx][vy] *= 2
return self.crossLoop(check_board, vx, vy, other)
return 0
        # Check the player's own color
def same(vx, vy, color):
if color == 0:
return 1
if color == self.player:
check_board[vx][vy] *= 2
return self.crossLoop(check_board, vx, vy, same)
return 0
def checkOther(vx, vy, color):
if check_board[vx][vy] == self.player:
return 0
kuten = self.crossLoop(check_board, vx, vy, other)
if kuten == 0:
self.killStone(board, vx, vy)
self.killStone(check_board, vx, vy)
return 0
self.crossLoop(check_board, x, y, checkOther)
kuten = self.crossLoop(check_board, x, y, same)
if kuten == 0:
self.killStone(board, x, y)
def crossLoop(self, board, x, y, func):
directions = ((1, 0), (-1, 0), (0, 1), (0, -1))
ret = 0
for d in directions:
vx = x + d[0]
vy = y + d[1]
if (vx not in range(self.board_size)) or (vy not in range(self.board_size)):
continue
color = board[vx][vy]
ret += func(vx, vy, color)
return ret
def killStone(self, board, x, y):
tcolor = board[x][y]
board[x][y] = 0
def func(vx, vy, color):
if color != 0 and color == tcolor:
self.killStone(board, vx, vy)
return 0
self.crossLoop(board, x, y, func)
def step(self, action):
self.prev_board = numpy.copy(self.board)
if action >= 0:
self.step_board(self.board, action)
self.passed = False
else:
if self.passed == True:
                # Two consecutive passes end the game
return self.get_observation(), self.get_reward(), True
self.passed = True
self.player *= -1
return self.get_observation(), 0.0, False
def get_observation(self):
board_player1 = numpy.where(self.board == 1, 1.0, 0.0)
board_player2 = numpy.where(self.board == -1, 1.0, 0.0)
board_to_play = numpy.full((7, 7), self.player, dtype="int32")
return numpy.array([board_player1, board_player2, board_to_play])
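    # For example (illustrative): if player 1 holds only the top-left point,
    # plane 0 has a single 1.0 at [0][0], plane 1 is all zeros, and plane 2 is
    # filled with the side to move (+1 or -1).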
def legal_actions(self):
legal = []
for i in range(self.board_size):
for j in range(self.board_size):
action = i * self.board_size + j
buff_board = numpy.copy(self.board)
self.step_board(buff_board, action)
                # Ko check: a move may not recreate the previous board position
if self.board[i][j] == 0 and not numpy.array_equal(self.prev_board, buff_board):
legal.append(action)
return legal
def get_reward(self):
r = 0.0
for i in range(self.board_size):
for j in range(self.board_size):
color = self.board[i][j]
if color == self.player:
r += 1.0
if color == self.player * -1:
r -= 1.0
return r / self.board_size / self.board_size
def render(self):
marker = " "
for i in range(self.board_size):
marker = marker + self.board_markers[i] + " "
print(marker)
for row in range(self.board_size):
print(chr(ord("A") + row), end=" ")
for col in range(self.board_size):
ch = self.board[row][col]
if ch == 0:
print(".", end=" ")
elif ch == 1:
print("X", end=" ")
elif ch == -1:
print("O", end=" ")
print()
def human_input_to_action(self):
human_input = input("Enter an action: ")
if len(human_input) == 1 and human_input[0] == 'P':
# pass
return True, -1
if (
len(human_input) == 2
and human_input[0] in self.board_markers
and human_input[1] in self.board_markers
):
x = ord(human_input[0]) - 65
y = ord(human_input[1]) - 65
action = x * self.board_size + y
if action in self.legal_actions():
return True, action
return False, -1
def action_to_human_input(self, action):
if action < 0:
# pass
return -1
x = math.floor(action / self.board_size)
y = action % self.board_size
x = chr(x + 65)
y = chr(y + 65)
return x + y
| 39.818898
| 244
| 0.602399
|
53d36ec1c8cf4eae69791c1c2f4349639651a378
| 5,371
|
py
|
Python
|
sockets.py
|
csportat/CMPUT404-assignment-websockets
|
f841cca15c9841ed7258f726b2b6560e433a8800
|
[
"Apache-2.0"
] | null | null | null |
sockets.py
|
csportat/CMPUT404-assignment-websockets
|
f841cca15c9841ed7258f726b2b6560e433a8800
|
[
"Apache-2.0"
] | null | null | null |
sockets.py
|
csportat/CMPUT404-assignment-websockets
|
f841cca15c9841ed7258f726b2b6560e433a8800
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2013-2014 Abram Hindle
# Copyright 2021 Tianying Xia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import flask
from flask import Flask, request, redirect
from flask_sockets import Sockets
import gevent
from gevent import queue
import time
import json
import os
app = Flask(__name__)
sockets = Sockets(app)
app.debug = True
class World:
def __init__(self):
self.clear()
# we've got listeners now!
self.listeners = list()
def add_set_listener(self, listener):
self.listeners.append( listener )
def update(self, entity, key, value):
entry = self.space.get(entity,dict())
entry[key] = value
self.space[entity] = entry
self.update_listeners( entity )
def set(self, entity, data):
self.space[entity] = data
self.update_listeners( entity )
def update_listeners(self, entity):
'''update the set listeners'''
for listener in self.listeners:
listener(entity, self.get(entity))
def clear(self):
self.space = dict()
def get(self, entity):
return self.space.get(entity,dict())
def world(self):
return self.space
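# Minimal usage sketch (illustrative): setting an entity notifies every
# registered listener with (entity, data), which is how updates get fanned
# out to connected websocket clients below.
#
#     w = World()
#     w.add_set_listener(lambda entity, data: print(entity, data))
#     w.set('player1', {'x': 10, 'y': 20})  # listener receives the new dict
#     w.update('player1', 'x', 15)          # listener fires again with the merged dict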
myWorld = World()
# Example from lectures
class Client:
def __init__(self):
self.queue = queue.Queue()
def put(self, v):
self.queue.put_nowait(v)
def get(self):
return self.queue.get()
clients = list()
def send_all(msg):
for client in clients:
client.put(msg)
# def send_all_json(obj):
# send_all( json.dumps(obj) )
def set_listener( entity, data ):
''' do something with the update ! '''
# TO-DO
send_all( json.dumps( {entity: data} ) )
myWorld.add_set_listener( set_listener )
@app.route('/')
def hello():
'''Return something coherent here.. perhaps redirect to /static/index.html '''
# TO-DO
return redirect('http://' + request.host + '/static/index.html', code=301)
def read_ws(ws,client):
'''A greenlet function that reads from the websocket and updates the world'''
# XXX: TODO IMPLEMENT ME
# Lecture examples at https://github.com/uofa-cmput404/cmput404-slides/tree/master/examples/WebSocketsExamples
try:
while True:
msg = ws.receive()
print("WS RECV: %s" % msg)
if (msg is not None):
packet = json.loads(msg)
for entity, data in packet.items():
myWorld.set(entity, data)
# send_all_json(packet)
else:
break
except Exception as e:
'''Done'''
# return None
@sockets.route('/subscribe')
def subscribe_socket(ws):
    '''Fulfill the websocket URL of /subscribe: on every update notify the
    websocket, and read updates from the websocket '''
# XXX: TODO IMPLEMENT ME
# Lecture examples at https://github.com/uofa-cmput404/cmput404-slides/tree/master/examples/WebSocketsExamples
client = Client()
clients.append(client)
g = gevent.spawn(read_ws, ws, client)
try:
while True:
# block here
msg = client.get()
ws.send(msg)
except Exception as e: # WebSocketError as e:
print("WS Error %s" % e)
finally:
clients.remove(client)
gevent.kill(g)
# return None
# I give this to you, this is how you get the raw body/data portion of a post in flask
# this should come with flask but whatever, it's not my project.
def flask_post_json():
'''Ah the joys of frameworks! They do so much work for you
that they get in the way of sane operation!'''
if (request.json != None):
return request.json
elif (request.data != None and request.data.decode("utf8") != u''):
return json.loads(request.data.decode("utf8"))
else:
return json.loads(request.form.keys()[0])
@app.route("/entity/<entity>", methods=['POST','PUT'])
def update(entity):
'''update the entities via this interface'''
# TO-DO
myWorld.set( entity, flask_post_json() )
return json.dumps( myWorld.get(entity) )
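# For example (illustrative): POST /entity/player1 with the JSON body
# {"x": 10, "y": 20} stores that dict in the world and echoes it back;
# a later GET /entity/player1 returns the same JSON.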
@app.route("/world", methods=['POST','GET'])
def world():
'''you should probably return the world here'''
# TO-DO
return json.dumps( myWorld.world() )
@app.route("/entity/<entity>")
def get_entity(entity):
'''This is the GET version of the entity interface, return a representation of the entity'''
# TO-DO
return json.dumps( myWorld.get(entity) )
@app.route("/clear", methods=['POST','GET'])
def clear():
'''Clear the world out!'''
# TO-DO
myWorld.clear()
return json.dumps( myWorld.world() )
if __name__ == "__main__":
''' This doesn't work well anymore:
pip install gunicorn
and run
gunicorn -k flask_sockets.worker sockets:app
'''
app.run()
| 28.721925
| 114
| 0.636194
|
46b640c83cdf82ec643404853c5b3ac899feac4c
| 1,219
|
py
|
Python
|
hazelcast/protocol/codec/list_compare_and_remove_all_codec.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | 3
|
2020-05-01T15:01:54.000Z
|
2021-01-27T14:51:45.000Z
|
hazelcast/protocol/codec/list_compare_and_remove_all_codec.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | null | null | null |
hazelcast/protocol/codec/list_compare_and_remove_all_codec.py
|
buraksezer/hazelcast-python-client
|
4cc593ef7de994bd84fdac8331b81b309cce30a0
|
[
"Apache-2.0"
] | 1
|
2020-12-01T20:00:35.000Z
|
2020-12-01T20:00:35.000Z
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.codec.list_message_type import *
REQUEST_TYPE = LIST_COMPAREANDREMOVEALL
RESPONSE_TYPE = 101
RETRYABLE = False
def calculate_size(name, values):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += INT_SIZE_IN_BYTES
for values_item in values:
data_size += calculate_size_data(values_item)
return data_size
def encode_request(name, values):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, values))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_int(len(values))
for values_item in values:
client_message.append_data(values_item)
client_message.update_frame_length()
return client_message
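# Sketch of the payload produced above (illustrative): the list name as a
# string, an int with the number of values, then each value as serialized
# Data, e.g. encode_request('my-list', [data1, data2]) where data1/data2 are
# already-serialized Data blobs.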
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
| 32.078947
| 77
| 0.767842
|
63c517cfb154a32d46a5cc5fe0f22306d0d44283
| 2,238
|
py
|
Python
|
embeddingapi/embedding-build-android-tests/ant/crosswalk_ant_build_app.py
|
zhuyongyong/crosswalk-test-suite
|
24f3f8cfa663a365b0a22685d5bd096a637f72db
|
[
"BSD-3-Clause"
] | null | null | null |
embeddingapi/embedding-build-android-tests/ant/crosswalk_ant_build_app.py
|
zhuyongyong/crosswalk-test-suite
|
24f3f8cfa663a365b0a22685d5bd096a637f72db
|
[
"BSD-3-Clause"
] | null | null | null |
embeddingapi/embedding-build-android-tests/ant/crosswalk_ant_build_app.py
|
zhuyongyong/crosswalk-test-suite
|
24f3f8cfa663a365b0a22685d5bd096a637f72db
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Zhu, Yongyong <yongyongx.zhu@intel.com>
import unittest
import os
import commands
import glob
import sys
sys.path.append(os.getcwd())
sys.path.append(os.path.realpath('..'))
import comm
class TestAntBuild(unittest.TestCase):
def test_build(self):
comm.setUp()
app_name = "Demo"
pkg_name = "com.example.demo"
comm.create(app_name, pkg_name, self)
comm.build_ant(app_name, self)
comm.app_install(app_name, pkg_name, self)
comm.app_launch(app_name, pkg_name, self)
self.assertTrue(comm.check_app_launched(pkg_name, self))
comm.app_stop(pkg_name, self)
comm.app_uninstall(pkg_name, self)
if __name__ == '__main__':
unittest.main()
| 39.263158
| 80
| 0.747096
|
b10dd8a62e775e4527034aba24c33b538c3c074a
| 19,141
|
py
|
Python
|
services/core/MultiBuilding/multibuilding/agent.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 73
|
2017-07-11T21:46:41.000Z
|
2022-03-11T03:35:25.000Z
|
services/core/MultiBuilding/multibuilding/agent.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 19
|
2017-10-10T22:06:15.000Z
|
2022-03-28T21:03:33.000Z
|
services/core/MultiBuilding/multibuilding/agent.py
|
Entek-Technical-Services/BEMOSS3.5
|
581a205b4129530474a5ceee93cb36ef62992d4c
|
[
"BSD-3-Clause"
] | 36
|
2017-06-24T00:17:03.000Z
|
2022-03-31T13:58:36.000Z
|
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization
# that has cooperated in the development of these materials, makes
# any warranty, express or implied, or assumes any legal liability
# or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed,
# or represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does
# not necessarily constitute or imply its endorsement, recommendation,
# or favoring by the United States Government or any agency thereof,
# or Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
'''VOLTTRON platform™ service for multi-building messaging.'''
import errno
from errno import EAGAIN, EINTR
import logging
import sys
import uuid
import zmq
from zmq import NOBLOCK, ZMQError
import zmq.utils
from zmq.utils import z85
from volttron.platform.agent import BaseAgent, PublishMixin, periodic
from volttron.platform.agent import utils, matching
from volttron.platform import messaging
_log = logging.getLogger(__name__)
__version__ = '0.1'
def MultiBuildingAgent(config_path=None, **kwargs):
'''Return agent object providing multi-building messaging.
The configuration file, if given by config_path, may contain the
declarations below. An initial configuration may be passed in as a
dictionary via the config keyword argument.
building-publish-address:
A ØMQ address used to publish to the building's message bus.
Defaults to 'tcp://0.0.0.0:9161'.
building-subscribe-address:
A ØMQ address used to subscribe to the building's message bus.
Defaults to 'tcp://0.0.0.0:9160'.
public-key, secret-key:
Curve keypair (create with zmq.curve_keypair()) to use for
authentication and encryption. If not provided, all
communications will be unencrypted.
cleanup-period:
Frequency, in seconds, to check for and close stale
connections. Defaults to 600 seconds (10 minutes).
hosts:
A mapping (dictionary) of building names to publish/subscribe
addresses. Each entry is of the form:
<BUILDING>: {'pub': <PUB_ADDRESS>, 'sub': <SUB_ADDRESS>,
'public-key': <PUBKEY>, 'allow': <PUB_OR_SUB>}
where <BUILDING>, <PUB_ADDRESS>, <SUB_ADDRESS>, <PUBKEY>, and
<PUB_OR_SUB> are all strings specifying the building name as
'CAMPUS/BUILDING', the publish and subscribe addresses as
ØMQ addresses, the curve public key, and either 'pub' or 'sub'
to allow publish only or both publish and subscribe.
uuid:
A UUID to use in the Cookie header. If not given, one will
be automatically generated.
'''
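# A minimal illustrative configuration sketch (the building name, host
# addresses, and keys below are hypothetical placeholders; the address and
# cleanup-period values match the documented defaults above):
#
#     {
#         "building-publish-address": "tcp://0.0.0.0:9161",
#         "building-subscribe-address": "tcp://0.0.0.0:9160",
#         "public-key": "<z85-encoded public key>",
#         "secret-key": "<z85-encoded secret key>",
#         "cleanup-period": 600,
#         "hosts": {
#             "campus1/building1": {
#                 "pub": "tcp://building1.example.org:9161",
#                 "sub": "tcp://building1.example.org:9160",
#                 "public-key": "<peer z85-encoded public key>",
#                 "allow": "pub"
#             }
#         }
#     }
#
# A curve keypair may be generated with pyzmq, for example:
#     public_key, secret_key = zmq.curve_keypair()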
config = kwargs.pop('config', {})
if config_path:
config.update(utils.load_config(config_path))
cleanup_period = config.get('cleanup-period', 600)
assert cleanup_period >= 1
class Proxy(PublishMixin, BaseAgent):
'''Proxy messages between internal bus and other buildings.
This class could be combined with the Agent class below rather
than using it as a base class. Keeping the implementations
separate, however, provides for a cleaner implementation and
allows one to more easily track the agent logic.
'''
def __init__(self, **kwargs):
'''Create and register the external sockets.'''
super(Proxy, self).__init__(**kwargs)
# Use separate context for these sockets to avoid
# authentication conflicts with other sockets.
ctx = zmq.Context()
self.zap_sock = ctx.socket(zmq.REP)
self.zap_sock.bind('inproc://zeromq.zap.01')
self.reactor.register(self.zap_sock, self.handle_authentication)
self.outgoing = messaging.Socket(zmq.XPUB, context=ctx)
self._config_socket(self.outgoing)
self.outgoing.zap_domain = 'building.outgoing'
self.incoming = messaging.Socket(zmq.PULL, context=ctx)
self._config_socket(self.incoming)
self.incoming.zap_domain = 'building.incoming'
key = config.get('secret-key')
if key:
self.outgoing.curve_secretkey = key
self.outgoing.curve_server = 1
self.incoming.curve_secretkey = key
self.incoming.curve_server = 1
self.hosts = config.get('hosts', {})
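# Build sets of peer public keys from the host map: allow_sub holds keys
# permitted to subscribe (allow is 'pub' or 'sub'), while allow_pub holds
# the subset also permitted to publish (allow == 'pub').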
self.allow_sub = set(key for host in self.hosts.itervalues()
for key, allow in [(host.get('public-key'), host.get('allow', 'sub'))]
if key and allow in ['pub', 'sub'])
self.allow_pub = set(key for host in self.hosts.itervalues()
for key, allow in [(host.get('public-key'), host.get('allow', 'sub'))]
if key and allow == 'pub')
def _config_socket(self, sock):
sock.reconnect_ivl = 1000
sock.reconnect_ivl_max = 180000
sock.sndtimeo = 10000
sock.rcvtimeo = 10000
sock.linger = 10000
def setup(self):
'''Bind the external ports.'''
super(Proxy, self).setup()
self.reactor.register(self.outgoing, self.handle_subscribe)
self.outgoing.bind(config.get(
'building-subscribe-address', 'tcp://0.0.0.0:9160'))
pub_addr = config.get('building-publish-address',
'tcp://0.0.0.0:9161')
if pub_addr:
self.reactor.register(self.incoming, self.handle_incoming)
self.incoming.bind(pub_addr)
def handle_incoming(self, sock):
'''Receive incoming messages and publish to internal bus.'''
try:
topic, headers, message = self.incoming.recv_message(NOBLOCK)
except ZMQError as e:
if e.errno == EINTR:
return
raise
self.publish(topic, headers, *message)
def handle_subscribe(self, sock):
'''Manage external subscription messages.'''
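# XPUB sockets deliver subscription changes as a single frame: the first
# byte is 1 for subscribe or 0 for unsubscribe, and the remainder is the
# topic prefix being (un)subscribed.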
try:
message = self.outgoing.recv(NOBLOCK)
except ZMQError as e:
if e.errno == EINTR:
return
raise
if message:
add = bool(ord(message[0]))
topic = message[1:]
if add:
self.subscribe(topic, self.on_outgoing)
else:
self.unsubscribe(topic)
def handle_authentication(self, sock):
'''Restrict connections to approved clients.'''
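# Requests arrive via the ZeroMQ Authentication Protocol (ZAP) socket
# bound to inproc://zeromq.zap.01 in __init__: frames are version,
# sequence, domain, address, identity, mechanism, then any credentials.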
allow = False
auth = sock.recv_multipart()
version, sequence, domain, address, identity, mechanism = auth[:6]
assert version == '1.0'
if mechanism == 'CURVE':
creds = z85.encode(auth[6])
if domain == 'building.outgoing':
allow = creds in self.allow_sub
elif domain == 'building.incoming':
allow = creds in self.allow_pub
elif mechanism == 'NULL':
allow, creds = True, ''
else:
creds = ''
_log.info('{} {} at {} via {} {}'.format(
'allow' if allow else 'deny', address, domain,
mechanism, creds))
if allow:
reply = [version, sequence, "200", "OK", "", ""]
else:
reply = [version, sequence, "400", "Forbidden", "", ""]
sock.send_multipart(reply)
def on_outgoing(self, topic, headers, message, match):
'''Forward messages to external subscribers.'''
while True:
try:
self.outgoing.send_message(
topic, headers, *message, flags=NOBLOCK)
except ZMQError as e:
if e.errno == EINTR:
continue
if e.errno != EAGAIN:
raise
break
class Agent(Proxy):
'''Provide inter-building publish/subscribe service.
Provides three topics for inter-building messaging:
building/recv/<CAMPUS>/<BUILDING>/<TOPIC>:
Agents can subscribe to this topic to receive messages
sent to <TOPIC> at the building specified by
<CAMPUS>/<BUILDING>.
building/send/<CAMPUS>/<BUILDING>/<TOPIC>:
Agents can send messages to this topic to have them
forwarded to <TOPIC> at the building specified by
<CAMPUS>/<BUILDING>.
building/error/<CAMPUS>/<BUILDING>/<TOPIC>:
Errors encountered during sending/receiving to/from the
above two topics will be sent over this topic.
'''
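# Illustrative use from another agent on the local bus (the building name
# and topic below are hypothetical). Publishing mirrors the PublishMixin
# call signature used elsewhere in this module:
#
#     self.publish('building/send/campus1/building1/some/topic',
#                  {'Cookie': 'example'}, 'payload')
#
# Subscribing locally to 'building/recv/campus1/building1/some/topic'
# (e.g. via a matching decorator) prompts this agent to connect to that
# building and republish matching messages on the local bus.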
def __init__(self, **kwargs):
super(Agent, self).__init__(**kwargs)
self.uuid = config.get('uuid') or str(uuid.uuid4())
self.subs = {}
self.rsubs = {}
self.pubs = {}
self.sequence = 0
def setup(self):
'''Request list of current subscriptions.'''
super(Agent, self).setup()
# Check and connect existing subscriptions
self.publish('subscriptions/list/building/recv/',
{'Cookie': 'init ' + self.uuid})
@matching.match_regex('subscriptions/add/building/recv/([^/]+/[^/]+)/(.*)')
def on_subscribe(self, full_topic, headers, message, match):
'''Handle new external building subscription requests.'''
building, topic = match.groups()
self.add_subscription(building, topic, cookie=headers.get('Cookie'))
def add_subscription(self, building, topic, cookie=None):
'''Add external building subscription.'''
sock = self.subs.get(building)
if not sock:
host = self.hosts.get(building)
address = host.get('sub') if host else None
# Handle missing address
if not address:
headers = {'Cookie': cookie} if cookie else {}
self.publish_error(building, topic, headers, errno.ENOENT,
'building subscription address not found')
return
sock = messaging.Socket(zmq.SUB)
key = host.get('public-key')
if key:
sock.curve_serverkey = key
sock.curve_secretkey = config.get('secret-key')
sock.curve_publickey = config.get('public-key')
self._config_socket(sock)
sock.connect(address)
self.subs[building] = sock
self.rsubs[sock] = building
self.reactor.register(sock, self.handle_republish)
sock.subscribe = topic.encode('utf-8')
def handle_republish(self, sock):
'''Publish incoming messages on internal bus.'''
building = self.rsubs[sock]
try:
orig_topic, headers, message = sock.recv_message(flags=NOBLOCK)
except ZMQError as e:
if e.errno == EINTR:
return
self.reactor.unregister(sock)
self.subs.pop(building, None)
self.rsubs.pop(sock, None)
sock.close()
return
topic = 'building/recv/{}/{}'.format(building, orig_topic)
self.publish(topic, headers, *message)
def publish_error(self, building, topic, headers, errnum, message):
'''Publish errors to error topic.'''
topic = 'building/error/{}/{}'.format(building, topic)
self.publish(topic, headers, str(errnum), message)
@matching.match_regex('subscriptions/remove/building/recv/([^/]+/[^/]+)/(.*)')
def on_unsubscribe(self, full_topic, headers, message, match):
'''Handle external building unsubscribe requests.'''
building, topic = match.groups()
sock = self.subs.get(building)
if sock:
sock.unsubscribe = topic.encode('utf-8')
@matching.match_regex('building/send/([^/]+/[^/]+)/(.*)')
def on_send(self, full_topic, headers, message, match):
'''Handle external building publish requests.'''
building, topic = match.groups()
sock, seq = self.pubs.get(building, (None, None))
if not sock:
host = self.hosts.get(building)
address = host.get('pub') if host else None
# Handle missing address
if not address:
cookie = headers.get('Cookie')
headers = {'Cookie': cookie} if cookie else {}
self.publish_error(building, topic, headers, errno.ENOENT,
'building publish address not found')
return
sock = messaging.Socket(zmq.PUSH)
key = host.get('public-key')
if key:
sock.curve_serverkey = key
sock.curve_secretkey = config.get('secret-key')
sock.curve_publickey = config.get('public-key')
self._config_socket(sock)
sock.connect(address)
if seq != self.sequence:
self.pubs[building] = sock, self.sequence
while True:
try:
sock.send_message(topic, headers, *message, flags=NOBLOCK)
except ZMQError as e:
if e.errno == EINTR:
continue
self.pubs.pop(building, None)
sock.close()
self.publish_error(building, topic, headers,
errno.ECONNABORTED,
'message not sent; socket closed')
break
@periodic(cleanup_period)
def on_cleanup(self):
'''Periodically request subscription list for cleaning.'''
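# Publish sockets in self.pubs that have not been used since the previous
# cleanup pass (their stored sequence number is stale) are dropped, and
# the sequence counter then advances for the next pass.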
self.publish('subscriptions/list/building/recv/',
{'Cookie': 'clean ' + self.uuid})
for building, (sock, seq) in self.pubs.items():
if seq != self.sequence:
del self.pubs[building]
self.sequence += 1
@matching.match_exact('subscriptions/list/building/recv/')
def on_subscription_list(self, topic, headers, message, match):
'''Handle closing unused sockets.'''
if headers.get('Cookie') != 'clean ' + self.uuid:
return
topics = set()
for prefix in message:
try:
campus, building = prefix[33:].split('/', 2)[:2]
except ValueError:
continue
topics.add('/'.join([campus, building]))
for building, sock in self.subs.items():
if building not in topics:
self.subs.pop(building, None)
self.rsubs.pop(sock, None)
try:
self.reactor.unregister(sock)
except KeyError:
pass
sock.close()
@matching.match_exact('subscriptions/list/building/recv/')
def on_subscription_init(self, topic, headers, message, match):
'''Handle existing subscriptions to external buildings on start.'''
if headers.get('Cookie') != 'init ' + self.uuid:
return
for prefix in message:
try:
# len('building/recv/') == 14
campus, building, topic = prefix[14:].split('/', 2)
except ValueError:
continue
building = '/'.join([campus, building])
self.add_subscription(building, topic)
# Rename agent to match factory function.
Agent.__name__ = 'MultiBuildingAgent'
return Agent(**kwargs)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
utils.setup_logging()
try:
utils.default_main(MultiBuildingAgent,
description='VOLTTRON platform™ multi-building message routing agent',
argv=argv)
except Exception:
_log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 42.347345
| 90
| 0.580011
|
0ad3c89acc004dbfc2d7606fcc41c5b76321c2d8
| 1,690
|
py
|
Python
|
settings.py
|
princeofdatamining/blueking-sample
|
7d8b345d5052aa70b7f8566d3648c78655e3fced
|
[
"Apache-2.0"
] | null | null | null |
settings.py
|
princeofdatamining/blueking-sample
|
7d8b345d5052aa70b7f8566d3648c78655e3fced
|
[
"Apache-2.0"
] | 4
|
2020-02-12T03:14:14.000Z
|
2021-06-10T22:05:36.000Z
|
settings.py
|
hanseryukiri/jenkins
|
808c70164bb26d90f28bab542d2d5dc9e2d5e1f3
|
[
"Apache-2.0"
] | 1
|
2020-03-24T06:22:40.000Z
|
2020-03-24T06:22:40.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import os
from conf.default import * # noqa
"""
You can load different configurations depending on your current environment.
This can be the following values:
development
testing
production
"""
ENVIRONMENT = os.environ.get("BK_ENV", "development")
# Inherit from environment specifics
conf_module = "conf.settings_%s" % ENVIRONMENT
try:
module = __import__(conf_module, globals(), locals(), ['*'])
except ImportError as e:
raise ImportError("Could not import conf '%s' (Is it on sys.path?): %s" % (conf_module, e))
for setting in dir(module):
if setting == setting.upper():
locals()[setting] = getattr(module, setting)
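# Illustrative example: if conf/settings_development.py (hypothetical
# contents) defines uppercase names such as
#     DEBUG = True
#     LOG_LEVEL = 'DEBUG'
# the loop above copies them into this module's namespace.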
# check saas app settings
try:
saas_conf_module = "conf.settings_saas"
saas_module = __import__(saas_conf_module, globals(), locals(), ['*'])
for saas_setting in dir(saas_module):
if saas_setting == saas_setting.upper():
locals()[saas_setting] = getattr(saas_module, saas_setting)
except ImportError:
    # conf.settings_saas is optional; ignore the error if it is absent.
    pass
| 36.73913
| 115
| 0.727811
|