Dataset columns (string and list columns report min/max lengths, numeric columns report min/max values; ⌀ marks nullable columns):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string | 3 | 972 |
| max_stars_repo_name | string | 6 | 130 |
| max_stars_repo_head_hexsha | string | 40 | 78 |
| max_stars_repo_licenses | list | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_path | string | 3 | 972 |
| max_issues_repo_name | string | 6 | 130 |
| max_issues_repo_head_hexsha | string | 40 | 78 |
| max_issues_repo_licenses | list | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_path | string | 3 | 972 |
| max_forks_repo_name | string | 6 | 130 |
| max_forks_repo_head_hexsha | string | 40 | 78 |
| max_forks_repo_licenses | list | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | 24 | 24 |
| content | string | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
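Each row pairs one source file (content) with repository metadata gathered from the repos where that file earned the most stars, issues, and forks. As a sketch of how rows with this schema could be read with the datasets library ("org/dataset-name" is a placeholder; the dataset id is not given in this dump):

```python
# Sketch only: iterate rows of a dataset with the schema above.
# "org/dataset-name" is a placeholder id, not stated in the dump.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    print(row["max_stars_repo_path"], row["size"], row["max_stars_count"])
    print(row["content"][:80])  # first characters of the stored source file
    break
```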
Row 1:
- hexsha: 1af27d72af6c6032e352d0d1c79a53d362e2f597 | size: 4,595 | ext: py | lang: Python
- max_stars: adb_shell/transport/tcp_transport.py @ KOLANICH-libs/adb_shell (bf4d348e3aa0999b24976de9bac442b0e180a27e), licenses ["Apache-2.0"], 268 stars (2019-09-25T16:38:51.000Z to 2022-03-31T07:08:17.000Z)
- max_issues: same path, repo and head, licenses ["Apache-2.0"], 73 issues (2019-09-30T14:25:38.000Z to 2022-01-23T23:04:29.000Z)
- max_forks: same path, repo and head, licenses ["Apache-2.0"], 48 forks (2019-11-05T20:37:59.000Z to 2022-03-09T08:12:06.000Z)
- content:
# Copyright (c) 2021 Jeff Irion and contributors
#
# This file is part of the adb-shell package. It incorporates work
# covered by the following license notice:
#
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for creating a socket connection with the device and sending and receiving data.
* :class:`TcpTransport`
* :meth:`TcpTransport.bulk_read`
* :meth:`TcpTransport.bulk_write`
* :meth:`TcpTransport.close`
* :meth:`TcpTransport.connect`
"""
import select
import socket
from .base_transport import BaseTransport
from ..exceptions import TcpTimeoutException
class TcpTransport(BaseTransport):
"""TCP connection object.
Parameters
----------
host : str
The address of the device; may be an IP address or a host name
port : int
The device port to which we are connecting (default is 5555)
Attributes
----------
_connection : socket.socket, None
A socket connection to the device
_host : str
The address of the device; may be an IP address or a host name
_port : int
The device port to which we are connecting (default is 5555)
"""
def __init__(self, host, port=5555):
self._host = host
self._port = port
self._connection = None
def close(self):
"""Close the socket connection.
"""
if self._connection:
try:
self._connection.shutdown(socket.SHUT_RDWR)
except OSError:
pass
self._connection.close()
self._connection = None
def connect(self, transport_timeout_s):
"""Create a socket connection to the device.
Parameters
----------
transport_timeout_s : float, None
Set the timeout on the socket instance
"""
self._connection = socket.create_connection((self._host, self._port), timeout=transport_timeout_s)
if transport_timeout_s:
# Put the socket in non-blocking mode
# https://docs.python.org/3/library/socket.html#socket.socket.settimeout
self._connection.setblocking(False)
def bulk_read(self, numbytes, transport_timeout_s):
"""Receive data from the socket.
Parameters
----------
numbytes : int
The maximum amount of data to be received
transport_timeout_s : float, None
When the timeout argument is omitted, ``select.select`` blocks until at least one file descriptor is ready. A time-out value of zero specifies a poll and never blocks.
Returns
-------
bytes
The received data
Raises
------
TcpTimeoutException
Reading timed out.
"""
readable, _, _ = select.select([self._connection], [], [], transport_timeout_s)
if readable:
return self._connection.recv(numbytes)
msg = 'Reading from {}:{} timed out ({} seconds)'.format(self._host, self._port, transport_timeout_s)
raise TcpTimeoutException(msg)
def bulk_write(self, data, transport_timeout_s):
"""Send data to the socket.
Parameters
----------
data : bytes
The data to be sent
transport_timeout_s : float, None
When the timeout argument is omitted, ``select.select`` blocks until at least one file descriptor is ready. A time-out value of zero specifies a poll and never blocks.
Returns
-------
int
The number of bytes sent
Raises
------
TcpTimeoutException
Sending data timed out. No data was sent.
"""
_, writeable, _ = select.select([], [self._connection], [], transport_timeout_s)
if writeable:
return self._connection.send(data)
msg = 'Sending data to {}:{} timed out after {} seconds. No data was sent.'.format(self._host, self._port, transport_timeout_s)
raise TcpTimeoutException(msg)
- avg_line_length: 30.838926 | max_line_length: 179 | alphanum_fraction: 0.629597
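The file above only moves raw bytes; the ADB message framing is layered on top elsewhere in the package. As a rough usage sketch (the host address and payload are made up, and a real ADB daemon expects the ADB wire protocol rather than arbitrary bytes), the four public methods are driven like this:

```python
# Illustrative only: exercises TcpTransport against a plain TCP endpoint,
# not a real ADB handshake.
from adb_shell.transport.tcp_transport import TcpTransport

transport = TcpTransport(host="192.168.0.10", port=5555)  # hypothetical address
transport.connect(transport_timeout_s=9.0)                # creates the socket, sets non-blocking
try:
    transport.bulk_write(b"hello", transport_timeout_s=9.0)     # waits via select() until writable
    reply = transport.bulk_read(4096, transport_timeout_s=9.0)  # waits via select() until readable
    print(reply)
finally:
    transport.close()  # shuts down and closes the socket
```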
Row 2:
- hexsha: b4d3f9fce65ede2ac4c2f5b67dbfcb6f1283487b | size: 3,439 | ext: py | lang: Python
- max_stars: tests/test_basic_unet.py @ Irme/MONAI (dc4bf661831b14f4231cb325cc1b15d38c1e406c), licenses ["Apache-2.0"], stars: null
- max_issues: same path, repo and head, licenses ["Apache-2.0"], issues: null
- max_forks: same path, repo and head, licenses ["Apache-2.0"], 1 fork (2020-06-11T13:03:02.000Z to 2020-06-11T13:03:02.000Z)
- content:
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks.nets import BasicUNet
from tests.utils import test_script_save
CASES_1D = []
for mode in ["pixelshuffle", "nontrainable", "deconv", None]:
kwargs = {
"dimensions": 1,
"in_channels": 5,
"out_channels": 8,
}
if mode is not None:
kwargs["upsample"] = mode # type: ignore
CASES_1D.append(
[
kwargs,
(10, 5, 17),
(10, 8, 17),
]
)
CASES_2D = []
for mode in ["pixelshuffle", "nontrainable", "deconv"]:
for d1 in range(17, 64, 14):
for d2 in range(63, 18, -21):
in_channels, out_channels = 2, 3
CASES_2D.append(
[
{
"dimensions": 2,
"in_channels": in_channels,
"out_channels": out_channels,
"features": (12, 12, 13, 14, 15, 16),
"upsample": mode,
},
(2, in_channels, d1, d2),
(2, out_channels, d1, d2),
]
)
CASES_3D = [
[ # single channel 3D, batch 2
{
"dimensions": 3,
"in_channels": 1,
"out_channels": 2,
"features": (16, 20, 21, 22, 23, 11),
"upsample": "pixelshuffle",
},
(2, 1, 16, 17, 18),
(2, 2, 16, 17, 18),
],
[ # 2-channel 3D, batch 3
{
"dimensions": 3,
"in_channels": 2,
"out_channels": 7,
"features": (14, 15, 16, 17, 18, 11),
"upsample": "deconv",
},
(3, 2, 16, 17, 18),
(3, 7, 16, 17, 18),
],
[ # 4-channel 3D, batch 5
{
"dimensions": 3,
"in_channels": 4,
"out_channels": 2,
"features": (14, 15, 16, 17, 18, 10),
"upsample": "nontrainable",
},
(5, 4, 19, 84, 16),
(5, 2, 19, 84, 16),
],
]
class TestBasicUNET(unittest.TestCase):
@parameterized.expand(CASES_1D + CASES_2D + CASES_3D)
def test_shape(self, input_param, input_shape, expected_shape):
device = "cuda" if torch.cuda.is_available() else "cpu"
print(input_param)
net = BasicUNet(**input_param).to(device)
net.eval()
with torch.no_grad():
result = net(torch.randn(input_shape).to(device))
self.assertEqual(result.shape, expected_shape)
def test_script(self):
net = BasicUNet(dimensions=2, in_channels=1, out_channels=3)
test_data = torch.randn(16, 1, 32, 32)
out_orig, out_reloaded = test_script_save(net, test_data)
assert torch.allclose(out_orig, out_reloaded)
if __name__ == "__main__":
unittest.main()
- avg_line_length: 30.705357 | max_line_length: 74 | alphanum_fraction: 0.536493
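Each parameterized case above boils down to one shape check: build a BasicUNet, run a random tensor through it, and confirm the spatial size is preserved while the channel count becomes out_channels. A minimal standalone sketch of that check (keyword names mirror the test file; newer MONAI releases renamed dimensions to spatial_dims):

```python
# Sketch of the check the parameterized 2D cases perform, outside unittest.
import torch
from monai.networks.nets import BasicUNet

net = BasicUNet(dimensions=2, in_channels=2, out_channels=3,
                features=(12, 12, 13, 14, 15, 16), upsample="deconv")
net.eval()
with torch.no_grad():
    out = net(torch.randn(2, 2, 31, 42))  # (batch, in_channels, H, W)
assert out.shape == (2, 3, 31, 42)         # spatial size preserved, channels -> out_channels
```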
Row 3:
- hexsha: 0c28b646947d0ead6ac3810220c5fd8939198a64 | size: 483 | ext: py | lang: Python
- max_stars: myreader/mainsite/migrations/0004_auto_20180910_0838.py @ zhaopan-vip/MyReader (958e1df75bf22893a4b13f4f0bd57c7cf6bae588), licenses ["Apache-2.0"], stars: null
- max_issues: same path, repo and head, licenses ["Apache-2.0"], 4 issues (2021-06-08T19:18:47.000Z to 2022-03-11T23:30:17.000Z)
- max_forks: same path @ zhaopaniOS/MyReader (958e1df75bf22893a4b13f4f0bd57c7cf6bae588), licenses ["Apache-2.0"], forks: null
- content:
# Generated by Django 2.1.1 on 2018-09-10 08:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainsite', '0003_auto_20180907_0338'),
]
operations = [
migrations.AlterUniqueTogether(
name='book',
unique_together={('title', 'author')},
),
migrations.AlterUniqueTogether(
name='chapter',
unique_together={('book', 'section')},
),
]
- avg_line_length: 21.954545 | max_line_length: 50 | alphanum_fraction: 0.573499
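AlterUniqueTogether only records the constraint change in migration state; the constraint itself lives on the models' Meta. A hypothetical sketch of the model definitions this migration implies (the real field types in mainsite/models.py are not shown in this dump):

```python
# Hypothetical models matching the unique_together tuples in the migration above;
# field types are assumed for illustration.
from django.db import models

class Book(models.Model):
    title = models.CharField(max_length=255)
    author = models.CharField(max_length=255)

    class Meta:
        unique_together = {('title', 'author')}   # matches AlterUniqueTogether on 'book'

class Chapter(models.Model):
    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    section = models.IntegerField()

    class Meta:
        unique_together = {('book', 'section')}   # matches AlterUniqueTogether on 'chapter'
```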
Row 4:
- hexsha: b7d0e3090c4677bb0e4145de07ecbc7a49a3be79 | size: 81,990 | ext: py | lang: Python
- max_stars: sympy/geometry/polygon.py @ AyushGit123/sympy (bd79440abfa41d175737d1a138e63e16f9f51994), licenses ["BSD-3-Clause"], 1 star (2020-03-30T05:21:06.000Z to 2020-03-30T05:21:06.000Z)
- max_issues: sympy/geometry/polygon.py @ otoosakyidavid/sympy (636221ff35c78b980f828a285d0c552fac77aaba), licenses ["BSD-3-Clause"], issues: null
- max_forks: same path, repo and head as max_issues, licenses ["BSD-3-Clause"], 1 fork (2021-02-28T20:26:24.000Z to 2021-02-28T20:26:24.000Z)
- content:
from __future__ import division, print_function
from sympy.core import Expr, S, Symbol, oo, pi, sympify
from sympy.core.compatibility import as_int, ordered
from sympy.core.symbol import _symbol, Dummy, symbols
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos, sin, tan
from sympy.geometry.exceptions import GeometryError
from sympy.logic import And
from sympy.matrices import Matrix
from sympy.simplify import simplify
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import has_dups, has_variety, uniq, rotate_left, least_rotation
from sympy.utilities.misc import func_name
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .ellipse import Circle
from .line import Line, Segment, Ray
import warnings
class Polygon(GeometrySet):
"""A two-dimensional polygon.
A simple polygon in space. Can be constructed from a sequence of points
or from a center, radius, number of sides and rotation angle.
Parameters
==========
vertices : sequence of Points
Optional parameters
==========
n : If > 0, an n-sided RegularPolygon is created. See below.
Default value is 0.
Attributes
==========
area
angles
perimeter
vertices
centroid
sides
Raises
======
GeometryError
If all parameters are not Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle
Notes
=====
Polygons are treated as closed paths rather than 2D areas so
some calculations can be negative or positive (e.g., area)
based on the orientation of the points.
Any consecutive identical points are reduced to a single point
and any points collinear and between two points will be removed
unless they are needed to define an explicit intersection (see examples).
A Triangle, Segment or Point will be returned when there are 3 or
fewer points provided.
Examples
========
>>> from sympy import Point, Polygon, pi
>>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)]
>>> Polygon(p1, p2, p3, p4)
Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1))
>>> Polygon(p1, p2)
Segment2D(Point2D(0, 0), Point2D(1, 0))
>>> Polygon(p1, p2, p5)
Segment2D(Point2D(0, 0), Point2D(3, 0))
The area of a polygon is calculated as positive when vertices are
traversed in a ccw direction. When the sides of a polygon cross, the
area will have positive and negative contributions. The following
defines a Z shape where the bottom right connects back to the top
left.
>>> Polygon((0, 2), (2, 2), (0, 0), (2, 0)).area
0
When the keyword `n` is used to define the number of sides of the
Polygon then a RegularPolygon is created and the other arguments are
interpreted as center, radius and rotation. The unrotated RegularPolygon
will always have a vertex at Point(r, 0) where `r` is the radius of the
circle that circumscribes the RegularPolygon. Its method `spin` can be
used to increment that angle.
>>> p = Polygon((0,0), 1, n=3)
>>> p
RegularPolygon(Point2D(0, 0), 1, 3, 0)
>>> p.vertices[0]
Point2D(1, 0)
>>> p.args[0]
Point2D(0, 0)
>>> p.spin(pi/2)
>>> p.vertices[0]
Point2D(0, 1)
"""
def __new__(cls, *args, n = 0, **kwargs):
if n:
args = list(args)
# return a virtual polygon with n sides
if len(args) == 2: # center, radius
args.append(n)
elif len(args) == 3: # center, radius, rotation
args.insert(2, n)
return RegularPolygon(*args, **kwargs)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = nodup[i], nodup[i + 1], nodup[i + 2]
if Point.is_collinear(a, b, c):
nodup.pop(i + 1)
if a == c:
nodup.pop(i)
else:
i += 1
vertices = list(nodup)
if len(vertices) > 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 3:
return Triangle(*vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def area(self):
"""
The area of the polygon.
Notes
=====
The area calculation can be positive or negative based on the
orientation of the points. If any side of the polygon crosses
any other side, there will be areas having opposite signs.
See Also
========
sympy.geometry.ellipse.Ellipse.area
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.area
3
In the Z shaped polygon (with the lower right connecting back
to the upper left) the areas cancel out:
>>> Z = Polygon((0, 1), (1, 1), (0, 0), (1, 0))
>>> Z.area
0
In the M shaped polygon, areas do not cancel because no side
crosses any other (though there is a point of contact).
>>> M = Polygon((0, 0), (0, 1), (2, 0), (3, 1), (3, 0))
>>> M.area
-3/2
"""
area = 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
area += x1*y2 - x2*y1
return simplify(area) / 2
@staticmethod
def _isright(a, b, c):
"""Return True/False for cw/ccw orientation.
Examples
========
>>> from sympy import Point, Polygon
>>> a, b, c = [Point(i) for i in [(0, 0), (1, 1), (1, 0)]]
>>> Polygon._isright(a, b, c)
True
>>> Polygon._isright(a, c, b)
False
"""
ba = b - a
ca = c - a
t_area = simplify(ba.x*ca.y - ca.x*ba.y)
res = t_area.is_nonpositive
if res is None:
raise ValueError("Can't determine orientation")
return res
@property
def angles(self):
"""The internal angle at each vertex.
Returns
=======
angles : dict
A dictionary where each key is a vertex and each value is the
internal angle at that vertex. The vertices are represented as
Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.angles[p1]
pi/2
>>> poly.angles[p2]
acos(-4*sqrt(17)/17)
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-1], args[0], args[1])
ret = {}
for i in range(len(args)):
a, b, c = args[i - 2], args[i - 1], args[i]
ang = Ray(b, a).angle_between(Ray(b, c))
if cw ^ self._isright(a, b, c):
ret[b] = 2*S.Pi - ang
else:
ret[b] = ang
return ret
@property
def ambient_dimension(self):
return self.vertices[0].ambient_dimension
@property
def perimeter(self):
"""The perimeter of the polygon.
Returns
=======
perimeter : number or Basic instance
See Also
========
sympy.geometry.line.Segment.length
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.perimeter
sqrt(17) + 7
"""
p = 0
args = self.vertices
for i in range(len(args)):
p += args[i - 1].distance(args[i])
return simplify(p)
@property
def vertices(self):
"""The vertices of the polygon.
Returns
=======
vertices : list of Points
Notes
=====
When iterating over the vertices, it is more efficient to index self
rather than to request the vertices and index them. Only use the
vertices when you want to process all of them at once. This is even
more important with RegularPolygons that calculate each vertex.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.vertices
[Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)]
>>> poly.vertices[0]
Point2D(0, 0)
"""
return list(self.args)
@property
def centroid(self):
"""The centroid of the polygon.
Returns
=======
centroid : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.util.centroid
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.centroid
Point2D(31/18, 11/18)
"""
A = 1/(6*self.area)
cx, cy = 0, 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
cx += v*(x1 + x2)
cy += v*(y1 + y2)
return Point(simplify(A*cx), simplify(A*cy))
def second_moment_of_area(self, point=None):
"""Returns the second moment and product moment of area of a two dimensional polygon.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point about which second moment of area is to be found.
If "point=None" it will be calculated about the axis passing through the
centroid of the polygon.
Returns
=======
I_xx, I_yy, I_xy : number or sympy expression
I_xx, I_yy are second moment of area of a two dimensional polygon.
I_xy is product moment of area of a two dimensional polygon.
Examples
========
>>> from sympy import Point, Polygon, symbols
>>> a, b = symbols('a, b')
>>> p1, p2, p3, p4, p5 = [(0, 0), (a, 0), (a, b), (0, b), (a/3, b/3)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> rectangle.second_moment_of_area()
(a*b**3/12, a**3*b/12, 0)
>>> rectangle.second_moment_of_area(p5)
(a*b**3/9, a**3*b/9, a**2*b**2/36)
References
==========
https://en.wikipedia.org/wiki/Second_moment_of_area
"""
I_xx, I_yy, I_xy = 0, 0, 0
args = self.vertices
for i in range(len(args)):
x1, y1 = args[i-1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
I_xx += (y1**2 + y1*y2 + y2**2)*v
I_yy += (x1**2 + x1*x2 + x2**2)*v
I_xy += (x1*y2 + 2*x1*y1 + 2*x2*y2 + x2*y1)*v
A = self.area
c_x = self.centroid[0]
c_y = self.centroid[1]
# parallel axis theorem
I_xx_c = (I_xx/12) - (A*(c_y**2))
I_yy_c = (I_yy/12) - (A*(c_x**2))
I_xy_c = (I_xy/24) - (A*(c_x*c_y))
if point is None:
return I_xx_c, I_yy_c, I_xy_c
I_xx = (I_xx_c + A*((point[1]-c_y)**2))
I_yy = (I_yy_c + A*((point[0]-c_x)**2))
I_xy = (I_xy_c + A*((point[0]-c_x)*(point[1]-c_y)))
return I_xx, I_yy, I_xy
def first_moment_of_area(self, point=None):
"""
Returns the first moment of area of a two-dimensional polygon with
respect to a certain point of interest.
First moment of area is a measure of the distribution of the area
of a polygon in relation to an axis. The first moment of area of
the entire polygon about its own centroid is always zero. Therefore,
here it is calculated for an area, above or below a certain point
of interest, that makes up a smaller portion of the polygon. This
area is bounded by the point of interest and the extreme end
(top or bottom) of the polygon. The first moment for this area is
then determined about the centroidal axis of the initial polygon.
References
==========
https://skyciv.com/docs/tutorials/section-tutorials/calculating-the-statical-or-first-moment-of-area-of-beam-sections/?cc=BMD
https://mechanicalc.com/reference/cross-sections
Parameters
==========
point: Point, two-tuple of sympifyable objects, or None (default=None)
point is the point above or below which the area of interest lies
If ``point=None`` then the centroid acts as the point of interest.
Returns
=======
Q_x, Q_y: number or sympy expressions
Q_x is the first moment of area about the x-axis
Q_y is the first moment of area about the y-axis
A negative sign indicates that the first moment of area is
determined for a section below (or left of) the centroidal axis
Examples
========
>>> from sympy import Point, Polygon
>>> a, b = 50, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> p = Polygon(p1, p2, p3, p4)
>>> p.first_moment_of_area()
(625, 3125)
>>> p.first_moment_of_area(point=Point(30, 7))
(525, 3000)
"""
if point:
xc, yc = self.centroid
else:
point = self.centroid
xc, yc = point
h_line = Line(point, slope=0)
v_line = Line(point, slope=S.Infinity)
h_poly = self.cut_section(h_line)
v_poly = self.cut_section(v_line)
x_min, y_min, x_max, y_max = self.bounds
poly_1 = h_poly[0] if h_poly[0].area <= h_poly[1].area else h_poly[1]
poly_2 = v_poly[0] if v_poly[0].area <= v_poly[1].area else v_poly[1]
Q_x = (poly_1.centroid.y - yc)*poly_1.area
Q_y = (poly_2.centroid.x - xc)*poly_2.area
return Q_x, Q_y
def polar_second_moment_of_area(self):
"""Returns the polar modulus of a two-dimensional polygon
It is a constituent of the second moment of area, linked through
the perpendicular axis theorem. While the planar second moment of
area describes an object's resistance to deflection (bending) when
subjected to a force applied to a plane parallel to the central
axis, the polar second moment of area describes an object's
resistance to deflection when subjected to a moment applied in a
plane perpendicular to the object's central axis (i.e. parallel to
the cross-section)
References
==========
https://en.wikipedia.org/wiki/Polar_moment_of_inertia
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.polar_second_moment_of_area()
a**3*b/12 + a*b**3/12
"""
second_moment = self.second_moment_of_area()
return second_moment[0] + second_moment[1]
def section_modulus(self, point=None):
"""Returns a tuple with the section modulus of a two-dimensional
polygon.
Section modulus is a geometric property of a polygon defined as the
ratio of second moment of area to the distance of the extreme end of
the polygon from the centroidal axis.
References
==========
https://en.wikipedia.org/wiki/Section_modulus
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point at which section modulus is to be found.
If "point=None" it will be calculated for the point farthest from the
centroidal axis of the polygon.
Returns
=======
S_x, S_y: numbers or SymPy expressions
S_x is the section modulus with respect to the x-axis
S_y is the section modulus with respect to the y-axis
A negative sign indicates that the section modulus is
determined for a point below the centroidal axis
Examples
========
>>> from sympy import symbols, Polygon, Point
>>> a, b = symbols('a, b', positive=True)
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.section_modulus()
(a*b**2/6, a**2*b/6)
>>> rectangle.section_modulus(Point(a/4, b/4))
(-a*b**2/3, -a**2*b/3)
"""
x_c, y_c = self.centroid
if point is None:
# taking x and y as maximum distances from centroid
x_min, y_min, x_max, y_max = self.bounds
y = max(y_c - y_min, y_max - y_c)
x = max(x_c - x_min, x_max - x_c)
else:
# taking x and y as distances of the given point from the centroid
y = point.y - y_c
x = point.x - x_c
second_moment= self.second_moment_of_area()
S_x = second_moment[0]/y
S_y = second_moment[1]/x
return S_x, S_y
@property
def sides(self):
"""The directed line segments that form the sides of the polygon.
Returns
=======
sides : list of sides
Each side is a directed Segment.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.sides
[Segment2D(Point2D(0, 0), Point2D(1, 0)),
Segment2D(Point2D(1, 0), Point2D(5, 1)),
Segment2D(Point2D(5, 1), Point2D(0, 1)), Segment2D(Point2D(0, 1), Point2D(0, 0))]
"""
res = []
args = self.vertices
for i in range(-len(args), 0):
res.append(Segment(args[i], args[i + 1]))
return res
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.vertices
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
def is_convex(self):
"""Is the polygon convex?
A polygon is convex if all its interior angles are less than 180
degrees and there are no intersections between sides.
Returns
=======
is_convex : boolean
True if this polygon is convex, False otherwise.
See Also
========
sympy.geometry.util.convex_hull
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.is_convex()
True
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-2], args[-1], args[0])
for i in range(1, len(args)):
if cw ^ self._isright(args[i - 2], args[i - 1], args[i]):
return False
# check for intersecting sides
sides = self.sides
for i, si in enumerate(sides):
pts = si.args
# exclude the sides connected to si
for j in range(1 if i == len(sides) - 1 else 0, i - 1):
sj = sides[j]
if sj.p1 not in pts and sj.p2 not in pts:
hit = si.intersection(sj)
if hit:
return False
return True
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import Polygon, Point
>>> from sympy.abc import t
>>> p = Polygon((0, 0), (4, 0), (4, 4))
>>> p.encloses_point(Point(2, 1))
True
>>> p.encloses_point(Point(2, 2))
False
>>> p.encloses_point(Point(5, 5))
False
References
==========
[1] http://paulbourke.net/geometry/polygonmesh/#insidepoly
"""
p = Point(p, dim=2)
if p in self.vertices or any(p in s for s in self.sides):
return False
# move to p, checking that the result is numeric
lit = []
for v in self.vertices:
lit.append(v - p) # the difference is simplified
if lit[-1].free_symbols:
return None
poly = Polygon(*lit)
# polygon closure is assumed in the following test but Polygon removes duplicate pts so
# the last point has to be added so all sides are computed. Using Polygon.sides is
# not good since Segments are unordered.
args = poly.args
indices = list(range(-len(args), 1))
if poly.is_convex():
orientation = None
for i in indices:
a = args[i]
b = args[i + 1]
test = ((-a.y)*(b.x - a.x) - (-a.x)*(b.y - a.y)).is_negative
if orientation is None:
orientation = test
elif test is not orientation:
return False
return True
hit_odd = False
p1x, p1y = args[0].args
for i in indices[1:]:
p2x, p2y = args[i].args
if 0 > min(p1y, p2y):
if 0 <= max(p1y, p2y):
if 0 <= max(p1x, p2x):
if p1y != p2y:
xinters = (-p1y)*(p2x - p1x)/(p2y - p1y) + p1x
if p1x == p2x or 0 <= xinters:
hit_odd = not hit_odd
p1x, p1y = p2x, p2y
return hit_odd
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the polygon.
The parameter, varying from 0 to 1, assigns points to the position on
the perimeter that is that fraction of the total perimeter. So the
point evaluated at t=1/2 would return the point from the first vertex
that is 1/2 way around the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the Polygon's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Polygon, S, Symbol
>>> t = Symbol('t', real=True)
>>> tri = Polygon((0, 0), (1, 0), (1, 1))
>>> p = tri.arbitrary_point('t')
>>> perimeter = tri.perimeter
>>> s1, s2 = [s.length for s in tri.sides[:2]]
>>> p.subs(t, (s1 + s2/2)/perimeter)
Point2D(1, 1/2)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError('Symbol %s already appears in object and cannot be used as a parameter.' % t.name)
sides = []
perimeter = self.perimeter
perim_fraction_start = 0
for s in self.sides:
side_perim_fraction = s.length/perimeter
perim_fraction_end = perim_fraction_start + side_perim_fraction
pt = s.arbitrary_point(parameter).subs(
t, (t - perim_fraction_start)/side_perim_fraction)
sides.append(
(pt, (And(perim_fraction_start <= t, t < perim_fraction_end))))
perim_fraction_start = perim_fraction_end
return Piecewise(*sides)
def parameter_value(self, other, t):
from sympy.solvers.solvers import solve
if not isinstance(other,GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other,Point):
raise ValueError("other must be a point")
if other.free_symbols:
raise NotImplementedError('non-numeric coordinates')
unknown = False
T = Dummy('t', real=True)
p = self.arbitrary_point(T)
for pt, cond in p.args:
sol = solve(pt - other, T, dict=True)
if not sol:
continue
value = sol[0][T]
if simplify(cond.subs(T, value)) == True:
return {t: value}
unknown = True
if unknown:
raise ValueError("Given point may not be on %s" % func_name(self))
raise ValueError("Given point is not on %s" % func_name(self))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Polygon
>>> p = Polygon((0, 0), (1, 0), (1, 1))
>>> p.plot_interval()
[t, 0, 1]
"""
t = Symbol(parameter, real=True)
return [t, 0, 1]
def intersection(self, o):
"""The intersection of polygon and geometry entity.
The intersection may be empty and can contain individual Points and
complete Line Segments.
Parameters
==========
other: GeometryEntity
Returns
=======
intersection : list
The list of Segments and Points
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon, Line
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly1 = Polygon(p1, p2, p3, p4)
>>> p5, p6, p7 = map(Point, [(3, 2), (1, -1), (0, 2)])
>>> poly2 = Polygon(p5, p6, p7)
>>> poly1.intersection(poly2)
[Point2D(1/3, 1), Point2D(2/3, 0), Point2D(9/5, 1/5), Point2D(7/3, 1)]
>>> poly1.intersection(Line(p1, p2))
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
>>> poly1.intersection(p1)
[Point2D(0, 0)]
"""
intersection_result = []
k = o.sides if isinstance(o, Polygon) else [o]
for side in self.sides:
for side1 in k:
intersection_result.extend(side.intersection(side1))
intersection_result = list(uniq(intersection_result))
points = [entity for entity in intersection_result if isinstance(entity, Point)]
segments = [entity for entity in intersection_result if isinstance(entity, Segment)]
if points and segments:
points_in_segments = list(uniq([point for point in points for segment in segments if point in segment]))
if points_in_segments:
for i in points_in_segments:
points.remove(i)
return list(ordered(segments + points))
else:
return list(ordered(intersection_result))
def cut_section(self, line):
"""
Returns a tuple of two polygon segments that lie above and below
the intersecting line respectively.
Parameters
==========
line: Line object of geometry module
line which cuts the Polygon. The part of the Polygon that lies
above and below this line is returned.
Returns
=======
upper_polygon, lower_polygon: Polygon objects or None
upper_polygon is the polygon that lies above the given line.
lower_polygon is the polygon that lies below the given line.
upper_polygon and lower_polygon are ``None`` when no polygon
exists above the line or below the line.
Raises
======
ValueError: When the line does not intersect the polygon
References
==========
https://github.com/sympy/sympy/wiki/A-method-to-return-a-cut-section-of-any-polygon-geometry
Examples
========
>>> from sympy import Point, Symbol, Polygon, Line
>>> a, b = 20, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> t = rectangle.cut_section(Line((0, 5), slope=0))
>>> t
(Polygon(Point2D(0, 10), Point2D(0, 5), Point2D(20, 5), Point2D(20, 10)),
Polygon(Point2D(0, 5), Point2D(0, 0), Point2D(20, 0), Point2D(20, 5)))
>>> upper_segment, lower_segment = t
>>> upper_segment.area
100
>>> upper_segment.centroid
Point2D(10, 15/2)
>>> lower_segment.centroid
Point2D(10, 5/2)
"""
intersection_points = self.intersection(line)
if not intersection_points:
raise ValueError("This line does not intersect the polygon")
points = list(self.vertices)
points.append(points[0])
x, y = symbols('x, y', real=True, cls=Dummy)
eq = line.equation(x, y)
# considering the equation of the line to be `ax + by + c`
a = eq.coeff(x)
b = eq.coeff(y)
upper_vertices = []
lower_vertices = []
# prev is true when previous point is above the line
prev = True
prev_point = None
for point in points:
# when coefficient of y is 0, right side of the line is
# considered
compare = eq.subs({x: point.x, y: point.y})/b if b \
else eq.subs(x, point.x)/a
# if point lies above line
if compare > 0:
if not prev:
# if previous point lies below the line, the intersection
# point of the polygon edge and the line has to be included
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
upper_vertices.append(point)
prev = True
else:
if prev and prev_point:
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
lower_vertices.append(point)
prev = False
prev_point = point
upper_polygon, lower_polygon = None, None
if upper_vertices and isinstance(Polygon(*upper_vertices), Polygon):
upper_polygon = Polygon(*upper_vertices)
if lower_vertices and isinstance(Polygon(*lower_vertices), Polygon):
lower_polygon = Polygon(*lower_vertices)
return upper_polygon, lower_polygon
def distance(self, o):
"""
Returns the shortest distance between self and o.
If o is a point, then self does not need to be convex.
If o is another polygon self and o must be convex.
Examples
========
>>> from sympy import Point, Polygon, RegularPolygon
>>> p1, p2 = map(Point, [(0, 0), (7, 5)])
>>> poly = Polygon(*RegularPolygon(p1, 1, 3).vertices)
>>> poly.distance(p2)
sqrt(61)
"""
if isinstance(o, Point):
dist = oo
for side in self.sides:
current = side.distance(o)
if current == 0:
return S.Zero
elif current < dist:
dist = current
return dist
elif isinstance(o, Polygon) and self.is_convex() and o.is_convex():
return self._do_poly_distance(o)
raise NotImplementedError()
def _do_poly_distance(self, e2):
"""
Calculates the least distance between the exteriors of two
convex polygons e1 and e2. Does not check for the convexity
of the polygons as this is checked by Polygon.distance.
Notes
=====
- Prints a warning if the two polygons possibly intersect as the return
value will not be valid in such a case. For a more thorough test of
intersection use intersection().
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy.geometry import Point, Polygon
>>> square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0))
>>> triangle = Polygon(Point(1, 2), Point(2, 2), Point(2, 1))
>>> square._do_poly_distance(triangle)
sqrt(2)/2
Description of method used
==========================
Method:
[1] http://cgm.cs.mcgill.ca/~orm/mind2p.html
Uses rotating calipers:
[2] https://en.wikipedia.org/wiki/Rotating_calipers
and antipodal points:
[3] https://en.wikipedia.org/wiki/Antipodal_point
"""
e1 = self
'''Tests for a possible intersection between the polygons and outputs a warning'''
e1_center = e1.centroid
e2_center = e2.centroid
e1_max_radius = S.Zero
e2_max_radius = S.Zero
for vertex in e1.vertices:
r = Point.distance(e1_center, vertex)
if e1_max_radius < r:
e1_max_radius = r
for vertex in e2.vertices:
r = Point.distance(e2_center, vertex)
if e2_max_radius < r:
e2_max_radius = r
center_dist = Point.distance(e1_center, e2_center)
if center_dist <= e1_max_radius + e2_max_radius:
warnings.warn("Polygons may intersect producing erroneous output")
'''
Find the upper rightmost vertex of e1 and the lowest leftmost vertex of e2
'''
e1_ymax = Point(0, -oo)
e2_ymin = Point(0, oo)
for vertex in e1.vertices:
if vertex.y > e1_ymax.y or (vertex.y == e1_ymax.y and vertex.x > e1_ymax.x):
e1_ymax = vertex
for vertex in e2.vertices:
if vertex.y < e2_ymin.y or (vertex.y == e2_ymin.y and vertex.x < e2_ymin.x):
e2_ymin = vertex
min_dist = Point.distance(e1_ymax, e2_ymin)
'''
Produce a dictionary with vertices of e1 as the keys and, for each vertex, the points
to which the vertex is connected as its value. The same is then done for e2.
'''
e1_connections = {}
e2_connections = {}
for side in e1.sides:
if side.p1 in e1_connections:
e1_connections[side.p1].append(side.p2)
else:
e1_connections[side.p1] = [side.p2]
if side.p2 in e1_connections:
e1_connections[side.p2].append(side.p1)
else:
e1_connections[side.p2] = [side.p1]
for side in e2.sides:
if side.p1 in e2_connections:
e2_connections[side.p1].append(side.p2)
else:
e2_connections[side.p1] = [side.p2]
if side.p2 in e2_connections:
e2_connections[side.p2].append(side.p1)
else:
e2_connections[side.p2] = [side.p1]
e1_current = e1_ymax
e2_current = e2_ymin
support_line = Line(Point(S.Zero, S.Zero), Point(S.One, S.Zero))
'''
Determine which point in e1 and e2 will be selected after e2_ymin and e1_ymax,
this information combined with the above produced dictionaries determines the
path that will be taken around the polygons
'''
point1 = e1_connections[e1_ymax][0]
point2 = e1_connections[e1_ymax][1]
angle1 = support_line.angle_between(Line(e1_ymax, point1))
angle2 = support_line.angle_between(Line(e1_ymax, point2))
if angle1 < angle2:
e1_next = point1
elif angle2 < angle1:
e1_next = point2
elif Point.distance(e1_ymax, point1) > Point.distance(e1_ymax, point2):
e1_next = point2
else:
e1_next = point1
point1 = e2_connections[e2_ymin][0]
point2 = e2_connections[e2_ymin][1]
angle1 = support_line.angle_between(Line(e2_ymin, point1))
angle2 = support_line.angle_between(Line(e2_ymin, point2))
if angle1 > angle2:
e2_next = point1
elif angle2 > angle1:
e2_next = point2
elif Point.distance(e2_ymin, point1) > Point.distance(e2_ymin, point2):
e2_next = point2
else:
e2_next = point1
'''
Loop which determines the distance between anti-podal pairs and updates the
minimum distance accordingly. It repeats until it reaches the starting position.
'''
while True:
e1_angle = support_line.angle_between(Line(e1_current, e1_next))
e2_angle = pi - support_line.angle_between(Line(
e2_current, e2_next))
if (e1_angle < e2_angle) is True:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
min_dist_current = e1_segment.distance(e2_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
elif (e1_angle > e2_angle) is True:
support_line = Line(e2_next, e2_current)
e2_segment = Segment(e2_current, e2_next)
min_dist_current = e2_segment.distance(e1_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
else:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
e2_segment = Segment(e2_current, e2_next)
min1 = e1_segment.distance(e2_next)
min2 = e2_segment.distance(e1_next)
min_dist_current = min(min1, min2)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
if e1_current == e1_ymax and e2_current == e2_ymin:
break
return min_dist
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the Polygon.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
from sympy.core.evalf import N
verts = map(N, self.vertices)
coords = ["{0},{1}".format(p.x, p.y) for p in verts]
path = "M {0} L {1} z".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2. * scale_factor, path, fill_color)
def _hashable_content(self):
D = {}
def ref_list(point_list):
kee = {}
for i, p in enumerate(ordered(set(point_list))):
kee[p] = i
D[i] = p
return [kee[p] for p in point_list]
S1 = ref_list(self.args)
r_nor = rotate_left(S1, least_rotation(S1))
S2 = ref_list(list(reversed(self.args)))
r_rev = rotate_left(S2, least_rotation(S2))
if r_nor < r_rev:
r = r_nor
else:
r = r_rev
canonical_args = [ D[order] for order in r ]
return tuple(canonical_args)
def __contains__(self, o):
"""
Return True if o is contained within the boundary lines of self.
Parameters
==========
other : GeometryEntity
Returns
=======
contained in : bool
The points (and sides, if applicable) are contained in self.
See Also
========
sympy.geometry.entity.GeometryEntity.encloses
Examples
========
>>> from sympy import Line, Segment, Point
>>> p = Point(0, 0)
>>> q = Point(1, 1)
>>> s = Segment(p, q*2)
>>> l = Line(p, q)
>>> p in q
False
>>> p in s
True
>>> q*3 in s
False
>>> s in l
True
"""
if isinstance(o, Polygon):
return self == o
elif isinstance(o, Segment):
return any(o in s for s in self.sides)
elif isinstance(o, Point):
if o in self.vertices:
return True
for side in self.sides:
if o in side:
return True
return False
def bisectors(p, prec=None):
"""Returns angle bisectors of a polygon. If prec is given
then approximate the point defining the ray to that precision.
The distance between the points defining the bisector ray is 1.
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon(Point(0, 0), Point(2, 0), Point(1, 1), Point(0, 3))
>>> p.bisectors(2)
{Point2D(0, 0): Ray2D(Point2D(0, 0), Point2D(0.71, 0.71)),
Point2D(0, 3): Ray2D(Point2D(0, 3), Point2D(0.23, 2.0)),
Point2D(1, 1): Ray2D(Point2D(1, 1), Point2D(0.19, 0.42)),
Point2D(2, 0): Ray2D(Point2D(2, 0), Point2D(1.1, 0.38))}
"""
b = {}
pts = list(p.args)
pts.append(pts[0]) # close it
cw = Polygon._isright(*pts[:3])
if cw:
pts = list(reversed(pts))
for v, a in p.angles.items():
i = pts.index(v)
p1, p2 = Point._normalize_dimension(pts[i], pts[i + 1])
ray = Ray(p1, p2).rotate(a/2, v)
dir = ray.direction
ray = Ray(ray.p1, ray.p1 + dir/dir.distance((0, 0)))
if prec is not None:
ray = Ray(ray.p1, ray.p2.n(prec))
b[v] = ray
return b
class RegularPolygon(Polygon):
"""
A regular polygon.
Such a polygon has all internal angles equal and all sides the same length.
Parameters
==========
center : Point
radius : number or Basic instance
The distance from the center to a vertex
n : int
The number of sides
Attributes
==========
vertices
center
radius
rotation
apothem
interior_angle
exterior_angle
circumcircle
incircle
angles
Raises
======
GeometryError
If the `center` is not a Point, or the `radius` is not a number or Basic
instance, or the number of sides, `n`, is less than three.
Notes
=====
A RegularPolygon can be instantiated with Polygon with the kwarg n.
Regular polygons are instantiated with a center, radius, number of sides
and a rotation angle. Whereas the arguments of a Polygon are vertices, the
vertices of the RegularPolygon must be obtained with the vertices method.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r
RegularPolygon(Point2D(0, 0), 5, 3, 0)
>>> r.vertices[0]
Point2D(5, 0)
"""
__slots__ = ('_n', '_center', '_radius', '_rot')
def __new__(self, c, r, n, rot=0, **kwargs):
r, n, rot = map(sympify, (r, n, rot))
c = Point(c, dim=2, **kwargs)
if not isinstance(r, Expr):
raise GeometryError("r must be an Expr object, not %s" % r)
if n.is_Number:
as_int(n) # let an error raise if necessary
if n < 3:
raise GeometryError("n must be a >= 3, not %s" % n)
obj = GeometryEntity.__new__(self, c, r, n, **kwargs)
obj._n = n
obj._center = c
obj._radius = r
obj._rot = rot % (2*S.Pi/n) if rot.is_number else rot
return obj
@property
def args(self):
"""
Returns the center point, the radius,
the number of sides, and the orientation angle.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.args
(Point2D(0, 0), 5, 3, 0)
"""
return self._center, self._radius, self._n, self._rot
def __str__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
def __repr__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
@property
def area(self):
"""Returns the area.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> square = RegularPolygon((0, 0), 1, 4)
>>> square.area
2
>>> _ == square.length**2
True
"""
c, r, n, rot = self.args
return sign(r)*n*self.length**2/(4*tan(pi/n))
@property
def length(self):
"""Returns the length of the sides.
The half-length of the side and the apothem form two legs
of a right triangle whose hypotenuse is the radius of the
regular polygon.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> from sympy import sqrt
>>> s = square_in_unit_circle = RegularPolygon((0, 0), 1, 4)
>>> s.length
sqrt(2)
>>> sqrt((_/2)**2 + s.apothem**2) == s.radius
True
"""
return self.radius*2*sin(pi/self._n)
@property
def center(self):
"""The center of the RegularPolygon
This is also the center of the circumscribing circle.
Returns
=======
center : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.center
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.center
Point2D(0, 0)
"""
return self._center
centroid = center
@property
def circumcenter(self):
"""
Alias for center.
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.circumcenter
Point2D(0, 0)
"""
return self.center
@property
def radius(self):
"""Radius of the RegularPolygon
This is also the radius of the circumscribing circle.
Returns
=======
radius : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.radius
r
"""
return self._radius
@property
def circumradius(self):
"""
Alias for radius.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.circumradius
r
"""
return self.radius
@property
def rotation(self):
"""CCW angle by which the RegularPolygon is rotated
Returns
=======
rotation : number or instance of Basic
Examples
========
>>> from sympy import pi
>>> from sympy.abc import a
>>> from sympy.geometry import RegularPolygon, Point
>>> RegularPolygon(Point(0, 0), 3, 4, pi/4).rotation
pi/4
Numerical rotation angles are made canonical:
>>> RegularPolygon(Point(0, 0), 3, 4, a).rotation
a
>>> RegularPolygon(Point(0, 0), 3, 4, pi).rotation
0
"""
return self._rot
@property
def apothem(self):
"""The inradius of the RegularPolygon.
The apothem/inradius is the radius of the inscribed circle.
Returns
=======
apothem : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.apothem
sqrt(2)*r/2
"""
return self.radius * cos(S.Pi/self._n)
@property
def inradius(self):
"""
Alias for apothem.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.inradius
sqrt(2)*r/2
"""
return self.apothem
@property
def interior_angle(self):
"""Measure of the interior angles.
Returns
=======
interior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.interior_angle
3*pi/4
"""
return (self._n - 2)*S.Pi/self._n
@property
def exterior_angle(self):
"""Measure of the exterior angles.
Returns
=======
exterior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.exterior_angle
pi/4
"""
return 2*S.Pi/self._n
@property
def circumcircle(self):
"""The circumcircle of the RegularPolygon.
Returns
=======
circumcircle : Circle
See Also
========
circumcenter, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.circumcircle
Circle(Point2D(0, 0), 4)
"""
return Circle(self.center, self.radius)
@property
def incircle(self):
"""The incircle of the RegularPolygon.
Returns
=======
incircle : Circle
See Also
========
inradius, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 7)
>>> rp.incircle
Circle(Point2D(0, 0), 4*cos(pi/7))
"""
return Circle(self.center, self.apothem)
@property
def angles(self):
"""
Returns a dictionary with keys, the vertices of the Polygon,
and values, the interior angle at each vertex.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
Point2D(-5/2, 5*sqrt(3)/2): pi/3,
Point2D(5, 0): pi/3}
"""
ret = {}
ang = self.interior_angle
for v in self.vertices:
ret[v] = ang
return ret
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
The general Polygon.encloses_point method is called only if
a point is not within or beyond the incircle or circumcircle,
respectively.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import RegularPolygon, S, Point, Symbol
>>> p = RegularPolygon((0, 0), 3, 4)
>>> p.encloses_point(Point(0, 0))
True
>>> r, R = p.inradius, p.circumradius
>>> p.encloses_point(Point((r + R)/2, 0))
True
>>> p.encloses_point(Point(R/2, R/2 + (R - r)/10))
False
>>> t = Symbol('t', real=True)
>>> p.encloses_point(p.arbitrary_point().subs(t, S.Half))
False
>>> p.encloses_point(Point(5, 5))
False
"""
c = self.center
d = Segment(c, p).length
if d >= self.radius:
return False
elif d < self.inradius:
return True
else:
# now enumerate the RegularPolygon like a general polygon.
return Polygon.encloses_point(self, p)
def spin(self, angle):
"""Increment *in place* the virtual Polygon's rotation by ccw angle.
See also: rotate method which moves the center.
>>> from sympy import Polygon, Point, pi
>>> r = Polygon(Point(0,0), 1, n=3)
>>> r.vertices[0]
Point2D(1, 0)
>>> r.spin(pi/6)
>>> r.vertices[0]
Point2D(sqrt(3)/2, 1/2)
See Also
========
rotation
rotate : Creates a copy of the RegularPolygon rotated about a Point
"""
self._rot += angle
def rotate(self, angle, pt=None):
"""Override GeometryEntity.rotate to first rotate the RegularPolygon
about its center.
>>> from sympy import Point, RegularPolygon, Polygon, pi
>>> t = RegularPolygon(Point(1, 0), 1, 3)
>>> t.vertices[0] # vertex on x-axis
Point2D(2, 0)
>>> t.rotate(pi/2).vertices[0] # vertex on y axis now
Point2D(0, 2)
See Also
========
rotation
spin : Rotates a RegularPolygon in place
"""
r = type(self)(*self.args) # need a copy or else changes are in-place
r._rot += angle
return GeometryEntity.rotate(r, angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the radius that must be
scaled (if x == y) or else a new Polygon must be returned.
>>> from sympy import RegularPolygon
Symmetric scaling returns a RegularPolygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 2)
RegularPolygon(Point2D(0, 0), 2, 4, 0)
Asymmetric scaling returns a kite as a Polygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 1)
Polygon(Point2D(2, 0), Point2D(0, 1), Point2D(-2, 0), Point2D(0, -1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
if x != y:
return Polygon(*self.vertices).scale(x, y)
c, r, n, rot = self.args
r *= x
return self.func(c, r, n, rot)
def reflect(self, line):
"""Override GeometryEntity.reflect since this is not made of only
points.
Examples
========
>>> from sympy import RegularPolygon, Line
>>> RegularPolygon((0, 0), 1, 4).reflect(Line((0, 1), slope=-2))
RegularPolygon(Point2D(4/5, 2/5), -1, 4, atan(4/3))
"""
c, r, n, rot = self.args
v = self.vertices[0]
d = v - c
cc = c.reflect(line)
vv = v.reflect(line)
dd = vv - cc
# calculate rotation about the new center
# which will align the vertices
l1 = Ray((0, 0), dd)
l2 = Ray((0, 0), d)
ang = l1.closing_angle(l2)
rot += ang
# change sign of radius as point traversal is reversed
return self.func(cc, -r, n, rot)
@property
def vertices(self):
"""The vertices of the RegularPolygon.
Returns
=======
vertices : list
Each vertex is a Point.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.vertices
[Point2D(5, 0), Point2D(0, 5), Point2D(-5, 0), Point2D(0, -5)]
"""
c = self._center
r = abs(self._radius)
rot = self._rot
v = 2*S.Pi/self._n
return [Point(c.x + r*cos(k*v + rot), c.y + r*sin(k*v + rot))
for k in range(self._n)]
def __eq__(self, o):
if not isinstance(o, Polygon):
return False
elif not isinstance(o, RegularPolygon):
return Polygon.__eq__(o, self)
return self.args == o.args
def __hash__(self):
return super(RegularPolygon, self).__hash__()
class Triangle(Polygon):
"""
A polygon with three vertices and three sides.
Parameters
==========
points : sequence of Points
keyword: asa, sas, or sss to specify sides/angles of the triangle
Attributes
==========
vertices
altitudes
orthocenter
circumcenter
circumradius
circumcircle
inradius
incircle
exradii
medians
medial
nine_point_circle
Raises
======
GeometryError
If the number of vertices is not equal to three, or one of the vertices
is not a Point, or a valid keyword is not given.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
Triangle(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
Keywords sss, sas, or asa can be used to give the desired
side lengths (in order) and interior angles (in degrees) that
define the triangle:
>>> Triangle(sss=(3, 4, 5))
Triangle(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> Triangle(asa=(30, 1, 30))
Triangle(Point2D(0, 0), Point2D(1, 0), Point2D(1/2, sqrt(3)/6))
>>> Triangle(sas=(1, 45, 2))
Triangle(Point2D(0, 0), Point2D(2, 0), Point2D(sqrt(2)/2, sqrt(2)/2))
"""
def __new__(cls, *args, **kwargs):
if len(args) != 3:
if 'sss' in kwargs:
return _sss(*[simplify(a) for a in kwargs['sss']])
if 'asa' in kwargs:
return _asa(*[simplify(a) for a in kwargs['asa']])
if 'sas' in kwargs:
return _sas(*[simplify(a) for a in kwargs['sas']])
msg = "Triangle instantiates with three points or a valid keyword."
raise GeometryError(msg)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = sorted(
[nodup[i], nodup[i + 1], nodup[i + 2]], key=default_sort_key)
if Point.is_collinear(a, b, c):
nodup[i] = a
nodup[i + 1] = None
nodup.pop(i + 1)
i += 1
vertices = list(filter(lambda x: x is not None, nodup))
if len(vertices) == 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def vertices(self):
"""The triangle's vertices
Returns
=======
vertices : tuple
Each element in the tuple is a Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t.vertices
(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
"""
return self.args
def is_similar(t1, t2):
"""Is another triangle similar to this one.
Two triangles are similar if one can be uniformly scaled to the other.
Parameters
==========
other: Triangle
Returns
=======
is_similar : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -3))
>>> t1.is_similar(t2)
True
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -4))
>>> t1.is_similar(t2)
False
"""
if not isinstance(t2, Polygon):
return False
s1_1, s1_2, s1_3 = [side.length for side in t1.sides]
s2 = [side.length for side in t2.sides]
def _are_similar(u1, u2, u3, v1, v2, v3):
e1 = simplify(u1/v1)
e2 = simplify(u2/v2)
e3 = simplify(u3/v3)
return bool(e1 == e2) and bool(e2 == e3)
        # There are only 6 permutations, so write them out
return _are_similar(s1_1, s1_2, s1_3, *s2) or \
_are_similar(s1_1, s1_3, s1_2, *s2) or \
_are_similar(s1_2, s1_1, s1_3, *s2) or \
_are_similar(s1_2, s1_3, s1_1, *s2) or \
_are_similar(s1_3, s1_1, s1_2, *s2) or \
_are_similar(s1_3, s1_2, s1_1, *s2)
def is_equilateral(self):
"""Are all the sides the same length?
Returns
=======
is_equilateral : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon
is_isosceles, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_equilateral()
False
>>> from sympy import sqrt
>>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))
>>> t2.is_equilateral()
True
"""
return not has_variety(s.length for s in self.sides)
def is_isosceles(self):
"""Are two or more of the sides the same length?
Returns
=======
is_isosceles : boolean
See Also
========
is_equilateral, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(2, 4))
>>> t1.is_isosceles()
True
"""
return has_dups(s.length for s in self.sides)
def is_scalene(self):
"""Are all the sides of the triangle of different lengths?
Returns
=======
is_scalene : boolean
See Also
========
is_equilateral, is_isosceles, is_right
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(1, 4))
>>> t1.is_scalene()
True
"""
return not has_dups(s.length for s in self.sides)
def is_right(self):
"""Is the triangle right-angled.
Returns
=======
is_right : boolean
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular
is_equilateral, is_isosceles, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_right()
True
"""
s = self.sides
return Segment.is_perpendicular(s[0], s[1]) or \
Segment.is_perpendicular(s[1], s[2]) or \
Segment.is_perpendicular(s[0], s[2])
@property
def altitudes(self):
"""The altitudes of the triangle.
An altitude of a triangle is a segment through a vertex,
perpendicular to the opposite side, with length being the
height of the vertex measured from the line containing the side.
Returns
=======
altitudes : dict
The dictionary consists of keys which are vertices and values
which are Segments.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment.length
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.altitudes[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: s[1].perpendicular_segment(v[0]),
v[1]: s[2].perpendicular_segment(v[1]),
v[2]: s[0].perpendicular_segment(v[2])}
@property
def orthocenter(self):
"""The orthocenter of the triangle.
The orthocenter is the intersection of the altitudes of a triangle.
It may lie inside, outside or on the triangle.
Returns
=======
orthocenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.orthocenter
Point2D(0, 0)
"""
a = self.altitudes
v = self.vertices
return Line(a[v[0]]).intersection(Line(a[v[1]]))[0]
@property
def circumcenter(self):
"""The circumcenter of the triangle
The circumcenter is the center of the circumcircle.
Returns
=======
circumcenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcenter
Point2D(1/2, 1/2)
"""
a, b, c = [x.perpendicular_bisector() for x in self.sides]
return a.intersection(b)[0]
@property
def circumradius(self):
"""The radius of the circumcircle of the triangle.
Returns
=======
        circumradius : number or Basic instance
See Also
========
sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import Point, Triangle
>>> a = Symbol('a')
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, a)
>>> t = Triangle(p1, p2, p3)
>>> t.circumradius
sqrt(a**2/4 + 1/4)
"""
return Point.distance(self.circumcenter, self.vertices[0])
@property
def circumcircle(self):
"""The circle which passes through the three vertices of the triangle.
Returns
=======
circumcircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcircle
Circle(Point2D(1/2, 1/2), sqrt(2)/2)
"""
return Circle(self.circumcenter, self.circumradius)
def bisectors(self):
"""The angle bisectors of the triangle.
An angle bisector of a triangle is a straight line through a vertex
which cuts the corresponding angle in half.
Returns
=======
bisectors : dict
Each key is a vertex (Point) and each value is the corresponding
bisector (Segment).
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy.geometry import Point, Triangle, Segment
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> from sympy import sqrt
>>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))
True
"""
# use lines containing sides so containment check during
# intersection calculation can be avoided, thus reducing
# the processing time for calculating the bisectors
s = [Line(l) for l in self.sides]
v = self.vertices
c = self.incenter
l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])
l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])
l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])
return {v[0]: l1, v[1]: l2, v[2]: l3}
@property
def incenter(self):
"""The center of the incircle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incenter : Point
See Also
========
incircle, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.incenter
Point2D(1 - sqrt(2)/2, 1 - sqrt(2)/2)
"""
s = self.sides
l = Matrix([s[i].length for i in [1, 2, 0]])
p = sum(l)
v = self.vertices
x = simplify(l.dot(Matrix([vi.x for vi in v]))/p)
y = simplify(l.dot(Matrix([vi.y for vi in v]))/p)
return Point(x, y)
@property
def inradius(self):
"""The radius of the incircle.
Returns
=======
        inradius : number or Basic instance
See Also
========
incircle, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(4, 0), Point(0, 3)
>>> t = Triangle(p1, p2, p3)
>>> t.inradius
1
"""
return simplify(2 * self.area / self.perimeter)
@property
def incircle(self):
"""The incircle of the triangle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(2, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.incircle
Circle(Point2D(2 - sqrt(2), 2 - sqrt(2)), 2 - sqrt(2))
"""
return Circle(self.incenter, self.inradius)
@property
def exradii(self):
        The radii of the excircles of the triangle.
An excircle of the triangle is a circle lying outside the triangle,
tangent to one of its sides and tangent to the extensions of the
other two.
Returns
=======
exradii : dict
See Also
========
sympy.geometry.polygon.Triangle.inradius
Examples
========
The exradius touches the side of the triangle to which it is keyed, e.g.
the exradius touching side 2 is:
>>> from sympy.geometry import Point, Triangle, Segment2D, Point2D
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.exradii[t.sides[2]]
-2 + sqrt(10)
References
==========
        .. [1] http://mathworld.wolfram.com/Exradius.html
        .. [2] http://mathworld.wolfram.com/Excircles.html
"""
side = self.sides
a = side[0].length
b = side[1].length
c = side[2].length
s = (a+b+c)/2
area = self.area
exradii = {self.sides[0]: simplify(area/(s-a)),
self.sides[1]: simplify(area/(s-b)),
self.sides[2]: simplify(area/(s-c))}
return exradii
@property
def excenters(self):
"""Excenters of the triangle.
An excenter is the center of a circle that is tangent to a side of the
triangle and the extensions of the other two sides.
Returns
=======
excenters : dict
Examples
========
        The excenters are keyed to the side of the triangle to which their
        corresponding excircle is tangent, e.g. the excenter of the circle
        touching side 0 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.excenters[t.sides[0]]
Point2D(12*sqrt(10), 2/3 + sqrt(10)/3)
See Also
========
sympy.geometry.polygon.Triangle.exradii
References
==========
.. [1] http://mathworld.wolfram.com/Excircles.html
"""
s = self.sides
v = self.vertices
a = s[0].length
b = s[1].length
c = s[2].length
x = [v[0].x, v[1].x, v[2].x]
y = [v[0].y, v[1].y, v[2].y]
exc_coords = {
"x1": simplify(-a*x[0]+b*x[1]+c*x[2]/(-a+b+c)),
"x2": simplify(a*x[0]-b*x[1]+c*x[2]/(a-b+c)),
"x3": simplify(a*x[0]+b*x[1]-c*x[2]/(a+b-c)),
"y1": simplify(-a*y[0]+b*y[1]+c*y[2]/(-a+b+c)),
"y2": simplify(a*y[0]-b*y[1]+c*y[2]/(a-b+c)),
"y3": simplify(a*y[0]+b*y[1]-c*y[2]/(a+b-c))
}
excenters = {
s[0]: Point(exc_coords["x1"], exc_coords["y1"]),
s[1]: Point(exc_coords["x2"], exc_coords["y2"]),
s[2]: Point(exc_coords["x3"], exc_coords["y3"])
}
return excenters
@property
def medians(self):
"""The medians of the triangle.
A median of a triangle is a straight line through a vertex and the
midpoint of the opposite side, and divides the triangle into two
equal areas.
Returns
=======
medians : dict
Each key is a vertex (Point) and each value is the median (Segment)
at that point.
See Also
========
sympy.geometry.point.Point.midpoint, sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medians[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: Segment(v[0], s[1].midpoint),
v[1]: Segment(v[1], s[2].midpoint),
v[2]: Segment(v[2], s[0].midpoint)}
@property
def medial(self):
"""The medial triangle of the triangle.
The triangle which is formed from the midpoints of the three sides.
Returns
=======
medial : Triangle
See Also
========
sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medial
Triangle(Point2D(1/2, 0), Point2D(1/2, 1/2), Point2D(0, 1/2))
"""
s = self.sides
return Triangle(s[0].midpoint, s[1].midpoint, s[2].midpoint)
@property
def nine_point_circle(self):
"""The nine-point circle of the triangle.
Nine-point circle is the circumcircle of the medial triangle, which
passes through the feet of altitudes and the middle points of segments
connecting the vertices and the orthocenter.
Returns
=======
nine_point_circle : Circle
See also
========
sympy.geometry.line.Segment.midpoint
sympy.geometry.polygon.Triangle.medial
sympy.geometry.polygon.Triangle.orthocenter
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.nine_point_circle
Circle(Point2D(1/4, 1/4), sqrt(2)/4)
"""
return Circle(*self.medial.vertices)
@property
def eulerline(self):
"""The Euler line of the triangle.
The line which passes through circumcenter, centroid and orthocenter.
Returns
=======
eulerline : Line (or Point for equilateral triangles in which case all
centers coincide)
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.eulerline
Line2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
if self.is_equilateral():
return self.orthocenter
return Line(self.orthocenter, self.circumcenter)
def rad(d):
"""Return the radian value for the given degrees (pi = 180 degrees)."""
return d*pi/180
def deg(r):
"""Return the degree value for the given radians (pi = 180 degrees)."""
return r/pi*180
def _slope(d):
rv = tan(rad(d))
return rv
def _asa(d1, l, d2):
"""Return triangle having side with length l on the x-axis."""
xy = Line((0, 0), slope=_slope(d1)).intersection(
Line((l, 0), slope=_slope(180 - d2)))[0]
return Triangle((0, 0), (l, 0), xy)
def _sss(l1, l2, l3):
"""Return triangle having side of length l1 on the x-axis."""
c1 = Circle((0, 0), l3)
c2 = Circle((l1, 0), l2)
inter = [a for a in c1.intersection(c2) if a.y.is_nonnegative]
if not inter:
return None
pt = inter[0]
return Triangle((0, 0), (l1, 0), pt)
def _sas(l1, d, l2):
"""Return triangle having side with length l2 on the x-axis."""
p1 = Point(0, 0)
p2 = Point(l2, 0)
p3 = Point(cos(rad(d))*l1, sin(rad(d))*l1)
return Triangle(p1, p2, p3)
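# A doctest-style sketch tying the helpers above back to the Triangle keywords
# they implement (illustrative, assuming sympy.geometry is importable):
#     >>> from sympy.geometry import Triangle
#     >>> Triangle(sss=(3, 4, 5)).is_right()
#     True
#     >>> Triangle(asa=(30, 1, 30)).is_isosceles()
#     True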
| 28.458868
| 133
| 0.533041
|
b0d41be41cde6cf77d9d38f62d50a2d68fc715fd
| 1,148
|
py
|
Python
|
setup.py
|
ekopach/notion-py
|
3f6a972ac04fad14e1646a865f80df43c74a9500
|
[
"MIT"
] | null | null | null |
setup.py
|
ekopach/notion-py
|
3f6a972ac04fad14e1646a865f80df43c74a9500
|
[
"MIT"
] | null | null | null |
setup.py
|
ekopach/notion-py
|
3f6a972ac04fad14e1646a865f80df43c74a9500
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
def get_requirements(fname):
"Takes requirements from requirements.txt and returns a list."
with open(fname) as fp:
reqs = list()
for lib in fp.read().split("\n"):
            # Ignore blank lines, pip flags ("-...") and comments ("#...")
            lib = lib.strip()
            if lib and not lib.startswith(("-", "#")):
                reqs.append(lib)
return reqs
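# Illustrative behaviour of get_requirements, assuming a requirements.txt like:
#     requests>=2.0            -> kept
#     # a comment              -> skipped
#     --extra-index-url <url>  -> skipped
#     (blank line)             -> skipped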
install_requires = get_requirements("requirements.txt")
setuptools.setup(
name="notion",
version="0.0.28",
author="Jamie Alexandre",
author_email="jamalex+python@gmail.com",
description="Unofficial Python API client for Notion.so",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jamalex/notion-py",
install_requires=install_requires,
include_package_data=True,
packages=setuptools.find_packages(),
python_requires=">=3.5",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 30.210526
| 66
| 0.654181
|
2eef24403a9440c10b9406e2161c2061d4dee23e
| 1,522
|
py
|
Python
|
pypy/module/_md5/interp_md5.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
pypy/module/_md5/interp_md5.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
pypy/module/_md5/interp_md5.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from rpython.rlib import rmd5
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app, unwrap_spec
class W_MD5(Wrappable, rmd5.RMD5):
"""
A subclass of RMD5 that can be exposed to app-level.
"""
def __init__(self, space):
self.space = space
self._init()
@unwrap_spec(string='bufferstr')
def update_w(self, string):
self.update(string)
def digest_w(self):
return self.space.wrap(self.digest())
def hexdigest_w(self):
return self.space.wrap(self.hexdigest())
def copy_w(self):
clone = W_MD5(self.space)
clone._copyfrom(self)
return self.space.wrap(clone)
@unwrap_spec(initialdata='bufferstr')
def W_MD5___new__(space, w_subtype, initialdata=''):
"""
Create a new md5 object and call its initializer.
"""
w_md5 = space.allocate_instance(W_MD5, w_subtype)
md5 = space.interp_w(W_MD5, w_md5)
W_MD5.__init__(md5, space)
md5.update(initialdata)
return w_md5
W_MD5.typedef = TypeDef(
'MD5Type',
__new__ = interp2app(W_MD5___new__),
update = interp2app(W_MD5.update_w),
digest = interp2app(W_MD5.digest_w),
hexdigest = interp2app(W_MD5.hexdigest_w),
copy = interp2app(W_MD5.copy_w),
digest_size = 16,
digestsize = 16,
block_size = 64,
__doc__ = """md5(arg) -> return new md5 object.
If arg is present, the method call update(arg) is made.""")
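# App-level usage once this module is built into PyPy (illustrative; assumes the
# interp-level module is registered under the name _md5):
#     import _md5
#     _md5.md5('abc').hexdigest()   # -> '900150983cd24fb0d6963f7d28e17f72'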
| 26.701754
| 60
| 0.670171
|
d080fe903e5c360451a30a7e40ea3572afd9a5f5
| 5,453
|
py
|
Python
|
codenew/d2lzh_pytorch/rnn_pytorch.py
|
zzq12368/Dive-into-DL-PyTorchzzq
|
f627054a93fb1d453605ab4565b9cfd1e855e9e4
|
[
"Apache-2.0"
] | null | null | null |
codenew/d2lzh_pytorch/rnn_pytorch.py
|
zzq12368/Dive-into-DL-PyTorchzzq
|
f627054a93fb1d453605ab4565b9cfd1e855e9e4
|
[
"Apache-2.0"
] | null | null | null |
codenew/d2lzh_pytorch/rnn_pytorch.py
|
zzq12368/Dive-into-DL-PyTorchzzq
|
f627054a93fb1d453605ab4565b9cfd1e855e9e4
|
[
"Apache-2.0"
] | null | null | null |
import time
import math
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import utils as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()
print(len(corpus_indices),corpus_indices)
print(len(char_to_idx),char_to_idx)
print(len(idx_to_char),idx_to_char)
print(vocab_size)
def one_hot(x, n_class, dtype=torch.float32):
# X shape: (batch), output shape: (batch, n_class)
x = x.long()
res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)
res.scatter_(1, x.view(-1, 1), 1)
return res
x = torch.tensor([0, 2])
print(one_hot(x, vocab_size))
def to_onehot(X, n_class):
# X shape: (batch, seq_len), output: seq_len elements of (batch, n_class)
return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]
X = torch.arange(10).view(2, 5)
print(X)
inputs = to_onehot(X, vocab_size)
print(inputs)
print(len(inputs), inputs[0].shape)
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
print('will use', device)
num_hiddens = 256
# rnn_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)  # also tested
rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=num_hiddens)
# This class is also saved in the d2lzh_pytorch package for later reuse
class RNNModel(nn.Module):
def __init__(self, rnn_layer, vocab_size):
super(RNNModel, self).__init__()
self.rnn = rnn_layer
self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
self.vocab_size = vocab_size
self.dense = nn.Linear(self.hidden_size, vocab_size)
self.state = None
def forward(self, inputs, state): # inputs: (batch, seq_len)
        # Get the one-hot vector representation
        X = d2l.to_onehot(inputs, self.vocab_size)  # X is a list
        Y, self.state = self.rnn(torch.stack(X), state)
        # The fully connected layer first reshapes Y to
        # (num_steps * batch_size, num_hiddens); its output has shape
        # (num_steps * batch_size, vocab_size)
output = self.dense(Y.view(-1, Y.shape[-1]))
return output, self.state
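# Shape sketch for RNNModel.forward (illustrative, reusing the X defined above):
#     model = RNNModel(rnn_layer, vocab_size)
#     output, state = model(X, None)   # X: (batch_size=2, seq_len=5)
#     output.shape                     # -> (seq_len * batch_size, vocab_size) = (10, vocab_size)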
# This function is also saved in the d2lzh_pytorch package for later reuse
def predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,
char_to_idx):
state = None
    output = [char_to_idx[prefix[0]]]  # output records the prefix plus the generated characters
for t in range(num_chars + len(prefix) - 1):
X = torch.tensor([output[-1]], device=device).view(1, 1)
if state is not None:
if isinstance(state, tuple): # LSTM, state:(h, c)
state = (state[0].to(device), state[1].to(device))
else:
state = state.to(device)
(Y, state) = model(X, state)
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
else:
output.append(int(Y.argmax(dim=1).item()))
return ''.join([idx_to_char[i] for i in output])
# This function is also saved in the d2lzh_pytorch package for later reuse
def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
corpus_indices, idx_to_char, char_to_idx,
num_epochs, num_steps, lr, clipping_theta,
batch_size, pred_period, pred_len, prefixes):
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
model.to(device)
state = None
for epoch in range(num_epochs):
l_sum, n, start = 0.0, 0, time.time()
        data_iter = d2l.data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive (adjacent) sampling
for X, Y in data_iter:
if state is not None:
                # Detach the hidden state from the computation graph so that the
                # gradient only depends on the mini-batch read in this iteration
                # (this keeps the cost of back-propagation bounded)
if isinstance (state, tuple): # LSTM, state:(h, c)
state = (state[0].detach(), state[1].detach())
else:
state = state.detach()
            (output, state) = model(X, state)  # output shape: (num_steps * batch_size, vocab_size)
            # Y has shape (batch_size, num_steps); transpose and flatten it into a
            # vector of length batch_size * num_steps so that it lines up with the
            # rows of the output
y = torch.transpose(Y, 0, 1).contiguous().view(-1)
l = loss(output, y.long())
optimizer.zero_grad()
l.backward()
            # gradient clipping
d2l.grad_clipping(model.parameters(), clipping_theta, device)
optimizer.step()
l_sum += l.item() * y.shape[0]
n += y.shape[0]
try:
perplexity = math.exp(l_sum / n)
except OverflowError:
perplexity = float('inf')
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, perplexity, time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn_pytorch(
prefix, pred_len, model, vocab_size, device, idx_to_char,
char_to_idx))
num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e-3, 1e-2  # note the learning rate used here
num_steps = 35
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
model = RNNModel(rnn_layer, vocab_size).to(device)
train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
corpus_indices, idx_to_char, char_to_idx,
num_epochs, num_steps, lr, clipping_theta,
batch_size, pred_period, pred_len, prefixes)
| 40.095588
| 99
| 0.618192
|
5b5a95baebf1e71e2e990d1db212cdd381ad16d6
| 234
|
py
|
Python
|
python_utility/git_observer.py
|
FunTimeCoding/python-utility
|
e91df316684a07161aae33576329f9092d2e97e6
|
[
"MIT"
] | null | null | null |
python_utility/git_observer.py
|
FunTimeCoding/python-utility
|
e91df316684a07161aae33576329f9092d2e97e6
|
[
"MIT"
] | null | null | null |
python_utility/git_observer.py
|
FunTimeCoding/python-utility
|
e91df316684a07161aae33576329f9092d2e97e6
|
[
"MIT"
] | null | null | null |
# TODO: Fetch master branch of a repository repeatedly and detect merges
# into master.
# TODO: Detect tag creation.
# TODO: Detect feature branch creation.
# http://gitpython.readthedocs.io/en/stable/tutorial.html#meet-the-repo-type
| 39
| 76
| 0.777778
|
10c3578b5204750fab93b551ff5fc0b3a1a0c494
| 5,144
|
py
|
Python
|
voc_annotation.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
voc_annotation.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
voc_annotation.py
|
TheEvolt/yolo3-pytorch
|
4c31e8ab3e619dcd93b9d1dc11b89aa252bca84b
|
[
"MIT"
] | null | null | null |
import os
import random
import xml.etree.ElementTree as ET
from utils.utils import get_classes
# --------------------------------------------------------------------------------------------------------------------------------#
# annotation_mode selects what this script computes when it is run:
#   0: the whole labelling pipeline - the txt files in VOCdevkit/VOC2007/ImageSets plus the 2007_train.txt and 2007_val.txt used for training
#   1: only the txt files in VOCdevkit/VOC2007/ImageSets
#   2: only the 2007_train.txt and 2007_val.txt used for training
# --------------------------------------------------------------------------------------------------------------------------------#
annotation_mode = 0
# -------------------------------------------------------------------#
#   Must be edited: it supplies the class names written into 2007_train.txt and 2007_val.txt.
#   Keep it identical to the classes_path used for training and prediction.
#   If the generated 2007_train.txt contains no object information,
#   the classes were not set correctly.
#   Only used when annotation_mode is 0 or 2.
# -------------------------------------------------------------------#
classes_path = "model_data/voc_classes.txt"
# --------------------------------------------------------------------------------------------------------------------------------#
# trainval_percent sets the (train + val) : test ratio; by default (train + val) : test = 9 : 1
# train_percent sets the train : val ratio inside (train + val); by default train : val = 9 : 1
# Only used when annotation_mode is 0 or 1.
# --------------------------------------------------------------------------------------------------------------------------------#
trainval_percent = 0.9
train_percent = 0.9
# -------------------------------------------------------#
# Points to the folder that holds the VOC dataset.
# By default it is the VOC dataset under the repository root.
# -------------------------------------------------------#
VOCdevkit_path = "VOCdevkit"
VOCdevkit_sets = [("2007", "train"), ("2007", "val")]
classes, _ = get_classes(classes_path)
def convert_annotation(year, image_id, list_file):
in_file = open(
os.path.join(VOCdevkit_path, "VOC%s/Annotations/%s.xml" % (year, image_id)),
encoding="utf-8",
)
tree = ET.parse(in_file)
root = tree.getroot()
for obj in root.iter("object"):
difficult = 0
        if obj.find("difficult") is not None:
difficult = obj.find("difficult").text
cls = obj.find("name").text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find("bndbox")
b = (
int(float(xmlbox.find("xmin").text)),
int(float(xmlbox.find("ymin").text)),
int(float(xmlbox.find("xmax").text)),
int(float(xmlbox.find("ymax").text)),
)
list_file.write(" " + ",".join([str(a) for a in b]) + "," + str(cls_id))
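# Each line written to 2007_train.txt / 2007_val.txt therefore has the form
# (values below are illustrative):
#     /abs/path/VOCdevkit/VOC2007/JPEGImages/000005.jpg 263,211,324,339,8 165,264,253,372,8
# i.e. the absolute image path followed by one "xmin,ymin,xmax,ymax,class_id" group per object.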
if __name__ == "__main__":
random.seed(0)
if annotation_mode == 0 or annotation_mode == 1:
print("Generate txt in ImageSets.")
xmlfilepath = os.path.join(VOCdevkit_path, "VOC2007/Annotations")
saveBasePath = os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Main")
temp_xml = os.listdir(xmlfilepath)
total_xml = []
for xml in temp_xml:
if xml.endswith(".xml"):
total_xml.append(xml)
num = len(total_xml)
        indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
        trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)
print("train and val size", tv)
print("train size", tr)
ftrainval = open(os.path.join(saveBasePath, "trainval.txt"), "w")
ftest = open(os.path.join(saveBasePath, "test.txt"), "w")
ftrain = open(os.path.join(saveBasePath, "train.txt"), "w")
fval = open(os.path.join(saveBasePath, "val.txt"), "w")
        for i in indices:
name = total_xml[i][:-4] + "\n"
if i in trainval:
ftrainval.write(name)
if i in train:
ftrain.write(name)
else:
fval.write(name)
else:
ftest.write(name)
ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
print("Generate txt in ImageSets done.")
if annotation_mode == 0 or annotation_mode == 2:
print("Generate 2007_train.txt and 2007_val.txt for train.")
for year, image_set in VOCdevkit_sets:
image_ids = (
open(
os.path.join(
VOCdevkit_path,
"VOC%s/ImageSets/Main/%s.txt" % (year, image_set),
),
encoding="utf-8",
)
.read()
.strip()
.split()
)
list_file = open("%s_%s.txt" % (year, image_set), "w", encoding="utf-8")
for image_id in image_ids:
list_file.write(
"%s/VOC%s/JPEGImages/%s.jpg"
% (os.path.abspath(VOCdevkit_path), year, image_id)
)
convert_annotation(year, image_id, list_file)
list_file.write("\n")
list_file.close()
print("Generate 2007_train.txt and 2007_val.txt for train done.")
| 38.38806
| 131
| 0.488336
|
7e13d0a4bf2a912d978e10457a3af0a64b50a4d1
| 286
|
py
|
Python
|
discord_slash/__init__.py
|
PredaaA/discord-py-slash-command
|
66deebf6ebbad70cb404f34b26b25e9519478326
|
[
"MIT"
] | null | null | null |
discord_slash/__init__.py
|
PredaaA/discord-py-slash-command
|
66deebf6ebbad70cb404f34b26b25e9519478326
|
[
"MIT"
] | null | null | null |
discord_slash/__init__.py
|
PredaaA/discord-py-slash-command
|
66deebf6ebbad70cb404f34b26b25e9519478326
|
[
"MIT"
] | null | null | null |
"""
discord-py-slash-command
~~~~~~~~~~~~~~~~~~~~~~~~
Simple Discord Slash Command extension for discord.py
:copyright: (c) 2020 eunwoo1104
:license: MIT
"""
from .client import SlashCommand
from .model import SlashContext
from .utils import manage_commands
__version__ = "1.0.4.1"
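# Typical wiring (illustrative; assumes an existing discord.py client named `bot`):
#     from discord_slash import SlashCommand
#     slash = SlashCommand(bot)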
| 17.875
| 53
| 0.702797
|
5bca0ad97a1ea3d37652dd1f5400d7b18f0bffab
| 252
|
py
|
Python
|
boss/boss/doctype/client/client.py
|
thispl/boss
|
e93d74eefd7b200fe7d1fabb1ea9d5b13138b632
|
[
"MIT"
] | null | null | null |
boss/boss/doctype/client/client.py
|
thispl/boss
|
e93d74eefd7b200fe7d1fabb1ea9d5b13138b632
|
[
"MIT"
] | null | null | null |
boss/boss/doctype/client/client.py
|
thispl/boss
|
e93d74eefd7b200fe7d1fabb1ea9d5b13138b632
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Client(Document):
pass
| 22.909091
| 49
| 0.769841
|
4c8fe88f953f61ea0411ed6a81cc316e5bf377ca
| 1,947
|
py
|
Python
|
alerta/webhooks/graylog.py
|
mustafaugurhancar/alerta
|
608b2db9117ecb8400c29f30bc549d9a99e9eee7
|
[
"Apache-2.0"
] | 1
|
2018-03-30T12:38:47.000Z
|
2018-03-30T12:38:47.000Z
|
alerta/webhooks/graylog.py
|
mustafaugurhancar/alerta
|
608b2db9117ecb8400c29f30bc549d9a99e9eee7
|
[
"Apache-2.0"
] | null | null | null |
alerta/webhooks/graylog.py
|
mustafaugurhancar/alerta
|
608b2db9117ecb8400c29f30bc549d9a99e9eee7
|
[
"Apache-2.0"
] | null | null | null |
from flask import request, g, jsonify
from flask_cors import cross_origin
from alerta.auth.utils import permission
from alerta.exceptions import ApiError, RejectException
from alerta.models.alert import Alert
from alerta.utils.api import process_alert, add_remote_ip
from . import webhooks
def parse_graylog(alert):
return Alert(
resource=alert['stream']['title'],
event="Alert",
environment='Development',
service=["test"],
severity="critical",
value="n/a",
text=alert['check_result']['result_description'],
attributes={'checkId': alert['check_result']['triggered_condition']['id']},
origin='Graylog',
event_type='performanceAlert',
raw_data=alert)
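# Minimal sketch of the Graylog payload fields this parser actually reads
# (all values are illustrative):
#     {
#         "stream": {"title": "my stream"},
#         "check_result": {
#             "result_description": "Stream had 5 messages in the last minute",
#             "triggered_condition": {"id": "c1b2a3d4"}
#         }
#     }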
@webhooks.route('/webhooks/graylog', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission('write:webhooks')
def graylog():
try:
incomingAlert = parse_graylog(request.json)
except ValueError as e:
raise ApiError(str(e), 400)
if request.args.get('event', None):
incomingAlert.event = request.args.get('event')
if request.args.get('event_type', None):
incomingAlert.event_type = request.args.get('event_type')
if request.args.get('environment', None):
incomingAlert.environment = request.args.get('environment')
if request.args.get('service', None):
incomingAlert.service = request.args.get('service').split(",")
if request.args.get('severity', None):
incomingAlert.severity = request.args.get('severity')
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of graylog check failed", 500)
| 30.904762
| 83
| 0.672316
|
2f604854b3279cb1c2562d90c4ed8daebb26e870
| 8,380
|
py
|
Python
|
splunk_sdk/forwarders/v2beta1/gen_models.py
|
declanshanaghy/splunk-cloud-sdk-python
|
c36f5c968512d54f44f95271bc64d82da19aedba
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
splunk_sdk/forwarders/v2beta1/gen_models.py
|
declanshanaghy/splunk-cloud-sdk-python
|
c36f5c968512d54f44f95271bc64d82da19aedba
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
splunk_sdk/forwarders/v2beta1/gen_models.py
|
declanshanaghy/splunk-cloud-sdk-python
|
c36f5c968512d54f44f95271bc64d82da19aedba
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Splunk Forwarder Service
Send data from a Splunk forwarder to the Splunk Forwarder service in Splunk Cloud Services.
OpenAPI spec version: v2beta1.1 (recommended default)
Generated by: https://openapi-generator.tech
"""
from datetime import datetime
from typing import List, Dict
from splunk_sdk.common.sscmodel import SSCModel
from splunk_sdk.base_client import dictify, inflate
from enum import Enum
class Certificate(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "Certificate":
instance = Certificate.__new__(Certificate)
instance._attrs = model
return instance
def __init__(self, pem: "str" = None, **extra):
"""Certificate"""
self._attrs = dict()
if pem is not None:
self._attrs["pem"] = pem
for k, v in extra.items():
self._attrs[k] = v
@property
def pem(self) -> "str":
""" Gets the pem of this Certificate.
"""
return self._attrs.get("pem")
@pem.setter
def pem(self, pem: "str"):
"""Sets the pem of this Certificate.
:param pem: The pem of this Certificate.
:type: str
"""
self._attrs["pem"] = pem
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
class CertificateInfo(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "CertificateInfo":
instance = CertificateInfo.__new__(CertificateInfo)
instance._attrs = model
return instance
def __init__(self, content: "str" = None, hash: "str" = None, issuer: "str" = None, last_update: "datetime" = None, not_after: "datetime" = None, not_before: "datetime" = None, slot: "int" = None, subject: "str" = None, **extra):
"""CertificateInfo"""
self._attrs = dict()
if content is not None:
self._attrs["content"] = content
if hash is not None:
self._attrs["hash"] = hash
if issuer is not None:
self._attrs["issuer"] = issuer
if last_update is not None:
self._attrs["lastUpdate"] = last_update
if not_after is not None:
self._attrs["notAfter"] = not_after
if not_before is not None:
self._attrs["notBefore"] = not_before
if slot is not None:
self._attrs["slot"] = slot
if subject is not None:
self._attrs["subject"] = subject
for k, v in extra.items():
self._attrs[k] = v
@property
def content(self) -> "str":
""" Gets the content of this CertificateInfo.
"""
return self._attrs.get("content")
@content.setter
def content(self, content: "str"):
"""Sets the content of this CertificateInfo.
:param content: The content of this CertificateInfo.
:type: str
"""
self._attrs["content"] = content
@property
def hash(self) -> "str":
""" Gets the hash of this CertificateInfo.
"""
return self._attrs.get("hash")
@hash.setter
def hash(self, hash: "str"):
"""Sets the hash of this CertificateInfo.
:param hash: The hash of this CertificateInfo.
:type: str
"""
self._attrs["hash"] = hash
@property
def issuer(self) -> "str":
""" Gets the issuer of this CertificateInfo.
"""
return self._attrs.get("issuer")
@issuer.setter
def issuer(self, issuer: "str"):
"""Sets the issuer of this CertificateInfo.
:param issuer: The issuer of this CertificateInfo.
:type: str
"""
self._attrs["issuer"] = issuer
@property
def last_update(self) -> "datetime":
""" Gets the last_update of this CertificateInfo.
"""
return self._attrs.get("lastUpdate")
@last_update.setter
def last_update(self, last_update: "datetime"):
"""Sets the last_update of this CertificateInfo.
:param last_update: The last_update of this CertificateInfo.
:type: datetime
"""
self._attrs["lastUpdate"] = last_update
@property
def not_after(self) -> "datetime":
""" Gets the not_after of this CertificateInfo.
"""
return self._attrs.get("notAfter")
@not_after.setter
def not_after(self, not_after: "datetime"):
"""Sets the not_after of this CertificateInfo.
:param not_after: The not_after of this CertificateInfo.
:type: datetime
"""
self._attrs["notAfter"] = not_after
@property
def not_before(self) -> "datetime":
""" Gets the not_before of this CertificateInfo.
"""
return self._attrs.get("notBefore")
@not_before.setter
def not_before(self, not_before: "datetime"):
"""Sets the not_before of this CertificateInfo.
:param not_before: The not_before of this CertificateInfo.
:type: datetime
"""
self._attrs["notBefore"] = not_before
@property
def slot(self) -> "int":
""" Gets the slot of this CertificateInfo.
"""
return self._attrs.get("slot")
@slot.setter
def slot(self, slot: "int"):
"""Sets the slot of this CertificateInfo.
:param slot: The slot of this CertificateInfo.
:type: int
"""
self._attrs["slot"] = slot
@property
def subject(self) -> "str":
""" Gets the subject of this CertificateInfo.
"""
return self._attrs.get("subject")
@subject.setter
def subject(self, subject: "str"):
"""Sets the subject of this CertificateInfo.
:param subject: The subject of this CertificateInfo.
:type: str
"""
self._attrs["subject"] = subject
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
class Error(SSCModel):
@staticmethod
def _from_dict(model: dict) -> "Error":
instance = Error.__new__(Error)
instance._attrs = model
return instance
def __init__(self, code: "str" = None, details: "object" = None, message: "str" = None, **extra):
"""Error"""
self._attrs = dict()
if code is not None:
self._attrs["code"] = code
if details is not None:
self._attrs["details"] = details
if message is not None:
self._attrs["message"] = message
for k, v in extra.items():
self._attrs[k] = v
@property
def code(self) -> "str":
""" Gets the code of this Error.
"""
return self._attrs.get("code")
@code.setter
def code(self, code: "str"):
"""Sets the code of this Error.
:param code: The code of this Error.
:type: str
"""
self._attrs["code"] = code
@property
def details(self) -> "dict":
""" Gets the details of this Error.
"""
return self._attrs.get("details")
@details.setter
def details(self, details: "dict"):
"""Sets the details of this Error.
:param details: The details of this Error.
:type: object
"""
self._attrs["details"] = details
@property
def message(self) -> "str":
""" Gets the message of this Error.
"""
return self._attrs.get("message")
@message.setter
def message(self, message: "str"):
"""Sets the message of this Error.
:param message: The message of this Error.
:type: str
"""
self._attrs["message"] = message
def to_dict(self):
return {k: v for (k, v) in self._attrs.items() if v is not None}
| 27.385621
| 233
| 0.590453
|
78fdd99d8b3e09141bf4c07a73d9e966cac14da1
| 6,133
|
py
|
Python
|
test/lint/check-rpc-mappings.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
test/lint/check-rpc-mappings.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
test/lint/check-rpc-mappings.py
|
btcavenue/btcavenue
|
63c135c40dbb1aef3078abb4dffefa04b8ef8217
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Btcavenue Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check RPC argument consistency."""
from collections import defaultdict
import os
import re
import sys
# Source files (relative to root) to scan for dispatch tables
SOURCES = [
"src/rpc/server.cpp",
"src/rpc/blockchain.cpp",
"src/rpc/mining.cpp",
"src/rpc/misc.cpp",
"src/rpc/net.cpp",
"src/rpc/rawtransaction.cpp",
"src/wallet/rpcwallet.cpp",
]
# Source file (relative to root) containing conversion mapping
SOURCE_CLIENT = 'src/rpc/client.cpp'
# Argument names that should be ignored in consistency checks
IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
class RPCCommand:
def __init__(self, name, args):
self.name = name
self.args = args
class RPCArgument:
def __init__(self, names, idx):
self.names = names
self.idx = idx
self.convert = False
def parse_string(s):
assert s[0] == '"'
assert s[-1] == '"'
return s[1:-1]
def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if re.match(r"static const CRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search(r'{ *("[^"]*"), *("[^"]*"), *&([^,]*), *{([^}]*)} *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(2))
args_str = m.group(4).strip()
if args_str:
args = [RPCArgument(parse_string(x.strip()).split('|'), idx) for idx, x in enumerate(args_str.split(','))]
else:
args = []
cmds.append(RPCCommand(name, args))
assert not in_rpcs and cmds, "Something went wrong with parsing the C++ file: update the regexps"
return cmds
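# Dispatch-table entries that the regex above is written to match look roughly
# like this (illustrative, following the usual CRPCCommand layout):
#     { "blockchain",  "getblockcount",  &getblockcount,  {} },
#     { "wallet",      "sendtoaddress",  &sendtoaddress,  {"address","amount"} },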
def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if line == 'static const CRPCConvertParam vRPCConvertParams[] =':
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search(r'{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(1))
idx = int(m.group(2))
argname = parse_string(m.group(3))
cmds.append((name, idx, argname))
assert not in_rpcs and cmds
return cmds
def main():
if len(sys.argv) != 2:
print('Usage: {} ROOT-DIR'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
root = sys.argv[1]
# Get all commands from dispatch tables
cmds = []
for fname in SOURCES:
cmds += process_commands(os.path.join(root, fname))
cmds_by_name = {}
for cmd in cmds:
cmds_by_name[cmd.name] = cmd
# Get current convert mapping for client
client = SOURCE_CLIENT
mapping = set(process_mapping(os.path.join(root, client)))
print('* Checking consistency between dispatch tables and vRPCConvertParams')
# Check mapping consistency
errors = 0
for (cmdname, argidx, argname) in mapping:
try:
rargnames = cmds_by_name[cmdname].args[argidx].names
except IndexError:
print('ERROR: %s argument %i (named %s in vRPCConvertParams) is not defined in dispatch table' % (cmdname, argidx, argname))
errors += 1
continue
if argname not in rargnames:
print('ERROR: %s argument %i is named %s in vRPCConvertParams but %s in dispatch table' % (cmdname, argidx, argname, rargnames), file=sys.stderr)
errors += 1
# Check for conflicts in vRPCConvertParams conversion
# All aliases for an argument must either be present in the
# conversion table, or not. Anything in between means an oversight
# and some aliases won't work.
for cmd in cmds:
for arg in cmd.args:
convert = [((cmd.name, arg.idx, argname) in mapping) for argname in arg.names]
if any(convert) != all(convert):
print('ERROR: %s argument %s has conflicts in vRPCConvertParams conversion specifier %s' % (cmd.name, arg.names, convert))
errors += 1
arg.convert = all(convert)
# Check for conversion difference by argument name.
# It is preferable for API consistency that arguments with the same name
# have the same conversion, so bin by argument name.
all_methods_by_argname = defaultdict(list)
converts_by_argname = defaultdict(list)
for cmd in cmds:
for arg in cmd.args:
for argname in arg.names:
all_methods_by_argname[argname].append(cmd.name)
converts_by_argname[argname].append(arg.convert)
for argname, convert in converts_by_argname.items():
if all(convert) != any(convert):
if argname in IGNORE_DUMMY_ARGS:
# these are testing or dummy, don't warn for them
continue
print('WARNING: conversion mismatch for argument named %s (%s)' %
(argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
sys.exit(errors > 0)
if __name__ == '__main__':
main()
| 37.625767
| 157
| 0.581934
|
d03f1e0edc7074618ea2f2dab59dcefa928f86e0
| 1,154
|
py
|
Python
|
copyspecial/copyspecial.py
|
leejkennedy/google-python-exercises
|
57f07892a11b594f11d2b058a0b4aaa1b50872f8
|
[
"Apache-2.0"
] | null | null | null |
copyspecial/copyspecial.py
|
leejkennedy/google-python-exercises
|
57f07892a11b594f11d2b058a0b4aaa1b50872f8
|
[
"Apache-2.0"
] | null | null | null |
copyspecial/copyspecial.py
|
leejkennedy/google-python-exercises
|
57f07892a11b594f11d2b058a0b4aaa1b50872f8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
if __name__ == "__main__":
main()
| 22.192308
| 67
| 0.646447
|
fa05d6b2b316dc2bfc0eb2e8e77a3f5ee9d471c8
| 12,918
|
py
|
Python
|
Huang2017AdaIN/torch_to_pytorch.py
|
czczup/URST
|
000ec9f7728f12ffad989ec1d07b1dd579514133
|
[
"Apache-2.0"
] | 119
|
2021-03-21T18:30:51.000Z
|
2022-03-29T07:28:33.000Z
|
Huang2017AdaIN/torch_to_pytorch.py
|
czczup/URST
|
000ec9f7728f12ffad989ec1d07b1dd579514133
|
[
"Apache-2.0"
] | 5
|
2021-04-02T14:26:03.000Z
|
2022-01-12T12:59:17.000Z
|
Huang2017AdaIN/torch_to_pytorch.py
|
czczup/URST
|
000ec9f7728f12ffad989ec1d07b1dd579514133
|
[
"Apache-2.0"
] | 16
|
2021-03-21T18:30:53.000Z
|
2022-03-29T07:28:34.000Z
|
from __future__ import print_function
import argparse
from functools import reduce
import torch
assert torch.__version__.split('.')[0] == '0', 'Only working on PyTorch 0.x.x'
import torch.nn as nn
from torch.autograd import Variable
from torchfile import load as load_lua
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
# result is Variables list [Variable1, Variable2, ...]
return list(map(self.lambda_func, self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
# result is a Variable
return reduce(self.lambda_func, self.forward_prepare(input))
def copy_param(m, n):
if m.weight is not None: n.weight.data.copy_(m.weight)
if m.bias is not None: n.bias.data.copy_(m.bias)
if hasattr(n, 'running_mean'): n.running_mean.copy_(m.running_mean)
if hasattr(n, 'running_var'): n.running_var.copy_(m.running_var)
def add_submodule(seq, *args):
for n in args:
seq.add_module(str(len(seq._modules)), n)
def lua_recursive_model(module, seq):
for m in module.modules:
name = type(m).__name__
real = m
if name == 'TorchObject':
name = m._typename.replace('cudnn.', '')
m = m._obj
if name == 'SpatialConvolution':
if not hasattr(m, 'groups'): m.groups = 1
n = nn.Conv2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
(m.dW, m.dH), (m.padW, m.padH), 1, m.groups,
bias=(m.bias is not None))
copy_param(m, n)
add_submodule(seq, n)
elif name == 'SpatialBatchNormalization':
n = nn.BatchNorm2d(m.running_mean.size(0), m.eps, m.momentum,
m.affine)
copy_param(m, n)
add_submodule(seq, n)
elif name == 'ReLU':
n = nn.ReLU()
add_submodule(seq, n)
elif name == 'SpatialMaxPooling':
n = nn.MaxPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
ceil_mode=m.ceil_mode)
add_submodule(seq, n)
elif name == 'SpatialAveragePooling':
n = nn.AvgPool2d((m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
ceil_mode=m.ceil_mode)
add_submodule(seq, n)
elif name == 'SpatialUpSamplingNearest':
n = nn.UpsamplingNearest2d(scale_factor=m.scale_factor)
add_submodule(seq, n)
elif name == 'View':
n = Lambda(lambda x: x.view(x.size(0), -1))
add_submodule(seq, n)
elif name == 'Linear':
# Linear in pytorch only accept 2D input
n1 = Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x)
n2 = nn.Linear(m.weight.size(1), m.weight.size(0),
bias=(m.bias is not None))
copy_param(m, n2)
n = nn.Sequential(n1, n2)
add_submodule(seq, n)
elif name == 'Dropout':
m.inplace = False
n = nn.Dropout(m.p)
add_submodule(seq, n)
elif name == 'SoftMax':
n = nn.Softmax()
add_submodule(seq, n)
elif name == 'Identity':
n = Lambda(lambda x: x) # do nothing
add_submodule(seq, n)
elif name == 'SpatialFullConvolution':
n = nn.ConvTranspose2d(m.nInputPlane, m.nOutputPlane, (m.kW, m.kH),
(m.dW, m.dH), (m.padW, m.padH))
add_submodule(seq, n)
elif name == 'SpatialReplicationPadding':
n = nn.ReplicationPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
add_submodule(seq, n)
elif name == 'SpatialReflectionPadding':
n = nn.ReflectionPad2d((m.pad_l, m.pad_r, m.pad_t, m.pad_b))
add_submodule(seq, n)
elif name == 'Copy':
n = Lambda(lambda x: x) # do nothing
add_submodule(seq, n)
elif name == 'Narrow':
n = Lambda(
lambda x, a=(m.dimension, m.index, m.length): x.narrow(*a))
add_submodule(seq, n)
elif name == 'SpatialCrossMapLRN':
lrn = torch.legacy.nn.SpatialCrossMapLRN(m.size, m.alpha, m.beta,
m.k)
n = Lambda(lambda x, lrn=lrn: lrn.forward(x))
add_submodule(seq, n)
elif name == 'Sequential':
n = nn.Sequential()
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'ConcatTable': # output is list
n = LambdaMap(lambda x: x)
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'CAddTable': # input is list
n = LambdaReduce(lambda x, y: x + y)
add_submodule(seq, n)
elif name == 'Concat':
dim = m.dimension
n = LambdaReduce(lambda x, y, dim=dim: torch.cat((x, y), dim))
lua_recursive_model(m, n)
add_submodule(seq, n)
elif name == 'TorchObject':
            print('Not Implemented', name, real._typename)
        else:
            print('Not Implemented', name)
def lua_recursive_source(module):
s = []
for m in module.modules:
name = type(m).__name__
real = m
if name == 'TorchObject':
name = m._typename.replace('cudnn.', '')
m = m._obj
if name == 'SpatialConvolution':
if not hasattr(m, 'groups'): m.groups = 1
s += ['nn.Conv2d({},{},{},{},{},{},{},bias={}),#Conv2d'.format(
m.nInputPlane,
m.nOutputPlane, (m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH),
1, m.groups, m.bias is not None)]
elif name == 'SpatialBatchNormalization':
s += ['nn.BatchNorm2d({},{},{},{}),#BatchNorm2d'.format(
m.running_mean.size(0), m.eps, m.momentum, m.affine)]
elif name == 'ReLU':
s += ['nn.ReLU()']
elif name == 'SpatialMaxPooling':
s += ['nn.MaxPool2d({},{},{},ceil_mode={}),#MaxPool2d'.format(
(m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
elif name == 'SpatialAveragePooling':
s += ['nn.AvgPool2d({},{},{},ceil_mode={}),#AvgPool2d'.format(
(m.kW, m.kH), (m.dW, m.dH), (m.padW, m.padH), m.ceil_mode)]
elif name == 'SpatialUpSamplingNearest':
s += ['nn.UpsamplingNearest2d(scale_factor={})'.format(
m.scale_factor)]
elif name == 'View':
s += ['Lambda(lambda x: x.view(x.size(0),-1)), # View']
elif name == 'Linear':
s1 = 'Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x )'
s2 = 'nn.Linear({},{},bias={})'.format(m.weight.size(1),
m.weight.size(0),
(m.bias is not None))
s += ['nn.Sequential({},{}),#Linear'.format(s1, s2)]
elif name == 'Dropout':
s += ['nn.Dropout({})'.format(m.p)]
elif name == 'SoftMax':
s += ['nn.Softmax()']
elif name == 'Identity':
s += ['Lambda(lambda x: x), # Identity']
elif name == 'SpatialFullConvolution':
s += ['nn.ConvTranspose2d({},{},{},{},{})'.format(m.nInputPlane,
m.nOutputPlane,
(m.kW, m.kH),
(m.dW, m.dH), (
m.padW, m.padH))]
elif name == 'SpatialReplicationPadding':
s += ['nn.ReplicationPad2d({})'.format(
(m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
elif name == 'SpatialReflectionPadding':
s += ['nn.ReflectionPad2d({})'.format(
(m.pad_l, m.pad_r, m.pad_t, m.pad_b))]
elif name == 'Copy':
s += ['Lambda(lambda x: x), # Copy']
elif name == 'Narrow':
s += ['Lambda(lambda x,a={}: x.narrow(*a))'.format(
(m.dimension, m.index, m.length))]
elif name == 'SpatialCrossMapLRN':
lrn = 'torch.legacy.nn.SpatialCrossMapLRN(*{})'.format(
(m.size, m.alpha, m.beta, m.k))
s += [
'Lambda(lambda x,lrn={}: Variable(lrn.forward(x)))'.format(
lrn)]
elif name == 'Sequential':
s += ['nn.Sequential( # Sequential']
s += lua_recursive_source(m)
s += [')']
elif name == 'ConcatTable':
s += ['LambdaMap(lambda x: x, # ConcatTable']
s += lua_recursive_source(m)
s += [')']
elif name == 'CAddTable':
s += ['LambdaReduce(lambda x,y: x+y), # CAddTable']
elif name == 'Concat':
dim = m.dimension
s += [
'LambdaReduce(lambda x,y,dim={}: torch.cat((x,y),dim), # Concat'.format(
m.dimension)]
s += lua_recursive_source(m)
s += [')']
else:
            s += ['# ' + name + ' Not Implemented']
s = map(lambda x: '\t{}'.format(x), s)
return s
def simplify_source(s):
s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d', ')'),
s)
s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d', ')'), s)
s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d', ')'), s)
s = map(lambda x: x.replace(',bias=True),#Conv2d', ')'), s)
s = map(lambda x: x.replace('),#Conv2d', ')'), s)
s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d', ')'), s)
s = map(lambda x: x.replace('),#BatchNorm2d', ')'), s)
s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d', ')'), s)
s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d', ')'), s)
s = map(lambda x: x.replace('),#MaxPool2d', ')'), s)
s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d', ')'), s)
s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d', ')'), s)
s = map(lambda x: x.replace(',bias=True)),#Linear', ')), # Linear'), s)
s = map(lambda x: x.replace(')),#Linear', ')), # Linear'), s)
s = map(lambda x: '{},\n'.format(x), s)
s = map(lambda x: x[1:], s)
s = reduce(lambda x, y: x + y, s)
return s
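# Illustration (added, not part of the original converter): entries produced by
# lua_recursive_source carry a leading tab and a ",#<Layer>" marker;
# simplify_source collapses default arguments and strips both. Assuming
# functools.reduce is available (the call above already relies on it):
_SIMPLIFY_DEMO_IN = ['\tnn.Conv2d(3,64,(3, 3),(1, 1),(0, 0),1,1,bias=True),#Conv2d',
                     '\tnn.ReLU()']
# simplify_source(_SIMPLIFY_DEMO_IN) == 'nn.Conv2d(3,64,(3, 3)),\nnn.ReLU(),\n'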
def torch_to_pytorch(t7_filename, outputname=None):
model = load_lua(t7_filename, unknown_classes=True)
if type(model).__name__ == 'hashable_uniq_dict': model = model.model
model.gradInput = None
slist = lua_recursive_source(torch.legacy.nn.Sequential().add(model))
s = simplify_source(slist)
header = '''
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
'''
varname = t7_filename.replace('.t7', '').replace('.', '_').replace('-',
'_')
s = '{}\n\n{} = {}'.format(header, varname, s[:-2])
if outputname is None: outputname = varname
with open(outputname + '.py', "w") as pyfile:
pyfile.write(s)
n = nn.Sequential()
lua_recursive_model(model, n)
torch.save(n.state_dict(), outputname + '.pth')
parser = argparse.ArgumentParser(
description='Convert torch t7 model to pytorch')
parser.add_argument('--model', '-m', type=str, required=True,
help='torch model file in t7 format')
parser.add_argument('--output', '-o', type=str, default=None,
help='output file name prefix, xxx.py xxx.pth')
args = parser.parse_args()
torch_to_pytorch(args.model, args.output)
| 39.993808
| 88
| 0.523378
|
292bf940616f54035206d98118bc33d9b6e9e356
| 2,091
|
py
|
Python
|
keitaro/resources/integrations.py
|
Infvmous/keitaro
|
aeb7555bd8443da995705f26fd42e6e882f64dd4
|
[
"MIT"
] | 1
|
2021-07-03T16:40:37.000Z
|
2021-07-03T16:40:37.000Z
|
keitaro/resources/integrations.py
|
ysomad/keitaro
|
aeb7555bd8443da995705f26fd42e6e882f64dd4
|
[
"MIT"
] | 1
|
2021-01-28T13:06:33.000Z
|
2021-01-28T13:06:36.000Z
|
keitaro/resources/integrations.py
|
ysomad/keitaro
|
aeb7555bd8443da995705f26fd42e6e882f64dd4
|
[
"MIT"
] | 1
|
2021-02-23T08:10:29.000Z
|
2021-02-23T08:10:29.000Z
|
from keitaro.api import API
from keitaro.utils import remove_key_values
class Integration(API):
def __init__(self, client, endpoint='integrations'):
super(Integration, self).__init__(client, endpoint)
def avscan(self):
"""
Returns AVScan key
"""
return super(Integration, self).get('avscan')
def avscan_update(self, avscan_key):
"""
Updates AVScan key
"""
return super(Integration, self).put('avscan', avscan_key=avscan_key)
def facebook(self, integration_id=None):
"""
Returns all Facebook integrations or a
specific one by integration_id
"""
return super(Integration, self).get('facebook', integration_id)
def facebook_update(self, integration_id, name=None, ad_account_id=None,
token=None, proxy_enabled=None, proxy=None):
"""
Updates facebook integration
"""
return super(Integration, self).put(
'facebook', **remove_key_values(locals()))
def facebook_campaigns(self, integration_id):
"""
Returns campaigns linked to a Facebook integration
"""
return super(Integration, self).get(
'facebook', integration_id, 'campaign')
def imklo(self):
"""
Returns IMKLO url
"""
return super(Integration, self).get('imklo')
def imklo_update(self, imklo_api_url):
"""
Updates IMKLO api url
"""
return super(Integration, self).put(
'imklo', imklo_api_url=imklo_api_url)
def facebook_create(
self, name, ad_account_id, token, proxy_enabled, proxy):
"""
Creates facebook integration
"""
return super(Integration, self).post(
'facebook', **remove_key_values(locals()))
def facebook_add_campaign(self, integration_id, campaign_id):
"""
Adds campaign to facebook integration
"""
return super(Integration, self).post(
'facebook', integration_id, 'campaign', campaign_id=campaign_id)  # campaign_id was previously accepted but never passed through
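# Hedged usage sketch (mine, not from this file); the keitaro client's own
# constructor is not shown here, so the first line is just a placeholder:
#   client = ...  # an authenticated keitaro API client
#   integrations = Integration(client)
#   integrations.avscan()                 # read the current AVScan key
#   integrations.avscan_update('NEW-KEY') # replace it (hypothetical key)
#   integrations.facebook()               # list all Facebook integrations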
| 29.450704
| 76
| 0.604495
|
f26815869802761fba8821033e1eeca38a76ffa4
| 4,978
|
py
|
Python
|
IMDb_framework/runs/map_phr_to_sentence.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | null | null | null |
IMDb_framework/runs/map_phr_to_sentence.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | null | null | null |
IMDb_framework/runs/map_phr_to_sentence.py
|
vanessadamario/data_efficiency
|
fc702d2241d737591163697332e3de1d0a0ed085
|
[
"MIT"
] | 1
|
2021-12-27T00:46:35.000Z
|
2021-12-27T00:46:35.000Z
|
import os
import numpy as np
from os.path import join
import re
import codecs
import pandas as pd
def clean_str_sst(string):
"""
Tokenize/string cleaning for the SST dataset
:param string: a string element
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def line_to_words(line):
""" Given the dataset, we remove the first element, which is the label
:param line: line of the dataset, as saved for e.g. in
https://github.com/CS287/HW1/blob/master/data/stsa.binary.train
:returns label: the output label
:returns words: the list of words contained in the line
"""
clean_line = clean_str_sst(line.strip())
words = clean_line.split(' ')
label = int(words[0])
words = words[1:]
return label, words
def extract_x_y(path_folder, dataset_name):
""" Here we extract the (X,y) values
:param path_folder: path to the folder containing datasets
:param dataset_name: name of the dataset
:returns input_dataset_lst: list containing input features (list of words)
:returns output_labels: np.array containing the ys
"""
input_dataset_lst = []
output_labels = []
for filename in [join(path_folder, dataset_name)]:
if filename:
with codecs.open(filename, "r", encoding="latin-1") as f:
for line in f:
label, words = line_to_words(line)
input_dataset_lst.append(words)
output_labels.append(label)
return input_dataset_lst, np.array(output_labels)
def standard_pre_processing(id, source_folder, split_path):
output_name = {'stsa.binary.phrases.train': 'phrases_train',
'stsa.binary.train': 'train',
'stsa.binary.dev': 'valid',
'stsa.binary.test': 'test'}
dataset_lst = list(output_name.keys())
for data_ in dataset_lst:
x, y = extract_x_y(source_folder, data_)
df = pd.DataFrame(x)
df.to_csv(join(split_path, output_name[data_] + '.csv'))
np.save(join(split_path, output_name[data_] + '.npy'), y)
def generate_map(id, source_folder, split_path, min_length=5):
""" Generate the map from sentence to phrases.
The redundant phrases are those which share the same polarity as the entire sentence.
The noisy phrases are the neutral ones, with label 2
:param id: the index of the experiment, not useful
:param source_folder: name of the source, containing the fine grained phrases
:param split_path: the name of the split where to save the results
:param min_length: minimum number of words a phrase must contain to be mapped (default 5)
"""
split_shape = 10
x_phr = pd.read_csv(join(source_folder, 'fine_phrases_train.csv'), index_col=0)
y_phr = np.load(join(source_folder, 'fine_phrases_train.npy'))
split_name_lst = ['train', 'valid', 'test']
for split_name in split_name_lst:
dct_redundant = {} # dct of redundant phrases
dct_noise = {} # dct of noisy phrases
x_split = pd.read_csv(join(split_path, split_name + '.csv'), index_col=0)
y_split = np.load(join(split_path, split_name + '.npy'))
size_split = x_split.shape[0]
n_splits = int(np.ceil(size_split / split_shape))  # np.ceiling does not exist; ceil gives the number of chunks
for n_index in range(n_splits):
id_start = n_index * split_shape
id_stop = (n_index + 1) * split_shape
if id_stop > size_split:
id_stop = size_split
for id_s in np.arange(id_start, id_stop):
print(id_s)
ff = [f_ for f_ in x_split.loc[id_s] if isinstance(f_, str)] # for each sentence in the file_split.csv
dct_redundant[id_s] = []
dct_noise[id_s] = []
for id_phr in range(x_phr.shape[0]): # you look for all the phrases
tmp_phrases = [f_ for f_ in x_phr.loc[id_phr] if isinstance(f_, str)]
len_phrases = len(tmp_phrases)
set_phrases = set(tmp_phrases)
if len(set_phrases - set(ff)) == 0 and len_phrases >= min_length:
# save the phrase as the one corresponding to the sentence
if ((y_phr[id_phr] > 2) and y_split[id_s] == 1) \
or ((y_phr[id_phr] < 2) and y_split[id_s] == 0):
dct_redundant[id_s].append(id_phr)
elif y_phr[id_phr] == 2:
dct_noise[id_s].append(id_phr)
df_n = pd.DataFrame(dct_noise.values(), index=dct_noise.keys())
df_n.to_csv(join(split_path, 'map_' + split_name, 'map_n_%i.csv' % n_index))
df_r = pd.DataFrame(dct_redundant.values(), index=dct_redundant.keys())
df_r.to_csv(join(split_path, 'map_' + split_name, 'map_r_%i.csv' % n_index))
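if __name__ == '__main__':
    # Quick sanity check of the helpers above (illustration only; the real
    # entry points are standard_pre_processing and generate_map). Characters
    # other than A-Za-z0-9(),!?'` become spaces, text is lower-cased, and the
    # leading label is split off:
    print(line_to_words("1 A gorgeous, witty film."))
    # -> (1, ['a', 'gorgeous,', 'witty', 'film'])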
| 41.483333
| 119
| 0.616312
|
143c32e9b8c88d6477112f1ca4117fa533cc8598
| 5,648
|
py
|
Python
|
graphics.py
|
DougDimmadome7/3D-Graphics-Engine-on-Console
|
3fa766cf2af05ea5595277e47dbf17d7fede0e35
|
[
"MIT"
] | null | null | null |
graphics.py
|
DougDimmadome7/3D-Graphics-Engine-on-Console
|
3fa766cf2af05ea5595277e47dbf17d7fede0e35
|
[
"MIT"
] | null | null | null |
graphics.py
|
DougDimmadome7/3D-Graphics-Engine-on-Console
|
3fa766cf2af05ea5595277e47dbf17d7fede0e35
|
[
"MIT"
] | null | null | null |
import math
from shapes import Point, Vector, Surface
from lighting import gradient
class Ray:
def __find_components(self, theta_v: float, theta_h: float) -> list:
"""
Given the horizontal and vertical angle, calculate the vector
components such that the magnitude of the vector is 1.
"""
theta_h = math.radians(theta_h)
theta_v = math.radians(theta_v)
if theta_v > 0:
z = math.sin(theta_v)
a = math.cos(theta_v)
else:
z = math.cos(math.radians(90) - theta_v)
a = math.sin(math.radians(90) - theta_v)
if theta_h > 0:
x = a * math.sin(math.radians(90) - theta_h)
y = a * math.cos(math.radians(90) - theta_h)
else:
y = a * math.sin(theta_h)
x = a * math.cos(theta_h)
return [x, y, z]
def __init__(self, theta_v: float, theta_h: float, position):
"""
The ray is a line in 3-d space that is defined as a vector of length 1
"""
parts = self.__find_components(theta_v, theta_h)
self.vector = Vector(parts[0], parts[1], parts[2])
self.position = position
#TODO: This may not actually be working
def collision_cor(self, surface, is_eq = False) -> list:
"""
Returns the x, y, z coordinates where the Ray collides with the surface (the last element is the line parameter t)
"""
# This method works by treating the vector of the Ray as a parametric
#line equation. This makes determining whether there is a collision
#much simpler.
if not is_eq:
equation = surface.plane_eq()
else:
equation = surface
# Distribute the plane coefficients, and separate the constants from
#the coefficients of T.
consts, coeff = [0,0,0], [0,0,0]
for i in range(3):
consts[i] = self.position.list_form()[i] * equation[i]
coeff[i] = self.vector.list_form()[i] * equation[i]
equation[3] -= sum(consts)
t = equation[3] / sum(coeff) if sum(coeff) != 0 else float("inf")
return [i * t for i in self.vector.list_form()] + [t]
def __will_impact(self, surface, precision = .4) -> bool:
"""
Returns whether the ray will impact the surface
"""
impact = self.collision_cor(surface) #calculate where ray impacts surface
max_mins = surface.max_mins() #returns the region where the shape is
for i in range(len(max_mins)):
# checks if the point of impact is not within allowable range
if impact[i] > max_mins[i][0] + precision or impact[i] < max_mins[i][1] - precision:
return False
return True
def closest(self, shapes) -> float:
"""
Given a set of possible shapes to impact, this finds which surface
the parametric form of the vector will impact, and returns its
brightness to be outputted.
"""
closest_t = float("inf")
brightness = 0
for shape in shapes:
for surface in shape.surfaces:
if self.__will_impact(surface):
if self.collision_cor(surface)[-1] < closest_t:
brightness = surface.brightness
closest_t = self.collision_cor(surface)[-1]
return brightness
class Camera:
def __init_rays(self, position, h_angle, v_angle, orientation: list, X_len = 237, Y_len = 62):
"""
generates a list of Rays which will correspond to each pixel on the command
line.
"""
rays = []
for i in range(Y_len): #for every pixel row
for j in range(X_len): #Go across
r = Ray((v_angle - v_angle*i*2/Y_len) + orientation[0], (-h_angle + h_angle*j*2/X_len) + orientation[1], position)
rays.append(r)
return rays
def __init__(self, position, h_angle, v_angle, orientation = [0,0], X_len = 237, Y_len = 62, b_coeff = 1):
assert h_angle < 180
assert v_angle < 180
self.orientation = orientation
self.position = position
self.h_angle = h_angle
self.v_angle = v_angle
self.X_len = X_len
self.Y_len = Y_len
self.rays = self.__init_rays(position, h_angle, v_angle, orientation, X_len, Y_len)
def __create_view(self) -> list:
"""
returns a grid with the same y values as the console view.
"""
view = []
for i in range(self.Y_len):
view.append([])
return view
def __display_view(self, view):
for row in view:
for item in row:
print(item, end = '')
def __bright_ascii(self, brightness) -> chr:
"""
Given a brightness value, this will scale that brightness to an
ascii character to output
"""
return gradient[int(brightness // (100/len(gradient)))]
#TODO: FIX THE SCAN METHOD
def scan(self, shapes):
"""
The camera generates a set of rays that correspond with each pixel
on the console. The rays are separated out evenly.
"""
#view = self.__create_view()
view = ""
for i in range(len(self.rays)):
view += self.__bright_ascii(self.rays[i].closest(shapes))
#self.__display_view(view)
print(view)
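if __name__ == "__main__":
    # Smoke test (illustration only): the direction computed by
    # __find_components should have roughly unit length. The position argument
    # is not touched by this check, so None is passed in its place.
    ray = Ray(45, 45, None)
    comps = ray.vector.list_form()
    print(comps, sum(c * c for c in comps) ** 0.5)  # magnitude ~= 1.0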
| 34.024096
| 131
| 0.554356
|
f955b5dbebcc58526ae2eef58239c751e91f2889
| 998
|
py
|
Python
|
scripts/gray-note-img.py
|
heavenly-zy/notev
|
f679025104ae56cfce1a6255401e14e399447278
|
[
"MIT"
] | 1
|
2020-04-16T01:59:25.000Z
|
2020-04-16T01:59:25.000Z
|
scripts/gray-note-img.py
|
heavenly-zy/notev
|
f679025104ae56cfce1a6255401e14e399447278
|
[
"MIT"
] | null | null | null |
scripts/gray-note-img.py
|
heavenly-zy/notev
|
f679025104ae56cfce1a6255401e14e399447278
|
[
"MIT"
] | null | null | null |
import sys
import os
from cv2 import cv2
img_dir = "docs/Images/"
bak_dir = "docs/Images_bak/"
img_names = sys.argv[1: ]
if not os.path.exists(bak_dir):
os.mkdir(bak_dir)
W = 800
def process(img_name):
""" 处理一张图片 """
bak_path = os.path.join(bak_dir, img_name)
img_path = os.path.join(img_dir, img_name)
# back up the original
with open(img_path, 'rb') as fr, open(bak_path, 'wb') as fw:
fw.write(fr.read())
# read
img = cv2.imread(img_path)
# downscale
h, w = img.shape[: 2]
if w > W:
h = h * W // w
w = W
img = cv2.resize(img, (w, h),interpolation=cv2.INTER_CUBIC)
# smooth with a bilateral filter
img = cv2.bilateralFilter(img, 40, 30, 75)
# convert to grayscale
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# wash out the background
img[img > 200] = 255
# strengthen the text
img[img < 30] = 0
# preview
cv2.imshow('im', img)
cv2.waitKey(0)
# write back
cv2.imwrite(img_path, img)
for img_name in img_names:
print(f"processing img {img_name}", end="\r")
process(img_name)
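# Usage sketch (hypothetical file names): arguments are image names resolved
# relative to docs/Images/, and originals are copied to docs/Images_bak/ first:
#   python scripts/gray-note-img.py note1.png note2.jpg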
| 21.234043
| 67
| 0.582164
|
c714ce5afdb28fa7a16ecf22d475c9329e19f1ed
| 822
|
py
|
Python
|
django/swiper/user/models.py
|
Feier-4869/swipe
|
97207562b8ec294a2bcc62ef30f2001b39c11309
|
[
"MIT"
] | null | null | null |
django/swiper/user/models.py
|
Feier-4869/swipe
|
97207562b8ec294a2bcc62ef30f2001b39c11309
|
[
"MIT"
] | null | null | null |
django/swiper/user/models.py
|
Feier-4869/swipe
|
97207562b8ec294a2bcc62ef30f2001b39c11309
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class User(models.Model):
SEX = (
('0', 'male'),
('1', 'female'),
)
phonenum = models.CharField(max_length=50, unique=True, verbose_name='phone number')
nickname = models.CharField(max_length=50, unique=True, verbose_name='nickname')
sex = models.CharField(max_length=8, choices=SEX, verbose_name='sex')
birth_year = models.IntegerField(default=2001, verbose_name='birth year')
birth_month = models.IntegerField(default=1, verbose_name='birth month')
birth_day = models.IntegerField(default=1, verbose_name='birth day')
avater = models.CharField(max_length=50, verbose_name='avatar')
location = models.CharField(max_length=50, verbose_name='usual residence')
class Meta:
db_table = 'sp_user'
def __str__(self):
return self.nickname
| 32.88
| 79
| 0.683698
|
beaeed3c33e69a04f0fe216dc8f1a087372dd8f6
| 1,451
|
py
|
Python
|
all_net_def.py
|
jianganbai/Collision-Classification-and-Matching
|
c1f7a72e29884bc7225659d49d0677a425e7f8fd
|
[
"Apache-2.0"
] | null | null | null |
all_net_def.py
|
jianganbai/Collision-Classification-and-Matching
|
c1f7a72e29884bc7225659d49d0677a425e7f8fd
|
[
"Apache-2.0"
] | null | null | null |
all_net_def.py
|
jianganbai/Collision-Classification-and-Matching
|
c1f7a72e29884bc7225659d49d0677a425e7f8fd
|
[
"Apache-2.0"
] | null | null | null |
from torch import nn
class RCNet(nn.Module):
def __init__(self, num_classes=10):
super(RCNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(in_channels=4, out_channels=16, kernel_size=1, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=1, stride=1),
nn.Conv2d(16, 32, kernel_size=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(32 * 5 * 4, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 32 * 5 * 4)
x = self.classifier(x)
return x
class ImNet(nn.Module):
def __init__(self, num_classes=10):
super(ImNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=1),
nn.Conv2d(32, 32, kernel_size=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(32 * 3 * 3, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 32 * 3 * 3)
x = self.classifier(x)
return x
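if __name__ == '__main__':
    import torch
    # Smoke test (my own illustration; expected input sizes are inferred from
    # the hard-coded view() shapes above): RCNet flattens a 32x5x4 map, which a
    # 4-channel 9x7 input yields; ImNet flattens 32x3x3, which a 3-channel
    # 24x24 input yields.
    print(RCNet()(torch.randn(1, 4, 9, 7)).shape)    # torch.Size([1, 10])
    print(ImNet()(torch.randn(1, 3, 24, 24)).shape)  # torch.Size([1, 10])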
| 30.229167
| 90
| 0.53756
|
a05bd66cacc34d2b9f4c444f65fc71d45f36e914
| 595
|
py
|
Python
|
tools/find_mxnet.py
|
jimmy9065/mxnet-ssd
|
e05b704de0756c4ca996e74f8f3c7dec1fe1f509
|
[
"MIT"
] | 866
|
2016-10-07T16:05:13.000Z
|
2022-01-19T08:30:31.000Z
|
tools/find_mxnet.py
|
whn09/mxnet-ssd
|
ff15817dbf6d3c6d3fc69fbf6bef4c4d61490159
|
[
"MIT"
] | 237
|
2016-10-06T21:19:45.000Z
|
2021-07-20T03:52:45.000Z
|
tools/find_mxnet.py
|
whn09/mxnet-ssd
|
ff15817dbf6d3c6d3fc69fbf6bef4c4d61490159
|
[
"MIT"
] | 431
|
2016-10-19T10:08:07.000Z
|
2021-10-03T00:43:33.000Z
|
from __future__ import print_function
import os
try:
if os.environ.get('MXNET_EXAMPLE_SSD_DISABLE_PRE_INSTALLED', 0):
raise ImportError
import mxnet as mx
print("Using mxnet as:")
print(mx)
print("Warning: using pre-installed version of mxnet may cause unexpected error...")
print("(export MXNET_EXAMPLE_SSD_DISABLE_PRE_INSTALLED=1) to prevent loading pre-installed mxnet.")
except ImportError:
import os, sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curr_path, "../mxnet/python"))
import mxnet as mx
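# Typical use (a sketch): example scripts import this module before mxnet so
# that the bundled copy under ../mxnet/python is picked up when
# MXNET_EXAMPLE_SSD_DISABLE_PRE_INSTALLED=1 is set:
#   import find_mxnet  # noqa: F401 -- imported for its sys.path side effect
#   import mxnet as mx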
| 37.1875
| 103
| 0.729412
|
e9c3e7af98f6cbcb246c1e5c17e1877c32020064
| 4,852
|
py
|
Python
|
creation/lib/matchPolicy.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
creation/lib/matchPolicy.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
creation/lib/matchPolicy.py
|
ddbox/glideinwms
|
1d0efbc1186ff9bd4cc3010fde6681b4cbe7cd54
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module contains the Match Policy related class
#
# Author:
# Parag Mhashilkar
#
import imp
import os
# import imp
import os.path
# import copy
import re
# import string
# import socket
# from collections import OrderedDict
from glideinwms.lib.xmlParse import OrderedDict
# from . import cWParams
# import pprint
class MatchPolicyLoadError(Exception):
def __init__(self, file="", search_path=[]):
self.file = file
self.searchPath = search_path
def __str__(self):
err_str = ""
if self.file == "":
err_str = "No match policy file provided"
else:
err_str = f"Failed to load policy from the file {self.file} in the search path {self.searchPath}"
return err_str
class MatchPolicyContentError(Exception):
def __init__(self, file, attr, expected_type, actual_type):
self.file = file
self.attr = attr
self.attrExpectedType = expected_type
self.attrType = actual_type
def __str__(self):
return "{} in policy file {} should be of type {} and not {}".format(
self.attr,
self.file,
self.attrExpectedType,
self.attrType,
)
class MatchPolicy:
def __init__(self, file, search_path=[]):
"""
Load match policy from the policy file
@param file: Path to the python file
@type file: string
@param search_path: Search path to the python module to load
@type search_path: list
@rtype: MatchPolicy Object
"""
if (file is not None) and (file != ""):
self.file = file
self.name = self.policyFileToPyModuleName()
search_path.append(os.path.dirname(os.path.realpath(file)))
self.searchPath = search_path
try:
# First find the module
f, path, desc = imp.find_module(self.name, self.searchPath)
# Load the module
self.pyObject = imp.load_module(self.name, f, path, desc)
except:
raise MatchPolicyLoadError(file=file, search_path=self.searchPath)
else:
raise MatchPolicyLoadError()
match_attrs = self.loadMatchAttrs()
self.factoryMatchAttrs = match_attrs.get("factory_match_attrs")
self.jobMatchAttrs = match_attrs.get("job_match_attrs")
# Assume TRUE as default for all expressions
self.factoryQueryExpr = "TRUE"
if "factory_query_expr" in dir(self.pyObject):
self.factoryQueryExpr = self.pyObject.factory_query_expr
self.jobQueryExpr = "TRUE"
if "job_query_expr" in dir(self.pyObject):
self.jobQueryExpr = self.pyObject.job_query_expr
self.startExpr = "TRUE"
if "start_expr" in dir(self.pyObject):
self.startExpr = self.pyObject.start_expr
def policyFileToPyModuleName(self):
policy_fname = os.path.basename(self.file)
policy_module_name = re.sub(".py$", "", policy_fname)
return policy_module_name
def loadMatchAttrs(self):
"""
If a given match_attr, i.e. factory_match_attrs or job_match_attrs, exists,
load it from the pyObject
"""
# match_attrs = {}
match_attrs = {"factory_match_attrs": {}, "job_match_attrs": {}}
for ma_name in ("factory_match_attrs", "job_match_attrs"):
if ma_name in dir(self.pyObject):
ma_attr = getattr(self.pyObject, ma_name)
# Check if the match_attr is of dict type
# TODO: Also need to check that match_attr is of string/int/bool
if isinstance(ma_attr, dict):
data = OrderedDict()
for k, v in ma_attr.items():
data[k] = OrderedDict(v)
match_attrs[ma_name] = data
else:
# Raise error if match_attr is not of type dict
raise MatchPolicyContentError(self.file, ma_name, type(ma_attr).__name__, "dict")
return match_attrs
def __repr__(self):
return self.__str__()
def __str__(self):
contents = {
"file": self.file,
"name": self.name,
"searchPath": "%s" % self.searchPath,
"pyObject": "%s" % self.pyObject,
"factoryMatchAttrs": "%s" % self.factoryMatchAttrs,
"jobMatchAttrs": "%s" % self.jobMatchAttrs,
"factoryQueryExpr": "%s" % self.factoryQueryExpr,
"jobQueryExpr": "%s" % self.jobQueryExpr,
"startExpr": "%s" % self.startExpr,
}
return "%s" % contents
| 31.303226
| 109
| 0.597279
|
28470edc2b276b2e14ddfe2320754f54923e114a
| 1,315
|
py
|
Python
|
lib/googlecloudsdk/command_lib/redis/zones_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/command_lib/redis/zones_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/googlecloudsdk/command_lib/redis/zones_util.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for `gcloud redis zones` commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from apitools.base.py import encoding
RedisZone = collections.namedtuple('RedisZone', ['name', 'region'])
def ExtractZonesFromRegionsListResponse(response, args):
for region in response:
if args.IsSpecified('region') and region.locationId != args.region:
continue
if not region.metadata:
continue
metadata = encoding.MessageToDict(region.metadata)
for zone in metadata.get('availableZones', []):
zone = RedisZone(name=zone, region=region.locationId)
yield zone
| 31.309524
| 74
| 0.747529
|
92e8481cac988f42466f1aa1efa78bf61d3f2334
| 3,607
|
py
|
Python
|
classes/get2dcoords.py
|
jamflcjamflc/cuatro
|
007fabf1f75f87b3631966a10923ddccfe9d56af
|
[
"Apache-2.0"
] | null | null | null |
classes/get2dcoords.py
|
jamflcjamflc/cuatro
|
007fabf1f75f87b3631966a10923ddccfe9d56af
|
[
"Apache-2.0"
] | 2
|
2021-01-26T19:58:42.000Z
|
2021-01-30T22:00:12.000Z
|
classes/get2dcoords.py
|
jamflcjamflc/cuatro
|
007fabf1f75f87b3631966a10923ddccfe9d56af
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
# get2dcoords
# helper class for cuatro
# Alfredo Martin 2021
import numpy as np
from classes.rotate import Rotate
version = 'get2dcoords.v.1.0.0'
class Get2dcoords:
"""the instance of this class reorganizes polygons and colors into data that is ready to be rendered
in 2d:
light_pos: numpy array: position of the light
polygons3d: numpy array of shape (n, m, 3) with n being the number of polygons and m the number of nodes
per polygon
rotor: Instance of Rotor class
offset: tuple of three ints: offset (displacement along the positive z axis) of the given 3d coords before being
rendered
screenpos: instance of screenpos class
"""
def __init__(self, light_pos=(0., 0., 0.), offset=None, screenpos=None):
""" Initiallizes the instance
light_pos: position of the light source
offset: tuple of three ints indicating where to translate the polygons before rendering
screenpos: instance of screenpos class"""
self.light_pos = np.array(light_pos)
self.polygons3d = None
self.rotor = Rotate()
self.offset = offset
self.screenpos = screenpos
def get_polygons2d(self, polygons3d=None, colors=None, edge_colors=None, angles=None):
"""gets 2d coords from 3d coords and returns the 2d coords along the colors in the 2d drawing order
polygons3d: numpy array of shape (n, m, 3) with n being the number of polygons and m the number of nodes per polygon
colors: numpy array of shape (n, 3)
edge_colors: numpy array of shape (n, 3)
angles: tuple of three angles (in radians)
returns tuple of coordinates, colors, edge_colors and shading factor:
coordinates: numpy array of shape (n, m, 2)
colors: numpy array of shape (n, 3)
edge_colors: numpy array of shape (n, 3)
shading: numpy array of shape (n, )"""
# Rotate the polygons
self.polygons3d = self.rotor.rotate(angles, arr=polygons3d)
# translate the poligons
self.polygons3d += np.array(self.offset).reshape(1, 1, 3)
# get the sorting indexing for the polygons
centroids = self.polygons3d.mean(axis=(1,))
centroid_vectors = centroids - self.screenpos.c.reshape(1, 3)
distances2 = (centroid_vectors ** 2).sum(axis=1)
indexes = np.argsort(-distances2) # sorting in reverse order
# calculate shading of each polygon (cosine of angle formed by the vector orthogonal to the surface and the
# vector joining the centroid and the camera)
orto_vectors1 = self.polygons3d[:, 1, :] - self.polygons3d[:, 0, :]
orto_vectors2 = self.polygons3d[:, 2, :] - self.polygons3d[:, 0, :]
orto_vectors = np.cross(orto_vectors1, orto_vectors2, axis=1)
orto_vectors /= np.linalg.norm(orto_vectors, axis=1).reshape(-1, 1)
light_vectors = centroids - self.light_pos.reshape(1, 3)
light_vectors /= np.linalg.norm(light_vectors, axis=1).reshape(-1, 1)
cosine = np.abs(np.matmul(light_vectors.reshape(-1, 1, 3), orto_vectors.reshape(-1, 3, 1)).flatten())
# calculate 2d coordinates
coords = self.screenpos.pos(self.polygons3d)
# sort the arrays
coords = np.take(coords, indexes, axis=0)
colors = np.take(colors, indexes, axis=0)
edge_colors = np.take(edge_colors, indexes, axis=0)
cosine = np.take(cosine, indexes, axis=0)
return coords, colors, edge_colors, cosine
if __name__ == '__main__':
print(version)  # fixed NameError: the module-level variable is `version`
| 48.093333
| 123
| 0.657887
|
500ee1c8bd5c1fa5b34d97f56e4cc91a5a5aee5a
| 1,418
|
py
|
Python
|
boot.py
|
minnovation-au/MHM4-Boot
|
d314173d03ac15317285e78348da07f78cfffe85
|
[
"MIT"
] | null | null | null |
boot.py
|
minnovation-au/MHM4-Boot
|
d314173d03ac15317285e78348da07f78cfffe85
|
[
"MIT"
] | null | null | null |
boot.py
|
minnovation-au/MHM4-Boot
|
d314173d03ac15317285e78348da07f78cfffe85
|
[
"MIT"
] | null | null | null |
##################################
### DO NOT EDIT THIS FILE ###
### ###
### MHM4 boot.py V0.2 ###
### Last Updated ###
### Manjunath.R ###
### 25 July 2017 ###
### ###
### Copyright Minnovation 2017 ###
##################################
import ubinascii, machine
i=0
def mac():
mac=ubinascii.hexlify(machine.unique_id(),':').decode()
mac=mac.replace(":","")
return mac
ap_ssid = "MHM4-"+mac()
print(ap_ssid)
############ ENTER BOOTLOAD MODE ############
############ USER: micro PASSWD: python #####
from network import WLAN
wlan = WLAN()
wlan.init(mode=WLAN.AP, ssid=ap_ssid, auth=(WLAN.WPA2,'AlphaXI0T'), channel=7, antenna=WLAN.INT_ANT)
from machine import Timer
chrono = Timer.Chrono()
chrono.start()
print('PRESS CTL-C TO ENTER REPL')
while chrono.read() < 10:
i=i+1
if i > 100000:
print('PRESS CTL-C TO ENTER REPL', (10 - chrono.read()))  # countdown matches the 10 s window above
i=0
wlan.deinit()
################# SIGFOX ###################
#from network import Sigfox
#sigfox = Sigfox(mode=Sigfox.SIGFOX, rcz=Sigfox.RCZ4)
#ss = socket.socket(socket.AF_SIGFOX, socket.SOCK_RAW)
#ss.setblocking(True)
################# LORA #####################
#from network import LoRa
#lora = LoRa(mode=LoRa.LORA, region=LoRa.AU915)
#sl = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
#sl.setblocking(False)
| 25.781818
| 100
| 0.528209
|
e62e5e1630a1a391a17ba239a874014eb0afb6b7
| 6,491
|
py
|
Python
|
centernet_lightning/models/meta.py
|
gau-nernst/centernet-lightning
|
655d895e888f80f6a643e1b9b14d3d4ccb5e7930
|
[
"MIT"
] | 47
|
2021-08-10T09:30:53.000Z
|
2022-03-29T07:53:43.000Z
|
centernet_lightning/models/meta.py
|
gau-nernst/centernet-lightning
|
655d895e888f80f6a643e1b9b14d3d4ccb5e7930
|
[
"MIT"
] | 1
|
2021-08-07T13:46:49.000Z
|
2021-08-07T13:46:49.000Z
|
centernet_lightning/models/meta.py
|
gau-nernst/centernet-lightning
|
655d895e888f80f6a643e1b9b14d3d4ccb5e7930
|
[
"MIT"
] | 6
|
2021-08-12T02:40:43.000Z
|
2022-01-31T16:12:40.000Z
|
from typing import Any, Dict, List, Union
from functools import partial
import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR, SequentialLR
import pytorch_lightning as pl
from vision_toolbox.backbones import BaseBackbone
from vision_toolbox.necks import BaseNeck
from vision_toolbox.components import ConvBnAct
_optimizers = {
"SGD": partial(torch.optim.SGD, momentum=0.9),
"Adam": torch.optim.Adam,
"AdamW": torch.optim.AdamW,
"RMSprop": partial(torch.optim.RMSprop, momentum=0.9)
}
# Reference implementations
# https://github.com/tensorflow/models/blob/master/research/object_detection/meta_architectures/center_net_meta_arch.py num_filters = 256
# https://github.com/lbin/CenterNet-better-plus/blob/master/centernet/centernet_head.py num_filters = in_channels
class GenericHead(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, width: int=256, depth: int=1, block=ConvBnAct, init_bias: float=None):
super().__init__()
for i in range(depth):
in_c = in_channels if i == 0 else width
self.add_module(f"block_{i+1}", block(in_c, width))
self.out_conv = nn.Conv2d(width, out_channels, 1)
if init_bias is not None:
self.out_conv.bias.data.fill_(init_bias)
class GenericModel(nn.Module):
def __init__(self, backbone: BaseBackbone, neck: BaseNeck, heads: nn.Module, extra_block=None):
super().__init__()
self.backbone = backbone
self.neck = neck
self.heads = heads
self.extra_block = extra_block
def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
out = self.backbone.forward_features(x)
out = self.neck(out)
if self.extra_block is not None: # e.g. SPP
out = self.extra_block(out)
out = {name: head(out) for name, head in self.heads.named_children()}
return out
class MetaCenterNet(pl.LightningModule):
"""Meta architecture for CenterNet. Implement training logic
"""
def __init__(
self,
# model
backbone: BaseBackbone,
neck: BaseNeck,
heads: nn.Module,
extra_block: nn.Module=None,
# optimizer and scheduler
optimizer: str="SGD",
lr: float=0.05,
weight_decay: float=2e-5,
norm_weight_decay: float=0,
warmup_epochs: int=5,
warmup_decay: float=0.01,
# data
# batch_size: int=8,
# num_workers: int=2,
# train_data: Dict[str, Any]=None,
# val_data: Dict[str, Any]=None,
jit: bool=False
):
super().__init__()
# hparams are read back in training_step/configure_optimizers below, so they
# must be saved; the sub-modules are excluded from the saved hyperparameters
self.save_hyperparameters(ignore=["backbone", "neck", "heads", "extra_block"])
# self.backbone = backbone
# self.extra_block = extra_block
# self.neck = neck
# self.heads = nn.ModuleDict(heads)
self.model = GenericModel(backbone, neck, heads, extra_block=extra_block)
if jit:
self.model = torch.jit.script(self.model)
def get_output_dict(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
# """Return encoded outputs, a dict of output feature maps. Use this output to either compute loss or decode to detections. Heatmap is before sigmoid
# """
# feat = self.backbone.forward_features(x)
# if self.extra_block is not None: # e.g. SPP
# feat[-1] = self.extra_block(feat[-1])
# feat = self.neck(feat)
# outputs = {name: module(feat) for name, module in self.heads.items()}
# return outputs
return self.model(x)
def compute_loss(self, outputs: Dict[str, torch.Tensor], targets: List[Dict[str, Union[List, int]]]) -> Dict[str, torch.Tensor]:
pass
# """Return a dict of losses for each output head, and weighted total loss. This method is called during the training step
# """
# losses = {"total": torch.tensor(0., device=self.device)}
# for name, module in self.heads.items():
# module: BaseHead
# losses[name] = module.compute_loss(outputs, targets)
# losses["total"] += losses[name] * module.loss_weight
# return losses
def training_step(self, batch, batch_idx):
images, targets = batch
encoded_outputs = self.get_output_dict(images)
losses = self.compute_loss(encoded_outputs, targets)
for k, v in losses.items():
self.log(f"train/{k}_loss", v)
return losses["total"]
def configure_optimizers(self):
if self.hparams.norm_weight_decay is not None: # norm's weight decay = 0
# https://github.com/pytorch/vision/blob/main/torchvision/ops/_utils.py
norm_classes = (nn.modules.batchnorm._BatchNorm, nn.LayerNorm, nn.GroupNorm)
norm_params = []
other_params = []
for module in self.modules():
if next(module.children(), None):
other_params.extend(p for p in module.parameters(recurse=False) if p.requires_grad)
elif isinstance(module, norm_classes):
norm_params.extend(p for p in module.parameters() if p.requires_grad)
else:
other_params.extend(p for p in module.parameters() if p.requires_grad)
param_groups = (norm_params, other_params)
wd_groups = (self.hparams.norm_weight_decay, self.hparams.weight_decay)
parameters = [{"params": p, "weight_decay": w} for p, w in zip(param_groups, wd_groups) if p]
else:
parameters = self.parameters()
optimizer = _optimizers[self.hparams.optimizer](parameters, lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.trainer.max_epochs-self.hparams.warmup_epochs)
if self.hparams.warmup_epochs > 0:
warmup_scheduler = LinearLR(optimizer, start_factor=self.hparams.warmup_decay, total_iters=self.hparams.warmup_epochs)
lr_scheduler = SequentialLR(optimizer, schedulers=[warmup_scheduler, lr_scheduler], milestones=[self.hparams.warmup_epochs])
# https://github.com/pytorch/pytorch/issues/67318
if not hasattr(lr_scheduler, "optimizer"):
setattr(lr_scheduler, "optimizer", optimizer)
return {
"optimizer": optimizer,
"lr_scheduler": lr_scheduler
}
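if __name__ == "__main__":
    # Illustration only: a single-channel heatmap head on a 64-channel feature
    # map. Whether the spatial size is preserved depends on vision_toolbox's
    # ConvBnAct (assumed here to be a padded, stride-1 3x3 conv); -2.19 is the
    # bias init commonly used for CenterNet heatmap heads.
    head = GenericHead(64, 1, width=128, depth=2, init_bias=-2.19)
    print(head(torch.randn(2, 64, 32, 32)).shape)  # expected: (2, 1, 32, 32)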
| 41.082278
| 157
| 0.633493
|
ac11589e1068aebd17e7b2aa5655cfc26cf20c25
| 19,472
|
py
|
Python
|
src/restaff/helpers/notation_markup/notes.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | null | null | null |
src/restaff/helpers/notation_markup/notes.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | 10
|
2020-06-20T07:37:27.000Z
|
2020-07-05T06:22:07.000Z
|
src/restaff/helpers/notation_markup/notes.py
|
ko10ok/scorator
|
130250550126bbf863ed0028f99045c17d6249e6
|
[
"Apache-2.0"
] | null | null | null |
from typing import NamedTuple
import svgwrite
from svgwrite.path import Path
from svgwrite.shapes import Circle, Polyline
from svgwrite.text import Text
from restaff.helpers.svg_drawing import moved_path
from ...types import NotePitch, Note, Point, StaffProperties

import logging

logger = logging.getLogger(__name__)  # referenced by markup_note/calc_note_length below; a stand-in for the project's logger
note_start_offset = - 0.5
# (vertical offset in staff-line multiples, glyph orientation: 0 centred, 1 upper half, -1 lower half)
notes_offsets = {
'C': (0, 0),
'C#': (0, 1),
'Db': (0, 1),
'D': (0.5, 0),
'D#': (1, -1),
'Eb': (1, -1),
'E': (1, 0),
'Fb': (1, 0),
'E#': (1, 1),
'F': (1, 1),
'F#': (1.5, 0),
'Gb': (2, 0),
'G': (2, -1),
'G#': (2, 0),
'Ab': (2, 0),
'A': (2, 1),
'A#': (2.5, 0),
'Bb': (2.5, 0),
'B': (3, -1),
'Cb': (3, -1),
'B#': (3, 0),
}
notes_times = {
'whole': 1,
'half': 2,
'quarter': 4,
'eighth': 8,
'16th': 16,
'32nd': 32,
'64nd': 64
}
class NoteImage(NamedTuple):
centred: str
lower: str
upper: str
whole_note = NoteImage(
centred='M-3.270,-11.230 C-5.620,-11.230 -7.310,-10.420 -8.350,-8.800 C-9.520,-7.230 -10.100,-5.110 -10.100,-2.440 C-10.100,0.630 -9.290,3.260 -7.660,5.470 C-6.040,7.610 -4.250,9.110 -2.290,9.970 C-0.540,10.810 1.320,11.230 3.270,11.230 C5.550,11.230 7.280,10.420 8.440,8.800 C9.550,7.160 10.100,5.040 10.100,2.440 C10.100,-0.620 9.290,-3.260 7.660,-5.470 C6.040,-7.550 4.280,-9.050 2.400,-9.970 C0.500,-10.810 -1.390,-11.230 -3.270,-11.230 M0.050,-13.760 C7.080,-13.760 12.940,-12.400 17.630,-9.670 C22.380,-6.930 24.760,-3.710 24.760,0.000 C24.760,3.970 22.310,7.220 17.430,9.760 C12.540,12.430 6.750,13.760 0.050,13.760 C-6.850,13.760 -12.710,12.400 -17.520,9.670 C-22.340,6.930 -24.760,3.710 -24.760,0.000 C-24.760,-3.970 -22.280,-7.220 -17.330,-9.760 C-12.450,-12.430 -6.650,-13.760 0.050,-13.760',
upper='M-9.910,0.000 C-10.330,-2.570 -10.150,-4.890 -9.390,-6.950 C-8.960,-8.100 -7.670,-10.620 -4.710,-11.110 C-3.250,-11.350 -0.540,-11.220 1.790,-10.240 C4.160,-9.250 6.150,-7.310 6.910,-6.450 C8.510,-4.650 9.520,-2.500 9.950,0.000 L24.760,0.000 C24.690,-3.250 22.630,-6.290 18.590,-9.130 C14.550,-11.970 8.360,-13.520 0.010,-13.760 C-2.770,-13.770 -5.390,-13.530 -7.850,-13.050 C-12.050,-12.240 -15.810,-10.730 -18.450,-9.130 C-22.640,-6.600 -24.740,-3.560 -24.740,0.000 L-9.910,0.000',
lower='M9.920,0.000 C10.340,2.570 10.170,4.890 9.400,6.950 C8.980,8.100 7.680,10.620 4.730,11.110 C3.260,11.350 0.550,11.220 -1.770,10.240 C-4.150,9.250 -6.130,7.310 -6.900,6.450 C-8.500,4.650 -9.510,2.500 -9.930,0.000 L-24.740,0.000 C-24.670,3.250 -22.610,6.290 -18.570,9.130 C-14.530,11.970 -8.340,13.520 0.010,13.760 C2.780,13.770 5.400,13.530 7.870,13.050 C12.070,12.240 15.830,10.730 18.470,9.130 C22.660,6.600 24.750,3.560 24.760,0.000 L9.920,0.000',
)
partial_note = NoteImage(
centred='M0.060,-13.760 C7.090,-13.760 12.950,-12.400 17.630,-9.670 C22.380,-6.930 24.760,-3.710 24.760,0.000 C24.760,3.970 22.310,7.220 17.430,9.760 C12.550,12.430 6.760,13.760 0.060,13.760 C-6.840,13.760 -12.700,12.400 -17.510,9.670 C-22.330,6.930 -24.740,3.710 -24.740,0.000 C-24.740,-3.970 -22.270,-7.220 -17.320,-9.760 C-12.440,-12.430 -6.640,-13.760 0.060,-13.760',
upper='M-9.910,0.000 L9.950,0.000 L24.760,0.000 C24.690,-3.250 22.630,-6.290 18.590,-9.130 C14.550,-11.970 8.360,-13.520 0.010,-13.760 C-2.770,-13.770 -5.390,-13.530 -7.850,-13.050 C-12.050,-12.240 -15.810,-10.730 -18.450,-9.130 C-22.640,-6.600 -24.740,-3.560 -24.740,0.000 L-9.910,0.000',
lower='M9.920,0.000 L-9.930,0.000 L-24.740,0.000 C-24.670,3.250 -22.610,6.290 -18.570,9.130 C-14.530,11.970 -8.340,13.520 0.010,13.760 C2.780,13.770 5.400,13.530 7.870,13.050 C12.070,12.240 15.830,10.730 18.470,9.130 C22.660,6.600 24.750,3.560 24.760,0.000 L9.920,0.000',
)
# TODO make partial note filled draft
note_signs = {
'whole': whole_note,
'half': whole_note,
'partial_note': partial_note
}
rest_signs = {
'whole': 'M17.781,7.815 L-17.766,7.815 C-18.422,7.815 -18.750,7.495 -18.750,6.845 L-18.750,-6.825 C-18.750,-7.485 -18.422,-7.815 -17.766,-7.815 L17.781,-7.815 C18.427,-7.815 18.750,-7.485 18.750,-6.825 L18.750,6.845 C18.750,7.495 18.427,7.815 17.781,7.815',
'half': 'M-11.580,-3.995 C-11.580,-5.175 -10.920,-6.475 -9.610,-7.905 C-8.640,-8.945 -7.210,-10.185 -5.310,-11.625 C-3.880,-12.595 -2.490,-14.055 -1.120,-16.015 C0.180,-17.895 0.830,-19.955 0.830,-22.165 C0.830,-24.775 -0.010,-27.085 -1.700,-29.095 L-5.220,-33.295 C-5.480,-33.555 -5.610,-33.885 -5.610,-34.275 S-5.420,-35.015 -5.030,-35.345 C-4.570,-35.675 -4.180,-35.845 -3.860,-35.845 C-3.400,-35.845 -3.040,-35.645 -2.780,-35.245 L12.360,-17.275 C13.010,-16.435 13.330,-15.595 13.330,-14.745 C13.330,-13.575 12.680,-12.265 11.380,-10.845 C10.530,-9.925 9.140,-8.685 7.190,-7.125 C5.690,-6.215 4.260,-4.755 2.890,-2.735 C1.590,-0.845 0.940,1.205 0.940,3.425 C0.940,6.165 1.720,8.475 3.280,10.365 L11.670,20.225 C11.870,20.415 12.030,20.745 12.160,21.205 C12.160,21.655 12.000,22.045 11.670,22.375 C11.220,22.705 10.820,22.865 10.500,22.865 C10.380,22.865 9.860,22.505 8.940,21.785 C7.960,21.005 6.660,20.255 5.030,19.535 C3.210,18.825 1.420,18.475 -0.340,18.475 C-1.970,18.475 -3.300,18.925 -4.340,19.835 C-5.250,20.745 -5.700,22.505 -5.700,25.115 C-5.700,29.085 -4.760,32.205 -2.870,34.485 C-2.680,34.745 -2.640,35.075 -2.780,35.455 C-2.900,35.715 -3.130,35.845 -3.470,35.845 C-3.920,35.845 -4.920,34.675 -6.480,32.335 C-8.120,29.865 -9.650,27.025 -11.080,23.835 C-12.580,20.445 -13.330,17.645 -13.330,15.435 C-13.330,12.635 -11.990,11.235 -9.330,11.235 C-6.260,11.235 -2.290,12.275 2.600,14.365 L-10.590,-1.465 C-11.250,-2.315 -11.580,-3.155 -11.580,-3.995',
'quarter': 'M-5.325,-23.250 C-3.435,-23.250 -2.005,-22.720 -1.025,-21.680 C-0.045,-20.510 0.475,-19.400 0.535,-18.360 C0.675,-17.380 1.005,-16.270 1.525,-15.040 C2.105,-14.000 2.855,-13.480 3.755,-13.480 C4.475,-13.480 5.555,-14.290 6.995,-15.920 C8.295,-17.340 9.235,-18.620 9.825,-19.730 C10.145,-20.380 10.595,-20.700 11.175,-20.700 L11.285,-20.700 C11.935,-20.640 12.385,-20.310 12.645,-19.730 L0.355,22.270 C-0.305,22.930 -1.215,23.250 -2.395,23.250 C-3.565,23.250 -4.475,22.930 -5.115,22.270 L6.605,-10.360 C2.435,-8.860 -1.085,-8.110 -3.945,-8.110 C-6.285,-8.110 -8.305,-8.860 -10.005,-10.360 C-11.765,-11.780 -12.645,-13.670 -12.645,-16.010 C-12.645,-18.030 -11.925,-19.720 -10.495,-21.090 C-9.065,-22.530 -7.345,-23.250 -5.325,-23.250',
'eighth': 'M-1.175,-35.750 C0.785,-35.750 2.185,-35.220 3.025,-34.180 C3.935,-33.070 4.485,-31.960 4.685,-30.860 C4.745,-29.750 5.075,-28.650 5.675,-27.540 C6.055,-26.500 6.765,-25.980 7.815,-25.980 C8.465,-25.980 9.445,-26.760 10.745,-28.320 C12.235,-30.150 13.185,-31.450 13.575,-32.230 C13.895,-32.880 14.355,-33.200 14.935,-33.200 C14.935,-33.200 14.975,-33.200 15.045,-33.200 C15.625,-33.140 16.055,-32.810 16.315,-32.230 L-1.565,34.770 C-2.205,35.430 -3.115,35.750 -4.295,35.750 C-5.465,35.750 -6.375,35.430 -7.035,34.770 L3.025,2.050 C-1.335,3.610 -4.885,4.390 -7.615,4.390 C-9.955,4.390 -11.975,3.650 -13.675,2.160 C-15.435,0.720 -16.315,-1.170 -16.315,-3.510 C-16.315,-5.530 -15.635,-7.220 -14.265,-8.590 C-12.825,-10.020 -11.105,-10.730 -9.075,-10.730 C-7.135,-10.730 -5.735,-10.210 -4.875,-9.170 C-3.905,-8.060 -3.325,-6.960 -3.125,-5.860 C-2.925,-4.550 -2.605,-3.440 -2.145,-2.530 C-1.695,-1.490 -0.985,-0.960 -0.005,-0.960 C0.775,-0.960 1.885,-1.880 3.325,-3.700 C4.815,-5.400 5.725,-6.740 6.065,-7.710 L10.635,-22.860 C6.475,-21.360 2.995,-20.610 0.205,-20.610 C-2.145,-20.610 -4.165,-21.360 -5.865,-22.860 C-7.625,-24.280 -8.505,-26.170 -8.505,-28.510 C-8.505,-30.530 -7.785,-32.220 -6.345,-33.590 C-4.915,-35.030 -3.195,-35.750 -1.175,-35.750',
'16th': 'M1.915,-48.250 C3.795,-48.250 5.235,-47.720 6.215,-46.680 C7.175,-45.510 7.705,-44.400 7.775,-43.360 C7.895,-42.380 8.225,-41.270 8.745,-40.040 C9.325,-39.000 10.075,-38.480 10.995,-38.480 C11.575,-38.480 12.515,-39.260 13.815,-40.820 C14.995,-42.510 15.775,-43.810 16.165,-44.730 C16.485,-45.380 16.945,-45.700 17.535,-45.700 L17.635,-45.700 C18.275,-45.640 18.735,-45.310 18.995,-44.730 L-2.585,47.270 C-3.235,47.930 -4.175,48.250 -5.415,48.250 C-6.515,48.250 -7.435,47.930 -8.145,47.270 L0.445,14.550 C-3.655,16.110 -7.245,16.890 -10.305,16.890 C-12.585,16.890 -14.635,16.150 -16.465,14.660 C-18.145,13.160 -18.995,11.270 -18.995,8.990 C-18.995,6.970 -18.275,5.280 -16.855,3.910 C-15.475,2.480 -13.815,1.770 -11.865,1.770 C-9.975,1.770 -8.535,2.290 -7.575,3.330 C-6.525,4.440 -5.975,5.540 -5.895,6.640 C-5.715,7.950 -5.385,9.060 -4.935,9.970 C-4.475,11.010 -3.725,11.540 -2.685,11.540 C-1.965,11.540 -0.855,10.620 0.635,8.800 C2.005,6.980 2.855,5.550 3.175,4.500 L7.085,-10.450 C2.725,-8.890 -0.825,-8.110 -3.555,-8.110 C-5.895,-8.110 -7.915,-8.860 -9.615,-10.360 C-11.375,-11.780 -12.255,-13.670 -12.255,-16.010 C-12.255,-18.030 -11.535,-19.720 -10.105,-21.090 C-8.675,-22.530 -6.955,-23.250 -4.935,-23.250 C-2.975,-23.250 -1.575,-22.720 -0.725,-21.680 C0.175,-20.570 0.735,-19.460 0.925,-18.360 C1.125,-17.050 1.455,-15.950 1.915,-15.040 C2.365,-14.000 3.075,-13.480 4.055,-13.480 C4.775,-13.480 5.815,-14.390 7.175,-16.210 C8.545,-17.840 9.395,-19.170 9.725,-20.210 L13.725,-35.360 C9.625,-33.860 6.165,-33.110 3.365,-33.110 C1.095,-33.110 -0.955,-33.860 -2.775,-35.360 C-4.475,-36.840 -5.325,-38.730 -5.325,-41.010 C-5.325,-43.030 -4.605,-44.720 -3.165,-46.090 C-1.795,-47.530 -0.105,-48.250 1.915,-48.250',
'32nd': 'M1.915,-48.250 C3.795,-48.250 5.235,-47.720 6.215,-46.680 C7.185,-45.510 7.705,-44.400 7.775,-43.360 C7.895,-42.380 8.225,-41.270 8.745,-40.040 C9.325,-39.000 10.075,-38.480 10.995,-38.480 C11.575,-38.480 12.515,-39.260 13.825,-40.820 C14.995,-42.510 15.775,-43.810 16.165,-44.730 C16.485,-45.380 16.945,-45.700 17.535,-45.700 L17.635,-45.700 C18.275,-45.640 18.735,-45.310 18.995,-44.730 L-2.585,47.270 C-3.235,47.930 -4.175,48.250 -5.415,48.250 C-6.515,48.250 -7.425,47.930 -8.145,47.270 L0.445,14.550 C-3.655,16.110 -7.245,16.890 -10.305,16.890 C-12.585,16.890 -14.635,16.150 -16.465,14.660 C-18.145,13.160 -18.995,11.270 -18.995,8.990 C-18.995,6.970 -18.275,5.280 -16.855,3.910 C-15.475,2.480 -13.815,1.770 -11.865,1.770 C-9.975,1.770 -8.535,2.290 -7.565,3.330 C-6.525,4.440 -5.975,5.540 -5.895,6.640 C-5.715,7.950 -5.385,9.060 -4.925,9.970 C-4.475,11.010 -3.725,11.540 -2.675,11.540 C-1.965,11.540 -0.855,10.620 0.635,8.800 C2.005,6.980 2.855,5.550 3.185,4.500 L7.085,-10.450 C2.725,-8.890 -0.825,-8.110 -3.555,-8.110 C-5.895,-8.110 -7.915,-8.860 -9.615,-10.360 C-11.375,-11.780 -12.255,-13.670 -12.255,-16.010 C-12.255,-18.030 -11.535,-19.720 -10.105,-21.090 C-8.675,-22.530 -6.955,-23.250 -4.925,-23.250 C-2.975,-23.250 -1.565,-22.720 -0.725,-21.680 C0.185,-20.570 0.735,-19.460 0.935,-18.360 C1.125,-17.050 1.455,-15.950 1.915,-15.040 C2.365,-14.000 3.075,-13.480 4.055,-13.480 C4.775,-13.480 5.815,-14.390 7.185,-16.210 C8.545,-17.840 9.395,-19.170 9.725,-20.210 L13.725,-35.360 C9.625,-33.860 6.165,-33.110 3.365,-33.110 C1.095,-33.110 -0.955,-33.860 -2.775,-35.360 C-4.475,-36.840 -5.315,-38.730 -5.315,-41.010 C-5.315,-43.030 -4.605,-44.720 -3.165,-46.090 C-1.795,-47.530 -0.105,-48.250 1.915,-48.250',
}
def get_note_name(note_pitch: NotePitch):
return note_pitch.step + ['', '#', 'b'][note_pitch.alter]
def get_note_position(staff_prop, staff_base_octave, note: NotePitch) -> int:
last_line = (staff_prop.staff_line_count - 1) * staff_prop.staff_line_offset
octave_offset = staff_prop.staff_line_offset * 3 # 2 lines 3 spaces divides 1 octave
note_octave_offset = (note.octave - staff_base_octave) * octave_offset
note_name = note.step + ['', '#', 'b'][note.alter]
note_grade, note_orientation = notes_offsets[note_name]
note_offset = note_grade * staff_prop.staff_line_offset
return last_line - note_octave_offset - note_offset
def get_note_sign(note: Note):
note_name = note.pitch.step + ['', '#', 'b'][note.pitch.alter]
note_grade, note_orientation = notes_offsets[note_name]
note_type = note_signs.get(note.type, note_signs['partial_note'])
image_orientation = ['centred', 'upper', 'lower'][note_orientation]
return getattr(note_type, image_orientation)
def get_rest_sign(note: Note):
if note.type not in rest_signs:
return rest_signs['whole']
else:
return rest_signs[note.type]
def markup_note_body(sign, note_position: Point):
return Path(d=moved_path(sign, note_position.x, note_position.y))
hooks = {
0: None,
1: "M0.000,25.000 L0.000,0.000 L3.125,0.000 C3.125,3.709 3.844,7.422 5.281,11.141 C6.844,15.172 8.568,18.688 10.453,21.688 C11.818,23.834 13.870,27.120 16.610,31.547 C18.693,34.933 20.451,38.479 21.883,42.188 C23.315,45.896 24.031,49.610 24.031,53.328 C24.031,60.099 22.630,67.000 19.828,74.032 C19.308,74.813 18.688,75.203 17.969,75.203 C17.323,75.203 16.740,74.943 16.219,74.422 C15.823,74.026 15.625,73.537 15.625,72.953 L15.625,72.469 C18.427,66.021 19.828,59.641 19.828,53.328 C19.828,50.068 18.849,46.552 16.891,42.782 C14.943,38.938 12.959,35.875 10.938,33.594 C8.656,30.927 6.052,28.063 3.125,25.000 L0.000,25.000",
2: "M3.125,0.000 C3.125,3.323 3.843,6.646 5.281,9.969 C6.645,13.219 8.338,16.308 10.359,19.235 L16.218,27.828 C18.166,30.505 19.859,33.599 21.297,37.110 C22.724,40.370 23.437,43.657 23.437,46.969 C23.437,50.032 22.656,53.224 21.093,56.547 C23.052,60.516 24.031,64.391 24.031,68.172 C24.031,74.162 22.630,80.282 19.828,86.532 C19.307,87.313 18.687,87.703 17.968,87.703 C17.323,87.703 16.739,87.443 16.218,86.922 C15.823,86.526 15.625,86.037 15.625,85.453 C15.625,85.453 15.625,85.292 15.625,84.969 C18.427,79.427 19.828,73.828 19.828,68.172 C19.828,65.630 19.338,63.287 18.359,61.141 C17.192,58.599 15.760,56.255 14.062,54.110 C11.916,51.308 10.224,49.287 8.984,48.047 C7.297,46.360 5.343,44.505 3.125,42.485 L3.125,50.000 L0.000,50.000 L0.000,0.000 L3.125,0.000 M18.453,51.657 C18.911,50.292 19.140,48.729 19.140,46.969 C19.140,44.698 18.687,42.391 17.781,40.047 C16.802,37.703 15.432,35.323 13.672,32.907 C12.047,30.896 10.385,28.912 8.687,26.953 C7.385,25.526 5.531,23.641 3.125,21.297 C3.125,23.964 3.677,26.792 4.781,29.782 C5.958,32.584 7.166,35.026 8.406,37.110 C9.833,39.328 11.588,41.836 13.672,44.633 C15.755,47.430 17.349,49.771 18.453,51.657",
3: "M3.125,21.188 L3.125,21.688 C3.125,24.552 3.646,27.250 4.687,29.782 C5.927,32.979 7.036,35.292 8.015,36.719 C9.703,39.261 11.396,41.672 13.094,43.953 C15.239,47.078 16.802,49.323 17.781,50.688 C18.041,49.844 18.172,48.735 18.172,47.360 C18.172,44.495 17.260,41.276 15.437,37.703 C13.541,33.985 11.750,31.120 10.062,29.110 C7.719,26.245 5.406,23.604 3.125,21.188 M3.125,0.000 C3.125,3.323 3.776,6.646 5.078,9.969 C6.317,13.094 7.948,16.219 9.969,19.344 L15.625,28.125 C17.583,31.052 19.213,34.177 20.515,37.500 C21.817,40.886 22.469,44.172 22.469,47.360 C22.469,50.037 21.817,52.771 20.515,55.563 C22.463,59.542 23.437,63.318 23.437,66.891 C23.437,70.349 22.625,73.834 21.000,77.344 C23.021,81.125 24.031,84.901 24.031,88.672 C24.031,94.464 22.630,100.292 19.828,106.157 C19.370,107.000 18.750,107.422 17.969,107.422 C17.323,107.422 16.739,107.162 16.219,106.641 C15.823,106.120 15.625,105.599 15.625,105.078 C15.625,105.078 15.625,104.948 15.625,104.688 C18.427,99.354 19.828,94.016 19.828,88.672 C19.828,84.568 18.265,80.563 15.140,76.657 C11.953,72.688 7.948,68.719 3.125,64.750 L3.125,75.000 L0.000,75.000 L0.000,0.000 L3.125,0.000 M3.125,42.969 L3.125,43.453 C3.125,45.995 3.677,48.729 4.781,51.657 C5.958,54.657 7.099,56.969 8.203,58.594 C8.922,59.636 10.682,62.047 13.484,65.828 C15.828,69.016 17.422,71.229 18.265,72.469 C18.849,70.646 19.140,68.787 19.140,66.891 C19.140,64.099 18.198,61.073 16.312,57.813 C14.614,54.823 12.692,52.219 10.547,50.000 C8.463,47.792 5.989,45.448 3.125,42.969"
}
def markup_note(staff_prop: StaffProperties, staff_start_position, staff_octave, horizontal_note_position, chord_offset,
note, chords_notes):
not_chord_note = note.id not in chords_notes
chord_note = note.id in chords_notes
last_chord_note = chord_note and chords_notes.get(note.id, {}).last
objects = []
note_offset = get_note_position(staff_prop, staff_octave, note.pitch)
vertical_note_position = staff_start_position + note_offset
note_sign = get_note_sign(note)
objects += [markup_note_body(
note_sign,
Point(
horizontal_note_position + chord_offset,
vertical_note_position
)
)]
if note.dot:
addition = (note_offset - 0.5) % staff_prop.staff_line_offset - staff_prop.staff_line_offset / 2
objects += [
Circle(
center=(
horizontal_note_position + 35 + chord_offset,
vertical_note_position + addition
),
r=4)
]
if note.time_modification:
objects += [Text(
str(note.time_modification['actual-notes']),
insert=(
horizontal_note_position,
staff_start_position - staff_prop.staff_offset // 2),
fill="rgb(110,110,110)",
style="font-size:15px; font-family:Arial",
)]
objects += []
flag = {
'whole': (0, 0),
'half': (0.83, 0),
'quarter': (0.83, 0),
'eighth': (0.9, 1),
'16th': (1, 2),
'32nd': (1.2, 3),
}
stem_length_multiplier, beam_count = flag[note.type]
if stem_length_multiplier:
half_note_offset = 18.2
stem_width = 3
stem_lenght = 85 * stem_length_multiplier
stem_offset = -0.5
objects += [
Polyline(
points=[(horizontal_note_position + half_note_offset, vertical_note_position + stem_offset),
(horizontal_note_position + half_note_offset,
vertical_note_position - stem_lenght + stem_offset)]
).stroke(
color=svgwrite.rgb(0, 0, 0),
width=stem_width,
linejoin='bevel',
linecap="round",
)
]
# TODO extract beam|stemm drawing into note groups drawing
# logger.debug(f'{not_chord_note=} {last_chord_note=} {first_chord_note=}')
if not_chord_note or last_chord_note:
assert beam_count <= 3, f'max 32nd note, {beam_count=} given'
beam = hooks[beam_count]
if beam:
beam_length = 13
beam_offset = -0.5
objects += [
Path(d=moved_path(
beam,
horizontal_note_position + half_note_offset - stem_width / 2,
vertical_note_position - stem_lenght + beam_offset
))
]
return objects
def calc_note_length(measure, time, note):
note_lenght = (measure.end - measure.start - measure.left_offset - measure.right_offset) \
/ (notes_times[note.type] if note.type else notes_times['whole'])
note_lenght *= (time.beat_type / time.beats)
if note.dot:
note_lenght += note_lenght / 2
if note.time_modification:
logger.debug(f'{note.time_modification=}')
actual = note.time_modification['actual-notes']
normal = note.time_modification['normal-notes']
note_lenght_multiplier = int(normal) / int(actual)
logger.debug(f'{note.time_modification} {note_lenght_multiplier}')
note_lenght = note_lenght * note_lenght_multiplier
return note_lenght
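if __name__ == '__main__':
    from types import SimpleNamespace
    # Illustration only: get_note_name just needs .step and .alter; alter
    # indexes ['', '#', 'b'] in this module, so 1 -> sharp and -1 (or 2) -> flat.
    print(get_note_name(SimpleNamespace(step='F', alter=1)))   # F#
    print(get_note_name(SimpleNamespace(step='B', alter=-1)))  # Bb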
| 82.160338
| 1,727
| 0.638507
|
7962fbee5311a22dd05adf1acb992a047cb89b61
| 3,379
|
py
|
Python
|
fs_stats.py
|
grimkor/fs_stats
|
01cbca388b8e1b2e43b0baa727d69354cd017f7d
|
[
"MIT"
] | null | null | null |
fs_stats.py
|
grimkor/fs_stats
|
01cbca388b8e1b2e43b0baa727d69354cd017f7d
|
[
"MIT"
] | null | null | null |
fs_stats.py
|
grimkor/fs_stats
|
01cbca388b8e1b2e43b0baa727d69354cd017f7d
|
[
"MIT"
] | 1
|
2020-09-07T22:52:42.000Z
|
2020-09-07T22:52:42.000Z
|
import asyncio
from enum import Enum, auto
import re
import os
import watchgod
import database
OUTPUT_LOG = os.environ['USERPROFILE'] + r'\AppData\LocalLow\Sirlin Games\Fantasy Strike\output_log.txt'
class State(Enum):
GAME_CLOSED = auto()
NO_MATCH = auto()
MATCH = auto()
class StateMachine():
def __init__(self, state=None):
if state is None:
state = State.GAME_CLOSED
self.state = state
self.gameplay_random_seed = None
self.opp_name = None
self.opp_rank = None
self.my_rank = None
self.player_number = None
self.win = None
self.loser_score = None
def __call__(self, line):
if self.state == State.GAME_CLOSED:
self.game_closed(line)
elif self.state == State.NO_MATCH:
self.no_match(line)
elif self.state == State.MATCH:
self.match(line)
def game_closed(self, line):
# If the game is closed but we're getting updates,
# it must be open again
self.state = State.NO_MATCH
def no_match(self, line):
if 'Steam shutdown' in line:
self.on_shutdown()
if '[|joinranked:' in line:
data = line[:-1].split('|joinranked:')[1]
my_dict = dict([value.split(':') for value in data.split(',')])
if 'oppName' in my_dict:
self.gameplay_random_seed = int(my_dict['gameplayRandomSeed'])
self.player_number = int(my_dict['pnum'])
self.opp_name = my_dict['oppName']
self.opp_rank = int(my_dict['oppLeague']), int(my_dict['oppRank'])
self.my_rank = int(my_dict['playerLeague']), int(my_dict['playerRank'])
self.state = State.MATCH
print(f'Match found! Opponent is {self.opp_name}')
def match(self, line):
if 'Steam shutdown' in line:
self.on_shutdown()
if 'END PrepareTeamBattleScreen' in line:
if (match := re.search(r'winnerChars P1 \[(.*?)\] P2 \[(.*?)\]', line)):
if len(match.group(1).split(',')) == 3:
# player 1 wins
if match.group(2):
self.loser_score = len(match.group(2).split(','))
else:
self.loser_score = 0
self.win = self.player_number == 1
elif len(match.group(2).split(',')) == 3:
# player 2 wins
if match.group(1):
self.loser_score = len(match.group(1).split(','))
else:
self.loser_score = 0
self.win = self.player_number == 2
else:
return
print('Match complete!')
print(f'My score: {3 if self.win else self.loser_score}')
print(f'{self.opp_name} score: {3 if not self.win else self.loser_score}')
database.add(
self.gameplay_random_seed,
self.win,
self.opp_name,
self.opp_rank[0],
self.opp_rank[1],
self.my_rank[0],
self.my_rank[1],
self.loser_score
)
self.gameplay_random_seed = self.opp_name = self.opp_rank = self.my_rank = self.player_number = self.win = self.loser_score = None
self.state = State.NO_MATCH
database.publish()
def on_shutdown(self):
self.gameplay_random_seed = self.opp_name = self.opp_rank = self.my_rank = self.player_number = self.win = self.loser_score = None
self.state = State.GAME_CLOSED
def main(state_machine):
with open(OUTPUT_LOG) as f:
for _ in watchgod.watch(OUTPUT_LOG):
for line in f.readlines():
line = line.strip()
if line:
state_machine(line)
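def _demo_parse():
    # Hypothetical, illustrative helper (not part of the original script): it
    # drives StateMachine with one synthetic '[|joinranked:...]' line whose
    # format is inferred from no_match() above; real output_log.txt lines may differ.
    sm = StateMachine(state=State.NO_MATCH)
    sm('[|joinranked:gameplayRandomSeed:123,pnum:1,oppName:SomePlayer,'
       'oppLeague:2,oppRank:3,playerLeague:2,playerRank:5]')
    assert sm.state == State.MATCH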
if __name__ == '__main__':
main(StateMachine())
| 28.158333
| 135
| 0.636875
|
5c644115445f19befe3644f699495db6fc2552a5
| 344
|
py
|
Python
|
Pythonexer/ExerPython/aprendendopython/ex077.py
|
felipemcm3/ExerPython
|
d66c891eb82c0f7fd9c15203fe85a06e96d916b5
|
[
"MIT"
] | null | null | null |
Pythonexer/ExerPython/aprendendopython/ex077.py
|
felipemcm3/ExerPython
|
d66c891eb82c0f7fd9c15203fe85a06e96d916b5
|
[
"MIT"
] | null | null | null |
Pythonexer/ExerPython/aprendendopython/ex077.py
|
felipemcm3/ExerPython
|
d66c891eb82c0f7fd9c15203fe85a06e96d916b5
|
[
"MIT"
] | null | null | null |
lista = ('morango', 'pessego', 'melancia', 'manga', 'uva', 'carro')
vogal = ('a', 'e', 'i', 'o', 'u')
for y in lista:
print('\nA palavra {} tem vogal '.format(y.upper()), end = ' ')
for x in vogal:
        try:
            # str.index() raises ValueError when the vowel is absent; a match at
            # position 0 must still count, so the returned index is not tested
            y.index(x)
            print('{}'.format(x), end = ' ')
        except ValueError:
            continue
| 28.666667
| 67
| 0.465116
|
ed9ba7cea7497dde7e2efb9f1a3c9509938ea10e
| 2,472
|
py
|
Python
|
test.py
|
adamhamden/MultiModalHumor
|
6d66d9e3d654f92c4be615f4b403fa51c9e532a2
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
adamhamden/MultiModalHumor
|
6d66d9e3d654f92c4be615f4b403fa51c9e532a2
|
[
"CC0-1.0"
] | null | null | null |
test.py
|
adamhamden/MultiModalHumor
|
6d66d9e3d654f92c4be615f4b403fa51c9e532a2
|
[
"CC0-1.0"
] | null | null | null |
from data import Data
import torch.optim as optim
from MultiModalHumor.model import *
from config import config, humor_speakers, speakers
from sklearn.metrics import confusion_matrix
config = config()
common_kwargs = dict(path2data='../PATS/data',
speaker=['fallon', 'rock'],
modalities=['pose/normalize','audio/log_mel_512', 'text/bert'],
fs_new=[15, 15, 15],
time=4.3,
batch_size=config['batch_size'],
window_hop=5,
shuffle=True)
model = HumorClassifier(config).to(config['device'])
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
for epoch in range(config['epochs']):
data = Data(**common_kwargs)
style_dict = data.style_dict
style_to_speaker_dict = {v: k for k, v in style_dict.items()}
print(style_to_speaker_dict)
train_loss = 0.0
i = 0
model.train()
for batch in data.train:
x_t = batch['text/bert']
x_a = batch['audio/log_mel_512']
x_p = batch['pose/normalize']
x_t = x_t.to(config['device'])
x_a = x_a.to(config['device'])
x_p = x_p.to(config['device'])
# this is assuming time*fs = 64
if x_t.shape[0] != config['batch_size']:
break
x_t = x_t.reshape((config['batch_size'], config['context_length'], -1, config['lstm_text_input']))
x_a = x_a.reshape((config['batch_size'], config['context_length'], -1, config['lstm_audio_input']))
x_p = x_p.reshape((config['batch_size'], config['context_length'], -1, config['lstm_pose_input']))
styles = batch['style'][:, 0]
#print(batch['style'])
speakers = list(map(lambda x: style_to_speaker_dict[x], styles.numpy()))
#print(speakers)
batch_label = [1 if speaker in humor_speakers else 0 for speaker in speakers]
#print(f'1s = {batch_label.count(1)} and 0s = {batch_label.count(0)}')
batch_label = torch.Tensor(batch_label).unsqueeze(1).to(config['device'])
optimizer.zero_grad()
pred = model(x_t.float(), x_a.float(), x_p.float()).squeeze(0)
loss = criterion(pred, batch_label.float())
train_loss += loss.item()
loss.backward()
optimizer.step()
print(f'Epoch {epoch} loss: {train_loss/len(data.train):.3f}')
torch.save(model.state_dict(), './trained_models/model_2')
| 38.030769
| 107
| 0.614078
|
1d23e1606da8142e44a40ca17d6cf9ec6c9d1cb1
| 1,002
|
py
|
Python
|
donate_stuff/users/admin.py
|
Raekker/donate-stuff
|
afc3c4235e1b72c02c237c27354741388369b710
|
[
"MIT"
] | null | null | null |
donate_stuff/users/admin.py
|
Raekker/donate-stuff
|
afc3c4235e1b72c02c237c27354741388369b710
|
[
"MIT"
] | 7
|
2021-05-12T06:13:03.000Z
|
2022-03-30T13:09:48.000Z
|
donate_stuff/users/admin.py
|
Raekker/donate-stuff
|
afc3c4235e1b72c02c237c27354741388369b710
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from donate_stuff.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 28.628571
| 74
| 0.557884
|
0841d9fb2b290cbad942b842769f6c2bdd7aa7d5
| 3,317
|
py
|
Python
|
plugins/feeds/public/otx_alienvault.py
|
GDATAAdvancedAnalytics/yeti
|
fcd3ee3d3d064df772d0392c20c22aad2bc4c8e6
|
[
"Apache-2.0"
] | 1,250
|
2017-03-12T16:20:47.000Z
|
2022-03-29T02:12:11.000Z
|
plugins/feeds/public/otx_alienvault.py
|
GDATAAdvancedAnalytics/yeti
|
fcd3ee3d3d064df772d0392c20c22aad2bc4c8e6
|
[
"Apache-2.0"
] | 540
|
2017-03-20T16:45:35.000Z
|
2022-03-22T16:55:02.000Z
|
plugins/feeds/public/otx_alienvault.py
|
GDATAAdvancedAnalytics/yeti
|
fcd3ee3d3d064df772d0392c20c22aad2bc4c8e6
|
[
"Apache-2.0"
] | 293
|
2017-03-20T13:59:07.000Z
|
2022-03-28T16:00:10.000Z
|
import logging
import time
from datetime import datetime, timedelta
from core import Feed
from core.config.config import yeti_config
from core.entities import Exploit, Entity
from core.errors import ObservableValidationError
from core.indicators import Yara, Indicator
from core.observables import Hash, Hostname, Url, Observable
class OTXAlienvault(Feed):
default_values = {
"frequency": timedelta(days=1),
"name": "OTXAlienvault",
"source": "https://otx.alienvault.com/api/v1/pulses/subscribed",
"description": "Feed of OTX by Alienvault",
}
def __init__(self, *args, **kwargs):
self.refs = {
"hostname": Hostname,
"domain": Hostname,
"FileHash-MD5": Hash,
"FileHash-SHA256": Hash,
"FileHash-SHA1": Hash,
"URL": Url,
"YARA": Yara,
"CVE": Exploit,
}
super(OTXAlienvault, self).__init__(*args, **kwargs)
def update(self):
otx_key = yeti_config.get("otx", "key")
number_page = yeti_config.get("otx", "pages")
assert otx_key and number_page, "OTX key and pages not configured in yeti.conf"
headers = {"X-OTX-API-KEY": otx_key}
for i in range(1, int(number_page)):
items = self.update_json(
headers=headers, params={"page": i}, key="results", filter_row="created"
)
for index, item in items:
self.analyze(item)
time.sleep(2)
def analyze(self, item):
context = dict(source=self.name)
context["references"] = "\r\n".join(item["references"])
context["description"] = item["description"]
context["link"] = "https://otx.alienvault.com/pulse/%s" % item["id"]
tags = item["tags"]
for indicator in item["indicators"]:
type_ind = self.refs.get(indicator["type"])
if not type_ind:
continue
context["title"] = indicator["title"]
context["infos"] = indicator["description"]
context["created"] = datetime.strptime(
indicator["created"], "%Y-%m-%dT%H:%M:%S"
)
if issubclass(type_ind, Observable):
try:
obs = type_ind.get_or_create(value=indicator["indicator"])
obs.tag(tags)
obs.add_context(context)
obs.add_source("feed")
except ObservableValidationError as e:
logging.error(e)
elif issubclass(type_ind, Entity):
type_ind.get_or_create(name=indicator["indicator"])
elif issubclass(type_ind, Indicator):
if type_ind == Yara:
try:
type_ind.get_or_create(
name="YARA_%s" % indicator["indicator"],
diamond="capability",
location="feeds",
pattern=indicator["content"],
)
except Exception:
logging.error("Error to create indicator %s" % indicator)
else:
logging.error("type of indicators is unknown %s", indicator["type"])
| 33.505051
| 88
| 0.538438
|
d9ea91a493b43b2f9a0037709ca84927df9ab7cd
| 533
|
py
|
Python
|
aaclient/test/test_util.py
|
mdavidsaver/aaclient
|
1ca30d6b988965d6cf1aec97279c71bbd656b2e9
|
[
"BSD-3-Clause"
] | 1
|
2022-03-21T16:25:27.000Z
|
2022-03-21T16:25:27.000Z
|
aaclient/test/test_util.py
|
mdavidsaver/aaclient
|
1ca30d6b988965d6cf1aec97279c71bbd656b2e9
|
[
"BSD-3-Clause"
] | null | null | null |
aaclient/test/test_util.py
|
mdavidsaver/aaclient
|
1ca30d6b988965d6cf1aec97279c71bbd656b2e9
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T18:26:53.000Z
|
2022-03-18T18:26:53.000Z
|
# Copyright 2022 Michael Davidsaver
# SPDX-License-Identifier: BSD
# See LICENSE file
import unittest
from .. import util
class TestWild(unittest.TestCase):
def test_ok(self):
for inp, exp in [
(r"hello", r"hello"),
(r"hello.", r"hello\."),
(r"he?lo.", r"he.lo\."),
(r"he?lo. wor\?d", r"he.lo\.\ wor\?d"),
(r"hel*w\*rld", r"hel.*w\*rld"),
]:
out = util.wild2re(inp)
self.assertEqual(exp, out, msg=inp)
| 26.65
| 55
| 0.491557
|
e7ffac7a2b2bd59727ac6968b43c60d1c037a9f8
| 5,358
|
py
|
Python
|
vulnerabilities/importers/apache_kafka.py
|
InLaw/vulnerablecode
|
e93154ce15f577430dda18cabd1feb1dabc7230a
|
[
"Apache-2.0"
] | null | null | null |
vulnerabilities/importers/apache_kafka.py
|
InLaw/vulnerablecode
|
e93154ce15f577430dda18cabd1feb1dabc7230a
|
[
"Apache-2.0"
] | null | null | null |
vulnerabilities/importers/apache_kafka.py
|
InLaw/vulnerablecode
|
e93154ce15f577430dda18cabd1feb1dabc7230a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software tool from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import asyncio
import requests
from bs4 import BeautifulSoup
from dephell_specifier import RangeSpecifier
from packageurl import PackageURL
from vulnerabilities.data_source import Advisory
from vulnerabilities.data_source import DataSource
from vulnerabilities.data_source import Reference
from vulnerabilities.package_managers import GitHubTagsAPI
GH_PAGE_URL = "https://raw.githubusercontent.com/apache/kafka-site/asf-site/cve-list.html"
ASF_PAGE_URL = "https://kafka.apache.org/cve-list"
class ApacheKafkaDataSource(DataSource):
@staticmethod
def fetch_advisory_page():
page = requests.get(GH_PAGE_URL)
return page.content
def set_api(self):
self.version_api = GitHubTagsAPI()
asyncio.run(self.version_api.load_api(["apache/kafka"]))
def updated_advisories(self):
advisory_page = self.fetch_advisory_page()
self.set_api()
parsed_data = self.to_advisory(advisory_page)
return self.batch_advisories(parsed_data)
def to_advisory(self, advisory_page):
advisories = []
advisory_page = BeautifulSoup(advisory_page, features="lxml")
cve_section_beginnings = advisory_page.find_all("h2")
for cve_section_beginning in cve_section_beginnings:
cve_id = cve_section_beginning.text.split("\n")[0]
cve_description_paragraph = cve_section_beginning.find_next_sibling("p")
cve_data_table = cve_section_beginning.find_next_sibling("table")
cve_data_table_rows = cve_data_table.find_all("tr")
affected_versions_row = cve_data_table_rows[0]
fixed_versions_row = cve_data_table_rows[1]
affected_version_ranges = to_version_ranges(
affected_versions_row.find_all("td")[1].text
)
fixed_version_ranges = to_version_ranges(fixed_versions_row.find_all("td")[1].text)
fixed_packages = [
PackageURL(type="apache", name="kafka", version=version)
for version in self.version_api.get("apache/kafka")
if any([version in version_range for version_range in fixed_version_ranges])
]
affected_packages = [
PackageURL(type="apache", name="kafka", version=version)
for version in self.version_api.get("apache/kafka")
if any([version in version_range for version_range in affected_version_ranges])
]
advisories.append(
Advisory(
vulnerability_id=cve_id,
summary=cve_description_paragraph.text,
impacted_package_urls=affected_packages,
resolved_package_urls=fixed_packages,
vuln_references=[
Reference(url=ASF_PAGE_URL),
Reference(
url=f"https://cve.mitre.org/cgi-bin/cvename.cgi?name={cve_id}",
reference_id=cve_id,
),
],
)
)
return advisories
def to_version_ranges(version_range_text):
version_ranges = []
range_expressions = version_range_text.split(",")
for range_expression in range_expressions:
if "to" in range_expression:
# eg range_expression == "3.2.0 to 3.2.1"
lower_bound, upper_bound = range_expression.split("to")
lower_bound = f">={lower_bound}"
upper_bound = f"<={upper_bound}"
version_ranges.append(RangeSpecifier(f"{lower_bound},{upper_bound}"))
elif "and later" in range_expression:
# eg range_expression == "2.1.1 and later"
range_expression = range_expression.replace("and later", "")
version_ranges.append(RangeSpecifier(f">={range_expression}"))
else:
# eg range_expression == "3.0.0"
version_ranges.append(RangeSpecifier(range_expression))
return version_ranges
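# A minimal, illustrative check of the three branches handled above, using the
# example expressions from the comments; the exact RangeSpecifier output depends
# on dephell_specifier and is not asserted here.
if __name__ == "__main__":
    for expression in ("3.2.0 to 3.2.1", "2.1.1 and later", "3.0.0"):
        print(expression, "->", to_version_ranges(expression))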
| 44.280992
| 95
| 0.676745
|
14c74de394dc5f18766a3fd72cbe1758717800c4
| 478
|
py
|
Python
|
config/urls.py
|
yezz123/Django-Authentication
|
3f207660950370aeaf8377f062d4767a0f48fa8c
|
[
"MIT"
] | 10
|
2021-08-30T08:37:12.000Z
|
2021-11-12T01:33:06.000Z
|
config/urls.py
|
yezz123/Django-Authentication
|
3f207660950370aeaf8377f062d4767a0f48fa8c
|
[
"MIT"
] | null | null | null |
config/urls.py
|
yezz123/Django-Authentication
|
3f207660950370aeaf8377f062d4767a0f48fa8c
|
[
"MIT"
] | null | null | null |
import debug_toolbar
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("accounts/", include("allauth.urls")),
path("", include("pages.urls")),
]
if settings.DEBUG:
urlpatterns += [
path("__debug__/", include(debug_toolbar.urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 26.555556
| 69
| 0.715481
|
eca7d9072ba6ca9c2c17846e8cb1f2b0f62a549a
| 2,919
|
py
|
Python
|
etna/analysis/feature_relevance/relevance_table.py
|
martins0n/etna
|
51e9cec5183da2499ca247b0e2db215507246ceb
|
[
"Apache-2.0"
] | 326
|
2021-11-18T15:30:50.000Z
|
2022-03-31T09:44:15.000Z
|
etna/analysis/feature_relevance/relevance_table.py
|
martins0n/etna
|
51e9cec5183da2499ca247b0e2db215507246ceb
|
[
"Apache-2.0"
] | 305
|
2021-11-17T10:28:31.000Z
|
2022-03-31T18:05:03.000Z
|
etna/analysis/feature_relevance/relevance_table.py
|
martins0n/etna
|
51e9cec5183da2499ca247b0e2db215507246ceb
|
[
"Apache-2.0"
] | 29
|
2021-11-21T12:10:48.000Z
|
2022-03-31T22:55:06.000Z
|
from typing import Union
import numpy as np
import pandas as pd
from catboost import CatBoostRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from etna.libs.tsfresh import calculate_relevance_table
TreeBasedRegressor = Union[
DecisionTreeRegressor,
ExtraTreeRegressor,
RandomForestRegressor,
ExtraTreesRegressor,
GradientBoostingRegressor,
CatBoostRegressor,
]
def get_statistics_relevance_table(df: pd.DataFrame, df_exog: pd.DataFrame) -> pd.DataFrame:
"""Calculate relevance table with p-values from tsfresh.
Parameters
----------
df:
dataframe with timeseries
df_exog:
dataframe with exogenous data
Returns
-------
pd.DataFrame
dataframe with p-values.
"""
regressors = sorted(df_exog.columns.get_level_values("feature").unique())
segments = sorted(df.columns.get_level_values("segment").unique())
result = np.empty((len(segments), len(regressors)))
for k, seg in enumerate(segments):
first_valid_idx = df.loc[:, seg].first_valid_index()
df_now = df.loc[first_valid_idx:, seg]["target"]
df_exog_now = df_exog.loc[first_valid_idx:, seg]
relevance = calculate_relevance_table(df_exog_now[: len(df_now)], df_now)[["feature", "p_value"]].values
result[k] = np.array(sorted(relevance, key=lambda x: x[0]))[:, 1]
relevance_table = pd.DataFrame(result)
relevance_table.index = segments
relevance_table.columns = regressors
return relevance_table
def get_model_relevance_table(df: pd.DataFrame, df_exog: pd.DataFrame, model: TreeBasedRegressor) -> pd.DataFrame:
"""Calculate relevance table with feature importance from model.
Parameters
----------
df:
dataframe with timeseries
df_exog:
dataframe with exogenous data
model:
model to obtain feature importance, should have ``feature_importances_`` property
Returns
-------
pd.DataFrame
dataframe with feature importance values.
"""
regressors = sorted(df_exog.columns.get_level_values("feature").unique())
segments = sorted(df.columns.get_level_values("segment").unique())
result = np.empty((len(segments), len(regressors)))
for k, seg in enumerate(segments):
df_exog_seg = df_exog.loc[:, seg].dropna()[regressors]
df_seg = df.loc[:, seg].dropna()["target"]
common_index = df_seg.index.intersection(df_exog_seg.index)
model.fit(df_exog_seg.loc[common_index], df_seg.loc[common_index])
result[k] = model.feature_importances_
relevance_table = pd.DataFrame(result)
relevance_table.index = segments
relevance_table.columns = regressors
return relevance_table
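# A minimal usage sketch (illustrative only): the frames are assumed to follow
# the wide format expected above, i.e. ("segment", "feature") MultiIndex columns
# with a "target" feature in df and regressor features in df_exog.
if __name__ == "__main__":
    index = pd.date_range("2021-01-01", periods=30, freq="D")
    df = pd.DataFrame(
        np.random.rand(30, 1),
        index=index,
        columns=pd.MultiIndex.from_product([["segment_1"], ["target"]], names=["segment", "feature"]),
    )
    df_exog = pd.DataFrame(
        np.random.rand(30, 2),
        index=index,
        columns=pd.MultiIndex.from_product(
            [["segment_1"], ["regressor_1", "regressor_2"]], names=["segment", "feature"]
        ),
    )
    print(get_model_relevance_table(df, df_exog, model=DecisionTreeRegressor()))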
| 34.75
| 114
| 0.713601
|
e51510c21edf911aa7a838721710cad0f1028539
| 24,139
|
py
|
Python
|
sense/surface/i2em.py
|
PMarzahn/sense
|
332852bf781620a5cc714efb2d86ffaff5275955
|
[
"Apache-2.0"
] | 3
|
2018-10-08T13:40:52.000Z
|
2021-03-07T07:59:40.000Z
|
sense/surface/i2em.py
|
PMarzahn/sense
|
332852bf781620a5cc714efb2d86ffaff5275955
|
[
"Apache-2.0"
] | 2
|
2017-07-31T12:51:02.000Z
|
2017-08-10T22:09:56.000Z
|
sense/surface/i2em.py
|
PMarzahn/sense
|
332852bf781620a5cc714efb2d86ffaff5275955
|
[
"Apache-2.0"
] | 6
|
2018-06-29T10:10:36.000Z
|
2022-03-06T20:24:54.000Z
|
"""
implements the I2EM model (see Ulaby (2014), Chapter 10)
backscattering model for single scale random surfaces
The code originates from ideas obtained from the supplement
of Ulaby et al (2014)
"""
from . scatter import SurfaceScatter
import numpy as np
from .. util import f2lam
from .. core import Reflectivity
import math
from scipy.integrate import dblquad
from past.builtins import xrange
from numba import jit
import pdb
@jit(cache=True,nopython=True)
def _calc_roughness_spectra_matrix(nx, ny, kl2, nspec, s, acf_type_id):
"""
calculate roughness spectra
needs to return a matrix for further use
in crosspol calculations
"""
if acf_type_id == 1: # gauss
wm = _calc_wm_matrix_gauss(nx, ny, nspec, kl2, s)
wn = _calc_wn_matrix_gauss(nx, ny, nspec, kl2, s)
elif acf_type_id == 2: # exp
wm = _calc_wm_matrix_exp(nx, ny, nspec, kl2, s)
wn = _calc_wn_matrix_exp(nx, ny, nspec, kl2, s)
else:
assert False
return wn, wm
class I2EM(SurfaceScatter):
def __init__(self, f, eps, sig, l, theta, **kwargs):
"""
BACKSCATTERING MODEL
Parameters
----------
f : float
frequency [GHz]
eps : complex
relative dielectric permitivity
sig : float
vertical surface roughness [m]
l : float
autocorrelation length [m]
theta : float
incidence angle [rad]
acf_type : str
type of autocorrelation function
'gauss' : gaussian type ACF
auto : bool
specify if number of spectral components should be automatically
determined for cross-pol calculations
if False, then nspec=15
xpol : bool
perform cross-pol calculations if possible
might be slow in case of I2EM usage
"""
self.freq = f
lam = f2lam(self.freq)
k = 2.*np.pi/lam
self.k = k
self.sig = sig
self.ks = self.k*self.sig
self.l = l
self._kl2 = (self.k*self.l)**2.
self.acf_type = kwargs.get('acf_type', 'gauss')
# pdb.set_trace()
super(I2EM, self).__init__(eps, k*sig, theta, kl=k*l)
# assume backscatter geometry
self.phi = 0.
self.thetas = self.theta*1.
self.phis = np.deg2rad(180.)
self.mode = 'backscatter'
self.auto = kwargs.get('auto', True)
self.xpol = kwargs.get('xpol', True)
# do initializations for backscatter calculations
self._init_hlp()
self.init_model()
# pdb.set_trace()
# calculate the actual backscattering coefficients
self._calc_sigma_backscatter()
def init_model(self):
"""
initialize model for calculations
"""
        self.niter = self._estimate_iterations()
# determine number of spectral components for cross-pol calculations
if self.auto:
            # same as _estimate_iterations, but with a slightly different configuration
nspec = 0
error = 1.E8
while error > 1.0E-8:
nspec += 1
error = (self._ks2*(2.*self._cs)**2.)**nspec / math.factorial(nspec)
self.n_spec = nspec
else:
self.n_spec = 15
I = np.arange(self.n_spec)
# self._fac = map(math.factorial, I+1) # factorial(n)
self._fac = [math.factorial(x) for x in I+1]
    def _estimate_iterations(self):
        """
        estimate the number of necessary iterations for
the integral calculations
"""
err = 1.E8
Ts = 1
while err > 1.0e-8:
Ts += 1
err = ((self._ks2 *(np.mean(self._cs) + np.mean(self._css))**2 )**Ts) / math.factorial(Ts)
# err = ((self._ks2 *(self._cs + self._css)**2 )**Ts) / math.factorial(Ts)
# pdb.set_trace()
return Ts
def _init_hlp(self):
""" initiate help variables """
self._ks2 = self.ks**2.
self._cs = np.cos(self.theta)
self._cs2 = self._cs**2.
self._s = np.sin(self.theta)
self._sf = np.sin(self.phi)
self._cf = np.cos(self.phi)
self._ss = np.sin(self.thetas)
self._css = np.cos(self.thetas)
self._cfs = np.cos(self.phis)
self._sfs = np.sin(self.phis)
self._s2 = self._s**2.
self._kx = self.k*self._s*self._cf
self._ky = self.k*self._s*self._sf
self._kz = self.k*self._cs
self._ksx = self.k * self._ss *self._cfs
self._ksy = self.k * self._ss *self._sfs
self._ksz = self.k * self._css
def _calc_sigma_backscatter(self):
# assert isinstance(self.theta, float), 'Currently array processing not supported yet!'
# calculate backscattering coefficients
# pdb.set_trace()
if type(self.eps) is np.ndarray:
self.vv = []
self.hh = []
theta_origin = self.theta
thetas_origin = self.thetas
eps_origin = self.eps
if self.xpol:
self.hv = []
for i in range(len(self.eps)):
self.i = i
if type(theta_origin) is np.ndarray:
self.theta = theta_origin[i]
self.thetas = thetas_origin[i]
self._init_hlp()
self.init_model()
self.eps=eps_origin[i]
# pdb.set_trace()
vv, hh = self._i2em_bistatic()
self.vv.append(vv)
self.hh.append(hh)
if self.xpol:
hv = self._i2em_cross()
self.hv.append(hv)
else:
self._init_hlp()
self.init_model()
self.vv, self.hh = self._i2em_bistatic()
if self.xpol:
self.hv = self._i2em_cross()
def _i2em_bistatic(self):
"""
        calculate sigma for the co-pol case
        backscatter geometry
module 10.1
"""
# calculate the integral
idx = np.arange(self.niter)+1
        # self.fac = map(math.factorial, idx) # factorial for all N iterations; this is stored as it is needed multiple times
self.fac = [math.factorial(x) for x in idx]
self.wn, self.rss = self.calc_roughness_spectrum(acf_type=self.acf_type)
Ivv, Ihh = self._calc_Ipp()
Ivv_abs = np.abs(Ivv)
Ihh_abs = np.abs(Ihh)
# calculate shadowing effects
ShdwS = self._calc_shadowing()
a0 = self.wn / self.fac * (self.sig**(2.*idx))
# final backscatter calculation
hlp = ShdwS*0.5*self.k**2*np.exp(-self.sig**2*(self._kz**2.+self._ksz**2.))
sigvv = np.sum(a0 * Ivv_abs**2.) * hlp
sighh = np.sum(a0 * Ihh_abs**2.) * hlp
return sigvv, sighh
def _i2em_cross(self):
rt = np.sqrt(self.eps - self._s2)
rv = (self.eps*self._cs -rt) / (self.eps*self._cs + rt)
rh = (self._cs - rt)/(self._cs + rt)
rvh = (rv-rh)/2.
Shdw = self._calc_shadow_cross()
svh = self._integrate_xpol(rvh)
print(svh*Shdw)
return svh*Shdw
def _integrate_xpol(self, rvh):
"""
integrate for X-pol
dblquad(@(r,phi)xpol_integralfunc(r, phi, sp,xx, ks2, cs,s, kl2, L, er, rss, rvh, n_spec), 0.1, 1, 0, pi)
        the original matlab routine integrates
xpol_integral(r,phi)
rmin=0.1, rmax=1.
phimin=0.,phimax=1.
when using python, x and y are reversed, however
this does not matter unless the bounds are specified in the right order
"""
ans, err = dblquad(self._xpol_integralfunc, 0.1, 1., lambda x : 0., lambda x : 1., args=[[rvh,self.eps, self._ks2, self._cs2, self.rss, self._cs, self._fac, self._kl2, self._s, self._get_acf_id()]])
return ans
def _get_acf_id(self):
if self.acf_type == 'gauss':
return 1
if self.acf_type == 'exp15':
return 2
assert False, 'Unknown ACF type'
@jit(cache=True)
def _xpol_integralfunc(self, r, phi, *args):
"""
while the original matlab function
returns a vector, this function
returns a scalar, as the dblquad function
in python requires so
"""
rvh = args[0][0]
eps = args[0][1]
ks2 = args[0][2]
cs2 = args[0][3]
rss = args[0][4]
cs = args[0][5]
fac = args[0][6]
nspec = len(fac)
kl2 = args[0][7]
s = args[0][8]
acf_type_id = args[0][9]
r2 = r**2.
sf = np.sin(phi)
csf = np.cos(phi)
rx = r * csf
ry = r * sf
rp = 1. + rvh
rm = 1. - rvh
q = np.sqrt(1.0001 - r2)
qt = np.sqrt(eps - r2)
a = rp / q
b = rm / q
c = rp / qt
d = rm / qt
# calculate cross-pol coefficient
B3 = rx * ry / cs
fvh1 = (b-c)*(1.- 3.*rvh) - (b - c/eps) * rp
fvh2 = (a-d)*(1.+ 3.*rvh) - (a - d*eps) * rm
Fvh = ( np.abs( (fvh1 + fvh2) *B3))**2.
# calculate x-pol shadowing
au = q /r /1.414 /rss
fsh = (0.2821/au) *np.exp(-au**2.) -0.5 *(1.- math.erf(au))
sha = 1./(1. + fsh)
# calculate expressions for the surface spectra
wn, wm = _calc_roughness_spectra_matrix(rx, ry, kl2, nspec, s, acf_type_id)
vhmnsum = 0.
for i in xrange(nspec):
for j in xrange(nspec):
vhmnsum += wn[i] * wm[j] * (ks2*cs2)**((i+1)+(j+1))/fac[i]/fac[j]
# compute VH scattering coefficient
acc = np.exp(-2.* ks2 *cs2) /(16. * np.pi)
VH = 4. * acc * Fvh * vhmnsum * r
y = VH * sha
# print('y =',y)
# print('r =',r)
# print('phi =',phi)
# print('sp = 1, exp15')
# print('xx = ??? 1')
# print('ks2 =',ks2)
# print('cs =',cs)
# print('s =',s)
# print('kl2 =',kl2)
# print('L =', self.l)
# print('er =',eps)
# print('rss =',rss)
# print('rvh =',rvh)
# print('n_spec =',nspec)
# print(y)
# pdb.set_trace()
return y
def _calc_shadow_cross(self):
""""
calculating shadow consideration in single scat (Smith, 1967)
"""
ct = np.cos(self.theta)/np.sin(self.theta)
farg = ct /np.sqrt(2.) /self.rss
gamma = 0.5 *(np.exp(-farg**2.) / 1.772 / farg - math.erfc(farg))
return 1. / (1. + gamma)
def _calc_shadowing(self):
if self.mode == 'backscatter': #todo comparison with binary variable instead of string to be faster ??
ct = np.cos(self.theta)/np.sin(self.theta)
cts = np.cos(self.thetas)/np.sin(self.thetas)
rslp = self.rss
ctorslp = ct / math.sqrt(2.) /rslp
ctsorslp = cts / np.sqrt(2.) /rslp
shadf = 0.5 *(np.exp(-ctorslp**2.) / np.sqrt(np.pi)/ctorslp - math.erfc(ctorslp))
shadfs = 0.5 *(np.exp(-ctsorslp**2.) / np.sqrt(np.pi)/ctsorslp - math.erfc(ctsorslp))
ShdwS = 1./(1. + shadf + shadfs)
else:
ShdwS = 1.
return ShdwS
def calc_roughness_spectrum(self, acf_type=None):
"""
calculate roughness spectrum
Return wn as an array
"""
assert 'Validate with code again'
if acf_type == 'gauss':
# gaussian autocorrelation function
S = GaussianSpectrum(niter=self.niter, l=self.l, theta=self.theta, thetas=self.thetas, phi=self.phi,phis=self.phis, freq=self.freq, sig=self.sig)
elif acf_type == 'exp15':
# 1.5 power exponential function
S = ExponentialSpectrum(niter=self.niter, l=self.l, theta=self.theta, thetas=self.thetas, phi=self.phi,phis=self.phis, freq=self.freq, sig=self.sig)
else:
assert False, 'Invalid surface roughness spectrum: ' + str(acf_type)
return S.wn() # returns wn as an array with length NITER
def _calc_Ipp(self):
n = np.arange(self.niter)+1.
qi = self.k*self._cs
qs = self.k*self._css
h1= np.exp(-self.sig**2. * self._kz * self._ksz)*(self._kz + self._ksz)**n
# Calculate the Fppup(dn) i(s) coefficient
R = Reflectivity(self.eps, self.theta)
Rvi = R.rho_v
Rhi = R.rho_h
Fvvupi, Fhhupi = self.Fppupdn( 1,1,Rvi,Rhi)
Fvvups, Fhhups = self.Fppupdn( 1,2,Rvi,Rhi)
Fvvdni, Fhhdni = self.Fppupdn(-1,1,Rvi,Rhi)
Fvvdns, Fhhdns = self.Fppupdn(-1,2,Rvi,Rhi)
# fpp calculations
fvv, fhh = self.calc_fpp(Rvi, Rhi)
# pdb.set_trace()
# Ipp
Ivv = fvv*h1
Ivv += 0.25*(Fvvupi *(self._ksz-qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. - qi*(self._ksz-self._kz))))
Ivv += 0.25*(Fvvdni *(self._ksz+qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. + qi*(self._ksz-self._kz))))
Ivv += 0.25*(Fvvups *(self._kz +qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. - qs*(self._ksz-self._kz))))
Ivv += 0.25*(Fvvdns *(self._kz -qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. + qs*(self._ksz-self._kz))))
Ihh = fhh*h1
Ihh += 0.25*(Fhhupi *(self._ksz-qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. - qi*(self._ksz-self._kz))))
Ihh += 0.25*(Fhhdni *(self._ksz+qi)**(n-1) *np.exp(-self.sig**2. *(qi**2. + qi*(self._ksz-self._kz))))
Ihh += 0.25*(Fhhups *(self._kz +qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. - qs*(self._ksz-self._kz))))
Ihh += 0.25*(Fhhdns *(self._kz -qs)**(n-1) *np.exp(-self.sig**2. *(qs**2. + qs*(self._ksz-self._kz))))
return Ivv, Ihh
def calc_fpp(self, Rvi, Rhi):
Rvt, Rht = self.calc_reflection_coefficients(Rvi, Rhi)
fvv = 2. * Rvt *(self._s * self._ss - (1. + self._cs * self._css) * self._cfs)/(self._cs + self._css)
fhh = -2. * Rht *(self._s * self._ss - (1. + self._cs * self._css) * self._cfs)/(self._cs + self._css)
return fvv, fhh
def Fppupdn(self, u_d, i_s, Rvi, Rhi):
assert i_s in [1,2]
assert u_d in [-1,1]
# set coefficients
if i_s == 1:
Gqi = u_d * self._kz
Gqti = u_d * self.k *np.sqrt(self.eps-self._s**2.);
qi = u_d * self._kz
c11 = self.k * self._cfs *(self._ksz - qi)
c21 = self._cs *(self._cfs *(self.k**2 *self._s*self._cf*(self._ss *self._cfs - self._s * self._cf) + Gqi*(self.k * self._css - qi))+ self.k**2. *self._cf * self._s *self._ss *self._sfs**2.)
c31 = self.k*self._s*(self._s*self._cf*self._cfs*(self.k*self._css-qi) - Gqi*(self._cfs*(self._ss*self._cfs -self._s*self._cf)+ self._ss *self._sfs**2.))
c41 = self.k *self._cs*(self._cfs*self._css*(self.k*self._css - qi) + self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))
c51 = Gqi*(self._cfs *self._css*(qi-self.k*self._css) - self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))
c12 = self.k * self._cfs *(self._ksz - qi)
c22 = self._cs *(self._cfs *(self.k**2. *self._s*self._cf*(self._ss *self._cfs - self._s * self._cf) + Gqti*(self.k * self._css - qi)) + self.k**2. *self._cf * self._s *self._ss *self._sfs**2.)
c32 = self.k*self._s*(self._s*self._cf*self._cfs*(self.k*self._css-qi) - Gqti*(self._cfs*(self._ss*self._cfs -self._s*self._cf)- self._ss *self._sfs**2.))
c42 = self.k *self._cs*(self._cfs*self._css*(self.k*self._css - qi) + self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))
c52 = Gqti*(self._cfs *self._css*(qi-self.k*self._css) - self.k *self._ss*(self._ss*self._cfs-self._s*self._cf))
else:
Gqs = u_d * self._ksz
Gqts = u_d *self.k *np.sqrt(self.eps-self._ss**2.)
qs = u_d * self._ksz
c11 = self.k * self._cfs *(self._kz + qs)
c21 = Gqs *(self._cfs*(self._cs*(self.k*self._cs+qs)-self.k*self._s*(self._ss *self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)
c31 = self.k *self._ss*(self.k*self._cs*(self._ss*self._cfs - self._s*self._cf)+ self._s*(self._kz+qs))
c41 = self.k*self._css*(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss*self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)
c51 = -self._css *(self.k**2. *self._ss *(self._ss*self._cfs -self._s*self._cf)+ Gqs*self._cfs*(self._kz+qs))
c12 = self.k * self._cfs *(self._kz + qs)
c22 = Gqts *(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss *self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)
c32 = self.k *self._ss*(self.k*self._cs*(self._ss*self._cfs - self._s*self._cf)+ self._s*(self._kz+qs))
c42 = self.k*self._css*(self._cfs*(self._cs*(self._kz+qs)-self.k*self._s*(self._ss*self._cfs-self._s*self._cf))-self.k*self._s*self._ss*self._sfs**2.)
c52 = -self._css *(self.k**2. *self._ss *(self._ss*self._cfs -self._s*self._cf)+ Gqts*self._cfs*(self._kz+qs))
# now do final calculations ...
q = self._kz
qt = self.k * np.sqrt(self.eps - self._s**2.)
vv = (1.+Rvi) *( -(1-Rvi) *c11 /q + (1.+Rvi) *c12 / qt)
vv += (1.-Rvi) *( (1-Rvi) *c21 /q - (1.+Rvi) *c22 / qt)
vv += (1.+Rvi) *( (1-Rvi) *c31 /q - (1.+Rvi) *c32 /self.eps /qt)
vv += (1.-Rvi) *( (1+Rvi) *c41 /q - self.eps*(1. - Rvi) *c42 / qt)
vv += (1.+Rvi) *( (1+Rvi) *c51 /q - (1.-Rvi) *c52 / qt)
hh = (1.+Rhi) *( (1.-Rhi) * c11 /q - self.eps *(1.+Rhi) *c12 / qt)
hh -= (1.-Rhi) *( (1.-Rhi) * c21 /q - (1.+Rhi) *c22 / qt)
hh -= (1.+Rhi) *( (1.-Rhi) * c31 /q - (1.+Rhi) *c32 / qt)
hh -= (1.-Rhi) *( (1.+Rhi) * c41 /q - (1.-Rhi) *c42 / qt)
hh -= (1.+Rhi) *( (1.+Rhi) * c51 /q - (1.-Rhi) *c52 / qt)
return vv, hh
def _calc_r_transition(self):
""" compute R transition """
Rv0 = (np.sqrt(self.eps)-1.) / (np.sqrt(self.eps) + 1.)
Rh0 = -Rv0
Ft = 8. * Rv0**2. + self._ss * (self._cs + np.sqrt(self.eps - self._s2))/(self._cs * np.sqrt(self.eps - self._s2))
idx = np.arange(self.niter)+1
a0 = (self.ks*self._cs)**(2.*idx)/self.fac
a1 = np.sum(a0*self.wn)
b1 = np.sum(a0 * (np.abs(Ft/2. + 2.**(idx+1) *Rv0/self._cs *np.exp(-(self.ks*self._cs)**2.)))**2. * self.wn)
St = 0.25 * np.abs(Ft)**2. * a1/b1
St0 = 1. / np.abs(1.+8.*Rv0/(self._cs * Ft))**2.
Tf = 1. - St / St0
return Rv0, Rh0, Tf
def _calculate_average_reflection_coefficients(self):
assert False, 'Not implemented yet!'
#%----------- compute average reflection coefficients ------------
#%-- these coefficients account for slope effects, especially near the
#%brewster angle. They are not important if the slope is small.
#sigx = 1.1 .*sig/L;
#sigy = sigx;
#xxx = 3*sigx;
#Rav = dblquad(@(Zx, Zy)Rav_integration(Zx, Zy, cs,s,er,s2,sigx, sigy),-xxx,xxx, -xxx, xxx );
#Rah = dblquad(@(Zx, Zy)Rah_integration(Zx, Zy, cs,s,er,s2,sigx, sigy),-xxx,xxx, -xxx, xxx );
#Rav = Rav ./(2*pi * sigx * sigy);
#Rah = Rah ./(2*pi * sigx * sigy);
def calc_reflection_coefficients(self, Rvi, Rhi):
Rv0, Rh0, Tf = self._calc_r_transition()
# select proper reflection coefficients
if self.mode == 'backscatter': # todo this comparison might slow down the program as it is called very often; perhaps modify
Rvt = Rvi + (Rv0 - Rvi) * Tf
Rht = Rhi + (Rh0 - Rhi) * Tf
elif self.mode == 'bistatic':
Rav = Rah = self._calculate_average_reflection_coefficients()
Rvt = Rav
Rht = Rah
pass
else:
assert False
return Rvt, Rht
class Roughness(object):
"""
calculate roughness spectrum
"""
def __init__(self, **kwargs):
self.niter = kwargs.get('niter', None)
self.l = kwargs.get('l', None)
self.sig = kwargs.get('sig', None)
self.theta = kwargs.get('theta', None)
self.thetas = kwargs.get('thetas', None)
self.phi = kwargs.get('phi', None)
self.phis = kwargs.get('phis', None)
self.freq = kwargs.get('freq', None)
self.i = kwargs.get('i', None)
self._check()
self.n = np.arange(self.niter)+1
self._init()
def wn(self):
assert False, 'Should be implemented in child class!'
def _init(self):
ss = np.sin(self.thetas)
self._s = np.sin(self.theta)
sf = np.sin(self.phi)
sfs = np.sin(self.phis)
cfs = np.cos(self.phis)
cf = np.cos(self.phi)
lam = f2lam(self.freq)
self.k = 2.*np.pi / lam
self._kl = self.k*self.l
self._kl2 = self._kl**2.
# todo whereis this defined ???
self.wvnb = self.k * np.sqrt( (ss *cfs - self._s *cf)**2. + (ss * sfs - self._s * sf)**2. )
def _check(self):
assert self.niter is not None, 'ERROR: niter was not set!'
assert self.l is not None
assert self.sig is not None
assert self.theta is not None
assert self.thetas is not None
assert self.phi is not None
assert self.phis is not None
assert self.freq is not None
@jit(cache=False, nopython=True)
def _calc_wn_matrix_gauss(rx, ry, nspec, kl2, s):
wn = np.zeros(nspec)
for i in xrange(nspec):
wn[i] = 0.5 *kl2/(i+1.) * np.exp(-kl2*((rx-s)**2. + ry**2.)/(4.*(i+1)))
return wn
@jit(cache=False, nopython=True)
def _calc_wm_matrix_gauss(rx, ry, nspec, kl2, s):
wm = np.zeros(nspec)
for i in xrange(nspec):
wm[i] = 0.5 *kl2/(i+1.) * np.exp(-kl2*((rx+s)**2. + ry**2.)/(4.*(i+1)))
return wm
class GaussianSpectrum(Roughness):
def __init__(self, **kwargs):
super(GaussianSpectrum, self).__init__(**kwargs)
def wn(self):
# Fung (1994), Eq. 2B.4; except for wvnb
n = self.n
# xx, yy = np.meshgrid(n, self.wvnb)
# wn = (self.l**2.)/(2.*n) * np.exp(-(yy*self.l)**2. / (4.*xx))
# pdb.set_trace()
wn = (self.l**2.)/(2.*n) * np.exp(-(self.wvnb*self.l)**2. / (4.*n))
rss = np.sqrt(2.)*self.sig/self.l
return wn, rss
def calc_wn_matrix(self, rx, ry, nspec):
return _calc_wn_matrix_gauss(rx, ry, nspec, self._kl2, self._s)
def calc_wm_matrix(self, rx, ry, nspec):
return _calc_wm_matrix_gauss(rx, ry, nspec, self._kl2, self._s)
@jit(cache=True,nopython=True)
def _calc_wn_matrix_exp(rx, ry, nspec, kl2, s):
wn = np.zeros(nspec)
for i in xrange(nspec):
wn[i] = (i+1) * kl2 / ((i+1)**2.+kl2*((rx-s)**2. + ry**2.))**1.5
return wn
@jit(cache=True,nopython=True)
def _calc_wm_matrix_exp(rx, ry, nspec, kl2, s):
wm = np.zeros(nspec)
for i in xrange(nspec):
wm[i] = (i+1) * kl2 / ((i+1)**2.+kl2*((rx+s)**2. + ry**2.))**1.5
return wm
class ExponentialSpectrum(Roughness):
"""
exponential spectrum
"""
def __init__(self, **kwargs):
super(ExponentialSpectrum, self).__init__(**kwargs)
def wn(self):
# Fung (1994): eq. 2.B.14
n = self.n
wn= self.l**2. / n**2. * (1.+(self.wvnb*self.l/n)**2.)**(-1.5)
rss = self.sig/self.l
return wn, rss
def calc_wn_matrix(self, rx, ry, nspec):
        return _calc_wn_matrix_exp(rx, ry, nspec, self._kl2, self._s)
    def calc_wm_matrix(self, rx, ry, nspec):
        return _calc_wm_matrix_exp(rx, ry, nspec, self._kl2, self._s)
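# A minimal usage sketch (illustrative parameter values, not taken from the
# sources above): C-band, a moderately rough soil surface, 40 deg incidence.
if __name__ == '__main__':
    model = I2EM(f=5.3, eps=complex(15.0, 3.0), sig=0.01, l=0.10,
                 theta=np.deg2rad(40.), acf_type='exp15', xpol=False)
    print('sigma0 (linear): vv =', model.vv, 'hh =', model.hh)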
| 34.632712
| 206
| 0.540619
|
5a9deca18ef915df2060702a02b3e295638b22e2
| 8,620
|
py
|
Python
|
app/dashboard/routes.py
|
wilfredinni/pythoncheatsheet.org
|
b3c8407cf4468558dcd6b430dac58b12719d91d5
|
[
"MIT"
] | 5
|
2019-03-09T07:24:34.000Z
|
2021-08-24T14:53:21.000Z
|
app/dashboard/routes.py
|
wilfredinni/pythoncheatsheet.org
|
b3c8407cf4468558dcd6b430dac58b12719d91d5
|
[
"MIT"
] | 2
|
2018-05-18T01:07:49.000Z
|
2018-05-18T01:26:21.000Z
|
app/dashboard/routes.py
|
wilfredinni/pysheetBlog
|
b3c8407cf4468558dcd6b430dac58b12719d91d5
|
[
"MIT"
] | 3
|
2018-06-30T14:56:27.000Z
|
2018-09-03T11:17:34.000Z
|
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_required, current_user
from app.dashboard import bp
from app.dashboard.forms import RegistrationForm, EditProfileForm, PostForm, PinMsgForm
from app.models import User, Post, Tag, PinedMsg
from app import db
import re
from datetime import datetime
@bp.before_request
def before_request():
""" Save the las activity of the user. """
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@bp.route("/overview")
@login_required
def overview():
""" Dashboard Overview. """
# For avatar and user name in the dashboard
user = User.query.filter_by(username=current_user.username).first()
my_posts = Post.query.filter_by(user_id=current_user.id).order_by(
Post.timestamp.desc()
)
    # can't use my_posts to check whether there are any articles,
    # so use this query instead:
posts = Post.query.filter_by(user_id=current_user.id).first()
return render_template(
"dashboard/overview.html",
title="Dashboard",
my_posts=my_posts,
overview_active="is-active",
user=user,
post_list=posts,
)
@bp.route("/manage_articles")
@login_required
def manage_articles():
""" Dashboard Article Manager: Edit, Delete and Create. """
# All posts ordered by date
posts_list = Post.query.first()
posts = Post.query.filter_by().order_by(Post.timestamp.desc())
return render_template(
"dashboard/overview.html",
title="Manage Articles",
my_posts=posts,
articles_active="is-active",
post_list=posts_list,
)
@bp.route("/add_user", methods=["GET", "POST"])
@login_required
def add_user():
""" Dashboard New User. """
form = RegistrationForm()
if form.validate_on_submit():
# if administrator check box is selected, create an administrator
if form.administrator.data:
user = User(
username=form.username.data,
email=form.email.data,
is_administrator=True,
)
else:
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash("Account created for {}.".format(form.username.data))
return redirect(url_for("dashboard.manage_users"))
return render_template(
"dashboard/add_user.html", title="Add User", form=form, add_active="is-active"
)
@bp.route("/manage_users")
@login_required
def manage_users():
""" Dashboard User Manager: Edit, Delete and Create. """
all_users = User.query.all()
return render_template(
"dashboard/manage_users.html",
title="Manage Users",
all_users=all_users,
users_active="is-active",
)
@bp.route("/edit_profile/<username>", methods=["GET", "POST"])
@login_required
def edit_profile(username):
""" Dashboard Profile Manager: Edit, Delete and Create. """
if current_user.username != username:
flash("You can't edit other users profiles.")
return redirect(url_for("dashboard.overview"))
user = User.query.filter_by(username=username).first_or_404()
form = EditProfileForm(user.username)
if form.validate_on_submit():
user.username = form.username.data
user.about_me = form.about_me.data
user.email = form.email.data
user.screen_name = form.screen_name.data
user.website = form.website.data
user.github = form.github.data
user.twitter = form.twitter.data
db.session.commit()
flash("{}, your changes have been saved.".format(form.username.data))
return redirect(url_for("dashboard.overview"))
elif request.method == "GET":
form.username.data = user.username
form.about_me.data = user.about_me
form.email.data = user.email
form.screen_name.data = user.screen_name
form.website.data = user.website
form.github.data = user.github
form.twitter.data = user.twitter
return render_template(
"dashboard/edit_profile.html",
form=form,
user=user,
title="Edit Profile",
edit_active="is-active",
)
@bp.route("/new_post", methods=["GET", "POST"])
@login_required
def new_post():
""" Dashboard: Create a New Article. Same as Edit Article. """
form = PostForm()
if form.validate_on_submit():
post = Post(
markdown_url=form.markdown_url.data,
author=current_user,
title=form.title.data,
url=form.url.data,
img_url=form.img_url.data,
summary=form.summary.data,
)
# split the tags by the comas
post_tags = form.tags.data.replace(" ", "").split(",")
# check if the tag exists to append it to the post. Else, create it
Tag.add_or_create_tags(post_tags, post)
# add tag and post
db.session.add(post)
# commit to the db
db.session.commit()
flash('"{}" is now live!'.format(form.title.data))
return redirect(url_for("dashboard.overview"))
return render_template(
"dashboard/new_post.html", title="New Post", form=form, post_active="is-active"
)
@bp.route("/edit_post/<url>", methods=["GET", "POST"])
@login_required
def edit_post(url):
post = Post.query.filter_by(url=url).first_or_404()
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.url = form.url.data
post.markdown_url = form.markdown_url.data
post.summary = form.summary.data
post.img_url = form.img_url.data
        # split the tags by commas
post_tags = form.tags.data.replace(" ", "").split(",")
# check for deleted tags
Tag.check_deleted_tags(post, post_tags)
# check for existing tags
Tag.update_tags(post_tags, post)
db.session.commit()
flash('Changes on "{}" have been saved.'.format(form.title.data))
return redirect(url_for("dashboard.overview"))
elif request.method == "GET":
form.title.data = post.title
form.url.data = post.url
form.markdown_url.data = post.markdown_url
form.summary.data = post.summary
form.img_url.data = post.img_url
# use regex to format the tags
tag_regex = re.compile(r"\[(.*)\]")
mo = tag_regex.search(str(post.tag.all()))
form.tags.data = mo.group(1)
return render_template(
"dashboard/new_post.html",
post=post,
form=form,
title="Edit Post",
overview_active="is-active",
)
@bp.route("/site_configuration", methods=["GET", "POST"])
@login_required
def site_configuration():
""" For now, just edit the Pinned Notification on the Index. """
form = PinMsgForm()
msg = PinedMsg.query.filter_by(id=1).first()
if form.validate_on_submit():
if msg:
msg.home_msg = form.home_msg.data
msg.home_enable = form.home_enable.data
else:
msg = PinedMsg(
home_msg=form.home_msg.data, home_enable=form.home_enable.data
)
db.session.add(msg)
db.session.commit()
flash("The Pinned message has ben Updated.")
return redirect(url_for("dashboard.site_configuration"))
# check if there is a msg created and get it
elif request.method == "GET":
if msg:
form.home_msg.data = msg.home_msg
if msg.home_enable:
enabled = True
else:
enabled = False
else:
enabled = "None"
return render_template(
"dashboard/site_configuration.html",
title="Site Configuration",
form=form,
config_active="is-active",
enabled=enabled,
)
@bp.route("/delete_user/<id>", methods=["POST"])
@login_required
def delete_user(id):
user = User.query.filter_by(id=id).first_or_404()
db.session.delete(user)
db.session.commit()
flash("User {} has been Deleted".format(user.username))
return redirect(url_for("dashboard.manage_users"))
@bp.route("/delete_post/<url>", methods=["GET", "POST"])
@login_required
def delete_post(url):
post = Post.query.filter_by(url=url).first_or_404()
db.session.delete(post)
db.session.commit()
flash('"{}" has been Deleted'.format(post.title))
return redirect(url_for("dashboard.overview"))
| 29.419795
| 87
| 0.631903
|
ba990831837433b4ea128a1b91c17e4fc1f128b0
| 3,525
|
py
|
Python
|
src/service/gcal/__init__.py
|
chenhao-ye/snow
|
df7125af8a17a77c55f0c62ef2f8013c32859e89
|
[
"MIT"
] | 4
|
2022-03-13T18:25:23.000Z
|
2022-03-19T14:53:24.000Z
|
src/service/gcal/__init__.py
|
chenhao-ye/snow
|
df7125af8a17a77c55f0c62ef2f8013c32859e89
|
[
"MIT"
] | 1
|
2022-03-15T15:01:57.000Z
|
2022-03-15T15:01:57.000Z
|
src/service/gcal/__init__.py
|
chenhao-ye/snow
|
df7125af8a17a77c55f0c62ef2f8013c32859e89
|
[
"MIT"
] | null | null | null |
import os.path
import logging
from typing import List, Optional
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from .calendar import Calendar
class GoogleCalendarService:
# For now, we only need the readonly permission of Google Calendar.
# If modifying these scopes, delete the file token file.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
FIELDS = ["cal_name", "title", "time", "location", "description"]
def __init__(self) -> None:
# `creds` is a confusing name here. It actually means user's access
# token, not the developer's credentials
self.creds = None
self.service = None
self.is_auth = False
self.cal_map = None
def auth(self, creds_path: str, token_path: str) -> None:
"""Perform authentications.
Two files are involved:
- credentials file: to prove to Google that the current application is SNOW.
- token file: to ask the user to grant the access of the calendar data.
"""
if self.is_auth:
return
if os.path.exists(token_path):
self.creds = Credentials.from_authorized_user_file(
token_path, self.SCOPES)
# If there are no (valid) credentials available, let the user login.
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
logging.info("No valid token found. Will try to refresh.")
try:
self.creds.refresh(Request())
except RefreshError:
logging.info(
"Fail to refresh token. User must retry login.")
else:
logging.info("No valid token found. Please retry login.")
if not self.creds or not self.creds.valid:
flow = InstalledAppFlow.from_client_secrets_file(
creds_path, self.SCOPES)
self.creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_path, 'w') as token:
token.write(self.creds.to_json())
self.service = build('calendar', 'v3', credentials=self.creds)
self.is_auth = True
def fetch_calendars(self) -> None:
assert self.is_auth
if self.cal_map is not None:
return
self.cal_map = {}
# calendar list is broken into multiple pages
# use page_token to iterate through the pages
page_token = None
while True:
cal_list_page = self.service.calendarList().list(
pageToken=page_token).execute()
for cal_data in cal_list_page['items']:
cal = Calendar(self.service, cal_data)
self.cal_map[cal.name] = cal
logging.info(f"Get calendar: {cal.name}")
page_token = cal_list_page.get('nextPageToken')
if not page_token:
break
def list_calendars_name(self) -> List[str]:
"""Return all calendars' name of this user."""
self.fetch_calendars()
return self.cal_map.keys()
def get_calendar(self, cal_name: str) -> Optional[Calendar]:
self.fetch_calendars()
return self.cal_map.get(cal_name)
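# A minimal usage sketch (illustrative only; the credential and token file
# names below are placeholders, not part of this module):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    service = GoogleCalendarService()
    service.auth(creds_path="credentials.json", token_path="token.json")
    for calendar_name in service.list_calendars_name():
        print(calendar_name)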
| 38.736264
| 84
| 0.620709
|
5ba8354b509fd47c03e3f43280968e75003a0c80
| 6,751
|
py
|
Python
|
bokeh/core/property/tests/test_bases.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/core/property/tests/test_bases.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/core/property/tests/test_bases.py
|
isaacmg/bokeh
|
1025d1177b8e636c36f6160da4bd2fbf8ca51962
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from mock import patch
import numpy as np
# Bokeh imports
from bokeh.core.has_props import HasProps
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.core.property.bases as bcpb
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = ()
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@patch('bokeh.core.property.bases.Property.validate')
def test_is_valid_suppresses_validation_detail(mock_validate):
p = bcpb.Property()
p.is_valid(None)
assert mock_validate.called
assert mock_validate.call_args[0] == (None, False)
def test_property_assert_bools():
hp = HasProps()
p = bcpb.Property()
p.asserts(True, "true")
assert p.prepare_value(hp, "foo", 10) == 10
p.asserts(False, "false")
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "false"
def test_property_assert_functions():
hp = HasProps()
p = bcpb.Property()
p.asserts(lambda obj, value: True, "true")
p.asserts(lambda obj, value: obj is hp, "true")
p.asserts(lambda obj, value: value==10, "true")
assert p.prepare_value(hp, "foo", 10) == 10
p.asserts(lambda obj, value: False, "false")
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "false"
def test_property_assert_msg_funcs():
hp = HasProps()
p = bcpb.Property()
def raise_(ex):
raise ex
p.asserts(False, lambda obj, name, value: raise_(ValueError("bad %s %s %s" % (hp==obj, name, value))))
with pytest.raises(ValueError) as e:
p.prepare_value(hp, "foo", 10)
assert str(e) == "bad True name, 10"
def test_property_matches_basic_types(capsys):
p = bcpb.Property()
for x in [1, 1.2, "a", np.arange(4), None, False, True, {}, []]:
assert p.matches(x, x) is True
assert p.matches(x, "junk") is False
out, err = capsys.readouterr()
assert err == ""
def test_property_matches_compatible_arrays(capsys):
p = bcpb.Property()
a = np.arange(5)
b = np.arange(5)
assert p.matches(a, b) is True
assert p.matches(a, b+1) is False
for x in [1, 1.2, "a", np.arange(4), None, False]:
assert p.matches(a, x) is False
assert p.matches(x, b) is False
out, err = capsys.readouterr()
assert err == ""
def test_property_matches_incompatible_arrays(capsys):
p = bcpb.Property()
a = np.arange(5)
b = np.arange(5).astype(str)
assert p.matches(a, b) is False
out, err = capsys.readouterr()
# no way to suppress FutureWarning in this case
# assert err == ""
def test_property_matches_dicts_with_array_values(capsys):
p = bcpb.Property()
d1 = dict(foo=np.arange(10))
d2 = dict(foo=np.arange(10))
assert p.matches(d1, d1) is True
assert p.matches(d1, d2) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1, dict(foo=list(range(10)))) is True
assert p.matches(d1, dict(foo=np.arange(11))) is False
assert p.matches(d1, dict(bar=np.arange(10))) is False
assert p.matches(d1, dict(bar=10)) is False
out, err = capsys.readouterr()
assert err == ""
def test_property_matches_non_dict_containers_with_array_false(capsys):
p = bcpb.Property()
d1 = [np.arange(10)]
d2 = [np.arange(10)]
assert p.matches(d1, d1) is True # because object identity
assert p.matches(d1, d2) is False
t1 = (np.arange(10),)
t2 = (np.arange(10),)
assert p.matches(t1, t1) is True # because object identity
assert p.matches(t1, t2) is False
out, err = capsys.readouterr()
assert err == ""
def test_property_matches_dicts_with_series_values(capsys, pd):
p = bcpb.Property()
d1 = pd.DataFrame(dict(foo=np.arange(10)))
d2 = pd.DataFrame(dict(foo=np.arange(10)))
assert p.matches(d1.foo, d1.foo) is True
assert p.matches(d1.foo, d2.foo) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1.foo, (range(10))) is True
assert p.matches(d1.foo, np.arange(11)) is False
assert p.matches(d1.foo, np.arange(10)+1) is False
assert p.matches(d1.foo, 10) is False
out, err = capsys.readouterr()
assert err == ""
def test_property_matches_dicts_with_index_values(capsys, pd):
p = bcpb.Property()
d1 = pd.DataFrame(dict(foo=np.arange(10)))
d2 = pd.DataFrame(dict(foo=np.arange(10)))
assert p.matches(d1.index, d1.index) is True
assert p.matches(d1.index, d2.index) is True
# XXX not sure if this is preferable to have match, or not
assert p.matches(d1.index, list(range(10))) is True
assert p.matches(d1.index, np.arange(11)) is False
assert p.matches(d1.index, np.arange(10)+1) is False
assert p.matches(d1.index, 10) is False
out, err = capsys.readouterr()
assert err == ""
def test_validation_on():
assert bcpb.Property._should_validate == True
assert bcpb.validation_on()
bcpb.Property._should_validate = False
assert not bcpb.validation_on()
bcpb.Property._should_validate = True
assert bcpb.validation_on()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpb, ALL)
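# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test module): a hedged, minimal
# illustration of the asserts()/prepare_value() contract exercised by the
# tests above -- a registered (condition, message) pair makes prepare_value()
# raise ValueError with that message whenever the condition fails. The helper
# name below is hypothetical.
def _example_asserts_contract():
    hp = HasProps()
    p = bcpb.Property()
    p.asserts(lambda obj, value: value > 0, "value must be positive")
    assert p.prepare_value(hp, "foo", 10) == 10
    with pytest.raises(ValueError):
        p.prepare_value(hp, "foo", -1)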
| 32.613527 | 106 | 0.541698 |
21039be4408883265de862be07a936d4383e5ebf | 677 | py | Python | src/icp/apps/user/models.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | ["Apache-2.0"] | 6 | 2016-10-14T18:54:39.000Z | 2021-06-03T21:04:27.000Z | src/icp/apps/user/models.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | ["Apache-2.0"] | 528 | 2016-10-14T17:38:54.000Z | 2022-02-26T10:53:21.000Z | src/icp/apps/user/models.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | ["Apache-2.0"] | 2 | 2016-10-17T18:06:38.000Z | 2020-10-23T09:48:24.000Z |
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class UserProfile(models.Model):
"""For additional user properties."""
POLLINATION = 'Pollination'
BEEKEEPERS = 'Beekeepers'
APP_CHOICES = (
(POLLINATION, POLLINATION),
(BEEKEEPERS, BEEKEEPERS),
)
user = models.OneToOneField(AUTH_USER_MODEL)
origin_app = models.CharField(
max_length=255,
choices=APP_CHOICES,
null=False,
help_text="Record on which app the user signed up"
)
def __unicode__(self):
return self.user.username
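# Editor's sketch (not part of the original module): a hedged example of how
# this profile is typically created alongside a Django user. Assumes the auth
# app is installed and migrations have been applied; the helper name and the
# credentials are placeholders.
def create_pollination_profile(username, email, password):
    from django.contrib.auth import get_user_model
    user = get_user_model().objects.create_user(username, email, password)
    # Record that the user signed up through the Pollination app.
    return UserProfile.objects.create(user=user, origin_app=UserProfile.POLLINATION)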
| 23.344828 | 67 | 0.660266 |
7e7630edc47f6c046ced853bf4a5eebc65fd7af2 | 164 | py | Python | easy_logger/config.py | omsobliga/easy-logger | d3ffd7f80c1fe2f5ed8725b859ad68fdfbe7819f | ["MIT"] | null | null | null | easy_logger/config.py | omsobliga/easy-logger | d3ffd7f80c1fe2f5ed8725b859ad68fdfbe7819f | ["MIT"] | null | null | null | easy_logger/config.py | omsobliga/easy-logger | d3ffd7f80c1fe2f5ed8725b859ad68fdfbe7819f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
DEFAULT_LEVEL = logging.INFO
DEFAULT_FORMAT = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
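# Editor's sketch (not part of the original module): these constants are plain
# stand-ins for logging defaults. A minimal way to apply them with the standard
# library, assuming nothing else about easy_logger's own API; the helper name
# is hypothetical.
def apply_defaults():
    logging.basicConfig(level=DEFAULT_LEVEL, format=DEFAULT_FORMAT)
    logging.getLogger(__name__).info("configured with easy_logger defaults")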
| 20.5 | 71 | 0.652439 |
890022300f5c15f52c0c8790e01a36e5059bc606 | 13,544 | py | Python | zerver/lib/test_fixtures.py | myii/zulip | 915d8013271f1823954dd8d4441842842857ab9f | ["Apache-2.0"] | 1 | 2020-10-02T07:39:04.000Z | 2020-10-02T07:39:04.000Z | zerver/lib/test_fixtures.py | myii/zulip | 915d8013271f1823954dd8d4441842842857ab9f | ["Apache-2.0"] | null | null | null | zerver/lib/test_fixtures.py | myii/zulip | 915d8013271f1823954dd8d4441842842857ab9f | ["Apache-2.0"] | null | null | null |
import json
import os
import re
import subprocess
import sys
from typing import Any, List, Set
from importlib import import_module
from io import StringIO
import glob
import time
import shutil
from django.db import connections, DEFAULT_DB_ALIAS, ProgrammingError, \
connection
from django.db.utils import OperationalError
from django.apps import apps
from django.conf import settings
from django.core.management import call_command
from django.utils.module_loading import module_has_submodule
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from scripts.lib.zulip_tools import (
get_dev_uuid_var_path, run, TEMPLATE_DATABASE_DIR,
is_digest_obsolete, write_new_digest,
)
UUID_VAR_DIR = get_dev_uuid_var_path()
IMPORTANT_FILES = [
'zilencer/management/commands/populate_db.py',
'zerver/lib/bulk_create.py',
'zerver/lib/generate_test_data.py',
'zerver/lib/server_initialization.py',
'tools/setup/postgres-init-test-db',
'tools/setup/postgres-init-dev-db',
'zerver/migrations/0258_enable_online_push_notifications_default.py',
]
VERBOSE_MESSAGE_ABOUT_HASH_TRANSITION = '''
NOTE!!!!
We are rebuilding your database for a one-time transition.
We have a hashing scheme that we use to detect whether any
important files used in the construction of the database
have changed.
We are changing that scheme so it only uses one file
instead of a directory of files.
In order to prevent errors due to this transition, we are
doing a one-time rebuild of your database. This should
be the last time this happens (for this particular reason,
at least), unless you go back to older branches.
'''
def migration_paths() -> List[str]:
return [
*glob.glob('*/migrations/*.py'),
'requirements/dev.txt',
]
class Database:
def __init__(self, platform: str, database_name: str, settings: str):
self.database_name = database_name
self.settings = settings
self.digest_name = 'db_files_hash_for_' + platform
self.migration_status_file = 'migration_status_' + platform
self.migration_status_path = os.path.join(
UUID_VAR_DIR,
self.migration_status_file
)
self.migration_digest_file = "migrations_hash_" + database_name
def important_settings(self) -> List[str]:
def get(setting_name: str) -> str:
value = getattr(settings, setting_name, {})
return json.dumps(value, sort_keys=True)
return [
get('LOCAL_DATABASE_PASSWORD'),
get('INTERNAL_BOTS'),
get('REALM_INTERNAL_BOTS'),
get('DISABLED_REALM_INTERNAL_BOTS'),
]
def run_db_migrations(self) -> None:
# We shell out to `manage.py` and pass `DJANGO_SETTINGS_MODULE` on
# the command line rather than just calling the migration
# functions, because Django doesn't support changing settings like
# what the database is at runtime.
# Also we export ZULIP_DB_NAME which is ignored by dev platform but
# recognised by test platform and used to migrate correct db.
env_prelude = [
'env',
'DJANGO_SETTINGS_MODULE=' + self.settings,
'ZULIP_DB_NAME=' + self.database_name,
]
run(env_prelude + [
'./manage.py', 'migrate', '--no-input',
])
run(env_prelude + [
'./manage.py', 'get_migration_status', '--output='+self.migration_status_file,
])
def what_to_do_with_migrations(self) -> str:
status_fn = self.migration_status_path
settings = self.settings
if not os.path.exists(status_fn):
return 'scrap'
with open(status_fn) as f:
previous_migration_status = f.read()
current_migration_status = get_migration_status(settings=settings)
all_curr_migrations = extract_migrations_as_list(current_migration_status)
all_prev_migrations = extract_migrations_as_list(previous_migration_status)
if len(all_curr_migrations) < len(all_prev_migrations):
return 'scrap'
for migration in all_prev_migrations:
if migration not in all_curr_migrations:
return 'scrap'
if len(all_curr_migrations) == len(all_prev_migrations):
return 'migrations_are_latest'
return 'migrate'
def database_exists(self) -> bool:
try:
connection = connections[DEFAULT_DB_ALIAS]
with connection.cursor() as cursor:
cursor.execute(
"SELECT 1 from pg_database WHERE datname=%s;", [self.database_name],
)
return_value = bool(cursor.fetchone())
connections.close_all()
return return_value
except OperationalError:
return False
def files_or_settings_have_changed(self) -> bool:
database_name = self.database_name
# Deal with legacy hash files. We can kill off this code when
# enough time has passed since April 2020 that we're not
# worried about anomalies doing `git bisect`--probably a few
# months is sufficient.
legacy_status_dir = os.path.join(UUID_VAR_DIR, database_name + '_db_status')
if os.path.exists(legacy_status_dir):
print(VERBOSE_MESSAGE_ABOUT_HASH_TRANSITION)
# Remove the old digest for several reasons:
# - tidiness
# - preventing false positives if you bisect
# - make this only a one-time headache (generally)
shutil.rmtree(legacy_status_dir)
# Return True to force a one-time rebuild.
return True
return is_digest_obsolete(
self.digest_name,
IMPORTANT_FILES,
self.important_settings(),
)
def template_status(self) -> str:
# This function returns a status string specifying the type of
# state the template db is in and thus the kind of action required.
if not self.database_exists():
# TODO: It's possible that `database_exists` will
# return `False` even though the database
# exists, but we just have the wrong password,
# probably due to changing the secrets file.
#
# The only problem this causes is that we waste
# some time rebuilding the whole database, but
# it's better to err on that side, generally.
return 'needs_rebuild'
if self.files_or_settings_have_changed():
return 'needs_rebuild'
# Here we hash and compare our migration files before doing
# the work of seeing what to do with them; if there are no
# changes, we can safely assume we don't need to run
# migrations without spending a few 100ms parsing all the
# Python migration code.
if not self.is_migration_digest_obsolete():
return 'current'
'''
NOTE:
We immediately update the digest, assuming our
callers will do what it takes to run the migrations.
Ideally our callers would just do it themselves
AFTER the migrations actually succeeded, but the
caller codepaths are kind of complicated here.
'''
self.write_new_migration_digest()
migration_op = self.what_to_do_with_migrations()
if migration_op == 'scrap':
return 'needs_rebuild'
if migration_op == 'migrate':
return 'run_migrations'
return 'current'
def is_migration_digest_obsolete(self) -> bool:
return is_digest_obsolete(
self.migration_digest_file,
migration_paths(),
)
def write_new_migration_digest(self) -> None:
write_new_digest(
self.migration_digest_file,
migration_paths(),
)
def write_new_db_digest(self) -> None:
write_new_digest(
self.digest_name,
IMPORTANT_FILES,
self.important_settings(),
)
DEV_DATABASE = Database(
platform='dev',
database_name='zulip',
settings='zproject.settings',
)
TEST_DATABASE = Database(
platform='test',
database_name='zulip_test_template',
settings='zproject.test_settings',
)
def update_test_databases_if_required(rebuild_test_database: bool=False) -> None:
"""Checks whether the zulip_test_template database template, is
consistent with our database migrations; if not, it updates it
in the fastest way possible:
* If all we need to do is add some migrations, just runs those
migrations on the template database.
* Otherwise, we rebuild the test template database from scratch.
The default behavior is sufficient for the `test-backend` use
case, where the test runner code will clone directly from the
template database.
The `rebuild_test_database` option (used by our Casper tests) asks
us to drop and re-clone the zulip_test database from the
template so those test suites can run with a fresh copy.
"""
test_template_db_status = TEST_DATABASE.template_status()
if test_template_db_status == 'needs_rebuild':
run(['tools/rebuild-test-database'])
TEST_DATABASE.write_new_db_digest()
return
if test_template_db_status == 'run_migrations':
TEST_DATABASE.run_db_migrations()
run(['tools/setup/generate-fixtures'])
return
if rebuild_test_database:
run(['tools/setup/generate-fixtures'])
def get_migration_status(**options: Any) -> str:
verbosity = options.get('verbosity', 1)
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
app_label = options['app_label'] if options.get('app_label') else None
db = options.get('database', DEFAULT_DB_ALIAS)
out = StringIO()
command_args = ['--list', ]
if app_label:
command_args.append(app_label)
call_command(
'showmigrations',
*command_args,
database=db,
no_color=options.get('no_color', False),
settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
stdout=out,
traceback=options.get('traceback', True),
verbosity=verbosity,
)
connections.close_all()
out.seek(0)
output = out.read()
return re.sub(r'\x1b\[(1|0)m', '', output)
def extract_migrations_as_list(migration_status: str) -> List[str]:
MIGRATIONS_RE = re.compile(r'\[[X| ]\] (\d+_.+)\n')
return MIGRATIONS_RE.findall(migration_status)
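# Editor's note (not part of the original module): extract_migrations_as_list()
# parses the output of `manage.py showmigrations --list`, where every migration
# is printed as "[X] name" (applied) or "[ ] name" (unapplied). A hedged,
# self-contained illustration with a hand-written sample string follows; the
# helper name is hypothetical.
def _example_extract_migrations() -> List[str]:
    sample = (
        "zerver\n"
        " [X] 0001_initial\n"
        " [ ] 0258_enable_online_push_notifications_default\n"
    )
    # Returns ['0001_initial', '0258_enable_online_push_notifications_default'].
    return extract_migrations_as_list(sample)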
def destroy_leaked_test_databases(expiry_time: int = 60 * 60) -> int:
"""The logic in zerver/lib/test_runner.py tries to delete all the
temporary test databases generated by test-backend threads, but it
cannot guarantee it handles all race conditions correctly. This
is a catch-all function designed to delete any that might have
been leaked due to crashes (etc.). The high-level algorithm is to:
* Delete every database with a name like zulip_test_template_*
* Unless it is registered in a file under TEMPLATE_DATABASE_DIR as
part of a currently running test-backend invocation
* And that file is less than expiry_time old.
This should ensure we ~never break a running test-backend process,
while also ensuring we will eventually delete all leaked databases.
"""
files = glob.glob(os.path.join(UUID_VAR_DIR, TEMPLATE_DATABASE_DIR, "*"))
test_databases: Set[str] = set()
try:
with connection.cursor() as cursor:
cursor.execute("SELECT datname FROM pg_database;")
rows = cursor.fetchall()
for row in rows:
if 'zulip_test_template_' in row[0]:
test_databases.add(row[0])
except ProgrammingError:
pass
databases_in_use: Set[str] = set()
for file in files:
if round(time.time()) - os.path.getmtime(file) < expiry_time:
with open(file) as f:
for line in f:
databases_in_use.add('zulip_test_template_{}'.format(line).rstrip())
else:
# Any test-backend run older than expiry_time can be
# cleaned up, both the database and the file listing its
# databases.
os.remove(file)
databases_to_drop = test_databases - databases_in_use
if not databases_to_drop:
return 0
commands = "\n".join("DROP DATABASE IF EXISTS %s;" % (db,) for db in databases_to_drop)
p = subprocess.Popen(["psql", "-q", "-v", "ON_ERROR_STOP=1", "-h", "localhost",
"postgres", "zulip_test"],
stdin=subprocess.PIPE)
p.communicate(input=commands.encode())
if p.returncode != 0:
raise RuntimeError("Error cleaning up test databases!")
return len(databases_to_drop)
def remove_test_run_directories(expiry_time: int = 60 * 60) -> int:
removed = 0
directories = glob.glob(os.path.join(UUID_VAR_DIR, "test-backend", "run_*"))
for test_run in directories:
if round(time.time()) - os.path.getmtime(test_run) > expiry_time:
try:
shutil.rmtree(test_run)
removed += 1
except FileNotFoundError:
pass
return removed
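# Editor's sketch (not part of the original module): a hedged example of how
# the two cleanup helpers above are typically combined in a periodic janitor
# task; the one-hour expiry mirrors the defaults and the function name is
# hypothetical.
def _example_cleanup() -> None:
    dropped = destroy_leaked_test_databases(expiry_time=60 * 60)
    removed = remove_test_run_directories(expiry_time=60 * 60)
    print(f"dropped {dropped} leaked databases, removed {removed} run directories")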
| 35.642105 | 91 | 0.651359 |
138b8922702fcc66ff21bec541620c878a0f475d | 5,242 | py | Python | bindings/python/pinocchio/visualize/meshcat_visualizer.py | rstrudel/pinocchio | e038c7bf283b1df56a35014455e0e2d6f36e03ac | ["BSD-2-Clause-FreeBSD"] | null | null | null | bindings/python/pinocchio/visualize/meshcat_visualizer.py | rstrudel/pinocchio | e038c7bf283b1df56a35014455e0e2d6f36e03ac | ["BSD-2-Clause-FreeBSD"] | null | null | null | bindings/python/pinocchio/visualize/meshcat_visualizer.py | rstrudel/pinocchio | e038c7bf283b1df56a35014455e0e2d6f36e03ac | ["BSD-2-Clause-FreeBSD"] | null | null | null |
from .. import libpinocchio_pywrap as pin
from ..shortcuts import buildModelsFromUrdf, createDatas
from . import BaseVisualizer
import os
import numpy as np
class MeshcatVisualizer(BaseVisualizer):
"""A Pinocchio display using Meshcat"""
def getViewerNodeName(self, geometry_object, geometry_type):
"""Return the name of the geometry object inside the viewer."""
if geometry_type is pin.GeometryType.VISUAL:
return self.viewerVisualGroupName + '/' + geometry_object.name
elif geometry_type is pin.GeometryType.COLLISION:
return None # TODO: collision meshes
def initViewer(self, viewer=None, open=False, loadModel=False):
"""Start a new MeshCat server and client.
Note: the server can also be started separately using the "meshcat-server" command in a terminal:
this enables the server to remain active after the current script ends.
"""
import meshcat
self.viewer = meshcat.Visualizer() if viewer is None else viewer
if open:
self.viewer.open()
if loadModel:
self.loadViewerModel()
def loadViewerGeometryObject(self, geometry_object,geometry_type, color=None):
"""Load a single geometry object"""
import meshcat.geometry
viewer_name = self.getViewerNodeName(geometry_object, geometry_type)
if geometry_object.meshPath == "":
raise IOError("{} mesh file not found for link {}.".format(str(geometry_type).lower(),geometry_object.name))
# Get file type from filename extension.
_, file_extension = os.path.splitext(geometry_object.meshPath)
if file_extension.lower() == ".dae":
obj = meshcat.geometry.DaeMeshGeometry.from_file(geometry_object.meshPath)
elif file_extension.lower() == ".obj":
obj = meshcat.geometry.ObjMeshGeometry.from_file(geometry_object.meshPath)
elif file_extension.lower() == ".stl":
obj = meshcat.geometry.StlMeshGeometry.from_file(geometry_object.meshPath)
else:
raise ImportError("Unknown mesh file format: {}.".format(geometry_object.meshPath))
material = meshcat.geometry.MeshPhongMaterial()
# Set material color from URDF, converting from a triplet of doubles to a single int.
if color is None:
meshColor = geometry_object.meshColor
else:
meshColor = color
material.color = int(meshColor[0] * 255) * 256**2 + int(meshColor[1] * 255) * 256 + int(meshColor[2] * 255)
# Add transparency, if needed.
if float(meshColor[3]) != 1.0:
material.transparent = True
material.opacity = float(meshColor[3])
self.viewer[viewer_name].set_object(obj, material)
def loadViewerModel(self, rootNodeName="pinocchio", color = None):
"""Load the robot in a MeshCat viewer.
Parameters:
rootNodeName: name to give to the robot in the viewer
color: optional, color to give to the robot. This overwrites the color present in the urdf.
Format is a list of four RGBA floats (between 0 and 1)
"""
# Store the root node name under which this robot appears in the MeshCat viewer.
self.viewerRootNodeName = rootNodeName
# Load robot meshes in MeshCat
# Collisions
# self.viewerCollisionGroupName = self.viewerRootNodeName + "/" + "collisions"
self.viewerCollisionGroupName = None # TODO: collision meshes
# Visuals
self.viewerVisualGroupName = self.viewerRootNodeName + "/" + "visuals"
for visual in self.visual_model.geometryObjects:
self.loadViewerGeometryObject(visual,pin.GeometryType.VISUAL,color)
def display(self, q):
"""Display the robot at configuration q in the viewer by placing all the bodies."""
pin.forwardKinematics(self.model,self.data,q)
pin.updateGeometryPlacements(self.model, self.data, self.visual_model, self.visual_data)
for visual in self.visual_model.geometryObjects:
# Get mesh pose.
M = self.visual_data.oMg[self.visual_model.getGeometryId(visual.name)]
# Manage scaling
scale = np.asarray(visual.meshScale).flatten()
S = np.diag(np.concatenate((scale,[1.0])))
T = np.array(M.homogeneous).dot(S)
# Update viewer configuration.
self.viewer[self.getViewerNodeName(visual,pin.GeometryType.VISUAL)].set_transform(T)
def displayCollisions(self,visibility):
"""Set whether to display collision objects or not.
WARNING: Plotting collision meshes is not yet available for MeshcatVisualizer."""
# TODO
import warnings
warnings.warn("Plotting collision meshes is not available for MeshcatVisualizer", category=UserWarning, stacklevel=2)
pass
def displayVisuals(self,visibility):
"""Set whether to display visual objects or not
WARNING: Visual meshes are always plotted for MeshcatVisualizer"""
# TODO
import warnings
warnings.warn("Visual meshes are always plotted for MeshcatVisualizer", category=UserWarning, stacklevel=2)
pass
__all__ = ['MeshcatVisualizer']
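# Editor's sketch (not part of the original module): a hedged example of
# driving this visualizer end to end. The URDF and mesh-directory paths are
# placeholders, and the helper name is hypothetical; everything else uses the
# imports already present at the top of this file.
def _example_display_robot(urdf_path, mesh_dir):
    model, collision_model, visual_model = buildModelsFromUrdf(urdf_path, mesh_dir)
    viz = MeshcatVisualizer(model, collision_model, visual_model)
    viz.initViewer(open=True)        # start (or attach to) a MeshCat server
    viz.loadViewerModel()            # push the visual geometries to the viewer
    viz.display(pin.neutral(model))  # show the neutral configuration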
| 43.683333 | 125 | 0.667493 |
c233f77ee01e800131ddfc15840855a3b42674db | 3,087 | py | Python | nemo/backends/pytorch/module_wrapper.py | vsl9/NeMo | 4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50 | ["Apache-2.0"] | 2 | 2021-03-04T16:37:46.000Z | 2021-03-04T16:40:22.000Z | nemo/backends/pytorch/module_wrapper.py | vsl9/NeMo | 4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50 | ["Apache-2.0"] | null | null | null | nemo/backends/pytorch/module_wrapper.py | vsl9/NeMo | 4137c2b4e3cba0ec5ca1da7b58b3ff97fdb25e50 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2019 NVIDIA Corporation
import torch as t
import torch.nn as nn
from ...core import DeviceType, NeuralModule
from ...utils.helpers import rgetattr, rsetattr
class TrainableNeuralModuleWrapper(NeuralModule, nn.Module):
"""This class wraps an instance of Pytorch's nn.Module and
returns NeuralModule's instance."""
def __init__(self, pt_nn_module, input_ports_dict, output_ports_dict, **kwargs):
NeuralModule.__init__(self, **kwargs)
nn.Module.__init__(self)
self._input_ports = input_ports_dict
self._output_ports = output_ports_dict
self._device = t.device("cuda" if self.placement in [DeviceType.GPU, DeviceType.AllGpu] else "cpu")
self._pt_module = pt_nn_module
self._pt_module.to(self._device)
@property
def input_ports(self):
"""Returns definitions of module input ports.
"""
return self._input_ports
@property
def output_ports(self):
"""Returns definitions of module output ports.
"""
return self._output_ports
# def forward(self, *input):
# return self._pt_module(input)
def eval(self):
return self._pt_module.eval()
def train(self):
return self._pt_module.train()
def __call__(self, force_pt=False, *input, **kwargs):
pt_call = len(input) > 0 or force_pt
if pt_call:
return self._pt_module.__call__(*input, **kwargs)
else:
return NeuralModule.__call__(self, **kwargs)
def get_weights(self):
result = dict()
for name, parameter in self.named_parameters():
result[name] = (parameter, parameter.requires_grad)
return result
def save_to(self, path):
t.save(self._pt_module.state_dict(), path)
def restore_from(self, path):
self._pt_module.load_state_dict(t.load(path))
def parameters(self):
return self._pt_module.parameters()
def named_parameters(self):
return self._pt_module.named_parameters()
def freeze(self, weights=None):
for name, param in self._pt_module.named_parameters():
if weights is None or name in weights:
param.requires_grad = False
def unfreeze(self, weights=None):
for name, param in self._pt_module.named_parameters():
if weights is None or name in weights:
param.requires_grad = True
def get_weights(self):
result = dict()
for name, parameter in self._pt_module.named_parameters():
result[name] = (parameter, parameter.requires_grad)
return result
def set_weights(self, name2weight, name2name_and_transform=None):
self._pt_module.load_state_dict({key: name2weight[key][0] for key in name2weight.keys()})
def tie_weights_with(self, module, weight_names):
for name in weight_names:
rsetattr(self._pt_module, name, rgetattr(module, name))
@property
def num_weights(self):
return sum(p.numel() for p in self._pt_module.parameters() if p.requires_grad)
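# Editor's sketch (not part of the original module): a hedged example of the
# call shape of this wrapper. The port dictionaries are placeholders (their
# exact NeuralType contents depend on the NeMo collection in use), the helper
# name is hypothetical, and a NeuralModuleFactory is assumed to exist so that
# `placement` is defined when NeuralModule.__init__ runs.
def _example_wrap_linear(input_ports, output_ports):
    linear = nn.Linear(16, 4)
    wrapped = TrainableNeuralModuleWrapper(linear, input_ports, output_ports)
    # The wrapped module can now be inspected through the NeuralModule-style API.
    return wrapped.num_weights, wrapped.get_weights()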
| 32.840426 | 107 | 0.660836 |
1e65c1ce3f077cdd07897da17d414cc27c3da3c4 | 303 | py | Python | contrib/spendfrom/setup.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | ["MIT"] | null | null | null | contrib/spendfrom/setup.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | ["MIT"] | null | null | null | contrib/spendfrom/setup.py | GreenCoinX/greencoin | 318995aa6b13a246e780fed3cb30917e36525da2 | ["MIT"] | null | null | null |
from distutils.core import setup
setup(name='xgcspendfrom',
version='1.0',
description='Command-line utility for greencoin "coin control"',
author='Gavin Andresen',
author_email='gavin@greencoinfoundation.org',
requires=['jsonrpc'],
scripts=['spendfrom.py'],
)
| 30.3 | 70 | 0.663366 |
9ca99ca019c514319d3fa464e15a1250dbb08385 | 19,827 | py | Python | src/compas_plotters/artists/meshartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | ["MIT"] | null | null | null | src/compas_plotters/artists/meshartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | ["MIT"] | null | null | null | src/compas_plotters/artists/meshartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | ["MIT"] | null | null | null |
from typing import Dict
from typing import Tuple
from typing import List
from typing import Union
from typing import Optional
from typing import Any
from typing_extensions import Literal
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.patches import Polygon as PolygonPatch
from matplotlib.patches import Circle
from compas.geometry import centroid_points_xy
from compas.geometry import Line
from compas.geometry import offset_line
from compas.geometry import Frame
from compas.geometry import Scale
from compas.datastructures import Mesh
from compas.artists import MeshArtist
from compas.utilities import is_color_rgb
from compas.utilities.colors import is_color_light
from .artist import PlotterArtist
Color = Tuple[float, float, float]
class MeshArtist(PlotterArtist, MeshArtist):
"""Artist for COMPAS mesh data structures.
Parameters
----------
mesh : :class:`compas.datastructures.Mesh`
A COMPAS mesh.
vertices : list of int, optional
A list of vertex identifiers.
Default is ``None``, in which case all vertices are drawn.
edges : list, optional
A list of edge keys (as uv pairs) identifying which edges to draw.
The default is ``None``, in which case all edges are drawn.
faces : list, optional
A list of face identifiers.
The default is ``None``, in which case all faces are drawn.
vertexcolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the vertices.
edgecolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the edges.
facecolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the faces.
show_vertices : bool, optional
show_edges : bool, optional
show_faces : bool, optional
vertexsize : int, optional
sizepolicy : {'relative', 'absolute'}, optional
Attributes
----------
vertexcollection : :class:`PatchCollection`
The collection containing the vertices.
edgecollection : :class:`LineCollection`
The collection containing the edges.
facecollection : :class:`PatchCollection`
The collection containing the faces.
Class Attributes
----------------
zorder_vertices : int
zorder_edges : int
zorder_faces : int
"""
default_halfedgecolor = (0.7, 0.7, 0.7)
def __init__(self,
mesh: Mesh,
vertices: Optional[List[int]] = None,
edges: Optional[List[int]] = None,
faces: Optional[List[int]] = None,
vertexcolor: Color = (1.0, 1.0, 1.0),
edgecolor: Color = (0.0, 0.0, 0.0),
facecolor: Color = (0.9, 0.9, 0.9),
edgewidth: float = 1.0,
show_vertices: bool = True,
show_edges: bool = True,
show_faces: bool = True,
vertexsize: int = 5,
vertextext: Optional[Union[str, Dict[int, str]]] = None,
edgetext: Optional[Union[str, Dict[Tuple[int, int], str]]] = None,
facetext: Optional[Union[str, Dict[int, str]]] = None,
sizepolicy: Literal['relative', 'absolute'] = 'relative',
zorder: int = 1000,
**kwargs: Any):
super().__init__(mesh=mesh, **kwargs)
self.sizepolicy = sizepolicy
self.vertices = vertices
self.edges = edges
self.faces = faces
self.vertex_color = vertexcolor
self.vertex_size = vertexsize
self.vertex_text = vertextext
self.edge_color = edgecolor
self.edge_width = edgewidth
self.face_color = facecolor
self.show_vertices = show_vertices
self.show_edges = show_edges
self.show_faces = show_faces
self.zorder = zorder
self._halfedges = None
self._halfedgecollection = None
self._halfedge_color = None
@property
def halfedges(self):
if not self._halfedges:
self._halfedges = [(u, v) for u in self.mesh.halfedge for v in self.mesh.halfedge[u]]
return self._halfedges
@halfedges.setter
def halfedges(self, halfedges):
self._halfedges = halfedges
@property
def vertex_size(self):
if not self._vertex_size:
factor = self.plotter.dpi if self.sizepolicy == 'absolute' else self.mesh.number_of_vertices()
size = self.default_vertexsize / factor
self._vertex_size = {vertex: size for vertex in self.mesh.vertices()}
return self._vertex_size
@vertex_size.setter
def vertex_size(self, vertexsize):
factor = self.plotter.dpi if self.sizepolicy == 'absolute' else self.mesh.number_of_vertices()
if isinstance(vertexsize, dict):
self.vertex_size.update({vertex: size / factor for vertex, size in vertexsize.items()})
elif isinstance(vertexsize, (int, float)):
self._vertex_size = {vertex: vertexsize / factor for vertex in self.mesh.vertices()}
@property
def halfedge_color(self):
if self._halfedge_color is None:
self._halfedge_color = {(u, v): self.default_halfedgecolor for u in self.mesh.halfedge for v in self.mesh.halfedge[u]}
return self._halfedge_color
@halfedge_color.setter
def halfedge_color(self, halfedge_color):
if isinstance(halfedge_color, dict):
self._halfedge_color = halfedge_color
elif is_color_rgb(halfedge_color):
self._halfedge_color = {(u, v): halfedge_color for u in self.mesh.halfedge for v in self.mesh.halfedge[u]}
@property
def zorder_faces(self):
return self.zorder + 10
@property
def zorder_edges(self):
return self.zorder + 20
@property
def zorder_vertices(self):
return self.zorder + 30
@property
def item(self):
"""Mesh: Alias for ``~MeshArtist.mesh``"""
return self.mesh
@item.setter
def item(self, item: Mesh):
self.mesh = item
@property
def data(self) -> List[List[float]]:
return self.mesh.vertices_attributes('xy')
# ==============================================================================
# clear and draw
# ==============================================================================
def clear_vertices(self) -> None:
if self._vertexcollection:
self._vertexcollection.remove()
self._vertexcollection = None
def clear_edges(self) -> None:
if self._edgecollection:
self._edgecollection.remove()
self._edgecollection = None
def clear_halfedges(self) -> None:
if self._halfedgecollection:
for artist in self._halfedgecollection:
artist.remove()
self._halfedgecollection = None
def clear_faces(self) -> None:
if self._facecollection:
self._facecollection.remove()
self._facecollection = None
def draw(self,
vertices: Optional[List[int]] = None,
edges: Optional[List[Tuple[int, int]]] = None,
faces: Optional[List[int]] = None,
vertexcolor: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None,
edgecolor: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None,
facecolor: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None
) -> None:
"""Draw the mesh.
Parameters
----------
vertices : list of int, optional
A list of vertex identifiers.
Default is ``None``, in which case all vertices are drawn.
edges : list, optional
A list of edge keys (as uv pairs) identifying which edges to draw.
The default is ``None``, in which case all edges are drawn.
faces : list, optional
A list of face identifiers.
The default is ``None``, in which case all faces are drawn.
vertexcolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the vertices.
edgecolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the edges.
facecolor : rgb-tuple or dict of rgb-tuples, optional
The color specification for the faces.
"""
self.clear()
if self.show_vertices:
self.draw_vertices(vertices=vertices, color=vertexcolor)
if self.show_edges:
self.draw_edges(edges=edges, color=edgecolor)
if self.show_faces:
self.draw_faces(faces=faces, color=facecolor)
def draw_vertices(self,
vertices: Optional[List[int]] = None,
color: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None
) -> None:
"""Draw a selection of vertices.
Parameters
----------
vertices : list of int, optional
A list of vertex identifiers.
Default is ``None``, in which case all vertices are drawn.
color : rgb-tuple or dict of rgb-tuples, optional
The color specification for the vertices.
Returns
-------
None
"""
self.clear_vertices()
if vertices:
self.vertices = vertices
if color:
self.vertex_color = color
circles = []
for vertex in self.vertices:
x, y = self.vertex_xyz[vertex][:2]
circle = Circle(
[x, y],
radius=self.vertex_size.get(vertex, self.default_vertexsize),
facecolor=self.vertex_color.get(vertex, self.default_vertexcolor),
edgecolor=(0, 0, 0),
lw=0.3,
)
circles.append(circle)
collection = PatchCollection(
circles,
match_original=True,
zorder=self.zorder_vertices,
alpha=1.0,
picker=5
)
self.plotter.axes.add_collection(collection)
self._vertexcollection = collection
def draw_edges(self,
edges: Optional[List[Tuple[int, int]]] = None,
color: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None
) -> None:
"""Draw a selection of edges.
Parameters
----------
edges : list, optional
A list of edge keys (as uv pairs) identifying which edges to draw.
The default is ``None``, in which case all edges are drawn.
color : rgb-tuple or dict of rgb-tuples, optional
The color specification for the edges.
Returns
-------
None
"""
self.clear_edges()
if edges:
self.edges = edges
if color:
self.edge_color = color
lines = []
colors = []
widths = []
for edge in self.edges:
lines.append([self.vertex_xyz[edge[0]][:2], self.vertex_xyz[edge[1]][:2]])
colors.append(self.edge_color.get(edge, self.default_edgecolor))
widths.append(self.edge_width.get(edge, self.default_edgewidth))
collection = LineCollection(
lines,
linewidths=widths,
colors=colors,
linestyle='solid',
alpha=1.0,
zorder=self.zorder_edges
)
self.plotter.axes.add_collection(collection)
self._edgecollection = collection
def draw_halfedges(self,
halfedges: Optional[List[Tuple[int, int]]] = None,
color: Union[str, Color, List[Color], Dict[int, Color]] = (0.7, 0.7, 0.7),
distance: float = 0.05,
width: float = 0.01,
shrink: float = 0.8,
) -> None:
"""Draw a selection of halfedges.
Parameters
----------
edges : list, optional
A list of halfedges to draw.
The default is ``None``, in which case all halfedges are drawn.
color : rgb-tuple or dict of rgb-tuples, optional
The color specification for the halfedges.
Returns
-------
None
"""
self.clear_halfedges()
self._halfedgecollection = []
if color:
self.halfedge_color = color
if halfedges:
self.halfedges = halfedges
for u, v in self.halfedges:
face = self.mesh.halfedge_face(u, v)
if face is None:
normal = self.mesh.face_normal(self.mesh.halfedge_face(v, u))
else:
normal = self.mesh.face_normal(face)
a, b = self.mesh.edge_coordinates(u, v)
line = Line(* offset_line((a, b), distance, normal))
frame = Frame(line.midpoint, [1, 0, 0], [0, 1, 0])
scale = Scale.from_factors([shrink, shrink, shrink], frame=frame)
line.transform(scale)
artist = self.plotter.axes.arrow(
line.start[0], line.start[1],
line.vector[0], line.vector[1],
width=width,
head_width=10 * width,
head_length=10 * width,
length_includes_head=True,
shape='right',
color=self.halfedge_color.get((u, v), self.default_halfedgecolor),
zorder=10000
)
self._halfedgecollection.append(artist)
def draw_faces(self,
faces: Optional[List[int]] = None,
color: Optional[Union[str, Color, List[Color], Dict[int, Color]]] = None
) -> None:
"""Draw a selection of faces.
Parameters
----------
faces : list, optional
A list of face identifiers.
The default is ``None``, in which case all faces are drawn.
color : rgb-tuple or dict of rgb-tuples, optional
The color specification for the faces.
Returns
-------
None
"""
self.clear_faces()
if faces:
self.faces = faces
if color:
self.face_color = color
polygons = []
facecolors = []
edgecolors = []
linewidths = []
for face in self.faces:
data = [self.vertex_xyz[vertex][:2] for vertex in self.mesh.face_vertices(face)]
polygons.append(PolygonPatch(data))
facecolors.append(self.face_color.get(face, self.default_facecolor))
edgecolors.append((0, 0, 0))
linewidths.append(0.1)
collection = PatchCollection(
polygons,
facecolors=facecolors,
edgecolors=edgecolors,
lw=linewidths,
alpha=1.0,
linestyle='solid',
zorder=self.zorder_faces
)
self.plotter.axes.add_collection(collection)
self._facecollection = collection
def draw_vertexlabels(self, text: Optional[Dict[int, str]] = None) -> None:
"""Draw a selection of vertex labels.
Parameters
----------
text : dict of int to str, optional
A vertex-label map.
If no text dict is provided, the vertex identifiers are drawn.
Returns
-------
None
"""
if self._vertexlabelcollection:
for artist in self._vertexlabelcollection:
artist.remove()
if text:
self.vertex_text = text
labels = []
for vertex in self.vertices:
bgcolor = self.vertex_color.get(vertex, self.default_vertexcolor)
color = (0, 0, 0) if is_color_light(bgcolor) else (1, 1, 1)
text = self.vertex_text.get(vertex, None)
if text is None:
continue
x, y = self.vertex_xyz[vertex][:2]
artist = self.plotter.axes.text(
x, y,
f'{text}',
fontsize=12,
family='monospace',
ha='center', va='center',
zorder=10000,
color=color
)
labels.append(artist)
self._vertexlabelcollection = labels
def draw_edgelabels(self, text: Optional[Dict[int, str]] = None) -> None:
"""Draw a selection of edge labels.
Parameters
----------
text : dict of tuple of int to str
An edge-label map.
Returns
-------
None
"""
if self._edgelabelcollection:
for artist in self._edgelabelcollection:
artist.remove()
if text:
self.edge_text = text
labels = []
for edge in self.edges:
text = self.edge_text.get(edge, None)
if text is None:
continue
x0, y0 = self.vertex_xyz[edge[0]][:2]
x1, y1 = self.vertex_xyz[edge[1]][:2]
x = 0.5 * (x0 + x1)
y = 0.5 * (y0 + y1)
artist = self.plotter.axes.text(
x, y, f'{text}',
fontsize=12,
family='monospace',
ha='center', va='center',
zorder=10000,
color=(0, 0, 0)
)
labels.append(artist)
self._edgelabelcollection = labels
def draw_facelabels(self, text: Optional[Dict[int, str]] = None) -> None:
"""Draw a selection of face labels.
Parameters
----------
text : dict of int to str
A face-label map.
Returns
-------
None
"""
if self._facelabelcollection:
for artist in self._facelabelcollection:
artist.remove()
if text:
self.face_text = text
labels = []
for face in self.faces:
text = self.face_text.get(face, None)
if text is None:
continue
x, y, _ = centroid_points_xy([self.vertex_xyz[vertex] for vertex in self.mesh.face_vertices(face)])
artist = self.plotter.axes.text(
x, y, f'{text}',
fontsize=12,
family='monospace',
ha='center', va='center',
zorder=10000,
color=(0, 0, 0),
bbox=dict(boxstyle='circle, pad=0.7', facecolor=(1, 1, 1), edgecolor=(0.5, 0.5, 0.5), linestyle=':')
)
labels.append(artist)
self._facelabelcollection = labels
def redraw(self) -> None:
pass
def update_vertexcolors(self, colors):
facecolors = []
for vertex in self.vertices:
if vertex in colors:
color = colors[vertex]
else:
color = self.vertex_color.get(vertex, self.default_vertexcolor)
facecolors.append(color)
self._vertexcollection.set_facecolors(facecolors)
def update_edgecolors(self, colors):
edgecolors = []
for edge in self.edges:
if edge in colors:
color = colors[edge]
else:
color = self.edge_color.get(edge, self.default_edgecolor)
edgecolors.append(color)
self._edgecollection.set_colors(edgecolors)
def update_edgewidths(self, widths):
edgewidths = []
for edge in self.edges:
if edge in widths:
w = widths[edge]
else:
w = self.edge_width.get(edge, self.default_edgewidth)
edgewidths.append(w)
self._edgecollection.set_linewidths(edgewidths)
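# Editor's sketch (not part of the original module): a hedged example of using
# this artist through compas_plotters' Plotter, which constructs the artist
# itself via `plotter.add(mesh)`. The keyword arguments simply forward to the
# constructor above; the helper name and the chosen values are placeholders.
def _example_plot_mesh():
    from compas_plotters import Plotter
    mesh = Mesh.from_polyhedron(6)  # a cube; only the XY coordinates are drawn
    plotter = Plotter()
    plotter.add(mesh, vertexsize=8, edgewidth=1.5)
    plotter.zoom_extents()
    plotter.show()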
| 33.491554 | 130 | 0.557523 |
681f5c1e4f3347c249b71a047af49e97bbc1003f | 809 | py | Python | iol/__init__.py | abc123me/nasa_dsn | c15cd20097fbf8d1d473e5cd9a1db3840518c69c | ["MIT"] | null | null | null | iol/__init__.py | abc123me/nasa_dsn | c15cd20097fbf8d1d473e5cd9a1db3840518c69c | ["MIT"] | null | null | null | iol/__init__.py | abc123me/nasa_dsn | c15cd20097fbf8d1d473e5cd9a1db3840518c69c | ["MIT"] | null | null | null |
#Here to make python recognize this as a package
from iol import baseball
from iol import pygpio
from iol import adlib
from cli import colors
__old_gpio = pygpio.GPIOPin
__old_bb = baseball.BaseballSwitch
__old_adc = adlib.ADC
__emulated = False
def SET_EMULATED(mode):
    global __emulated  # without this, the assignments below would only bind a local name
    if(mode):
print(u"\u001b[31mSET IOLIB TO EMULATED MODE, NO REAL IO WILL BE MODIFIED\u001b[0m")
pygpio.GPIOPin = pygpio.EmulatedGPIOPin
baseball.BaseballSwitch = baseball.EmulatedBaseballSwitch
adlib.ADC = adlib.EmulatedADC
__emulated = True
else:
print(u"\u001b[31mSET IOLIB TO NON-EMULATED MODE, REAL IO WILL BE MODIFIED\u001b[0m")
pygpio.GPIOPin = __old_gpio
baseball.BaseballSwitch = __old_bb
adlib.ADC = __old_adc
__emulated = False
def MAKE_EMULATED():
SET_EMULATED(True)
def IS_EMULATED():
return __emulated
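# Editor's sketch (not part of the original package): a hedged example of
# flipping the library into emulated mode before any real IO is touched; the
# helper name is hypothetical and no pin constructor arguments are shown
# because pygpio's signatures are not visible from this file.
def _example_enable_emulation():
    MAKE_EMULATED()       # swaps the real IO classes for their emulated twins
    return IS_EMULATED()  # True once SET_EMULATED(True) has run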
| 29.962963 | 87 | 0.781211 |
004803763de5028fe3e48470199a2fac5234160b | 13,661 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/arp.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | ["Apache-2.0"] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/arp.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | ["Apache-2.0"] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/arp.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | ["Apache-2.0"] | null | null | null |
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class arp(base_resource) :
""" Configuration for arp resource. """
def __init__(self) :
self._ipaddress = None
self._td = None
self._mac = None
self._ifnum = None
self._vxlan = None
self._vtep = None
self._vlan = None
self._ownernode = None
self._all = None
self._nodeid = None
self._timeout = None
self._state = None
self._flags = None
self._type = None
self._channel = None
self.___count = None
@property
def ipaddress(self) :
r"""IP address of the network device that you want to add to the ARP table.<br/>Minimum length = 1.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@ipaddress.setter
def ipaddress(self, ipaddress) :
r"""IP address of the network device that you want to add to the ARP table.<br/>Minimum length = 1
"""
try :
self._ipaddress = ipaddress
except Exception as e:
raise e
@property
def td(self) :
r"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094.
"""
try :
return self._td
except Exception as e:
raise e
@td.setter
def td(self, td) :
r"""Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094
"""
try :
self._td = td
except Exception as e:
raise e
@property
def mac(self) :
r"""MAC address of the network device.
"""
try :
return self._mac
except Exception as e:
raise e
@mac.setter
def mac(self, mac) :
r"""MAC address of the network device.
"""
try :
self._mac = mac
except Exception as e:
raise e
@property
def ifnum(self) :
r"""Interface through which the network device is accessible. Specify the interface in (slot/port) notation. For example, 1/3.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
r"""Interface through which the network device is accessible. Specify the interface in (slot/port) notation. For example, 1/3.
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def vxlan(self) :
r"""ID of the VXLAN on which the IP address of this ARP entry is reachable.<br/>Minimum length = 1<br/>Maximum length = 16777215.
"""
try :
return self._vxlan
except Exception as e:
raise e
@vxlan.setter
def vxlan(self, vxlan) :
r"""ID of the VXLAN on which the IP address of this ARP entry is reachable.<br/>Minimum length = 1<br/>Maximum length = 16777215
"""
try :
self._vxlan = vxlan
except Exception as e:
raise e
@property
def vtep(self) :
r"""IP address of the VXLAN tunnel endpoint (VTEP) through which the IP address of this ARP entry is reachable.<br/>Minimum length = 1.
"""
try :
return self._vtep
except Exception as e:
raise e
@vtep.setter
def vtep(self, vtep) :
r"""IP address of the VXLAN tunnel endpoint (VTEP) through which the IP address of this ARP entry is reachable.<br/>Minimum length = 1
"""
try :
self._vtep = vtep
except Exception as e:
raise e
@property
def vlan(self) :
r"""The VLAN ID through which packets are to be sent after matching the ARP entry. This is a numeric value.
"""
try :
return self._vlan
except Exception as e:
raise e
@vlan.setter
def vlan(self, vlan) :
r"""The VLAN ID through which packets are to be sent after matching the ARP entry. This is a numeric value.
"""
try :
self._vlan = vlan
except Exception as e:
raise e
@property
def ownernode(self) :
r"""The owner node for the Arp entry.<br/>Maximum length = 31.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
r"""The owner node for the Arp entry.<br/>Maximum length = 31
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def all(self) :
r"""Remove all ARP entries from the ARP table of the NetScaler appliance.
"""
try :
return self._all
except Exception as e:
raise e
@all.setter
def all(self, all) :
r"""Remove all ARP entries from the ARP table of the NetScaler appliance.
"""
try :
self._all = all
except Exception as e:
raise e
@property
def nodeid(self) :
r"""Unique number that identifies the cluster node.<br/>Maximum length = 31.
"""
try :
return self._nodeid
except Exception as e:
raise e
@nodeid.setter
def nodeid(self, nodeid) :
r"""Unique number that identifies the cluster node.<br/>Maximum length = 31
"""
try :
self._nodeid = nodeid
except Exception as e:
raise e
@property
def timeout(self) :
r"""The time, in seconds, after which the entry times out.
"""
try :
return self._timeout
except Exception as e:
raise e
@property
def state(self) :
r"""The state of the ARP entry.
"""
try :
return self._state
except Exception as e:
raise e
@property
def flags(self) :
r"""The flags for the entry.
"""
try :
return self._flags
except Exception as e:
raise e
@property
def type(self) :
r"""Indicates whether this ARP entry was added manually or dynamically. When you manually add an ARP entry, the value for this parameter is STATIC. Otherwise, it is DYNAMIC. For the NSIP and loopback IP addresses, the value is PERMANENT.<br/>Possible values = STATIC, PERMANENT, DYNAMIC.
"""
try :
return self._type
except Exception as e:
raise e
@property
def channel(self) :
r"""The tunnel, channel, or physical interface through which the ARP entry is identified.
"""
try :
return self._channel
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(arp_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.arp
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.ipaddress is not None :
return str(self.ipaddress)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
r""" Use this API to add arp.
"""
try :
if type(resource) is not list :
addresource = arp()
addresource.ipaddress = resource.ipaddress
addresource.td = resource.td
addresource.mac = resource.mac
addresource.ifnum = resource.ifnum
addresource.vxlan = resource.vxlan
addresource.vtep = resource.vtep
addresource.vlan = resource.vlan
addresource.ownernode = resource.ownernode
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ arp() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].ipaddress = resource[i].ipaddress
addresources[i].td = resource[i].td
addresources[i].mac = resource[i].mac
addresources[i].ifnum = resource[i].ifnum
addresources[i].vxlan = resource[i].vxlan
addresources[i].vtep = resource[i].vtep
addresources[i].vlan = resource[i].vlan
addresources[i].ownernode = resource[i].ownernode
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete arp.
"""
try :
if type(resource) is not list :
deleteresource = arp()
if type(resource) != type(deleteresource):
deleteresource.ipaddress = resource
else :
deleteresource.ipaddress = resource.ipaddress
deleteresource.td = resource.td
deleteresource.all = resource.all
deleteresource.ownernode = resource.ownernode
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ arp() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ arp() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].ipaddress = resource[i].ipaddress
deleteresources[i].td = resource[i].td
deleteresources[i].all = resource[i].all
deleteresources[i].ownernode = resource[i].ownernode
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def send(cls, client, resource) :
r""" Use this API to send arp.
"""
try :
if type(resource) is not list :
sendresource = arp()
sendresource.ipaddress = resource.ipaddress
sendresource.td = resource.td
sendresource.all = resource.all
return sendresource.perform_operation(client,"send")
else :
if (resource and len(resource) > 0) :
sendresources = [ arp() for _ in range(len(resource))]
for i in range(len(resource)) :
sendresources[i].ipaddress = resource[i].ipaddress
sendresources[i].td = resource[i].td
sendresources[i].all = resource[i].all
result = cls.perform_operation_bulk_request(client, sendresources,"send")
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the arp resources that are configured on netscaler.
"""
try :
if not name :
obj = arp()
response = obj.get_resources(client, option_)
else :
if type(name) == cls :
if type(name) is not list :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name)
response = name.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [arp() for _ in range(len(name))]
for i in range(len(name)) :
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(name[i])
response[i] = name[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
r""" Use this API to fetch all the arp resources that are configured on netscaler.
# This uses arp_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = arp()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of arp resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = arp()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the arp resources configured on NetScaler.
"""
try :
obj = arp()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of arp resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = arp()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Type:
STATIC = "STATIC"
PERMANENT = "PERMANENT"
DYNAMIC = "DYNAMIC"
class arp_response(base_response) :
def __init__(self, length=1) :
self.arp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.arp = [arp() for _ in range(length)]
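# Editor's sketch (not part of the original module): a hedged example of the
# usual NITRO calling pattern for this resource. The appliance address,
# credentials, and the documentation-range IP/MAC values are placeholders, and
# the helper name is hypothetical.
def _example_add_static_arp(nsip, user, password):
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service(nsip, "http")
    client.login(user, password)
    entry = arp()
    entry.ipaddress = "192.0.2.10"
    entry.mac = "02:00:5e:00:53:01"
    arp.add(client, entry)     # push the static entry to the appliance
    total = arp.count(client)  # number of ARP entries now configured
    client.logout()
    return total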
| 27.822811 | 289 | 0.684796 |
ead0749f69c5d57dce2f768c2d6e612020b91718 | 4,259 | py | Python | python/taichi/ui/canvas.py | kxxt/taichi | 15f39b79c258080f1e34fcbdc29646d9ced0a4fe | ["MIT"] | 11,699 | 2020-01-09T03:02:46.000Z | 2022-03-31T20:59:08.000Z | python/taichi/ui/canvas.py | kxxt/taichi | 15f39b79c258080f1e34fcbdc29646d9ced0a4fe | ["MIT"] | 3,589 | 2020-01-09T03:18:25.000Z | 2022-03-31T19:06:42.000Z | python/taichi/ui/canvas.py | kxxt/taichi | 15f39b79c258080f1e34fcbdc29646d9ced0a4fe | ["MIT"] | 1,391 | 2020-01-09T03:02:54.000Z | 2022-03-31T08:44:29.000Z |
from .staging_buffer import (copy_colors_to_vbo, copy_vertices_to_vbo,
get_vbo_field, to_u8_rgba)
from .utils import get_field_info
class Canvas:
def __init__(self, canvas) -> None:
self.canvas = canvas #reference to a PyCanvas
def set_background_color(self, color):
self.canvas.set_background_color(color)
def set_image(self, img):
staging_img = to_u8_rgba(img)
info = get_field_info(staging_img)
self.canvas.set_image(info)
def triangles(self,
vertices,
color=(0.5, 0.5, 0.5),
indices=None,
per_vertex_color=None):
"""Declare a set of 2D triangles inside the scene.
Args:
vertices: a taichi 2D Vector field, where each element indicate the 3D location of a vertex.
indices: a taichi int field of shape (3 * #triangles), which indicate the vertex indices of the triangles. If this is None, then it is assumed that the vertices are already arranged in triangles order.
color: a global color for the triangles as 3 floats representing RGB values. If `per_vertex_color` is provided, this is ignored.
per_vertex_color (Tuple[float]): a taichi 3D vector field, where each element indicate the RGB color of a vertex.
"""
vbo = get_vbo_field(vertices)
copy_vertices_to_vbo(vbo, vertices)
has_per_vertex_color = per_vertex_color is not None
if has_per_vertex_color:
copy_colors_to_vbo(vbo, per_vertex_color)
vbo_info = get_field_info(vbo)
indices_info = get_field_info(indices)
self.canvas.triangles(vbo_info, indices_info, has_per_vertex_color,
color)
def lines(self,
vertices,
width,
indices=None,
color=(0.5, 0.5, 0.5),
per_vertex_color=None):
"""Declare a set of 2D lines inside the scene.
Args:
vertices: a taichi 2D Vector field, where each element indicates the 2D location of a vertex.
width (float): width of the lines, relative to the height of the screen.
indices: a taichi int field of shape (2 * #lines), which indicates the vertex indices of the lines. If this is None, then it is assumed that the vertices are already arranged in line order.
color: a global color for the lines as 3 floats representing RGB values. If `per_vertex_color` is provided, this is ignored.
per_vertex_color (Tuple[float]): a taichi 3D vector field, where each element indicates the RGB color of a vertex.
"""
vbo = get_vbo_field(vertices)
copy_vertices_to_vbo(vbo, vertices)
has_per_vertex_color = per_vertex_color is not None
if has_per_vertex_color:
copy_colors_to_vbo(vbo, per_vertex_color)
vbo_info = get_field_info(vbo)
indices_info = get_field_info(indices)
self.canvas.lines(vbo_info, indices_info, has_per_vertex_color, color,
width)
def circles(self,
centers,
radius,
color=(0.5, 0.5, 0.5),
per_vertex_color=None):
"""Declare a set of 2D circles inside the scene.
Args:
centers: a taichi 2D Vector field, where each element indicates the 2D location of a circle's center.
radius (float): radius of the circles, relative to the height of the screen.
color: a global color for the circles as 3 floats representing RGB values. If `per_vertex_color` is provided, this is ignored.
per_vertex_color (Tuple[float]): a taichi 3D vector field, where each element indicates the RGB color of a circle.
"""
vbo = get_vbo_field(centers)
copy_vertices_to_vbo(vbo, centers)
has_per_vertex_color = per_vertex_color is not None
if has_per_vertex_color:
copy_colors_to_vbo(vbo, per_vertex_color)
vbo_info = get_field_info(vbo)
self.canvas.circles(vbo_info, has_per_vertex_color, color, radius)
def scene(self, scene):
"""Draw a 3D scene on the canvas"""
self.canvas.scene(scene)
| 47.322222
| 213
| 0.642874
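A minimal usage sketch for the Canvas wrapper above, assuming taichi's public ti.ui window API (ti.ui.Window, get_canvas); the field size, window title and colors are arbitrary and not taken from the original file:
import taichi as ti

ti.init(arch=ti.cpu)

centers = ti.Vector.field(2, dtype=ti.f32, shape=64)  # 2D positions in [0, 1] x [0, 1]

@ti.kernel
def init_centers():
    for i in centers:
        centers[i] = ti.Vector([ti.random(), ti.random()])

init_centers()
window = ti.ui.Window("canvas demo", (512, 512))
canvas = window.get_canvas()  # a Canvas instance wrapping the native PyCanvas
while window.running:
    canvas.set_background_color((0.1, 0.1, 0.1))
    canvas.circles(centers, radius=0.01, color=(0.9, 0.4, 0.2))
    window.show()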
|
78ed68898eac4181d0b1443bd1742e5469cc8424
| 644
|
py
|
Python
|
dedoc/data_structures/attached_file.py
|
kirillskor/dedoc
|
7793a1be2220a26e7520521306351dfc0a9c8d98
|
[
"Apache-2.0"
] | null | null | null |
dedoc/data_structures/attached_file.py
|
kirillskor/dedoc
|
7793a1be2220a26e7520521306351dfc0a9c8d98
|
[
"Apache-2.0"
] | null | null | null |
dedoc/data_structures/attached_file.py
|
kirillskor/dedoc
|
7793a1be2220a26e7520521306351dfc0a9c8d98
|
[
"Apache-2.0"
] | null | null | null |
from dedoc.attachments_extractors.base_attached_file import BaseAttachedFile
class AttachedFile(BaseAttachedFile):
def __init__(self, original_name: str, tmp_file_path: str):
"""
Holds information about attached files.
:param original_name: original name of the attached file, as it appeared in the document it was extracted from
:param tmp_file_path: path to the attachment file.
"""
self.original_name = original_name
self.tmp_file_path = tmp_file_path
def get_filename_in_path(self) -> str:
return self.tmp_file_path
def get_original_filename(self) -> str:
return self.original_name
| 32.2
| 87
| 0.712733
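A short, hypothetical usage sketch for AttachedFile; the file name and the temporary path below are made up for illustration:
attachment = AttachedFile(original_name="report.docx",
                          tmp_file_path="/tmp/dedoc/attach_0001.docx")
print(attachment.get_original_filename())  # -> "report.docx"
print(attachment.get_filename_in_path())   # -> "/tmp/dedoc/attach_0001.docx"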
|
5279513c22401eda2c9902ce1797dd96128f874e
| 18,118
|
py
|
Python
|
openmp/mdpic2/mdpic2_py/cmdpic2.py
|
gcasabona/cuda
|
064cfa02398e2402c113d45153d7ba36ae930f7e
|
[
"W3C"
] | 51
|
2017-03-22T04:06:03.000Z
|
2022-01-18T22:48:51.000Z
|
openmp/mdpic2/mdpic2_py/cmdpic2.py
|
gcasabona/cuda
|
064cfa02398e2402c113d45153d7ba36ae930f7e
|
[
"W3C"
] | null | null | null |
openmp/mdpic2/mdpic2_py/cmdpic2.py
|
gcasabona/cuda
|
064cfa02398e2402c113d45153d7ba36ae930f7e
|
[
"W3C"
] | 25
|
2017-02-22T05:21:32.000Z
|
2022-01-02T14:53:19.000Z
|
#-----------------------------------------------------------------------
# Skeleton 2-1/2D Darwin OpenMP PIC code
# written by Viktor K. Decyk, Adam Tableman, and Qiyang Hu, UCLA
import math
import numpy
from cmdpush2 import *
from dtimer import *
from complib import *
int_type = numpy.int32
double_type = numpy.float64
float_type = numpy.float32
complex_type = numpy.complex64
# indx/indy = exponent which determines grid points in x/y direction:
# nx = 2**indx, ny = 2**indy.
indx = 9; indy = 9
# npx/npy = number of electrons distributed in x/y direction.
npx = 3072; npy = 3072
# ndim = number of velocity coordinates = 3
ndim = 3
# tend = time at end of simulation, in units of plasma frequency.
# dt = time interval between successive calculations.
# qme = charge on electron, in units of e.
tend = 10.0; dt = 0.1; qme = -1.0
# vtx/vty = thermal velocity of electrons in x/y direction
# vx0/vy0 = drift velocity of electrons in x/y direction.
vtx = 1.0; vty = 1.0; vx0 = 0.0; vy0 = 0.0
# vtz/vz0 = thermal/drift velocity of electrons in z direction
vtz = 1.0; vz0 = 0.0
# ax/ay = smoothed particle size in x/y direction
# ci = reciprocal of velocity of light.
ax = .912871; ay = .912871; ci = 0.1
# idimp = number of particle coordinates = 5
# ipbc = particle boundary condition: 1 = periodic
idimp = 5; ipbc = 1
# omx/omy/omz = magnetic field electron cyclotron frequency in x/y/z
omx = 0.4; omy = 0.0; omz = 0.0
# ndc = number of corrections in darwin iteration
ndc = 1
# wke/we/wt = particle kinetic/electric field/total energy
# wke/we = particle kinetic/electrostatic field energy
# wf/wm/wt = magnetic field/transverse electric field/total energy
wke = numpy.zeros((1),float_type)
we = numpy.zeros((1),float_type)
wf = numpy.zeros((1),float_type)
wm = numpy.zeros((1),float_type)
wt = numpy.zeros((1),float_type)
zero = 0.0
# mx/my = number of grids in x/y in sorting tiles
mx = 16; my = 16
# xtras = fraction of extra particles needed for particle management
xtras = 0.2
# declare scalars for standard code
wpmax = numpy.empty((1),float_type)
wpmin = numpy.empty((1),float_type)
# declare scalars for OpenMP code
nppmx = numpy.empty((1),int_type)
irc = numpy.zeros((1),int_type)
# declare and initialize timing data
itime = numpy.empty((4),numpy.int32)
tdpost = 0.0; tguard = 0.0; tfft = 0.0; tfield = 0.0
tdjpost = 0.0; tdcjpost = 0.0; tpush = 0.0; tsort = 0.0
dtime = numpy.empty((1),double_type)
# nvp = number of shared memory nodes (0=default)
nvp = 0
#nvp = int(input("enter number of nodes: "))
# initialize for shared memory parallel processing
cinit_omp(nvp)
# initialize scalars for standard code
# np = total number of particles in simulation
# nx/ny = number of grid points in x/y direction
np = npx*npy; nx = int(math.pow(2,indx)); ny = int(math.pow(2,indy))
nxh = int(nx/2); nyh = max(1,int(ny/2))
nxe = nx + 2; nye = ny + 1; nxeh = int(nxe/2)
nxyh = int(max(nx,ny)/2); nxhy = max(nxh,ny)
# mx1/my1 = number of tiles in x/y direction
mx1 = int((nx - 1)/mx + 1); my1 = int((ny - 1)/my + 1); mxy1 = mx1*my1
# nloop = number of time steps in simulation
# ntime = current time step
nloop = int(tend/dt + .0001); ntime = 0
# mdim = dimension of amu array
mdim = 2*ndim - 2
qbme = qme
affp = float(nx*ny)/float(np)
# allocate data for standard code
# part = particle array
part = numpy.empty((idimp,np),float_type,'F')
# qe = electron charge density with guard cells
qe = numpy.empty((nxe,nye),float_type,'F')
# cue = electron current density with guard cells
cue = numpy.empty((ndim,nxe,nye),float_type,'F')
# dcu = acceleration density with guard cells
dcu = numpy.empty((ndim,nxe,nye),float_type,'F')
# cus = smoothed transverse electric field with guard cells
cus = numpy.empty((ndim,nxe,nye),float_type,'F')
# amu = momentum flux with guard cells
amu = numpy.empty((mdim,nxe,nye),float_type,'F')
# exyze = smoothed total electric field with guard cells
exyze = numpy.empty((ndim,nxe,nye),float_type,'F')
# fxyze = smoothed longitudinal electric field with guard cells
fxyze = numpy.empty((ndim,nxe,nye),float_type,'F')
# bxyze = smoothed magnetic field with guard cells
bxyze = numpy.empty((ndim,nxe,nye),float_type,'F')
# ffc, ffe = form factor arrays for poisson solvers
ffc = numpy.empty((nxh,nyh),complex_type,'F')
ffe = numpy.empty((nxh,nyh),complex_type,'F')
# mixup = bit reverse table for FFT
mixup = numpy.empty((nxhy),int_type,'F')
# sct = sine/cosine table for FFT
sct = numpy.empty((nxyh),complex_type,'F')
# kpic = number of particles in each tile
kpic = numpy.empty((mxy1),int_type,'F')
# ss = scratch array for WFFT2RN
ss = numpy.empty((mdim*nxeh,nye),complex_type,'F')
# prepare fft tables
cwfft2rinit(mixup,sct,indx,indy,nxhy,nxyh)
# calculate form factors: ffc
isign = 0
cmpois23(qe,fxyze,isign,ffc,ax,ay,affp,we,nx,ny,nxeh,nye,nxh,nyh)
# initialize electrons
cdistr2h(part,vtx,vty,vtz,vx0,vy0,vz0,npx,npy,idimp,np,nx,ny,ipbc)
# find number of particles in each of mx, my tiles: updates kpic, nppmx
cdblkp2l(part,kpic,nppmx,idimp,np,mx,my,mx1,mxy1,irc)
if (irc[0] != 0):
print "cdblkp2l error, irc=", irc[0]
exit(0)
# allocate vector particle data
nppmx0 = int((1.0 + xtras)*nppmx)
ntmax = int(xtras*nppmx)
npbmx = int(xtras*nppmx)
# ppart = tiled particle array
ppart = numpy.empty((idimp,nppmx0,mxy1),float_type,'F')
# ppbuff = buffer array for reordering tiled particle array
ppbuff = numpy.empty((idimp,npbmx,mxy1),float_type,'F')
# ncl = number of particles departing tile in each direction
ncl = numpy.empty((8,mxy1),int_type,'F')
# ihole = location/destination of each particle departing tile
ihole = numpy.empty((2,ntmax+1,mxy1),int_type,'F')
# copy ordered particle data for OpenMP: updates ppart and kpic
cppmovin2l(part,ppart,kpic,nppmx0,idimp,np,mx,my,mx1,mxy1,irc)
if (irc[0] != 0):
print "cppmovin2l overflow error, irc=", irc[0]
exit(0)
# sanity check
cppcheck2l(ppart,kpic,idimp,nppmx0,nx,ny,mx,my,mx1,my1,irc)
if (irc[0] != 0):
print "cppcheck2l error, irc=", irc[0]
exit(0)
# find maximum and minimum initial electron density
qe.fill(0.0)
cgppost2l(ppart,qe,kpic,qme,nppmx0,idimp,mx,my,nxe,nye,mx1,mxy1)
caguard2l(qe,nx,ny,nxe,nye)
cfwpminmx2(qe,qbme,wpmax,wpmin,nx,ny,nxe,nye)
wpm = 0.5*(wpmax[0] + wpmin[0])*affp
# accelerate convergence: update wpm
if (wpm <= 10.0):
wpm = 0.75*wpm
print "wpm=",wpm
q2m0 = wpm/affp
# calculate form factor: ffe
isign = 0
cmepois23(dcu,cus,isign,ffe,ax,ay,affp,wpm,ci,wf,nx,ny,nxeh,nye,
nxh,nyh)
# initialize transverse electric field
cus.fill(0.0)
# * * * start main iteration loop * * *
for ntime in range(0, nloop):
# print("ntime = ", ntime)
# deposit current with OpenMP: updates cue
dtimer(dtime,itime,-1)
cue.fill(0.0)
cgjppost2l(ppart,cue,kpic,qme,zero,nppmx0,idimp,nx,ny,mx,my,nxe,nye,
mx1,mxy1,ipbc)
dtimer(dtime,itime,1)
time = float(dtime)
tdjpost = tdjpost + time
# deposit charge with OpenMP: updates qe
dtimer(dtime,itime,-1)
qe.fill(0.0)
cgppost2l(ppart,qe,kpic,qme,nppmx0,idimp,mx,my,nxe,nye,mx1,mxy1)
dtimer(dtime,itime,1)
time = float(dtime)
tdpost = tdpost + time
# add guard cells with OpenMP: updates qe, cue
dtimer(dtime,itime,-1)
caguard2l(qe,nx,ny,nxe,nye)
cacguard2l(cue,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# transform charge to fourier space with OpenMP: updates qe
dtimer(dtime,itime,-1)
isign = -1
cwfft2rmx(qe,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# calculate longitudinal force/charge in fourier space with OpenMP:
# updates fxyze, we
dtimer(dtime,itime,-1)
isign = -1
cmpois23(qe,fxyze,isign,ffc,ax,ay,affp,we,nx,ny,nxeh,nye,nxh,nyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform longitudinal electric force to real space with OpenMP:
# updates fxyze
dtimer(dtime,itime,-1)
isign = 1
cwfft2rm3(fxyze,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# transform current to fourier space with OpenMP: update cue
dtimer(dtime,itime,-1)
isign = -1
cwfft2rm3(cue,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# take transverse part of current with OpenMP: updates cue
dtimer(dtime,itime,-1)
cmcuperp2(cue,nx,ny,nxeh,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# calculate magnetic field in fourier space with OpenMP:
# updates bxyze, wm
dtimer(dtime,itime,-1)
cmbbpois23(cue,bxyze,ffc,ci,wm,nx,ny,nxeh,nye,nxh,nyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform magnetic force to real space with OpenMP: updates bxyze
dtimer(dtime,itime,-1)
isign = 1
cwfft2rm3(bxyze,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# add constant to magnetic field with OpenMP: updates bxyze
dtimer(dtime,itime,-1)
cbaddext2(bxyze,omx,omy,omz,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# copy guard cells with OpenMP: updates fxyze, bxyze
dtimer(dtime,itime,-1)
cbguard2l(fxyze,nx,ny,nxe,nye)
cbguard2l(bxyze,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# add longitudinal and old transverse electric fields with OpenMP:
# updates exyze
dtimer(dtime,itime,-1)
caddvrfield2(exyze,cus,fxyze,ndim,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# deposit electron acceleration density and momentum flux with OpenMP:
# updates dcu, amu
dtimer(dtime,itime,-1)
dcu.fill(0.0); amu.fill(0.0)
cgdjppost2l(ppart,exyze,bxyze,dcu,amu,kpic,qme,qbme,dt,idimp,nppmx0,
nx,ny,mx,my,nxe,nye,mx1,mxy1)
# add old scaled electric field with OpenMP: updates dcu
cascfguard2l(dcu,cus,q2m0,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tdcjpost = tdcjpost + time
# add guard cells with OpenMP: updates dcu, amu
dtimer(dtime,itime,-1)
cacguard2l(dcu,nx,ny,nxe,nye)
camcguard2l(amu,nx,ny,nxe,nye,mdim)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# transform acceleration density and momentum flux to fourier space
# with OpenMP: updates dcu, amu
dtimer(dtime,itime,-1)
isign = -1
cwfft2rm3(dcu,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
cwfft2rmn(amu,ss,isign,mixup,sct,indx,indy,nxeh,nye,mdim,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# take transverse part of time derivative of current with OpenMP:
# updates dcu
dtimer(dtime,itime,-1)
cmadcuperp23(dcu,amu,nx,ny,nxeh,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# calculate transverse electric field with OpenMP: updates cus, wf
dtimer(dtime,itime,-1)
isign = -1
cmepois23(dcu,cus,isign,ffe,ax,ay,affp,wpm,ci,wf,nx,ny,nxeh,nye,nxh,
nyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform transverse electric field to real space with OpenMP:
# updates cus
dtimer(dtime,itime,-1)
isign = 1
cwfft2rm3(cus,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# copy guard cells with OpenMP: updates cus
dtimer(dtime,itime,-1)
cbguard2l(cus,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# add longitudinal and transverse electric fields with OpenMP:
# exyze = cus + fxyze, updates exyze
# cus needs to be retained for next time step
dtimer(dtime,itime,-1)
caddvrfield2(exyze,cus,fxyze,ndim,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# inner iteration loop
for k in range(0, ndc):
# deposit electron current and acceleration density and momentum flux
# with OpenMP: updates cue, dcu, amu
dtimer(dtime,itime,-1)
cue.fill(0.0); dcu.fill(0.0); amu.fill(0.0)
cgdcjppost2l(ppart,exyze,bxyze,cue,dcu,amu,kpic,qme,qbme,dt,idimp,
nppmx0,nx,ny,mx,my,nxe,nye,mx1,mxy1)
# add scaled electric field with OpenMP: updates dcu
cascfguard2l(dcu,cus,q2m0,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tdcjpost = tdcjpost + time
# add guard cells for current, acceleration density, and momentum flux
# with OpenMP: updates cue, dcu, amu
dtimer(dtime,itime,-1)
cacguard2l(cue,nx,ny,nxe,nye)
cacguard2l(dcu,nx,ny,nxe,nye)
camcguard2l(amu,nx,ny,nxe,nye,mdim)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# transform current to fourier space with OpenMP: update cue
dtimer(dtime,itime,-1)
isign = -1
cwfft2rm3(cue,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# take transverse part of current with OpenMP: updates cue
dtimer(dtime,itime,-1)
cmcuperp2(cue,nx,ny,nxeh,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# calculate magnetic field in fourier space with OpenMP:
# updates bxyze, wm
dtimer(dtime,itime,-1)
cmbbpois23(cue,bxyze,ffc,ci,wm,nx,ny,nxeh,nye,nxh,nyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform magnetic force to real space with OpenMP: updates bxyze
dtimer(dtime,itime,-1)
isign = 1
cwfft2rm3(bxyze,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# add constant to magnetic field with OpenMP: updates bxyze
dtimer(dtime,itime,-1)
cbaddext2(bxyze,omx,omy,omz,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform acceleration density and momentum flux to fourier space
# with OpenMP: updates dcu and amu
dtimer(dtime,itime,-1)
isign = -1
cwfft2rm3(dcu,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
cwfft2rmn(amu,ss,isign,mixup,sct,indx,indy,nxeh,nye,mdim,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# take transverse part of time derivative of current with OpenMP:
# updates dcu
dtimer(dtime,itime,-1)
cmadcuperp23(dcu,amu,nx,ny,nxeh,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# calculate transverse electric field with OpenMP: updates cus, wf
dtimer(dtime,itime,-1)
isign = -1
cmepois23(dcu,cus,isign,ffe,ax,ay,affp,wpm,ci,wf,nx,ny,nxeh,nye,
nxh,nyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
# transform transverse electric field to real space with OpenMP:
# updates cus
dtimer(dtime,itime,-1)
isign = 1
cwfft2rm3(cus,isign,mixup,sct,indx,indy,nxeh,nye,nxhy,nxyh)
dtimer(dtime,itime,1)
time = float(dtime)
tfft = tfft + time
# copy guard cells with OpenMP: updates bxyze, cus
dtimer(dtime,itime,-1)
cbguard2l(bxyze,nx,ny,nxe,nye)
cbguard2l(cus,nx,ny,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tguard = tguard + time
# add longitudinal and transverse electric fields with OpenMP:
# exyze = cus + fxyze, updates exyze
# cus needs to be retained for next time step
dtimer(dtime,itime,-1)
caddvrfield2(exyze,cus,fxyze,ndim,nxe,nye)
dtimer(dtime,itime,1)
time = float(dtime)
tfield = tfield + time
pass
# push particles with OpenMP:
wke[0] = 0.0
dtimer(dtime,itime,-1)
# updates ppart, wke
# cgbppush23l(ppart,exyze,bxyze,kpic,qbme,dt,dt,wke,idimp,nppmx0,nx,ny,
# mx,my,nxe,nye,mx1,mxy1,ipbc)
# updates ppart, ncl, ihole, wke, irc
cgbppushf23l(ppart,exyze,bxyze,kpic,ncl,ihole,qbme,dt,dt,wke,idimp,
nppmx0,nx,ny,mx,my,nxe,nye,mx1,mxy1,ntmax,irc)
dtimer(dtime,itime,1)
time = float(dtime)
tpush = tpush + time
if (irc[0] != 0):
print "cgbppushf23l error, irc=", irc[0]
exit(0)
# reorder particles by cell with OpenMP:
dtimer(dtime,itime,-1)
# updates ppart, ppbuff, kpic, ncl, ihole, and irc
# cpporder2l(ppart,ppbuff,kpic,ncl,ihole,idimp,nppmx0,nx,ny,mx,my,mx1,
# my1,npbmx,ntmax,irc)
# updates ppart, ppbuff, kpic, ncl, and irc
cpporderf2l(ppart,ppbuff,kpic,ncl,ihole,idimp,nppmx0,mx1,my1,npbmx,
ntmax,irc)
dtimer(dtime,itime,1)
time = float(dtime)
tsort = tsort + time
if (irc[0] != 0):
print "cpporderf2l error, ntmax, irc=", ntmax, irc[0]
exit(0)
if (ntime==0):
wt = we + wm
print "Initial Total Field, Kinetic and Total Energies:"
print "%14.7e %14.7e %14.7e" % (wt, wke, wke + wt)
print "Initial Electrostatic, Transverse Electric and Magnetic " \
"Field Energies:"
print "%14.7e %14.7e %14.7e" % (we, wf, wm)
ntime = ntime + 1
# * * * end main iteration loop * * *
print "ntime, ndc = ", ntime, ndc
wt = we + wm
print "Final Total Field, Kinetic and Total Energies:"
print "%14.7e %14.7e %14.7e" % (wt, wke, wke + wt)
print "Final Electrostatic, Transverse Electric and Magnetic Field " \
"Energies:"
print "%14.7e %14.7e %14.7e" % (we, wf, wm)
print ""
print "deposit time = ", tdpost
print "current deposit time = ", tdjpost
print "current derivative deposit time = ", tdcjpost
tdpost = tdpost + tdjpost + tdcjpost
print "total deposit time = ", tdpost
print "guard time = ", tguard
print "solver time = ", tfield
print "fft time = ", tfft
print "push time = ", tpush
print "sort time = ", tsort
tfield = tfield + tguard + tfft
print "total solver time = ", tfield
time = tdpost + tpush + tsort
print "total particle time = ", time
wt = time + tfield
print "total time = ", wt
print ""
wt = 1.0e+09/(float(nloop)*float(np))
print "Push Time (nsec) = ", tpush*wt
print "Deposit Time (nsec) = ", tdpost*wt
print "Sort Time (nsec) = ", tsort*wt
print "Total Particle Time (nsec) = ", time*wt
| 32.941818
| 73
| 0.68236
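A standalone sketch, separate from the script above, showing how the grid and tile sizes follow from indx/indy, mx/my and xtras; the per-tile particle count used here is a uniform-density estimate, whereas the real code measures it with cdblkp2l:
import math

indx = indy = 9
npx = npy = 3072
mx = my = 16
xtras = 0.2

nx, ny = 2**indx, 2**indy                    # 512 x 512 grid
np_total = npx*npy                           # 9,437,184 particles
mx1 = int((nx - 1)/mx + 1)                   # 32 tiles in x
my1 = int((ny - 1)/my + 1)                   # 32 tiles in y
nppmx_est = math.ceil(np_total/(mx1*my1))    # ~9216 particles per tile if uniform (estimate only)
nppmx0 = int((1.0 + xtras)*nppmx_est)        # padded tile capacity used to allocate ppart
print(nx, ny, mx1*my1, nppmx0)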
|
0457379d198dd507c8e37cc60a820a8188649ff2
| 10,147
|
py
|
Python
|
easypl/callbacks/predictors/base.py
|
data-sachez-2511/EasyPL
|
5c47f7935a2c88e36deafc7e40e101d02f89b796
|
[
"MIT"
] | null | null | null |
easypl/callbacks/predictors/base.py
|
data-sachez-2511/EasyPL
|
5c47f7935a2c88e36deafc7e40e101d02f89b796
|
[
"MIT"
] | null | null | null |
easypl/callbacks/predictors/base.py
|
data-sachez-2511/EasyPL
|
5c47f7935a2c88e36deafc7e40e101d02f89b796
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import pytorch_lightning
from pytorch_lightning.callbacks import Callback
from typing import List, Dict, Any, Tuple
class BaseTestTimeAugmentation(Callback):
"""
Base callback for test-time-augmentation
Attributes
----------
n: int
Number of augmentations.
augmentations: List
List of augmentations, which will be used.
augmentation_method: str
Method of selecting augmentations from list. Available: ["first", "random"]
phase: str
Phase which will be used by this predictor callback.
Available: ["val", "test", "predict"].
"""
def __init__(
self,
n: int,
augmentations: List,
augmentation_method: str = 'first',
phase='val'
):
super().__init__()
self.n = n
self.augmentations = augmentations
self.augmentation_method = augmentation_method
self.phase = phase
if self.augmentation_method == 'first':
self.current_n = min(self.n, len(self.augmentations))
elif self.augmentation_method == 'random':
self.current_n = self.n
else:
self.current_n = len(self.augmentations)
self.data_keys = None
self.collate_fns = []
self.metrics = []
def post_init(
self,
trainer: pytorch_lightning.Trainer,
pl_module: pytorch_lightning.LightningModule
):
"""
Abstract method for initialization in first batch handling. [NOT REQUIRED]
Attributes
----------
trainer: pytorch_lightning.Trainer
Trainer of pytorch-lightning
pl_module: pytorch_lightning.LightningModule
LightningModule of pytorch-lightning
"""
pass
def on_phase_start(self, trainer, pl_module):
if self.data_keys is None:
pl_module.return_output_phase[self.phase] = True
self.data_keys = pl_module.data_keys
for dataloader_idx in range(len(trainer.__getattribute__(f'{self.phase}_dataloaders'))):
self.collate_fns.append(
trainer.__getattribute__(f'{self.phase}_dataloaders')[dataloader_idx].collate_fn)
trainer.__getattribute__(
f'{self.phase}_dataloaders'
)[dataloader_idx].collate_fn = self.__collate_fn(dataloader_idx)
if self.phase != 'predict':
self.metrics = [pl_module.metrics[self.phase][0].clone()]
self.post_init(trainer, pl_module)
def on_phase_batch_end(
self,
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
):
def reshape_tensor(tensor):
# split the flat batch into (current_n + 1) augmented copies, keeping the
# argument's own trailing dimensions so the same helper works for targets
return tensor.reshape(
self.current_n + 1, -1, *tensor.shape[1:]
)
output = outputs['output']
target = outputs['target']
output = self.reduce(reshape_tensor(output)) if isinstance(output, torch.Tensor) else {
key: self.reduce(reshape_tensor(output[key])) for key in output
}
target = reshape_tensor(target)[0] if isinstance(target, torch.Tensor) else {
key: reshape_tensor(target[key])[0] for key in target
}
outputs['output'] = output
outputs['target'] = target
if self.phase != 'predict':
output, target = self.metric_formatting(outputs=output, targets=target)
if len(self.metrics) <= dataloader_idx:
self.metrics.append(self.metrics[-1].clone())
self.metrics[dataloader_idx].update(output, target)
def on_phase_end(
self,
trainer,
pl_module
):
if self.phase != 'predict':
for dataloader_idx in range(len(self.metrics)):
prefix = f'{self.phase}_{dataloader_idx}' if dataloader_idx > 0 else self.phase
metrics = self.metrics[dataloader_idx].compute()
self.metrics[dataloader_idx].reset()
for metric_name in metrics:
pl_module.formated_log(
f'{prefix}_tta[n={self.n} method={self.augmentation_method}]/{metric_name}',
metrics[metric_name],
on_step=False,
on_epoch=True,
prog_bar=True
)
def metric_formatting(
self,
outputs: Any,
targets: Any
) -> Tuple:
"""
Preparing before metric pass. On default, return passed values.
Attributes
----------
outputs: Any
Output from model
targets: Any
Targets from batch
Returns
----------
Tuple
Formatted outputs and targets
"""
return outputs, targets
def reduce(
self,
tensor: torch.Tensor
) -> torch.Tensor:
"""
Abstract method for reducing of results.
Attributes
----------
tensor: torch.Tensor
Any tensor with size [batch_size X ...]
Returns
----------
torch.Tensor
Reduced tensor
"""
raise NotImplementedError
def augment(
self,
sample: Dict,
augmentation
) -> Dict:
"""
Abstract method for augmentation apply.
Attributes
----------
sample: Dict
Any sample of batch
augmentation
Transform object
Returns
----------
Dict
Augmented sample
"""
raise NotImplementedError
def preprocessing(
self,
sample: Dict,
dataloader_idx: int = 0
) -> Dict:
"""
Abstract method for preprocessing sample
Attributes
----------
sample: Dict
Any sample of batch
dataloader_idx: int
Index of dataloader
Returns
----------
Dict
Preprocessed sample
"""
return sample
def postprocessing(
self,
sample: Dict,
dataloader_idx: int = 0
) -> Dict:
"""
Abstract method for postprocessing sample
Attributes
----------
sample: Dict
Any sample of batch
dataloader_idx: int
Index of dataloader
Returns
----------
Dict
Postprocessed sample
"""
return sample
def __augmentation_generator(self):
if self.augmentation_method == 'first':
return (augmentation for augmentation in self.augmentations[:self.n])
elif self.augmentation_method == 'random':
augmentations = np.random.choice(self.augmentations, self.n)
return (augmentation for augmentation in augmentations)
else:
return (augmentation for augmentation in self.augmentations)
def __collate_fn(self, dataloader_idx):
def collate_fn_wrapper(batch):
# TODO collate_fn_wrapper multiprocessing optimization
batch_size = len(batch)
samples = [
self.preprocessing(_, dataloader_idx) for _ in batch
]
augmented_samples = []
augmentations = self.__augmentation_generator()
for augmentation in augmentations:
for sample in samples:
augmented_samples.append(self.augment(sample, augmentation))
samples = samples + augmented_samples
samples = [self.postprocessing(sample, dataloader_idx) for sample in samples]
batch = self.collate_fns[dataloader_idx](samples)
batch['batch_size'] = batch_size
return batch
return collate_fn_wrapper
def on_validation_start(
self, trainer, pl_module
):
if self.phase == 'val':
self.on_phase_start(trainer, pl_module)
def on_validation_batch_end(
self,
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
):
if self.phase == 'val':
self.on_phase_batch_end(
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
)
def on_test_start(
self, trainer, pl_module
):
if self.phase == 'test':
self.on_phase_start(trainer, pl_module)
def on_test_batch_end(
self,
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
):
if self.phase == 'test':
self.on_phase_batch_end(
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
)
def on_predict_start(
self, trainer, pl_module
):
if self.phase == 'predict':
self.on_phase_start(trainer, pl_module)
def on_predict_batch_end(
self,
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
):
if self.phase == 'predict':
self.on_phase_batch_end(
trainer,
pl_module,
outputs,
batch,
batch_idx,
dataloader_idx
)
def on_validation_epoch_end(
self,
trainer,
pl_module
):
if self.phase == 'val':
self.on_phase_end(trainer, pl_module)
def on_test_epoch_end(
self,
trainer,
pl_module
):
if self.phase == 'test':
self.on_phase_end(trainer, pl_module)
| 27.724044
| 101
| 0.528333
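A hedged sketch of a concrete subclass of BaseTestTimeAugmentation; the "image" sample key and augmentations that are plain callables on that value are assumptions for illustration, not part of the original module:
import torch

class MeanTestTimeAugmentation(BaseTestTimeAugmentation):
    def reduce(self, tensor: torch.Tensor) -> torch.Tensor:
        # tensor arrives as [current_n + 1, batch_size, ...]; average the
        # original and augmented predictions into one prediction per sample
        return tensor.mean(dim=0)

    def augment(self, sample, augmentation):
        # assumed sample layout: {"image": <tensor or array>, ...}
        augmented = dict(sample)
        augmented["image"] = augmentation(sample["image"])
        return augmented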
|
5dee44396ef8a9829a26b7d7116f2eb4d9ec0a9f
| 4,144
|
py
|
Python
|
openshift/test/test_v1_role_binding.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_role_binding.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_role_binding.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_role_binding import V1RoleBinding
class TestV1RoleBinding(unittest.TestCase):
""" V1RoleBinding unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1RoleBinding(self):
"""
Test V1RoleBinding
"""
model = openshift.client.models.v1_role_binding.V1RoleBinding()
if __name__ == '__main__':
unittest.main()
| 96.372093
| 3,380
| 0.787886
|
4fff6f0fcfe802f83881662efc7964a21b2210a5
| 19,436
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/aio/operations/_deployment_operations_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/aio/operations/_deployment_operations_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/aio/operations/_deployment_operations_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._deployment_operations_operations import build_get_at_management_group_scope_request, build_get_at_subscription_scope_request, build_get_request, build_list_at_management_group_scope_request, build_list_at_subscription_scope_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeploymentOperationsOperations:
"""DeploymentOperationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_05_10.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
operation_id=operation_id,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}'} # type: ignore
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': '/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations'} # type: ignore
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_at_subscription_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}'} # type: ignore
@distributed_trace
def list_at_subscription_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
top=top,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations'} # type: ignore
| 44.072562
| 271
| 0.662225
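A hedged usage sketch; it assumes this operation group is exposed as deployment_operations on the async ResourceManagementClient for API version 2019-05-10, and the subscription, resource group and deployment names are placeholders:
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.resource.resources.v2019_05_10.aio import ResourceManagementClient

async def dump_deployment_operations():
    credential = DefaultAzureCredential()
    async with ResourceManagementClient(credential, "<subscription-id>") as client:
        # AsyncItemPaged iterates transparently over all pages returned by the service
        async for op in client.deployment_operations.list("<resource-group>", "<deployment-name>", top=10):
            print(op.operation_id, op.properties.provisioning_state)
    await credential.close()

asyncio.run(dump_deployment_operations())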
|
a14c144650c7e2398bd9e4b2d3afd783cca08242
| 2,459
|
py
|
Python
|
DAN-msa/pyErrorPred/dataProcessingUtils.py
|
AliMahmoudzadeh/RoseTTAFold
|
1c95e0b255097ddbfc5d44d4e9f3c0f295206e47
|
[
"MIT"
] | 1,493
|
2021-07-01T09:46:31.000Z
|
2022-03-29T06:43:47.000Z
|
DAN-msa/pyErrorPred/dataProcessingUtils.py
|
AliMahmoudzadeh/RoseTTAFold
|
1c95e0b255097ddbfc5d44d4e9f3c0f295206e47
|
[
"MIT"
] | 101
|
2021-07-05T15:07:59.000Z
|
2022-03-31T03:35:52.000Z
|
DAN-msa/pyErrorPred/dataProcessingUtils.py
|
AliMahmoudzadeh/RoseTTAFold
|
1c95e0b255097ddbfc5d44d4e9f3c0f295206e47
|
[
"MIT"
] | 343
|
2021-07-01T13:44:24.000Z
|
2022-03-29T00:21:46.000Z
|
from pyrosetta import *
import math
import numpy as np
import pandas as pd
import csv
import os
import pkg_resources
####################
# INDEXERS/MAPPERS
####################
# Assigning numbers to 3 letter amino acids.
residues= ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU',\
'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE',\
'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
residuemap = dict([(residues[i], i) for i in range(len(residues))])
# Mapping 3 letter AA to 1 letter AA (e.g. ALA to A)
oneletter = ["A", "R", "N", "D", "C", \
"Q", "E", "G", "H", "I", \
"L", "K", "M", "F", "P", \
"S", "T", "W", "Y", "V"]
aanamemap = dict([(residues[i], oneletter[i]) for i in range(len(residues))])
#################
# BLOSUM SCORES
#################
# Dictionary for Blosum score.
# Keys are 1 letter residues and it returns corresponding slice of blosum
location = pkg_resources.resource_filename(__name__, 'data/blosum62.txt')
blosum = [i.strip().split() for i in open(location).readlines()[1:-1]]
blosummap = dict([(l[0], np.array([int(i) for i in l[1:]])/10.0) for l in blosum])
####################
# ROSETTA ENERGIES
####################
energy_terms = [pyrosetta.rosetta.core.scoring.ScoreType.fa_atr,\
pyrosetta.rosetta.core.scoring.ScoreType.fa_rep,\
pyrosetta.rosetta.core.scoring.ScoreType.fa_sol,\
pyrosetta.rosetta.core.scoring.ScoreType.lk_ball_wtd,\
pyrosetta.rosetta.core.scoring.ScoreType.fa_elec,\
pyrosetta.rosetta.core.scoring.ScoreType.hbond_bb_sc,\
pyrosetta.rosetta.core.scoring.ScoreType.hbond_sc]
energy_names = ["fa_atr", "fa_rep", "fa_sol", "lk_ball_wtd", "fa_elec", "hbond_bb_sc", "hbond_sc"]
###################
# MEILER FEATURES
###################
location = pkg_resources.resource_filename(__name__, "data/labeled_features_meiler2001.csv")
temp = pd.read_csv(location).values
meiler_features = dict([(t[0], t[1:]) for t in temp])
###################
# ATYPE CHANNELS
###################
atypes = {}
types = {}
ntypes = 0
script_dir = os.path.dirname(__file__)
location = pkg_resources.resource_filename(__name__, "data/groups20.txt")
with open(location, 'r') as f:
data = csv.reader(f, delimiter=' ')
for line in data:
if line[1] in types:
atypes[line[0]] = types[line[1]]
else:
types[line[1]] = ntypes
atypes[line[0]] = ntypes
ntypes += 1
| 36.161765
| 98
| 0.583977
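An illustrative sketch, not part of the original module, showing how the mappers defined above encode a short (arbitrary) sequence; it assumes blosummap is keyed by one-letter codes, as the parsing above suggests:
seq3 = ['MET', 'LYS', 'VAL']
indices = [residuemap[r] for r in seq3]            # integer class per residue
one_letter = ''.join(aanamemap[r] for r in seq3)   # 'MKV'
blosum_rows = np.stack([blosummap[c] for c in one_letter])  # one scaled BLOSUM62 row per residue
print(indices, one_letter, blosum_rows.shape)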
|
c52804d19591cf2858f0b135efac2f7f23a259f5
| 7,834
|
py
|
Python
|
environments/runner_cb.py
|
nbro/contextual-bandit-recommender
|
0176dad94e0e791327dc2f50e38aa3ab4e327673
|
[
"MIT"
] | null | null | null |
environments/runner_cb.py
|
nbro/contextual-bandit-recommender
|
0176dad94e0e791327dc2f50e38aa3ab4e327673
|
[
"MIT"
] | null | null | null |
environments/runner_cb.py
|
nbro/contextual-bandit-recommender
|
0176dad94e0e791327dc2f50e38aa3ab4e327673
|
[
"MIT"
] | null | null | null |
"""
Runner for fully observable reward CB problems.
"""
import os
import numpy as np
import pandas as pd
from datautils.mushroom.sample_data import sample_mushroom
from datautils.preprocessing import load_data
from datautils.synthetic.sample_data import sample_synthetic
from environments.utils import create_if_not_exists
from policies.context_free_policies import (
EpsilonGreedyPolicy,
UCBPolicy,
ContextFreePolicy
)
from policies.disjoint_contextual_policy import (
LinUCBPolicy,
LinearGaussianThompsonSamplingPolicy,
)
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
results_dir = os.path.abspath(os.path.join(root_dir, "results"))
create_if_not_exists(results_dir)
def simulate_contextual_bandit(data, n_samples, policies):
"""Simulator for for contextual bandit (CB) problems.
Runs n_samples steps.
"""
results = [None] * len(policies)
for i, policy in enumerate(policies):
# Create a dictionary for policy i where we save different statistics related to it (such as the regret).
results[i] = {}
# log contains a_t, optimal_a_t, r_t, regret_t
results[i]["log"] = np.zeros((4, n_samples))
t = 0
for x_t, actions_to_reward, optimal_a_t, _ in zip(*data):
if isinstance(policy, ContextFreePolicy):
a_t = policy.action()
else:
a_t = policy.action(x_t) # x_t is the context at time step t.
r_t = actions_to_reward[a_t] # reward for each of the actions.
if isinstance(policy, ContextFreePolicy):
policy.update(a_t, r_t)
else:
policy.update(a_t, x_t, r_t)
# Get the reward for the optimal action.
r_t_opt = actions_to_reward[optimal_a_t] # optimal_a_t optimal action at time step t.
# Compute the regret as the difference between the optimal reward and the reward for taking the action
# according to the given behaviour policy.
regret_t = r_t_opt - r_t
# Save the results for policy i.
results[i]["log"][:, t] = [a_t, optimal_a_t, r_t, regret_t]
t += 1
results[i]["policy"] = policy
# All regrets for all time steps
regrets = results[i]["log"][3, :]
# https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html
# TODO: Why are we interested in the cumulative regret and why do we compute it like that?
# TODO: for example, how does this relate to equation 1 of the paper "A Contextual-Bandit Approach to
# Personalized News Article Recommendation"
results[i]["cum_regret"] = np.cumsum(regrets)
# results[i]["simple_regret"] = np.sum(regrets[-500:])
return results
# This function is called from main.py.
def run_cb(args):
"""Run fully observable reward CB problems."""
task = args.task
n_rounds = args.n_rounds
# https://archive.ics.uci.edu/ml/datasets/mushroom
if task == "mushroom":
# X.shape = (8123, 117)
X, y = load_data(name="mushroom")
# Each observation/feature vector is an array of 117 elements.
# Although the mushroom dataset only contains 22 categorical input features, expanding them into
# indicator (one-hot) variables yields 117 columns.
# See https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html
context_dim = 117
n_actions = 2 # 2 actions: eat and not eat.
samples = sample_mushroom(X,
y,
n_rounds,
# Defines the different types of rewards.
r_eat_good=10.0,
r_eat_bad_lucky=10.0,
r_eat_bad_unlucky=-50.0,
r_eat_bad_lucky_prob=0.7,
r_no_eat=0.0
)
# samples is a tuple
# samples[0].shape = (600, 117) => 600 contexts, each of which of 117 dimensions (i.e. the feature vector).
# samples[1].shape = (600, 2) => rewards for each of the 2 actions
# samples[2].shape = (600,) => optimal action for each of the contexts
# samples[3].shape = (600,)
# 600 is the number of rounds (args.n_rounds)
elif task == "synthetic":
n_actions = 5
context_dim = 10
sigma = 1.0 # set low covariance
samples = sample_synthetic(n_rounds, n_actions, context_dim, sigma)
else:
raise NotImplementedError("other tasks have not yet been implemented")
# define a solver
# Context-free bandit policies
egp = EpsilonGreedyPolicy(n_actions,
lr=0.001,
epsilon=0.5,
epsilon_annealing_factor=0.001)
ucbp = UCBPolicy(num_actions=n_actions,
lr=0.001)
# Contextual bandit policies
linucbp = LinUCBPolicy(num_actions=n_actions,
context_dimension=context_dim,
delta=0.001,
updating_starts_at=100,
update_frequency=5)
lgtsp = LinearGaussianThompsonSamplingPolicy(n_actions=n_actions,
context_dim=context_dim,
eta_prior=6.0,
lambda_prior=0.25,
train_starts_at=100,
posterior_update_freq=5,
lr=0.05)
policies = [egp, ucbp, linucbp, lgtsp]
policy_names = ["$\epsilon$-greedy", "UCB1", "LinUCB", "LinGaussianThompson"]
# simulate a bandit over n_rounds steps
results = simulate_contextual_bandit(samples, n_rounds, policies)
# results contains a list of dictionaries, one for each policy. Each of these dictionaries contains statistics
# associated with the results (e.g. regret for each time step) of running the corresponding policy with the given
# data.
return results, policies, policy_names
def write_results_cb(results, policies, policy_names, trial_idx, args):
"""Writes results to csv files."""
# log results
cumulative_regret_data = None
actions_data = None
for i in range(len(policies)):
# Cumulative regret (where regret is true reward - reward).
# None adds an extra dimension, this is done so that we can stack all the cumulative regrets as columns.
cr = results[i]["cum_regret"][:, None]
# print(cr.shape)
if cumulative_regret_data is None:
cumulative_regret_data = cr
else:
cumulative_regret_data = np.hstack((cumulative_regret_data, cr))
# Save the actions taken by the policy i
# 0 were the actions in the simulate_cb method above.
acts = results[i]["log"][0, :][:, None]
if actions_data is None:
actions_data = acts
else:
actions_data = np.hstack((actions_data, acts))
# select the optimal actions.
acts_opt = results[0]["log"][1, :][:, None]
# Actions taken by all policies and optimal actions.
actions_data = np.hstack((actions_data, acts_opt))
df = pd.DataFrame(cumulative_regret_data, columns=policy_names)
df.to_csv("{}/{}.cumulative_regret.{}.csv".format(results_dir, args.task, trial_idx), header=True, index=False)
df = pd.DataFrame(actions_data, columns=policy_names + ["opt_p"])
df.to_csv("{}/{}.actions.{}.csv".format(results_dir, args.task, trial_idx), header=True, index=False)
| 37.127962
| 119
| 0.600842
|
462169a65654dd9da7ef3dc55ac8ec7c765cf4d8
| 587
|
py
|
Python
|
setup.py
|
nathanhnew/quantfolio
|
957520cccc351e1e0968fd72df7a5debad068f78
|
[
"MIT"
] | null | null | null |
setup.py
|
nathanhnew/quantfolio
|
957520cccc351e1e0968fd72df7a5debad068f78
|
[
"MIT"
] | null | null | null |
setup.py
|
nathanhnew/quantfolio
|
957520cccc351e1e0968fd72df7a5debad068f78
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="quantfolio",
version="0.1.0",
author="Nathan New",
author_email="nathanhnew@gmail.com",
description="Python Portfolio Optimization Tool",
long_description="Tool for optimizing stock portfolios based on variety of metrics",
url="https://github.com/nathanhnew/quantfolio",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
    python_requires=">=3.6"
)
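# Illustrative usage note (assumption, not from the original file): with this
# setup.py in place the package is typically installed for development with
#   pip install -e .
# and a source distribution is built with
#   python setup.py sdist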
| 32.611111
| 88
| 0.67632
|
4d632147dbdb86e8fd1b14d3e45c718eb8c5d607
| 1,942
|
py
|
Python
|
emailProxy.py
|
xulongzhe/solrTool
|
9f1956e6bb8f12ee5390e1dc9b042d8f2ae0023e
|
[
"Apache-2.0"
] | null | null | null |
emailProxy.py
|
xulongzhe/solrTool
|
9f1956e6bb8f12ee5390e1dc9b042d8f2ae0023e
|
[
"Apache-2.0"
] | null | null | null |
emailProxy.py
|
xulongzhe/solrTool
|
9f1956e6bb8f12ee5390e1dc9b042d8f2ae0023e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#coding=utf-8
import socket
import smtplib
import logging
import sys
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from email.mime.text import MIMEText
from email.header import Header
reload(sys)
sys.setdefaultencoding('utf-8')
recipients=["xiajibayong@sohu.com"]
mailserver = 'smtp.sohu.com'
user = 'xiajibayong@sohu.com'
passwd = '88909090'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='emailproxy.log',
filemode='a')
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
def send(content):
msg = MIMEText(content,'plain','utf-8')
msg['Subject'] = Header("异常告警", 'utf-8')
server = smtplib.SMTP(mailserver,25)
server.login(user,passwd)
server.sendmail(user, recipients, msg.as_string())
server.quit()
HOST, PORT = '21.60.100.83', 8888
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
logging.info('Serving HTTP on port %s ...' % PORT)
while True:
client_connection, client_address = listen_socket.accept()
try:
request = client_connection.recv(1024)
rs=unicode(request, "utf-8")
logging.info(rs)
send(rs)
http_response = """
HTTP/1.1 200 OK
"""
except BaseException as e:
logging.exception(e)
http_response = """
http/1.1 500 server error
"""
finally:
client_connection.sendall(http_response)
client_connection.close()
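# --- Illustrative client sketch (not part of the original script) ---
# The proxy accepts any raw TCP payload, logs it, and forwards it by e-mail as
# an alert. Assuming the hard-coded host/port above, a caller could look like:
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('21.60.100.83', 8888))
#   s.sendall(u'service X threw an exception'.encode('utf-8'))
#   print s.recv(1024)  # minimal HTTP status line returned by the proxy
#   s.close()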
| 27.352113
| 81
| 0.701854
|
959b0da875d1776f81312535de21866f62eb95b2
| 411
|
py
|
Python
|
venues/migrations/0009_venue_image.py
|
bsassoli/milan_culture_map
|
89996b6f41c985c3b90719fdab2325f4627bcfb2
|
[
"MIT"
] | null | null | null |
venues/migrations/0009_venue_image.py
|
bsassoli/milan_culture_map
|
89996b6f41c985c3b90719fdab2325f4627bcfb2
|
[
"MIT"
] | 14
|
2021-04-08T10:52:11.000Z
|
2021-04-22T15:32:12.000Z
|
venues/migrations/0009_venue_image.py
|
bsassoli/milan_culture_map
|
89996b6f41c985c3b90719fdab2325f4627bcfb2
|
[
"MIT"
] | 1
|
2021-04-18T18:40:36.000Z
|
2021-04-18T18:40:36.000Z
|
# Generated by Django 3.1.7 on 2021-04-02 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('venues', '0008_auto_20210401_1257'),
]
operations = [
migrations.AddField(
model_name='venue',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
]
| 21.631579
| 80
| 0.600973
|
223506b51c7fc49ee61f1e363da7d5027ad0a32b
| 927
|
py
|
Python
|
application/recommendationrules/dummypage.py
|
crossgovernmentservices/csl-my-learning-plan
|
a22b76b5ba0327e426d91dce073c0e0f887b400e
|
[
"MIT"
] | null | null | null |
application/recommendationrules/dummypage.py
|
crossgovernmentservices/csl-my-learning-plan
|
a22b76b5ba0327e426d91dce073c0e0f887b400e
|
[
"MIT"
] | null | null | null |
application/recommendationrules/dummypage.py
|
crossgovernmentservices/csl-my-learning-plan
|
a22b76b5ba0327e426d91dce073c0e0f887b400e
|
[
"MIT"
] | null | null | null |
def __constructtargetnode(item):
print(item.targetnode)
return str(min(int(item.targetnode)+1, 5))
def __matchitem(item, matchingitems):
for matchitem in matchingitems:
if (matchitem.educationalFramework == item['educationalFramework']
and __constructtargetnode(matchitem) == item['target']
and matchitem.audience == item['audience']):
return True
return False
def run(matchingitems, candidate_data_generator):
""" Rule basically just looks for the next incremental item in the targetUrl """
matcheditems = { f.educationalFramework : [] for f in matchingitems }
print(matcheditems)
for item in candidate_data_generator:
if __matchitem(item, matchingitems):
matcheditems[item['educationalFramework']].append(item)
return [{'educationalFramework': f, 'recommendations': matcheditems[f]} for f in matcheditems.keys()]
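# --- Illustrative usage sketch (assumptions only, not from the original module) ---
# `matchingitems` is assumed to hold objects exposing educationalFramework,
# targetnode (the learner's current level) and audience attributes, while the
# candidate generator yields dicts with 'educationalFramework', 'target' and
# 'audience' keys plus any payload carried into the recommendation.
class _FakeItem(object):
    def __init__(self, framework, targetnode, audience):
        self.educationalFramework = framework
        self.targetnode = targetnode
        self.audience = audience


def _example_candidates():
    # Two candidate pages; only the one targeting the current level + 1 matches.
    yield {'educationalFramework': 'digital', 'target': '3', 'audience': 'manager', 'url': '/page-3'}
    yield {'educationalFramework': 'digital', 'target': '5', 'audience': 'manager', 'url': '/page-5'}


# recommendations = run([_FakeItem('digital', '2', 'manager')], _example_candidates())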
| 35.653846
| 105
| 0.696872
|
f68c935e6de85bfbae41be50c9f51b2c6851aeb4
| 580
|
py
|
Python
|
sphinx_gallery/load_style.py
|
kosik/Sphinx
|
379726d9af855302137ff14dbc52ebf76b64a1cc
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T11:46:28.000Z
|
2021-04-13T11:46:28.000Z
|
sphinx_gallery/load_style.py
|
kosik/Sphinx
|
379726d9af855302137ff14dbc52ebf76b64a1cc
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T15:37:51.000Z
|
2019-10-29T09:38:22.000Z
|
sphinx_gallery/load_style.py
|
kosik/Sphinx
|
379726d9af855302137ff14dbc52ebf76b64a1cc
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Only load CSS and modify html_static_path
=========================================
This should not be used at the same time as sphinx_gallery.gen_gallery.
"""
from . import __version__, glr_path_static
def config_inited(app, config):
path = glr_path_static()
if path not in config.html_static_path:
config.html_static_path.append(path)
app.add_css_file('gallery.css')
def setup(app):
app.require_sphinx('1.8')
app.connect('config-inited', config_inited)
return {
'parallel_read_safe': True,
'version': __version__,
}
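# --- Illustrative Sphinx configuration sketch (assumption, not from this file) ---
# In a project's conf.py, loading only the gallery CSS (without generating a
# gallery via sphinx_gallery.gen_gallery) would typically look like:
#
#   extensions = [
#       'sphinx_gallery.load_style',
#   ]
#
# The setup() hook above then registers the static path and gallery.css.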
| 23.2
| 71
| 0.648276
|
7d64eeab3739c49339c9664f8a68005113736c38
| 28,678
|
py
|
Python
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2020_04_01_preview/operations/_managed_hsms_operations.py
|
pjachowi/azure-sdk-for-python
|
372bf6b6b9314d688eca5b5a56df0264c78d6618
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2020_04_01_preview/operations/_managed_hsms_operations.py
|
pjachowi/azure-sdk-for-python
|
372bf6b6b9314d688eca5b5a56df0264c78d6618
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2020_04_01_preview/operations/_managed_hsms_operations.py
|
pjachowi/azure-sdk-for-python
|
372bf6b6b9314d688eca5b5a56df0264c78d6618
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedHsmsOperations(object):
"""ManagedHsmsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2020_04_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
name, # type: str
parameters, # type: "models.ManagedHsm"
**kwargs # type: Any
):
# type: (...) -> "models.ManagedHsm"
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str', pattern=r'^[a-zA-Z0-9]{3,24}$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedHsm')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
name, # type: str
parameters, # type: "models.ManagedHsm"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ManagedHsm"]
"""Create or update a managed HSM Pool in the specified subscription.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:param parameters: Parameters to create or update the managed HSM Pool.
:type parameters: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ManagedHsm or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
name=name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
name, # type: str
parameters, # type: "models.ManagedHsm"
**kwargs # type: Any
):
# type: (...) -> "models.ManagedHsm"
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str', pattern=r'^[a-zA-Z0-9]{3,24}$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedHsm')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
name, # type: str
parameters, # type: "models.ManagedHsm"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ManagedHsm"]
"""Update a managed HSM Pool in the specified subscription.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:param parameters: Parameters to patch the managed HSM Pool.
:type parameters: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ManagedHsm or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
name=name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified managed HSM Pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: The name of the managed HSM Pool to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def get(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ManagedHsm"
"""Gets the specified managed HSM Pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: The name of the managed HSM Pool.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedHsm, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsm
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsm"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ManagedHsmError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedHsm', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagedHsmListResult"]
"""The List operation gets information about the managed HSM Pools associated with the
subscription and within the specified resource group.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedHsmListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsmListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsmListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedHsmListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ManagedHsmError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs'} # type: ignore
def list_by_subscription(
self,
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ManagedHsmListResult"]
"""The List operation gets information about the managed HSM Pools associated with the
subscription.
:param top: Maximum number of results to return.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedHsmListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.keyvault.v2020_04_01_preview.models.ManagedHsmListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ManagedHsmListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01-preview"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedHsmListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ManagedHsmError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.KeyVault/managedHSMs'} # type: ignore
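# --- Illustrative usage sketch (not part of the generated client) ---
def _example_managed_hsms_usage():  # pragma: no cover - illustrative only
    # Client construction below is an assumption for illustration; the operation
    # calls mirror the signatures defined on ManagedHsmsOperations above.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.keyvault import KeyVaultManagementClient

    client = KeyVaultManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Retrieve a single managed HSM pool by resource group and name.
    hsm = client.managed_hsms.get("my-resource-group", "myhsmpool")
    # List the pools in a resource group, limiting the number of results with top=10.
    for pool in client.managed_hsms.list_by_resource_group("my-resource-group", top=10):
        print(pool.name)
    return hsm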
| 47.876461
| 182
| 0.653044
|
ba478b3fb0d28eff98b33b646d8776742b0cd8d6
| 9,844
|
py
|
Python
|
tests/kerastuner/engine/metrics_tracking_test.py
|
stefanvasilev/keras-tuner
|
5c402b02af9a2a98ab5eece802f1ec7ca5331379
|
[
"Apache-2.0"
] | 1
|
2021-05-07T17:12:41.000Z
|
2021-05-07T17:12:41.000Z
|
tests/kerastuner/engine/metrics_tracking_test.py
|
stefanvasilev/keras-tuner
|
5c402b02af9a2a98ab5eece802f1ec7ca5331379
|
[
"Apache-2.0"
] | null | null | null |
tests/kerastuner/engine/metrics_tracking_test.py
|
stefanvasilev/keras-tuner
|
5c402b02af9a2a98ab5eece802f1ec7ca5331379
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import pytest
from tensorflow.keras import losses
from tensorflow.keras import metrics
from kerastuner.engine import metrics_tracking
def test_register_from_metrics():
# As well as direction inference.
tracker = metrics_tracking.MetricsTracker(
metrics=[metrics.CategoricalAccuracy(), metrics.MeanSquaredError()]
)
assert set(tracker.metrics.keys()) == {
"categorical_accuracy",
"mean_squared_error",
}
assert tracker.metrics["categorical_accuracy"].direction == "max"
assert tracker.metrics["mean_squared_error"].direction == "min"
def test_register():
tracker = metrics_tracking.MetricsTracker()
tracker.register("new_metric", direction="max")
assert set(tracker.metrics.keys()) == {"new_metric"}
assert tracker.metrics["new_metric"].direction == "max"
with pytest.raises(ValueError, match="`direction` should be one of"):
tracker.register("another_metric", direction="wrong")
with pytest.raises(ValueError, match="already exists"):
tracker.register("new_metric", direction="max")
def test_exists():
tracker = metrics_tracking.MetricsTracker()
tracker.register("new_metric", direction="max")
assert tracker.exists("new_metric")
assert not tracker.exists("another_metric")
def test_update():
tracker = metrics_tracking.MetricsTracker()
tracker.update("new_metric", 0.5) # automatic registration
assert set(tracker.metrics.keys()) == {"new_metric"}
assert tracker.metrics["new_metric"].direction == "min" # default direction
assert tracker.get_history("new_metric") == [
metrics_tracking.MetricObservation(0.5, step=0)
]
def test_get_history():
tracker = metrics_tracking.MetricsTracker()
tracker.update("new_metric", 0.5, step=0)
tracker.update("new_metric", 1.5, step=1)
tracker.update("new_metric", 2.0, step=2)
assert tracker.get_history("new_metric") == [
metrics_tracking.MetricObservation(0.5, 0),
metrics_tracking.MetricObservation(1.5, 1),
metrics_tracking.MetricObservation(2.0, 2),
]
with pytest.raises(ValueError, match="Unknown metric"):
tracker.get_history("another_metric")
def test_set_history():
tracker = metrics_tracking.MetricsTracker()
tracker.set_history(
"new_metric",
[
metrics_tracking.MetricObservation(0.5, 0),
metrics_tracking.MetricObservation(1.5, 1),
metrics_tracking.MetricObservation(2.0, 2),
],
)
values = [obs.value for obs in tracker.get_history("new_metric")]
steps = [obs.step for obs in tracker.get_history("new_metric")]
assert values == [[0.5], [1.5], [2.0]]
assert steps == [0, 1, 2]
def test_get_best_value():
tracker = metrics_tracking.MetricsTracker()
tracker.register("metric_min", "min")
tracker.register("metric_max", "max")
assert tracker.get_best_value("metric_min") is None
tracker.set_history(
"metric_min",
[
metrics_tracking.MetricObservation(1.0, 0),
metrics_tracking.MetricObservation(2.0, 1),
metrics_tracking.MetricObservation(3.0, 2),
],
)
tracker.set_history(
"metric_max",
[
metrics_tracking.MetricObservation(1.0, 0),
metrics_tracking.MetricObservation(2.0, 1),
metrics_tracking.MetricObservation(3.0, 2),
],
)
assert tracker.get_best_value("metric_min") == 1.0
assert tracker.get_best_value("metric_max") == 3.0
def test_get_statistics():
tracker = metrics_tracking.MetricsTracker()
history = [
metrics_tracking.MetricObservation(random.random(), i) for i in range(10)
]
tracker.set_history("new_metric", history)
stats = tracker.get_statistics("new_metric")
assert set(stats.keys()) == {"min", "max", "mean", "median", "var", "std"}
history = [obs.value for obs in history]
assert stats["min"] == np.min(history)
assert stats["max"] == np.max(history)
assert stats["mean"] == np.mean(history)
assert stats["median"] == np.median(history)
assert stats["var"] == np.var(history)
assert stats["std"] == np.std(history)
def test_get_last_value():
tracker = metrics_tracking.MetricsTracker()
tracker.register("new_metric", "min")
assert tracker.get_last_value("new_metric") is None
tracker.set_history(
"new_metric",
[
metrics_tracking.MetricObservation(1.0, 0),
metrics_tracking.MetricObservation(2.0, 1),
metrics_tracking.MetricObservation(3.0, 2),
],
)
assert tracker.get_last_value("new_metric") == 3.0
def test_serialization():
tracker = metrics_tracking.MetricsTracker()
tracker.register("metric_min", "min")
tracker.register("metric_max", "max")
tracker.set_history(
"metric_min",
[
metrics_tracking.MetricObservation(1.0, 0),
metrics_tracking.MetricObservation(2.0, 1),
metrics_tracking.MetricObservation(3.0, 2),
],
)
tracker.set_history(
"metric_max",
[
metrics_tracking.MetricObservation(1.0, 0),
metrics_tracking.MetricObservation(2.0, 1),
metrics_tracking.MetricObservation(3.0, 2),
],
)
new_tracker = metrics_tracking.MetricsTracker.from_config(tracker.get_config())
assert new_tracker.metrics.keys() == tracker.metrics.keys()
def test_metricobservation_proto():
obs = metrics_tracking.MetricObservation(-10, 5)
proto = obs.to_proto()
assert proto.value == [-10]
assert proto.step == 5
new_obs = metrics_tracking.MetricObservation.from_proto(proto)
assert new_obs == obs
def test_metrichistory_proto():
tracker = metrics_tracking.MetricHistory("max")
tracker.update(5, step=3)
tracker.update(10, step=4)
proto = tracker.to_proto()
assert proto.maximize
assert proto.observations[0].value == [5]
assert proto.observations[0].step == 3
assert proto.observations[1].value == [10]
assert proto.observations[1].step == 4
new_tracker = metrics_tracking.MetricHistory.from_proto(proto)
assert new_tracker.direction == "max"
assert new_tracker.get_history() == [
metrics_tracking.MetricObservation(5, 3),
metrics_tracking.MetricObservation(10, 4),
]
def test_metricstracker_proto():
tracker = metrics_tracking.MetricsTracker()
tracker.register("score", direction="max")
tracker.update("score", value=10, step=1)
tracker.update("score", value=20, step=1)
tracker.update("score", value=30, step=2)
proto = tracker.to_proto()
obs = proto.metrics["score"].observations
assert obs[0].value == [10, 20]
assert obs[0].step == 1
assert obs[1].value == [30]
assert obs[1].step == 2
assert proto.metrics["score"].maximize
new_tracker = metrics_tracking.MetricsTracker.from_proto(proto)
assert new_tracker.metrics["score"].direction == "max"
assert new_tracker.metrics["score"].get_history() == [
metrics_tracking.MetricObservation([10, 20], 1),
metrics_tracking.MetricObservation(30, 2),
]
def test_metric_direction_inference():
# Test min metrics.
assert metrics_tracking.infer_metric_direction("MAE") == "min"
assert (
metrics_tracking.infer_metric_direction(metrics.binary_crossentropy) == "min"
)
assert metrics_tracking.infer_metric_direction(metrics.FalsePositives()) == "min"
# All losses in keras.losses are considered as 'min'.
assert metrics_tracking.infer_metric_direction("squared_hinge") == "min"
assert metrics_tracking.infer_metric_direction(losses.hinge) == "min"
assert (
metrics_tracking.infer_metric_direction(losses.CategoricalCrossentropy())
== "min"
)
# Test max metrics.
assert metrics_tracking.infer_metric_direction("binary_accuracy") == "max"
assert (
metrics_tracking.infer_metric_direction(metrics.categorical_accuracy)
== "max"
)
assert metrics_tracking.infer_metric_direction(metrics.Precision()) == "max"
# Test unknown metrics.
assert metrics_tracking.infer_metric_direction("my_metric") is None
def my_metric_fn(x, y):
return x
assert metrics_tracking.infer_metric_direction(my_metric_fn) is None
class MyMetric(metrics.Metric):
def update_state(self, x, y):
return 1
def result(self):
return 1
assert metrics_tracking.infer_metric_direction(MyMetric()) is None
# Test special cases.
assert metrics_tracking.infer_metric_direction("loss") == "min"
assert metrics_tracking.infer_metric_direction("acc") == "max"
assert metrics_tracking.infer_metric_direction("val_acc") == "max"
assert metrics_tracking.infer_metric_direction("crossentropy") == "min"
assert metrics_tracking.infer_metric_direction("ce") == "min"
assert metrics_tracking.infer_metric_direction("weighted_acc") == "max"
assert metrics_tracking.infer_metric_direction("val_weighted_ce") == "min"
assert (
metrics_tracking.infer_metric_direction("weighted_binary_accuracy") == "max"
)
| 34.784452
| 85
| 0.684681
|
5f03db1ac649a43c134525c744dbfdb9ad20b4cc
| 349
|
py
|
Python
|
Exam-Prep/Exam_22-Aug-20/project/rooms/alone_young.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | 1
|
2021-06-30T11:53:44.000Z
|
2021-06-30T11:53:44.000Z
|
Exam-Prep/Exam_22-Aug-20/project/rooms/alone_young.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | null | null | null |
Exam-Prep/Exam_22-Aug-20/project/rooms/alone_young.py
|
geodimitrov/PythonOOP_SoftUni
|
f1c6718c878b618b3ab3f174cd4d187bd178940b
|
[
"MIT"
] | null | null | null |
from project.appliances.tv import TV
from project.rooms.room import Room
class AloneYoung(Room):
default_room_members = 1
room_cost = 10
appliances = [TV()]
def __init__(self, family_name: str, salary: float):
        super().__init__(family_name, salary, self.default_room_members)
self.calculate_expenses(self.appliances)
| 29.083333
| 71
| 0.727794
|
16a19250eecfdd508f280598a1a788b913bc15bb
| 2,492
|
py
|
Python
|
fsf-server/modules/EXTRACT_TAR.py
|
akniffe1/fsf
|
15303aa298414397f9aa5d19ca343040a0fe0bbd
|
[
"Apache-2.0"
] | 259
|
2015-08-06T13:10:11.000Z
|
2022-03-19T19:43:00.000Z
|
fsf-server/modules/EXTRACT_TAR.py
|
akniffe1/fsf
|
15303aa298414397f9aa5d19ca343040a0fe0bbd
|
[
"Apache-2.0"
] | 46
|
2015-08-13T10:58:11.000Z
|
2021-09-14T13:19:42.000Z
|
fsf-server/modules/EXTRACT_TAR.py
|
akniffe1/fsf
|
15303aa298414397f9aa5d19ca343040a0fe0bbd
|
[
"Apache-2.0"
] | 58
|
2015-08-06T16:00:40.000Z
|
2021-07-27T08:29:22.000Z
|
#!/usr/bin/env python
#
# Author: Jason Batchelor
# Description: Extract files from TAR archive file
# Date: 11/16/2015
'''
Copyright 2015 Emerson Electric Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import tarfile
from datetime import datetime
from StringIO import StringIO
from collections import OrderedDict
# For security reasons, we will only allow our module to process a max of twenty identified files
MAX_FILES = 20
def get_tar_type(ti):
type = 'Unknown'
if ti.isfile(): type = 'File'
elif ti.isdir(): type = 'Directory'
elif ti.issym(): type = 'Sym Link'
elif ti.islnk(): type = 'Hard Link'
   elif ti.ischr(): type = 'Character device'
   elif ti.isblk(): type = 'Block device'
   elif ti.isfifo(): type = 'FIFO'
return type
def EXTRACT_TAR(s, buff):
EXTRACT_TAR = {}
file_num = 0
tarf = tarfile.TarFile(fileobj=StringIO(buff), mode='r')
for ti in tarf:
if file_num >= MAX_FILES:
tarf.close()
EXTRACT_TAR['Object_%s' % file_num] = { 'Error' : 'Max number of archived files reached' }
return EXTRACT_TAR
CHILD_TAR = OrderedDict([('Name', ti.name),
('Last modified', datetime.fromtimestamp(ti.mtime).strftime("%Y-%m-%d %H:%M:%S")),
('Type', get_tar_type(ti)),
('UID', ti.uid ),
('GID', ti.gid ),
('Username', ti.uname),
('Groupname', ti.gname)])
if ti.isfile():
try:
f = tarf.extractfile(ti)
CHILD_TAR['Buffer'] = f.read()
f.close()
except:
CHILD_TAR['Buffer'] = 'Failed to extract this specific archive. Invalid or corrupt?'
EXTRACT_TAR['Object_%s' % file_num] = CHILD_TAR
file_num += 1
tarf.close()
return EXTRACT_TAR
if __name__ == '__main__':
print EXTRACT_TAR(None, sys.stdin.read())
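# --- Illustrative usage sketch (not part of the original module) ---
# The module is normally fed a TAR buffer on stdin (see __main__ above), but it
# can also be called directly; the archive path below is an assumption.
#
#   with open('sample.tar', 'rb') as fh:
#       listing = EXTRACT_TAR(None, fh.read())
#   for key, child in listing.items():
#       print key, child.get('Name'), child.get('Type')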
| 28.976744
| 113
| 0.610754
|
62b72f5751875ad8f03b753bef7797afe46384d7
| 32,392
|
py
|
Python
|
paddlenlp/transformers/ernie/modeling.py
|
zkh2016/PaddleNLP
|
33146398dfce1f9582d01146c675c0d8f089275e
|
[
"Apache-2.0"
] | 1
|
2021-07-17T09:30:35.000Z
|
2021-07-17T09:30:35.000Z
|
paddlenlp/transformers/ernie/modeling.py
|
zkh2016/PaddleNLP
|
33146398dfce1f9582d01146c675c0d8f089275e
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/ernie/modeling.py
|
zkh2016/PaddleNLP
|
33146398dfce1f9582d01146c675c0d8f089275e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from .. import PretrainedModel, register_base_model
__all__ = [
'ErnieModel', 'ErniePretrainedModel', 'ErnieForSequenceClassification',
'ErnieForTokenClassification', 'ErnieForQuestionAnswering',
'ErnieForPretraining', 'ErniePretrainingCriterion'
]
class ErnieEmbeddings(nn.Layer):
r"""
Include embeddings from word, position and token_type embeddings.
"""
def __init__(
self,
vocab_size,
hidden_size=768,
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
pad_token_id=0,
weight_attr=None, ):
super(ErnieEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
vocab_size,
hidden_size,
padding_idx=pad_token_id,
weight_attr=weight_attr)
self.position_embeddings = nn.Embedding(
max_position_embeddings, hidden_size, weight_attr=weight_attr)
self.token_type_embeddings = nn.Embedding(
type_vocab_size, hidden_size, weight_attr=weight_attr)
self.layer_norm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
if position_ids is None:
# maybe need use shape op to unify static graph and dynamic graph
#seq_length = input_ids.shape[1]
ones = paddle.ones_like(input_ids, dtype="int64")
seq_length = paddle.cumsum(ones, axis=1)
position_ids = seq_length - ones
position_ids.stop_gradient = True
if token_type_ids is None:
token_type_ids = paddle.zeros_like(input_ids, dtype="int64")
        input_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = input_embeddings + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class ErniePooler(nn.Layer):
def __init__(self, hidden_size, weight_attr=None):
super(ErniePooler, self).__init__()
self.dense = nn.Linear(
hidden_size, hidden_size, weight_attr=weight_attr)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ErniePretrainedModel(PretrainedModel):
r"""
An abstract class for pretrained ERNIE models. It provides ERNIE related
`model_config_file`, `pretrained_init_configuration`, `resource_files_names`,
`pretrained_resource_files_map`, `base_model_prefix` for downloading and
loading pretrained models.
Refer to :class:`~paddlenlp.transformers.model_utils.PretrainedModel` for more details.
"""
model_config_file = "model_config.json"
pretrained_init_configuration = {
"ernie-1.0": {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "relu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"max_position_embeddings": 513,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 18000,
"pad_token_id": 0,
},
"ernie-tiny": {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "relu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"intermediate_size": 4096,
"max_position_embeddings": 600,
"num_attention_heads": 16,
"num_hidden_layers": 3,
"type_vocab_size": 2,
"vocab_size": 50006,
"pad_token_id": 0,
},
"ernie-2.0-en": {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 4,
"vocab_size": 30522,
"pad_token_id": 0,
},
"ernie-2.0-en-finetuned-squad": {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 4,
"vocab_size": 30522,
"pad_token_id": 0,
},
"ernie-2.0-large-en": {
"attention_probs_dropout_prob": 0.1,
"intermediate_size": 4096, # special for ernie-2.0-large-en
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"initializer_range": 0.02,
"max_position_embeddings": 512,
"num_attention_heads": 16,
"num_hidden_layers": 24,
"type_vocab_size": 4,
"vocab_size": 30522,
"pad_token_id": 0,
},
}
resource_files_names = {"model_state": "model_state.pdparams"}
pretrained_resource_files_map = {
"model_state": {
"ernie-1.0":
"https://paddlenlp.bj.bcebos.com/models/transformers/ernie/ernie_v1_chn_base.pdparams",
"ernie-tiny":
"https://paddlenlp.bj.bcebos.com/models/transformers/ernie_tiny/ernie_tiny.pdparams",
"ernie-2.0-en":
"https://paddlenlp.bj.bcebos.com/models/transformers/ernie_v2_base/ernie_v2_eng_base.pdparams",
"ernie-2.0-en-finetuned-squad":
"https://paddlenlp.bj.bcebos.com/models/transformers/ernie_v2_base/ernie_v2_eng_base_finetuned_squad.pdparams",
"ernie-2.0-large-en":
"https://paddlenlp.bj.bcebos.com/models/transformers/ernie_v2_large/ernie_v2_eng_large.pdparams",
}
}
base_model_prefix = "ernie"
def init_weights(self, layer):
""" Initialization hook """
if isinstance(layer, (nn.Linear, nn.Embedding)):
# only support dygraph, use truncated_normal and make it inplace
# and configurable later
if isinstance(layer.weight, paddle.Tensor):
layer.weight.set_value(
paddle.tensor.normal(
mean=0.0,
std=self.initializer_range
if hasattr(self, "initializer_range") else
self.ernie.config["initializer_range"],
shape=layer.weight.shape))
elif isinstance(layer, nn.LayerNorm):
layer._epsilon = 1e-12
@register_base_model
class ErnieModel(ErniePretrainedModel):
r"""
The bare ERNIE Model transformer outputting raw hidden-states.
This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
Refer to the superclass documentation for the generic methods.
This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation
/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass. Use it as a regular Paddle Layer
and refer to the Paddle documentation for all matter related to general usage and behavior.
Args:
vocab_size (int):
Vocabulary size of `inputs_ids` in `ErnieModel`. Also is the vocab size of token embedding matrix.
Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling `ErnieModel`.
hidden_size (int, optional):
Dimensionality of the embedding layer, encoder layers and pooler layer. Defaults to `768`.
num_hidden_layers (int, optional):
Number of hidden layers in the Transformer encoder. Defaults to `12`.
num_attention_heads (int, optional):
Number of attention heads for each attention layer in the Transformer encoder.
Defaults to `12`.
intermediate_size (int, optional):
Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors
to ff layers are firstly projected from `hidden_size` to `intermediate_size`,
and then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
Defaults to `3072`.
hidden_act (str, optional):
The non-linear activation function in the feed-forward layer.
``"gelu"``, ``"relu"`` and any other paddle supported activation functions
are supported. Defaults to `"gelu"`.
hidden_dropout_prob (float, optional):
The dropout probability for all fully connected layers in the embeddings and encoder.
Defaults to `0.1`.
attention_probs_dropout_prob (float, optional):
The dropout probability used in MultiHeadAttention in all encoder layers to drop some attention target.
Defaults to `0.1`.
max_position_embeddings (int, optional):
The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input
sequence. Defaults to `512`.
type_vocab_size (int, optional):
The vocabulary size of the `token_type_ids`.
Defaults to `2`.
initializer_range (float, optional):
The standard deviation of the normal initializer for initializing all weight matrices.
Defaults to `0.02`.
.. note::
A normal_initializer initializes weight matrices as normal distributions.
See :meth:`ErniePretrainedModel._init_weights()` for how weights are initialized in `ErnieModel`.
pad_token_id(int, optional):
The index of padding token in the token vocabulary.
Defaults to `0`.
"""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
pad_token_id=0):
super(ErnieModel, self).__init__()
self.pad_token_id = pad_token_id
self.initializer_range = initializer_range
weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal(
mean=0.0, std=self.initializer_range))
self.embeddings = ErnieEmbeddings(
vocab_size, hidden_size, hidden_dropout_prob,
max_position_embeddings, type_vocab_size, pad_token_id, weight_attr)
encoder_layer = nn.TransformerEncoderLayer(
hidden_size,
num_attention_heads,
intermediate_size,
dropout=hidden_dropout_prob,
activation=hidden_act,
attn_dropout=attention_probs_dropout_prob,
act_dropout=0,
weight_attr=weight_attr, )
self.encoder = nn.TransformerEncoder(encoder_layer, num_hidden_layers)
self.pooler = ErniePooler(hidden_size, weight_attr)
self.apply(self.init_weights)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r"""
Args:
input_ids (Tensor):
Indices of input sequence tokens in the vocabulary. They are
numerical representations of tokens that build the input sequence.
It's data type should be `int64` and has a shape of [batch_size, sequence_length].
token_type_ids (Tensor, optional):
Segment token indices to indicate different portions of the inputs.
Selected in the range ``[0, type_vocab_size - 1]``.
If `type_vocab_size` is 2, which means the inputs have two portions.
Indices can either be 0 or 1:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
Defaults to `None`, which means we don't add segment embeddings.
position_ids (Tensor, optional):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
max_position_embeddings - 1]``.
Shape as `[batch_size, num_tokens]` and dtype as int64. Defaults to `None`.
attention_mask (Tensor, optional):
Mask used in multi-head attention to avoid performing attention on to some unwanted positions,
usually the paddings or the subsequent positions.
Its data type can be int, float and bool.
When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.
When the data type is int, the `masked` tokens have `0` values and the others have `1` values.
When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.
It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
For example, its shape can be [batch_size, sequence_length], [batch_size, sequence_length, sequence_length],
[batch_size, num_attention_heads, sequence_length, sequence_length].
We use whole-word-mask in ERNIE, so the whole word will have the same value. For example, "使用" as a word,
"使" and "用" will have the same value.
Defaults to `None`, which means nothing needed to be prevented attention to.
Returns:
tuple: Returns tuple (``sequence_output``, ``pooled_output``).
With the fields:
- `sequence_output` (Tensor):
Sequence of hidden-states at the last layer of the model.
Its data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].
- `pooled_output` (Tensor):
The output of first token (`[CLS]`) in sequence.
We "pool" the model by simply taking the hidden state corresponding to the first token.
Its data type should be float32 and its shape is [batch_size, hidden_size].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ErnieModel, ErnieTokenizer
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
model = ErnieModel.from_pretrained('ernie-1.0')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
sequence_output, pooled_output = model(**inputs)
"""
if attention_mask is None:
attention_mask = paddle.unsqueeze(
(input_ids == self.pad_token_id
).astype(self.pooler.dense.weight.dtype) * -1e9,
axis=[1, 2])
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output, attention_mask)
sequence_output = encoder_outputs
pooled_output = self.pooler(sequence_output)
return sequence_output, pooled_output
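# Illustrative sketch (not part of the original file): how the default additive
# attention mask in `forward` above is built when `attention_mask` is None. Pad
# positions receive a large negative value so softmax assigns them ~zero weight,
# and the result broadcasts to
# [batch_size, num_attention_heads, sequence_length, sequence_length].
# The pad token id of 0 below is an assumption for illustration only.
def _example_default_attention_mask():
    import paddle
    input_ids = paddle.to_tensor([[1, 5, 7, 0, 0]])  # 0 == pad_token_id (assumed)
    mask = paddle.unsqueeze(
        (input_ids == 0).astype('float32') * -1e9, axis=[1, 2])
    return mask  # shape [1, 1, 1, 5]; pad positions hold -1e9, real tokens hold 0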
class ErnieForSequenceClassification(ErniePretrainedModel):
r"""
Ernie Model with a linear layer on top of the output layer,
designed for sequence classification/regression tasks like GLUE tasks.
Args:
ernie (ErnieModel):
An instance of `paddlenlp.transformers.ErnieModel`.
num_classes (int, optional):
The number of classes. Defaults to `2`.
dropout (float, optional):
The dropout probability for output of ERNIE.
If None, use the same value as `hidden_dropout_prob`
of `paddlenlp.transformers.ErnieModel` instance. Defaults to `None`.
"""
def __init__(self, ernie, num_classes=2, dropout=None):
super(ErnieForSequenceClassification, self).__init__()
self.num_classes = num_classes
self.ernie = ernie # allow ernie to be config
self.dropout = nn.Dropout(dropout if dropout is not None else
self.ernie.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.ernie.config["hidden_size"],
num_classes)
self.apply(self.init_weights)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`ErnieModel`.
token_type_ids (Tensor, optional):
See :class:`ErnieModel`.
position_ids (Tensor, optional):
See :class:`ErnieModel`.
attention_mask (Tensor, optional):
See :class:`ErnieModel`.
Returns:
Tensor: Returns tensor `logits`, a tensor of the input text classification logits.
Shape as `[batch_size, num_classes]` and dtype as float32.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ErnieForSequenceClassification, ErnieTokenizer
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
model = ErnieForSequenceClassification.from_pretrained('ernie-1.0')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
logits = model(**inputs)
"""
_, pooled_output = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits
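# Illustrative sketch (not part of the original file): a minimal loss computation for
# the classification head above, assuming hard integer class labels. The tensors are
# placeholders for illustration only.
def _example_classification_loss(logits, labels):
    import paddle.nn.functional as F
    # logits: [batch_size, num_classes]; labels: [batch_size] int64 class indices
    return F.cross_entropy(logits, labels)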
class ErnieForQuestionAnswering(ErniePretrainedModel):
"""
Ernie Model with a linear layer on top of the hidden-states
output to compute `span_start_logits` and `span_end_logits`,
designed for question-answering tasks like SQuAD.
Args:
ernie (`ErnieModel`):
An instance of `ErnieModel`.
"""
def __init__(self, ernie):
super(ErnieForQuestionAnswering, self).__init__()
self.ernie = ernie # allow ernie to be config
self.classifier = nn.Linear(self.ernie.config["hidden_size"], 2)
self.apply(self.init_weights)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`ErnieModel`.
token_type_ids (Tensor, optional):
See :class:`ErnieModel`.
position_ids (Tensor, optional):
See :class:`ErnieModel`.
attention_mask (Tensor, optional):
See :class:`ErnieModel`.
Returns:
tuple: Returns tuple (`start_logits`, `end_logits`).
With the fields:
- `start_logits` (Tensor):
A tensor of the input token classification logits, indicating the start position of the labelled span.
Its data type should be float32 and its shape is [batch_size, sequence_length].
- `end_logits` (Tensor):
A tensor of the input token classification logits, indicating the end position of the labelled span.
Its data type should be float32 and its shape is [batch_size, sequence_length].
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
model = ErnieForQuestionAnswering.from_pretrained('ernie-1.0')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
logits = model(**inputs)
"""
sequence_output, _ = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask)
logits = self.classifier(sequence_output)
logits = paddle.transpose(logits, perm=[2, 0, 1])
start_logits, end_logits = paddle.unstack(x=logits, axis=0)
return start_logits, end_logits
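# Illustrative sketch (not part of the original file): turning the start/end logits
# above into a predicted answer span by taking the argmax of each. Real decoding
# usually also enforces start <= end and a maximum answer length; this minimal
# version skips those checks.
def _example_decode_span(start_logits, end_logits):
    import paddle
    start = paddle.argmax(start_logits, axis=-1)  # [batch_size]
    end = paddle.argmax(end_logits, axis=-1)      # [batch_size]
    return start, end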
class ErnieForTokenClassification(ErniePretrainedModel):
r"""
ERNIE Model with a linear layer on top of the hidden-states output layer,
designed for token classification tasks like NER tasks.
Args:
ernie (`ErnieModel`):
An instance of `ErnieModel`.
num_classes (int, optional):
The number of classes. Defaults to `2`.
dropout (float, optional):
The dropout probability for output of ERNIE.
If None, use the same value as `hidden_dropout_prob`
of `ErnieModel` instance `ernie`. Defaults to `None`.
"""
def __init__(self, ernie, num_classes=2, dropout=None):
super(ErnieForTokenClassification, self).__init__()
self.num_classes = num_classes
self.ernie = ernie # allow ernie to be config
self.dropout = nn.Dropout(dropout if dropout is not None else
self.ernie.config["hidden_dropout_prob"])
self.classifier = nn.Linear(self.ernie.config["hidden_size"],
num_classes)
self.apply(self.init_weights)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None):
r"""
Args:
input_ids (Tensor):
See :class:`ErnieModel`.
token_type_ids (Tensor, optional):
See :class:`ErnieModel`.
position_ids (Tensor, optional):
See :class:`ErnieModel`.
attention_mask (Tensor, optional):
See :class:`ErnieModel`.
Returns:
Tensor: Returns tensor `logits`, a tensor of the input token classification logits.
Shape as `[batch_size, sequence_length, num_classes]` and dtype as `float32`.
Example:
.. code-block::
import paddle
from paddlenlp.transformers import ErnieForTokenClassification, ErnieTokenizer
tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
model = ErnieForTokenClassification.from_pretrained('ernie-1.0')
inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
logits = model(**inputs)
"""
sequence_output, _ = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
return logits
class ErnieLMPredictionHead(nn.Layer):
r"""
Ernie Model with a `language modeling` head on top.
"""
def __init__(
self,
hidden_size,
vocab_size,
activation,
embedding_weights=None,
weight_attr=None, ):
super(ErnieLMPredictionHead, self).__init__()
self.transform = nn.Linear(
hidden_size, hidden_size, weight_attr=weight_attr)
self.activation = getattr(nn.functional, activation)
self.layer_norm = nn.LayerNorm(hidden_size)
self.decoder_weight = self.create_parameter(
shape=[vocab_size, hidden_size],
dtype=self.transform.weight.dtype,
attr=weight_attr,
is_bias=False) if embedding_weights is None else embedding_weights
self.decoder_bias = self.create_parameter(
shape=[vocab_size], dtype=self.decoder_weight.dtype, is_bias=True)
def forward(self, hidden_states, masked_positions=None):
if masked_positions is not None:
hidden_states = paddle.reshape(hidden_states,
[-1, hidden_states.shape[-1]])
hidden_states = paddle.tensor.gather(hidden_states,
masked_positions)
# gathering only the masked tokens is faster than transforming the full sequence
hidden_states = self.transform(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = paddle.tensor.matmul(
hidden_states, self.decoder_weight,
transpose_y=True) + self.decoder_bias
return hidden_states
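# Illustrative sketch (not part of the original file): per the reshape-and-gather in
# `forward` above, `masked_positions` are flat indices into the flattened
# [batch_size * sequence_length] axis, i.e. batch_index * seq_len + token_index.
def _example_masked_positions(seq_len=4, masked=((0, 1), (1, 3))):
    # masked tokens at (batch 0, position 1) and (batch 1, position 3)
    return [b * seq_len + t for b, t in masked]  # -> [1, 7]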
class ErniePretrainingHeads(nn.Layer):
def __init__(
self,
hidden_size,
vocab_size,
activation,
embedding_weights=None,
weight_attr=None, ):
super(ErniePretrainingHeads, self).__init__()
self.predictions = ErnieLMPredictionHead(
hidden_size, vocab_size, activation, embedding_weights, weight_attr)
self.seq_relationship = nn.Linear(
hidden_size, 2, weight_attr=weight_attr)
def forward(self, sequence_output, pooled_output, masked_positions=None):
prediction_scores = self.predictions(sequence_output, masked_positions)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class ErnieForPretraining(ErniePretrainedModel):
r"""
Ernie Model with a `masked language modeling` head and a `sentence order prediction` head
on top.
"""
def __init__(self, ernie):
super(ErnieForPretraining, self).__init__()
self.ernie = ernie
weight_attr = paddle.ParamAttr(initializer=nn.initializer.Normal(
mean=0.0, std=self.ernie.initializer_range))
self.cls = ErniePretrainingHeads(
self.ernie.config["hidden_size"],
self.ernie.config["vocab_size"],
self.ernie.config["hidden_act"],
embedding_weights=self.ernie.embeddings.word_embeddings.weight,
weight_attr=weight_attr, )
self.apply(self.init_weights)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None,
masked_positions=None):
r"""
Args:
input_ids (Tensor):
See :class:`ErnieModel`.
token_type_ids (Tensor, optional):
See :class:`ErnieModel`.
position_ids (Tensor, optional):
See :class:`ErnieModel`.
attention_mask (Tensor, optional):
See :class:`ErnieModel`.
Returns:
tuple: Returns tuple (``prediction_scores``, ``seq_relationship_score``).
With the fields:
- `prediction_scores` (Tensor):
The scores of masked token prediction. Its data type should be float32.
If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size].
Otherwise, its shape is [batch_size, mask_token_num, vocab_size].
- `seq_relationship_score` (Tensor):
The scores of next sentence prediction.
Its data type should be float32 and its shape is [batch_size, 2].
"""
with paddle.static.amp.fp16_guard():
outputs = self.ernie(
input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
attention_mask=attention_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output, masked_positions)
return prediction_scores, seq_relationship_score
class ErniePretrainingCriterion(paddle.nn.Layer):
r"""
The loss output of Ernie Model during the pretraining:
a `masked language modeling` head and a `next sentence prediction (classification)` head.
"""
def __init__(self):
super(ErniePretrainingCriterion, self).__init__()
#self.loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=-1)
def forward(self, prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels):
"""
Args:
prediction_scores(Tensor):
The scores of masked token prediction. Its data type should be float32.
If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size].
Otherwise, its shape is [batch_size, mask_token_num, vocab_size]
seq_relationship_score(Tensor):
The scores of next sentence prediction. Its data type should be float32 and
its shape is [batch_size, 2]
masked_lm_labels(Tensor):
The labels of the masked language modeling task; its dimensionality matches that of `prediction_scores`.
Its data type should be int64. If `masked_positions` is None, its shape is [batch_size, sequence_length, 1].
Otherwise, its shape is [batch_size, mask_token_num, 1]
next_sentence_labels(Tensor):
The labels of the next sentence prediction task; its leading dimension matches the
batch dimension of `seq_relationship_score`. Its data type should be int64 and
its shape is [batch_size, 1]
Returns:
tuple: Returns (`masked_lm_loss`, `next_sentence_loss`), the mean masked language modeling loss and the mean next sentence prediction loss.
Both have data type float32 and shape [1].
"""
with paddle.static.amp.fp16_guard():
masked_lm_loss = F.cross_entropy(
prediction_scores,
masked_lm_labels,
ignore_index=-1,
reduction='none')
next_sentence_loss = F.cross_entropy(
seq_relationship_score, next_sentence_labels, reduction='none')
return paddle.mean(masked_lm_loss), paddle.mean(next_sentence_loss)
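# Illustrative sketch (not part of the original file): the two mean losses returned
# above are typically summed into a single scalar before calling backward(). This is
# a common convention, not something mandated by the criterion itself.
def _example_total_pretraining_loss(masked_lm_loss, next_sentence_loss):
    return masked_lm_loss + next_sentence_loss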
| 42.122237 | 129 | 0.616726 |
d1979045dfbbbcac8d34e125d8ef7f411c44573e | 7,164 | py | Python | hoft/core/decorators.py | sys-git/hoft | a59bd3f38a258eb6d7f56a9a79034159b18fd6a4 | ["MIT"] | null | null | null | hoft/core/decorators.py | sys-git/hoft | a59bd3f38a258eb6d7f56a9a79034159b18fd6a4 | ["MIT"] | 323 | 2017-09-13T07:20:51.000Z | 2022-03-31T12:30:24.000Z | hoft/core/decorators.py | sys-git/hoft | a59bd3f38a258eb6d7f56a9a79034159b18fd6a4 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Brief description
# @module hoft.core.decorators
# @version 0.1
# @copyright (c) 2017-present Francis Horsman.
from inspect import getargspec, getcallargs
import six
from hoft.core.parsers_in import parse_all_in_args
from hoft.core.parsers_sig import parse_all_sig_args
from hoft.core.utils import raise_exc
def analyse_in(*parse_args, **parse_kwargs):
"""
Decorator for methods (to analyse) the args and kwargs of the decorated callable.
This method does not modify the args or kwargs in any way.
Deprecated. Will be removed in a future version, use `analyse_sig` instead.
:param parse_args:
A list of callables, each accepting exactly two values:
the target function's argument at the same position as the callable in the
decorator's argument list, and the index of that argument.
If callable==`IGNORE`, then the decorated function's arg is not parsed.
:param parse_kwargs:
A dictionary mapping names to callables. The name represents the target function's kwarg that
will be passed to the callable. The callable receives the name,
value and a boolean representing if the name is present in the kwargs:
ie: `def my_func(name, value, name_in_decorated_funcs_passed_kwargs)`.
:param bool parse_kwargs['_fail_fast_']:
True: Fail on the first exception raised by any supplied callable.
:param callable parse_kwargs['_on_error_']:
Callable or type to be called when an exception is found
in a supplied callable, if the type is an exception or subclass-of, it will be raised (the
exception constructor should take the same signature as my_func below):
ie: `def my_func(exc, list_of_excs)`.
If the type is not an exception or subclass-of it will be called, it is up to this
callable to raise an exception if required.
:returns:
Decorated function.
:note:
Any exception raised by a supplied callable will have an additional field: `_errors_`.
This is always a list of one or all of the errors encountered during the supplied
callables (depending on the value of the `_fail_fast_` kwargs).
Example:
>>> @hoft.analyse_in(
_a_func(z=1), None, bar=_b_func(x=1, y=2), baz=_validate_baz(), x=None,
_fail_fast_=True, _on_error_=my_func,
)
def _validate_something_decorated(foo, ignored, bar=hoft.IGNORE, baz=None, x=None):
...
"""
def decorator(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
fail_fast = parse_kwargs.pop('_fail_fast_', False)
on_error = parse_kwargs.pop('_on_error_', None)
argspec = getargspec(func)
errors = parse_all_in_args(
parse_args,
parse_kwargs,
args,
kwargs,
argspec,
on_error,
fail_fast,
)
if errors and not fail_fast:
# We have errors to raise which have not already been raised.
exc = errors[0]
raise_exc(
exc=exc.error,
on_error=on_error,
errors=errors,
fail_fast=fail_fast,
force=True,
)
# Call the wrapped function:
return func(*args, **kwargs)
return wrapper
return decorator
def analyse_sig(*parse_args, **parse_kwargs):
"""
Decorator for methods (to analyse) the args and kwargs of the decorated callable.
This method does not modify the args or kwargs in any way.
Preferred method over `analyse_in`.
:param parse_args:
A list of callables, each accepting exactly two values:
the target function's argument at the same position as the callable in the
decorator's argument list, and the index of that argument.
If callable==`IGNORE`, then the decorated function's arg is not parsed.
:param parse_kwargs:
A dictionary mapping names to callables. The name represents the target function's kwarg that
will be passed to the callable. The callable receives the name,
value and a boolean representing if the name is present in the kwargs:
ie: `def my_func(name, value, name_in_decorated_funcs_passed_kwargs)`.
:param bool parse_kwargs['_fail_fast_']:
True: Fail on the first exception raised by any supplied callable.
:param callable parse_kwargs['_on_error_']:
Callable or type to be called when an exception is found
in a supplied callable, if the type is an exception or subclass-of, it will be raised (the
exception constructor should take the same signature as my_func below):
ie: `def my_func(exc, list_of_excs)`.
If the type is not an exception or subclass-of it will be called, it is up to this
callable to raise an exception if required.
:param bool parse_kwargs['_strict_']:
True: raise an error if not all params are analysed.
:param callable parse_kwargs['_default_']:
Default handler for all not previously analysed arguments.
:returns:
Decorated function.
:note:
Any exception raised by a supplied callable will have an additional field: `_errors_`.
This is always a list of one or all of the errors encountered during the supplied
callables (depending on the value of the `_fail_fast_` kwargs).
Example:
>>> @hoft.analyse_sig(
_a_func(z=1), None, bar=_b_func(x=1, y=2), baz=_validate_baz(), x=None,
_fail_fast_=True, _on_error_=my_func, _strict_=False, _default_=_default_func,
)
def _validate_something_decorated(foo, ignored, bar=hoft.IGNORE, baz=None, x=None):
...
"""
def decorator(func):
@six.wraps(func)
def wrapper(*args, **kwargs):
argspec = getargspec(func)
callargs = getcallargs(func, *args, **kwargs)
strict = parse_kwargs.pop('_strict_', None)
default = parse_kwargs.pop('_default_', None)
fail_fast = parse_kwargs.pop('_fail_fast_', False)
on_error = parse_kwargs.pop('_on_error_', None)
errors = parse_all_sig_args(
parse_args,
parse_kwargs,
args,
kwargs,
argspec,
callargs,
strict,
default,
on_error,
fail_fast,
)
if errors and not fail_fast:
# We have errors to raise which have not already been raised.
exc = errors[0]
raise_exc(
exc=exc.error,
on_error=on_error,
errors=errors,
fail_fast=fail_fast,
force=True,
)
# Call the wrapped function:
return func(*args, **kwargs)
return wrapper
return decorator
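# Illustrative sketch (not part of the original module): a minimal validator for use
# with `analyse_sig`. The names below are made up for illustration; going by the
# docstring above, a positional validator receives the decorated function's argument
# value and its index (the exact argument order is an assumption from that docstring).
def _example_require_positive(value, index):
    if value is not None and value <= 0:
        raise ValueError('argument %d must be positive, got %r' % (index, value))
#
# Hypothetical usage:
#
#     @analyse_sig(_example_require_positive, _fail_fast_=True)
#     def scale(factor, data=None):
#         return [factor * x for x in (data or [])]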
| 36.365482 | 99 | 0.624511 |
0f61c98588c8fd774ed29c9c698d5bca06c76d8b | 384 | py | Python | shadock/tests.py | jibe-b/bioshadock | d4769946be29d74377435734c771fe19136a64a4 | ["Apache-2.0"] | null | null | null | shadock/tests.py | jibe-b/bioshadock | d4769946be29d74377435734c771fe19136a64a4 | ["Apache-2.0"] | 1 | 2017-05-05T14:02:44.000Z | 2017-05-05T14:26:24.000Z | shadock/tests.py | jibe-b/bioshadock | d4769946be29d74377435734c771fe19136a64a4 | ["Apache-2.0"] | null | null | null |
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from .views import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['project'], 'shadock')
| 21.333333 | 52 | 0.651042 |
589b72196e945e78da11ec9b6e720e87338b1039 | 112 | py | Python | riskgame/__init__.py | AxisAndAllies/game1 | 77ff6941233a00276e4c4cccbfb19be2a2251eb7 | ["MIT"] | null | null | null | riskgame/__init__.py | AxisAndAllies/game1 | 77ff6941233a00276e4c4cccbfb19be2a2251eb7 | ["MIT"] | null | null | null | riskgame/__init__.py | AxisAndAllies/game1 | 77ff6941233a00276e4c4cccbfb19be2a2251eb7 | ["MIT"] | null | null | null |
from gym.envs.registration import register
register(
id='risk-v0',
    entry_point='riskgame.envs:RiskEnv'
)
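# Illustrative sketch (not part of the original package): once registered, the
# environment can be created by id. This assumes `riskgame.envs.RiskEnv` is
# importable and implements the gym.Env interface.
def _example_make_env():
    import gym
    return gym.make('risk-v0')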
| 18.666667 | 42 | 0.732143 |
852f6c12a621330c5b563246ea97859d4d28253b | 2,478 | py | Python | ltc/base/migrations/0001_initial.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | ["MIT"] | 4 | 2016-12-30T13:26:59.000Z | 2017-04-26T12:07:36.000Z | ltc/base/migrations/0001_initial.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | ["MIT"] | null | null | null | ltc/base/migrations/0001_initial.py | v0devil/jltom | b302a39a187b8e1154c6deda636a4db8b30bb40b | ["MIT"] | null | null | null |
# Generated by Django 2.2.20 on 2021-05-05 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('enabled', models.BooleanField(default=True)),
('confluence_space', models.TextField(blank=True, null=True)),
('confluence_page', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'project',
},
),
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField(default='')),
('status', models.CharField(choices=[('C', 'created'), ('R', 'running'), ('A', 'analyzing'), ('S', 'scheduled'), ('F', 'finished'), ('FA', 'failed')], default='C', max_length=12)),
('threads', models.IntegerField(default=0)),
('duration', models.IntegerField(default=0)),
('started_at', models.DateTimeField(db_index=True, null=True)),
('finished_at', models.DateTimeField(db_index=True, null=True)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.Project')),
],
),
migrations.CreateModel(
name='TestFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.TextField()),
('file_type', models.CharField(choices=[('O', 'online_result'), ('M', 'result'), ('L', 'log'), ('T', 'testplan'), ('J', 'jenkins_build_xml')], default='M', max_length=12)),
('file_size', models.IntegerField(default=0)),
('last_analyzed', models.DateTimeField(default=None, null=True)),
('last_analyzed_line', models.IntegerField(default=0)),
('test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Test')),
],
),
]
| 45.888889 | 196 | 0.558111 |
0e37b773295053acbdfdb46849eb0158a0060f26 | 275 | py | Python | apps/addresses/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | ["MIT"] | 206 | 2015-10-15T07:05:08.000Z | 2021-02-19T11:48:36.000Z | apps/addresses/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | ["MIT"] | 8 | 2017-10-16T10:18:31.000Z | 2022-03-09T14:24:27.000Z | apps/addresses/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | ["MIT"] | 61 | 2015-10-15T08:12:44.000Z | 2022-03-10T12:25:06.000Z |
# HTK Imports
from htk.utils.enums import get_enum_symbolic_name
def get_unit_type_choices():
from htk.apps.addresses.enums import AddressUnitType
choices = [(unit_type.value, get_enum_symbolic_name(unit_type),) for unit_type in AddressUnitType]
return choices
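# Illustrative sketch (not part of the original module): the (value, label) pairs
# returned above follow Django's `choices` convention for model fields. The model and
# field names in this hypothetical usage are made up for illustration.
#
#     class Address(models.Model):
#         unit_type = models.PositiveIntegerField(
#             choices=get_unit_type_choices(), null=True, blank=True)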
| 30.555556 | 102 | 0.796364 |
6f9d728b1ce37c22616135bd04f23230348d6813 | 2,893 | py | Python | FWCore/Framework/test/testBitsCount_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | FWCore/Framework/test/testBitsCount_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | FWCore/Framework/test/testBitsCount_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | ["Apache-2.0"] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z |
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(99)
)
process.source = cms.Source("EmptySource")
process.m1a = cms.EDProducer("IntProducer",
ivalue = cms.int32(1)
)
process.m2a = cms.EDProducer("IntProducer",
ivalue = cms.int32(2)
)
process.m3a = cms.EDProducer("IntProducer",
ivalue = cms.int32(3)
)
process.m4a = cms.EDProducer("IntProducer",
ivalue = cms.int32(4)
)
process.m5a = cms.EDProducer("IntProducer",
ivalue = cms.int32(5)
)
process.m6a = cms.EDProducer("IntProducer",
ivalue = cms.int32(6)
)
process.a1 = cms.EDAnalyzer("TestResultAnalyzer",
name = cms.untracked.string('a1'),
dump = cms.untracked.bool(True),
numbits = cms.untracked.int32(9)
)
process.f1 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(30),
onlyOne = cms.untracked.bool(True)
)
process.f2 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(70),
onlyOne = cms.untracked.bool(True)
)
process.f3 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(12),
onlyOne = cms.untracked.bool(True)
)
process.f4 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(30),
onlyOne = cms.untracked.bool(False)
)
process.f5 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(70),
onlyOne = cms.untracked.bool(False)
)
process.f6 = cms.EDFilter("TestFilterModule",
acceptValue = cms.untracked.int32(12),
onlyOne = cms.untracked.bool(False)
)
process.outp4 = cms.OutputModule("SewerModule",
shouldPass = cms.int32(4),
name = cms.string('for_p1ap2a'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p1a',
'p2a')
)
)
process.outp7 = cms.OutputModule("SewerModule",
shouldPass = cms.int32(99),
name = cms.string('for_none')
)
process.outpempty = cms.OutputModule("SewerModule",
shouldPass = cms.int32(99),
name = cms.string('p2empty'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('p2empty')
)
)
process.p1empty = cms.Path()
process.p1a = cms.Path(process.f1*process.m1a)
process.p2a = cms.Path(process.f2*process.m2a)
process.p3a = cms.Path(process.f3*process.m3a)
process.p2empty = cms.Path()
process.p4a = cms.Path(process.f4*process.m4a)
process.p5a = cms.Path(process.f5*process.m5a)
process.p6a = cms.Path(process.f6*process.m6a)
process.p3empty = cms.Path()
process.e1 = cms.EndPath(process.a1)
process.e2 = cms.EndPath(process.outp4)
process.e3 = cms.EndPath(process.outp7)
process.e4 = cms.EndPath(process.outpempty)
| 25.156522 | 72 | 0.707224 |
40769ff580e17c6a52758314f5d344bf0213cca4 | 3,842 | py | Python | setup.py | steschuser/certreq | 98a0b45bee5f3cddfaa88500d09b83bb2a5645d3 | ["MIT"] | null | null | null | setup.py | steschuser/certreq | 98a0b45bee5f3cddfaa88500d09b83bb2a5645d3 | ["MIT"] | null | null | null | setup.py | steschuser/certreq | 98a0b45bee5f3cddfaa88500d09b83bb2a5645d3 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'certreq'
DESCRIPTION = 'python implementation of certreq'
URL = 'https://github.com/steschuser/certreq'
EMAIL = 's.schwebel@uvensys.de'
AUTHOR = 'Steffen Schwebel'
REQUIRES_PYTHON = '>=3.5.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
'requests', 'requests_ntlm', 'loguru', 'parse_it', 'bs4'
# 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={
'console_scripts': ['certreq=certreq:cli'],
},
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| 28.887218 | 86 | 0.640292 |
ef8acef313ec6555a7e0b8961a652cec735b9eaa | 112,968 | py | Python | scipy/linalg/tests/test_lapack.py | Didou09/scipy | 061aca619504e198509969fbe5908d1085966889 | ["BSD-3-Clause"] | 1 | 2020-08-04T08:29:47.000Z | 2020-08-04T08:29:47.000Z | scipy/linalg/tests/test_lapack.py | Didou09/scipy | 061aca619504e198509969fbe5908d1085966889 | ["BSD-3-Clause"] | null | null | null | scipy/linalg/tests/test_lapack.py | Didou09/scipy | 061aca619504e198509969fbe5908d1085966889 | ["BSD-3-Clause"] | null | null | null |
#
# Created by: Pearu Peterson, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import _flapack as flapack, lapack
from scipy.linalg import inv, svd, cholesky, solve, ldl, norm, block_diag, qr
from scipy.linalg import eigh
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
    # generates a random matrix of the given shape and data type
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
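# Illustrative sketch (not part of the original test suite): the workspace-query
# pattern used repeatedly in the tests below. The *_lwork companion routine returns
# the optimal workspace size, which is then passed to the actual LAPACK routine.
def _example_lwork_query(n=4):
    a = np.eye(n)
    sytrf, sytrf_lwork = get_lapack_funcs(('sytrf', 'sytrf_lwork'), (a,))
    lwork = _compute_lwork(sytrf_lwork, n)
    return sytrf(a, lwork=lwork)  # (ldu, ipiv, info)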
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of used datatype.
        # This means we do not need to parameterize over all available datatypes.
        tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
    # our test here will be to do one step of reducing a hermitian matrix to
# tridiagonal form using householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
    # Check if an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd(object):
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
        # For <c,z>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The estimate 1/rcond should match np.linalg.cond(A, 1) to within a relative error of 1
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
seed(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rand(n, n).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rand(n, n).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
seed(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
seed(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rand(m, n).astype(dtype))
else:
A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
Test for solving a linear system whose coefficient matrix is a
triangular array stored in Rectangular Full Packed (RFP) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rand(n, 2).astype(dtype)
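# Note (assumption based on the LAPACK ?tfsm docs): tfsm solves
# op(A) @ X = alpha * B with A triangular in RFP storage; the first
# positional argument below is alpha = -1, which is why the reference
# solution is solve(-A, B).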
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Make A unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
This test performs a matrix multiplication with an arbitrary m x n matrix C
and a unitary matrix Q without explicitly forming the array. The array data
is encoded in the rectangular part of A which is obtained from ?TZRZF. The
size of Q is inferred from the m, n, and side keywords.
"""
seed(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rand(qm, qn).astype(dtype))
C = rand(cn, cn).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
Test conversion routines between the Rectangular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_U_T,
A_tf_U_m.conj().T.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L_T,
A_tf_L_m.conj().T.reshape(-1, order='F'))
# Get the original array from RFP
A_tr_U, info = tfttr(n, A_tf_U)
assert_(info == 0)
A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
assert_(info == 0)
A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
assert_(info == 0)
A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_U_T, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
"""
Test conversion routines between the Standard Packed (TP) format
and the Standard Triangular Array (TR) format
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A_full = (rand(n, n)).astype(dtype)
trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
A_tp_U, info = trttp(A_full)
assert_(info == 0)
A_tp_L, info = trttp(A_full, uplo='L')
assert_(info == 0)
# Create the TP array manually
inds = tril_indices(n)
A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_U_m[:] = (triu(A_full).T)[inds]
inds = triu_indices(n)
A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_L_m[:] = (tril(A_full).T)[inds]
assert_array_almost_equal(A_tp_U, A_tp_U_m)
assert_array_almost_equal(A_tp_L, A_tp_L_m)
# Get the original array from TP
A_tr_U, info = tpttr(n, A_tp_U)
assert_(info == 0)
A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
dtype=dtype)
# Convert A to RFP format
Afp, info = trttf(A)
Achol_rfp, info = pftrf(n, Afp)
assert_(info == 0)
A_chol_r, _ = tfttr(n, Achol_rfp)
Achol = cholesky(A)
assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array to find its inverse
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Convert A to RFP format
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
A_inv_rfp, info = pftri(n, A_chol_rfp)
assert_(info == 0)
A_inv_r, _ = tfttr(n, A_inv_rfp)
Ainv = inv(A)
assert_array_almost_equal(A_inv_r, triu(Ainv),
decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
"""
Test Cholesky factorization of a positive definite Rectangular Full
Packed (RFP) format array and solve a linear system
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
B = ones((n, 3), dtype=dtype)
Bf1 = ones((n+2, 3), dtype=dtype)
Bf2 = ones((n-2, 3), dtype=dtype)
pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Convert A to RFP format
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
# larger B arrays shouldn't segfault
soln, info = pftrs(n, A_chol_rfp, Bf1)
assert_(info == 0)
assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
soln, info = pftrs(n, A_chol_rfp, B)
assert_(info == 0)
assert_array_almost_equal(solve(A, B), soln,
decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
"""
Test for performing a symmetric rank-k operation on a matrix in RFP format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
prefix = 's' if ind < 2 else 'h'
trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
''.format(prefix)),
dtype=dtype)
Afp, _ = trttf(A)
C = np.random.rand(n, 2).astype(dtype)
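# Note (assumption based on the LAPACK ?sfrk/?hfrk docs): the routine performs
# a rank-k update C := alpha*A*A^H + beta*C with C stored in RFP format; with
# alpha = -1 and beta = 2 the call below should yield -C @ C^H + 2*A, which is
# what the final assertion compares against.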
Afp_out = shfrk(n, 2, -1, C, 2, Afp)
A_out, _ = tfttr(n, Afp_out)
assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
"""
Test for going back and forth between the returned format of he/sytrf to
L and D factors/permutations.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 10
if ind > 1:
A = (randint(-30, 30, (n, n)) +
randint(-30, 30, (n, n))*1j).astype(dtype)
A = A + A.conj().T
else:
A = randint(-30, 30, (n, n)).astype(dtype)
A = A + A.T + n*eye(n)
tol = 100*np.spacing(dtype(1.0).real)
syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
'sytrf_lwork'), dtype=dtype)
lw = _compute_lwork(trf_lwork, n, lower=1)
L, D, perm = ldl(A, lower=1, hermitian=False)
lw = _compute_lwork(trf_lwork, n, lower=1)
ldu, ipiv, info = trf(A, lower=1, lwork=lw)
a, e, info = syconv(ldu, ipiv, lower=1)
assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.)
# Test also upper
U, D, perm = ldl(A, lower=0, hermitian=False)
ldu, ipiv, info = trf(A, lower=0)
a, e, info = syconv(ldu, ipiv, lower=0)
assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
class TestBlockedQR(object):
"""
Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
and tpmqr.
"""
def test_geqrt_gemqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
a, t, info = geqrt(n, A)
assert(info == 0)
# Extract elementary reflectors from lower triangle, adding the
# main diagonal of ones.
v = np.tril(a, -1) + np.eye(n, dtype=dtype)
# Generate the block Householder transform I - VTV^H
Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
R = np.triu(a)
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
rtol=0.)
assert_allclose(Q @ R, A, atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, info = gemqrt(a, t, C, side=side, trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
qC = q @ C
else:
qC = C @ q
assert_allclose(c, qC, atol=tol, rtol=0.)
# Test default arguments
if (side, trans) == ('L', 'N'):
c_default, info = gemqrt(a, t, C)
assert(info == 0)
assert_equal(c_default, c)
# Test invalid side/trans
assert_raises(Exception, gemqrt, a, t, C, side='A')
assert_raises(Exception, gemqrt, a, t, C, trans='A')
def test_tpqrt_tpmqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
B = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
# Test for the range of pentagonal B, from square to upper
# triangular
for l in (0, n // 2, n):
a, b, t, info = tpqrt(l, n, A, B)
assert(info == 0)
# Check that lower triangular part of A has not been modified
assert_equal(np.tril(a, -1), np.tril(A, -1))
# Check that elements not part of the pentagonal portion of B
# have not been modified.
assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
# Extract pentagonal portion of B
B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
# Generate elementary reflectors
v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
# Generate the block Householder transform I - VTV^H
Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
R = np.concatenate((np.triu(a), np.zeros_like(a)))
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
atol=tol, rtol=0.)
assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
D = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, d, info = tpmqrt(l, b, t, C, D, side=side,
trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
cd = np.concatenate((c, d), axis=0)
CD = np.concatenate((C, D), axis=0)
qCD = q @ CD
else:
cd = np.concatenate((c, d), axis=1)
CD = np.concatenate((C, D), axis=1)
qCD = CD @ q
assert_allclose(cd, qCD, atol=tol, rtol=0.)
if (side, trans) == ('L', 'N'):
c_default, d_default, info = tpmqrt(l, b, t, C, D)
assert(info == 0)
assert_equal(c_default, c)
assert_equal(d_default, d)
# Test invalid side/trans
assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
def test_pstrf():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstrf
n = 10
r = 2
pstrf = get_lapack_funcs('pstrf', dtype=dtype)
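# ?pstrf computes a Cholesky factorization with complete pivoting of a
# positive semidefinite matrix; it returns the factor, the pivot vector piv,
# the computed rank r_c, and info. The test expects info == 1 because A is
# constructed to be rank deficient.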
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstrf(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# Runs under python-dbg 3.5.2 cause trouble with the following assertion.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstrf(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_pstf2():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstf2
n = 10
r = 2
pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstf2(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# Runs under python-dbg 3.5.2 cause trouble with the commented assertions.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstf2(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_geequ():
desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
[1.0000, -0.5619, -1.0000, -1.0000],
[0.5874, -1.0000, -0.0596, -0.5341],
[-1.0000, -0.5946, -0.0294, 0.9957]])
desired_cplx = np.array([[-0.2816+0.5359*1j,
0.0812+0.9188*1j,
-0.7439-0.2561*1j],
[-0.3562-0.2954*1j,
0.9566-0.0434*1j,
-0.0174+0.1555*1j],
[0.8607+0.1393*1j,
-0.2759+0.7241*1j,
-0.1642-0.1365*1j]])
for ind, dtype in enumerate(DTYPES):
if ind < 2:
# Use examples from the NAG documentation
A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
[5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
[1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
[-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
A = A.astype(dtype)
else:
A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
[-1.70e+00, 3.31e+10, -0.15e+00],
[2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
[-1.41e+00, -0.15e+10, 1.34e+00],
[0.39e-10, 1.47e+00, -0.69e-10]])*1j
A = A.astype(dtype)
geequ = get_lapack_funcs('geequ', dtype=dtype)
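# ?geequ returns row and column scale factors r and c chosen to equilibrate A,
# so that the scaled matrix diag(r) @ A @ diag(c) (r[:, None]*A*c below) has
# entries of roughly unit magnitude; this is compared against the NAG
# reference values.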
r, c, rowcnd, colcnd, amax, info = geequ(A)
if ind < 2:
assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
else:
assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
def test_syequb():
desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
for ind, dtype in enumerate(DTYPES):
A = np.eye(10, dtype=dtype)
alpha = dtype(1. if ind < 2 else 1.j)
d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
A += np.rot90(np.diag(d))
syequb = get_lapack_funcs('syequb', dtype=dtype)
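# ?syequb computes a diagonal scaling s intended to reduce the condition
# number of diag(s) @ A @ diag(s); the test only checks the truncated base-2
# exponents of s against the expected values.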
s, scond, amax, info = syequb(A)
assert_equal(np.log2(s).astype(int), desired_log2s)
def test_heequb():
# zheequb has a bug for versions <= LAPACK 3.9.0
# See Reference-LAPACK gh-61 and gh-408
# Hence the zheequb test is customized accordingly to avoid
# work scaling.
A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
s, scond, amax, info = lapack.zheequb(A)
assert_equal(info, 0)
assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
A[5, 5] = 1024
A[5, 0] = 16j
s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
assert_equal(info, 0)
assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
def test_getc2_gesc2():
np.random.seed(42)
n = 10
desired_real = np.random.rand(n)
desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
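# ?getc2 computes an LU factorization with complete pivoting (row pivots in
# ipiv, column pivots in jpiv); ?gesc2 then solves the system using that
# factorization and returns a scale factor applied to avoid overflow, hence
# the x/scale comparison below.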
for ind, dtype in enumerate(DTYPES):
if ind < 2:
A = np.random.rand(n, n)
A = A.astype(dtype)
b = A @ desired_real
b = b.astype(dtype)
else:
A = np.random.rand(n, n) + np.random.rand(n, n)*1j
A = A.astype(dtype)
b = A @ desired_cplx
b = b.astype(dtype)
getc2 = get_lapack_funcs('getc2', dtype=dtype)
gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
if ind < 2:
assert_array_almost_equal(desired_real.astype(dtype),
x/scale, decimal=4)
else:
assert_array_almost_equal(desired_cplx.astype(dtype),
x/scale, decimal=4)
@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R'
@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N'
@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N'
@pytest.mark.parametrize('jobr', [0, 1])
@pytest.mark.parametrize('jobp', [0, 1])
def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
"""Test the lapack routine ?gejsv.
This function tests that a singular value decomposition can be performed
on the random M-by-N matrix A. The test performs the SVD using ?gejsv
then performs the following checks:
* ?gejsv exits successfully (info == 0)
* The returned singular values are correct
* `A` can be reconstructed from `u`, `SIGMA`, `v`
* Ensure that u.T @ u is the identity matrix
* Ensure that v.T @ v is the identity matrix
* The reported matrix rank
* The reported number of singular values
* If denormalized floats are required
Notes
-----
joba specifies several choices affecting the calculation's accuracy.
Although all arguments are tested, the tests only check that the correct
solution is returned - NOT that the prescribed actions are performed
internally.
jobt is, as of v3.9.0, still experimental and is excluded here to cut down the
number of test cases. However, the keyword itself is tested externally.
"""
seed(42)
# Define some constants for later use:
m, n = size
atol = 100 * np.finfo(dtype).eps
A = generate_random_dtype_array(size, dtype)
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# Set up checks for invalid job? combinations
# if an invalid combination occurs we set the appropriate
# exit status.
lsvec = jobu < 2 # Calculate left singular vectors
rsvec = jobv < 2 # Calculate right singular vectors
l2tran = (jobt == 1) and (m == n)
is_complex = np.iscomplexobj(A)
invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
# Set the exit status to the expected value.
# Here we only check for invalid combinations, not individual
# parameters.
if invalid_cplx_jobu:
exit_status = -2
elif invalid_real_jobv or invalid_cplx_jobv:
exit_status = -3
else:
exit_status = 0
if (jobu > 1) and (jobv == 1):
assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
else:
sva, u, v, work, iwork, info = gejsv(A,
joba=joba,
jobu=jobu,
jobv=jobv,
jobr=jobr,
jobt=jobt,
jobp=jobp)
# Check that ?gejsv exited successfully/as expected
assert_equal(info, exit_status)
# If exit_status is non-zero the combination of jobs is invalid.
# We test this above but no calculations are performed.
if not exit_status:
# Check the returned singular values
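# Per the ?gejsv documentation, the singular values of A are recovered as
# (work[0]/work[1]) * sva[:n], where work[0]/work[1] is the scaling factor
# applied internally by the routine.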
sigma = (work[0] / work[1]) * sva[:n]
assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
if jobu == 1:
# If JOBU = 'F', then u contains the M-by-M matrix of
# the left singular vectors, including an ONB of the orthogonal
# complement of the Range(A)
# However, to recalculate A we are concerned about the
# first n singular values and so can ignore the latter.
# TODO: Add a test for ONB?
u = u[:, :n]
if lsvec and rsvec:
assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
if lsvec:
assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
if rsvec:
assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
assert_equal(iwork[0], np.linalg.matrix_rank(A))
assert_equal(iwork[1], np.count_nonzero(sigma))
# iwork[2] is non-zero if requested accuracy is not warranted for
# the data. This should never occur for these tests.
assert_equal(iwork[2], 0)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_gejsv_edge_arguments(dtype):
"""Test edge arguments return expected status"""
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# scalar A
sva, u, v, work, iwork, info = gejsv(1.)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 1d A
A = np.ones((1,), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 2d empty A
A = np.ones((1, 0), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 0))
assert_equal(v.shape, (1, 0))
assert_equal(sva, np.array([], dtype=dtype))
@pytest.mark.parametrize(('kwargs'),
({'joba': 9},
{'jobu': 9},
{'jobv': 9},
{'jobr': 9},
{'jobt': 9},
{'jobp': 9})
)
def test_gejsv_invalid_job_arguments(kwargs):
"""Test invalid job arguments raise an Exception"""
A = np.ones((2, 2), dtype=float)
gejsv = get_lapack_funcs('gejsv', dtype=float)
assert_raises(Exception, gejsv, A, **kwargs)
@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
[(np.array([[2.27, -1.54, 1.15, -1.94],
[0.28, -1.67, 0.94, -0.78],
[-0.48, -3.09, 0.99, -0.21],
[1.07, 1.22, 0.79, 0.63],
[-2.35, 2.93, -1.45, 2.30],
[0.62, -7.39, 1.03, -2.57]]),
np.array([9.9966, 3.6831, 1.3569, 0.5000]),
np.array([[0.2774, -0.6003, -0.1277, 0.1323],
[0.2020, -0.0301, 0.2805, 0.7034],
[0.2918, 0.3348, 0.6453, 0.1906],
[-0.0938, -0.3699, 0.6781, -0.5399],
[-0.4213, 0.5266, 0.0413, -0.0575],
[0.7816, 0.3353, -0.1645, -0.3957]]),
np.array([[0.1921, -0.8030, 0.0041, -0.5642],
[-0.8794, -0.3926, -0.0752, 0.2587],
[0.2140, -0.2980, 0.7827, 0.5027],
[-0.3795, 0.3351, 0.6178, -0.6017]]))])
def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
"""
This test implements the example found in the NAG manual, f08khf.
An example was not found for the complex case.
"""
# NAG manual provides accuracy up to 4 decimals
atol = 1e-4
gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_allclose(sva_expect, sva, atol=atol)
assert_allclose(u_expect, u, atol=atol)
assert_allclose(v_expect, v, atol=atol)
@pytest.mark.parametrize("dtype", DTYPES)
def test_gttrf_gttrs(dtype):
# The test uses ?gttrf and ?gttrs to solve a random system for each dtype.
# It tests that the outputs of ?gttrf define an LU factorization, that input
# parameters are unmodified, that transposition options function correctly,
# that incompatible matrix shapes raise an error, and that singular matrices
# return non-zero info.
seed(42)
n = 10
atol = 100 * np.finfo(dtype).eps
# create the matrix in accordance with the data type
du = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
dl = generate_random_dtype_array((n-1,), dtype=dtype)
diag_cpy = [dl.copy(), d.copy(), du.copy()]
A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
x = np.random.rand(n)
b = A @ x
gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
# test to assure that the inputs of ?gttrf are unmodified
assert_array_equal(dl, diag_cpy[0])
assert_array_equal(d, diag_cpy[1])
assert_array_equal(du, diag_cpy[2])
# generate L and U factors from ?gttrf return values
# L/U are lower/upper triangular by construction (initially and at end)
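# du2 returned by ?gttrf holds the second super-diagonal of U, i.e. the
# fill-in produced by partial pivoting.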
U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
L = np.eye(n, dtype=dtype)
for i, m in enumerate(_dl):
# L is given in a factored form.
# See
# www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
piv = ipiv[i] - 1
# right multiply by permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# right multiply by Li, rank-one modification of identity
L[:, i] += L[:, i+1]*m
# one last permutation
i, piv = -1, ipiv[-1] - 1
# right multiply by final permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# check that the outputs of ?gttrf define an LU decomposition of A
assert_allclose(A, L @ U, atol=atol)
b_cpy = b.copy()
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
# test that the inputs of ?gttrs are unmodified
assert_array_equal(b, b_cpy)
# test that the result of ?gttrs matches the expected input
assert_allclose(x, x_gttrs, atol=atol)
# test that ?gttrf and ?gttrs work with transposal options
if dtype in REAL_DTYPES:
trans = "T"
b_trans = A.T @ x
else:
trans = "C"
b_trans = A.conj().T @ x
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
assert_allclose(x, x_gttrs, atol=atol)
# test that ValueError is raised with incompatible matrix shapes
with assert_raises(ValueError):
gttrf(dl[:-1], d, du)
with assert_raises(ValueError):
gttrf(dl, d[:-1], du)
with assert_raises(ValueError):
gttrf(dl, d, du[:-1])
# test that matrix of size n=2 raises exception
with assert_raises(Exception):
gttrf(dl[0], d[:1], du[0])
# test that singular (row of all zeroes) matrix fails via info
du[0] = 0
d[0] = 0
__dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
np.testing.assert_(__d[info - 1] == 0,
"?gttrf: _d[info-1] is {}, not the illegal value :0."
.format(__d[info - 1]))
@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([2.3, -5, -.9, 7.1]),
np.array([3.4, 3.6, 7, -6, -1.015373]),
np.array([-1, 1.9, 8]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.7, 6.6],
[-0.5, 10.8],
[2.6, -3.2],
[0.6, -11.2],
[2.7, 19.1]
]),
np.array([[-4, 5],
[7, -4],
[3, -3],
[-4, -2],
[-3, 1]])),
(
np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j,
-1.3 + 3.3j, - .3 + 4.3j,
-3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
# du exp
np.array([-1.3 + 1.3j, -1.3 + 3.3j,
-0.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
-1.3399 + 0.2875j]),
np.array([2 + 1j, -1 + 1j, 1 - 1j]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, - 6.9 - 5.3j],
[-14.7 + 9.7j, - 6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j],
[3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j],
[-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]])
)])
def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
du2_exp, ipiv_exp, b, x):
# test to assure that wrapper is consistent with NAG Library Manual Mark 26
# example problems: f07cdf and f07cef (real)
# examples: f07crf and f07csf (complex)
# (Links may expire, so search for "NAG Library Manual Mark 26" online)
gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
assert_allclose(du2, du2_exp)
assert_allclose(_du, du_exp)
assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
assert_allclose(ipiv, ipiv_exp)
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
assert_allclose(x_gttrs, x)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
def test_geqrfp_lwork(dtype, shape):
geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrfp_lwork(m=m, n=n)
assert_equal(info, 0)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs(ddtype, dtype):
seed(42)
# set test tolerance appropriate for dtype
atol = 100*np.finfo(dtype).eps
# n is the length diagonal of A
n = 10
# create diagonals according to size and dtype
# diagonal d should always be real.
# add 4 to d so it will be dominant for all dtypes
d = generate_random_dtype_array((n,), ddtype) + 4
# diagonal e may be real or complex.
e = generate_random_dtype_array((n-1,), dtype)
# assemble diagonals together into matrix
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
# store a copy of diagonals to later verify
diag_cpy = [d.copy(), e.copy()]
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
_d, _e, info = pttrf(d, e)
# test to assure that the inputs of ?pttrf are unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
# test that the factors from pttrf can be recombined to make A
L = np.diag(_e, -1) + np.diag(np.ones(n))
D = np.diag(_d)
assert_allclose(A, L@D@L.conjugate().T, atol=atol)
# generate random solution x
x = generate_random_dtype_array((n,), dtype)
# determine accompanying b to get soln x
b = A@x
# determine _x from pttrs
pttrs = get_lapack_funcs('pttrs', dtype=dtype)
_x, info = pttrs(_d, _e.conj(), b)
assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
# test that _x from pttrs matches the expected x
assert_allclose(x, _x, atol=atol)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that ValueError is raised with incompatible matrix shapes
assert_raises(ValueError, pttrf, d[:-1], e)
assert_raises(ValueError, pttrf, d, e[:-1])
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that singular (row of all zeroes) matrix fails via info
d[0] = 0
e[0] = 0
_d, _e, info = pttrf(d, e)
assert_equal(_d[info - 1], 0,
"?pttrf: _d[info-1] is {}, not the illegal value :0."
.format(_d[info - 1]))
# test with non-spd matrix
d = generate_random_dtype_array((n,), ddtype)
_d, _e, info = pttrf(d, e)
assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([4, 9, 25, 16, 1]),
np.array([-.5, -.6667, .6, .5]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
[3, -5]])
), (
np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([16, 9, 1, 4]),
np.array([1+1j, 2-1j, 1-4j]),
np.array([[64+16j, -16-32j], [93+62j, 61-66j],
[78-80j, 71-74j], [14-27j, 35+15j]]),
np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
[1-1j, 2+1j]])
)])
def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problems: f07jdf and f07jef (real)
# examples: f07jrf and f07jsf (complex)
# NAG examples provide 4 decimals.
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
atol = 1e-4
pttrf = get_lapack_funcs('pttrf', dtype=e[0])
_d, _e, info = pttrf(d, e)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(_e, e_expect, atol=atol)
pttrs = get_lapack_funcs('pttrs', dtype=e[0])
_x, info = pttrs(_d, _e.conj(), b)
assert_allclose(_x, x_expect, atol=atol)
# also test option `lower`
if e.dtype in COMPLEX_DTYPES:
_x, info = pttrs(_d, _e, b, lower=1)
assert_allclose(_x, x_expect, atol=atol)
def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
# used by ?pteqr tests to build parameters
# returns tuple of (d, e, A, z)
if compute_z == 1:
# build Hermitian A from Q**T * tri * Q = A by creating Q and tri
A_eig = generate_random_dtype_array((n, n), dtype)
A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
A_eig = (A_eig + A_eig.conj().T) / 2
# obtain right eigenvectors (orthogonal)
vr = eigh(A_eig)[1]
# create tridiagonal matrix
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), realtype)
tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
# Build A using these factors that sytrd would: (Q**T * tri * Q = A)
A = vr @ tri @ vr.conj().T
# vr is orthogonal
z = vr
else:
# d and e are always real per lapack docs.
d = generate_random_dtype_array((n,), realtype)
e = generate_random_dtype_array((n-1,), realtype)
# make SPD
d = d + 4
A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
return (d, e, A, z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr(dtype, realtype, compute_z):
'''
Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
It generates random SPD matrix diagonals d and e, and then confirms
correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it
tests that z can reform A.
'''
seed(42)
atol = 1000*np.finfo(dtype).eps
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_equal(info, 0, "info = {}, should be 0.".format(info))
# compare the routine's eigenvalues with scipy.linalg.eig's.
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
if compute_z:
# verify z_pteqr as orthogonal
assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
atol=atol)
# verify that z_pteqr recombines to A
assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
A, atol=atol)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_non_spd(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with non-spd matrix
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with incorrect/incompatible array sizes
assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
if compute_z:
assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_singular(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with singular matrix
d[0] = 0
e[0] = 0
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
[(2, # "I"
np.array([4.16, 5.25, 1.09, .62]),
np.array([3.17, -.97, .55]),
np.array([8.0023, 1.9926, 1.0014, 0.1237]),
np.array([[0.6326, 0.6245, -0.4191, 0.1847],
[0.7668, -0.4270, 0.4176, -0.2352],
[-0.1082, 0.6071, 0.4594, -0.6393],
[-0.0081, 0.2432, 0.6625, 0.7084]])),
])
def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
'''
Implements real (f08jgf) example from NAG Manual Mark 26.
Tests for correct outputs.
'''
# the NAG manual has 4 decimals accuracy
atol = 1e-4
pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
_d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
def test_geqrfp(dtype, matrix_size):
# Tests for all dtypes, and for tall, wide, and square matrices.
# Using the routine with a random matrix A, Q and R are obtained and then
# tested such that R is upper triangular and non-negative on the diagonal,
# and Q is an orthogonal matrix. Verifies that A = Q@R. It also
# tests against a matrix for which the linalg.qr method returns
# negative diagonals, and checks the error messaging.
# set test tolerance appropriate for dtype
np.random.seed(42)
rtol = 250*np.finfo(dtype).eps
atol = 100*np.finfo(dtype).eps
# get appropriate ?geqrfp for dtype
geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
m, n = matrix_size
# create a random matrix of dimensions m x n
A = generate_random_dtype_array((m, n), dtype=dtype)
# create qr matrix using geqrfp
qr_A, tau, info = geqrfp(A)
# obtain r from the upper triangular area
r = np.triu(qr_A)
# obtain q from the orgqr lapack routine
# based on linalg.qr's extraction strategy of q with orgqr
if m > n:
# this adds an extra column to the end of qr_A
# let qqr be an empty m x m matrix
qqr = np.zeros((m, m), dtype=dtype)
# set first n columns of qqr to qr_A
qqr[:, :n] = qr_A
# determine q from this qqr
# note that m is sufficient for lwork based on the LAPACK documentation
q = gqr(qqr, tau=tau, lwork=m)[0]
else:
q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
# test that q and r still make A
assert_allclose(q@r, A, rtol=rtol)
# ensure that q is orthogonal (that q @ transposed q is the identity)
assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
atol=atol)
# ensure r is upper tri by comparing original r to r as upper triangular
assert_allclose(r, np.triu(r), rtol=rtol)
# make sure diagonals of r are positive for this random solution
assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
# ensure that info is zero for this success
assert_(info == 0)
# test that this routine gives r diagonals that are positive for a
# matrix for which scipy.linalg.qr returns negative entries on the diagonal
A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
r_rq_neg, q_rq_neg = qr(A_negative)
rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
# assert that any of the entries on the diagonal from linalg.qr
# are negative and that all of geqrfp are positive.
assert_(np.any(np.diag(r_rq_neg) < 0) and
np.all(np.diag(r) > 0))
def test_geqrfp_errors_with_empty_array():
# check that empty array raises good error message
A_empty = np.array([])
geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
assert_raises(Exception, geqrfp, A_empty)
@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_standard_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
try:
_compute_lwork(sc_dlw, n, lower=1)
_compute_lwork(dz_dlw, n, lower=1)
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("driver", ['gv', 'gvx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_generalized_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
# Shouldn't raise any exceptions
try:
_compute_lwork(sc_dlw, n, uplo="L")
_compute_lwork(dz_dlw, n, uplo="L")
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("dtype_", DTYPES)
@pytest.mark.parametrize("m", [1, 10, 100, 1000])
def test_orcsd_uncsd_lwork(dtype_, m):
seed(1234)
p = randint(0, m)
q = m - p
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
dlw = pfx + 'csd_lwork'
lw = get_lapack_funcs(dlw, dtype=dtype_)
lwval = _compute_lwork(lw, m, p, q)
lwval = lwval if pfx == 'un' else (lwval,)
assert all([x > 0 for x in lwval])
@pytest.mark.parametrize("dtype_", DTYPES)
def test_orcsd_uncsd(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'], lwval))
cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
assert info == 0
U = block_diag(u1, u2)
VH = block_diag(v1t, v2t)
r = min(min(p, q), min(m-p, m-q))
n11 = min(p, q) - r
n12 = min(p, m-q) - r
n21 = min(m-p, q) - r
n22 = min(m-p, m-q) - r
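# The CS decomposition factors the unitary X as U @ S @ VH, where S carries
# the cosine-sine structure: identity and zero blocks of sizes n11, n12, n21,
# n22 plus [[cos, -sin], [sin, cos]] blocks of size r. The loops below
# assemble S explicitly for comparison.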
S = np.zeros((m, m), dtype=dtype_)
one = dtype_(1.)
for i in range(n11):
S[i, i] = one
for i in range(n22):
S[p+i, q+i] = one
for i in range(n12):
S[i+n11+r, i+n11+r+n21+n22+r] = -one
for i in range(n21):
S[p+n22+r+i, n11+r+i] = one
for i in range(r):
S[i+n11, i+n11] = np.cos(theta[i])
S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
S[p+n22+i, i+n11] = np.sin(theta[i])
Xc = U @ S @ VH
assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx(dtype, trans_bool, fact):
"""
These tests use ?gtsvx to solve a random Ax=b system for each dtype.
It tests that the outputs define an LU matrix, that inputs are unmodified,
transposal options, incompatible shapes, singular matrices, and
singular factorizations. It parametrizes DTYPES and the 'fact' value along
with the fact related inputs.
"""
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
b = (A.conj().T if trans_bool else A) @ x
# store a copy of the inputs to check they haven't been modified later
inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
# set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
# assure that inputs are unmodified
assert_array_equal(dl, inputs_cpy[0])
assert_array_equal(d, inputs_cpy[1])
assert_array_equal(du, inputs_cpy[2])
assert_array_equal(b, inputs_cpy[3])
# test that x_soln matches the expected x
assert_allclose(x, x_soln, atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert_(hasattr(rcond, "__len__") is not True,
"rcond should be scalar but is {}".format(rcond))
# ferr should be length of # of cols in x
assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but should be {}"
.format(ferr.shape[0], b.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but should be {}"
.format(berr.shape[0], b.shape[1]))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [0, 1])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_singular(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test with singular matrix
# no need to test inputs with fact "F" since ?gttrf already does.
if fact == "N":
# Construct a singular example manually
d[-1] = 0
dl[-1] = 0
# solve using routine
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test for the singular matrix.
assert info > 0, "info should be > 0 for singular matrix"
elif fact == 'F':
# assuming that a singular factorization is input
df_[-1] = 0
duf_[-1] = 0
du2f_[-1] = 0
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# info should not be zero and should provide index of illegal value
assert info > 0, "info should be > 0 for singular matrix"
@pytest.mark.parametrize("dtype", DTYPES*2)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
if fact == "N":
assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
else:
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
@pytest.mark.parametrize("du,d,dl,b,x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
[.6, -11.2], [2.7, 19.1]]),
np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
[-3, 1]])),
(np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
-.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, -6.9 - 5.3j],
[-14.7 + 9.7j, -6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]]))])
def test_gtsvx_NAG(du, d, dl, b, x):
# Test to ensure wrapper is consistent with NAG Manual Mark 26
# example problems: real (f07cbf) and complex (f07cpf)
gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_array_almost_equal(x, x_soln)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx(dtype, realtype, fact, df_de_lambda):
'''
This tests the ?ptsvx lapack routine wrapper to solve a random system
Ax = b for all dtypes and input variations. It tests that input parameters
are unmodified, that the fact options work, that incompatible matrix shapes
raise an error, and that singular matrices return non-zero info.
'''
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# create copy to later test that they are unmodified
diag_cpy = [d.copy(), e.copy(), b.copy()]
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
# d, e, and b should be unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_array_equal(b, diag_cpy[2])
assert_(info == 0, "info should be 0 but is {}.".format(info))
assert_array_almost_equal(x_soln, x)
# test that the factors from ptsvx can be recombined to make A
L = np.diag(ef, -1) + np.diag(np.ones(n))
D = np.diag(df)
assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert not hasattr(rcond, "__len__"), \
"rcond should be scalar but is {}".format(rcond)
# ferr should be length of # of cols in x
assert_(ferr.shape == (2,), "ferr.shape is {} but should be ({},)"
.format(ferr.shape, x_soln.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape == (2,), "berr.shape is {} but should be ({},)"
.format(berr.shape, x_soln.shape[1]))
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# test with malformatted array sizes
assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
if fact == "N":
d[3] = 0
# obtain new df, ef
df, ef, info = df_de_lambda(d, e)
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
# test for the singular matrix.
assert info > 0 and info <= n
# non SPD matrix
d = generate_random_dtype_array((n,), realtype)
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
assert info > 0 and info <= n
else:
# assuming that someone is using a singular factorization
df, ef, info = df_de_lambda(d, e)
df[0] = 0
ef[0] = 0
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
assert info > 0
@pytest.mark.parametrize('d,e,b,x',
[(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3],
[-1, 6], [3, -5]])),
(np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([[64 + 16j, -16 - 32j],
[93 + 62j, 61 - 66j],
[78 - 80j, 71 - 74j],
[14 - 27j, 35 + 15j]]),
np.array([[2 + 1j, -3 - 2j],
[1 + 1j, 1 + 1j],
[1 - 2j, 1 - 2j],
[1 - 1j, 2 + 1j]]))])
def test_ptsvx_NAG(d, e, b, x):
    # test to ensure that the wrapper is consistent with NAG Manual Mark 26
    # example problems: f07jbf, f07jpf
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
# obtain routine with correct type based on e.dtype
ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
# solve using routine
df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
# determine ptsvx's solution and x are the same.
assert_array_almost_equal(x, x_ptsvx)
| 38.621538
| 79
| 0.509773
|
5fce8584e09887ea464c60faeb1051f6d9f5b07c
| 7,933
|
py
|
Python
|
pyaf/TS/Time.py
|
vishalbelsare/pyaf
|
94aeeb0e78bea6a82353cf351bc8bec529e439bb
|
[
"BSD-3-Clause"
] | null | null | null |
pyaf/TS/Time.py
|
vishalbelsare/pyaf
|
94aeeb0e78bea6a82353cf351bc8bec529e439bb
|
[
"BSD-3-Clause"
] | null | null | null |
pyaf/TS/Time.py
|
vishalbelsare/pyaf
|
94aeeb0e78bea6a82353cf351bc8bec529e439bb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2016 Antoine Carme <Antoine.Carme@Laposte.net>
# All rights reserved.
# This file is part of the Python Automatic Forecasting (PyAF) library and is made available under
# the terms of the 3 Clause BSD license
import pandas as pd
import numpy as np
from enum import IntEnum
from . import Utils as tsutil
from . import TimeSeries_Cutting as tscut
from . import DateTime_Functions as dtfunc
class cTimeInfo:
# class data
def __init__(self):
self.mSignalFrame = pd.DataFrame()
self.mTimeMin = None;
self.mTimeMax = None;
self.mTimeMinMaxDiff = None;
self.mTimeDelta = None;
self.mHorizon = None;
self.mResolution = dtfunc.eTimeResolution.NONE
self.mSplit = None
def info(self):
lStr2 = "TimeVariable='" + self.mTime +"'";
lStr2 += " TimeMin=" + str(self.mTimeMin) +"";
lStr2 += " TimeMax=" + str(self.mTimeMax) +"";
lStr2 += " TimeDelta=" + str(self.mTimeDelta) +"";
lStr2 += " Horizon=" + str(self.mHorizon) +"";
return lStr2;
def to_dict(self):
dict1 = {};
dict1["TimeVariable"] = self.mTime;
dict1["TimeMinMax"] = [str(self.mSignalFrame[self.mTime].min()) ,
str(self.mSignalFrame[self.mTime].max())];
dict1["Horizon"] = self.mHorizon;
return dict1;
def addVars(self, df):
df[self.mRowNumberColumn] = self.mSignalFrame[self.mRowNumberColumn]
df[self.mTime] = self.mSignalFrame[self.mTime]
df[self.mNormalizedTimeColumn] = self.mSignalFrame[self.mNormalizedTimeColumn]
df[self.mSignal] = self.mSignalFrame[self.mSignal]
df[self.mOriginalSignal] = self.mSignalFrame[self.mOriginalSignal]
def get_time_dtype(self):
# print(self.mTimeMax, type(self.mTimeMax))
lType = self.mSignalFrame[self.mTime].dtype;
return lType;
def checkDateTypesForNewDataset(self, df):
if(self.mTimeMax is not None):
lType1 = self.get_time_dtype();
lType2 = df[self.mTime].dtype
if(lType1.kind != lType2.kind):
                raise tsutil.PyAF_Error("Incompatible Time Column Type expected='" + str(lType1) + "' got: '" + str(lType2) + "'");
pass
def transformDataset(self, df):
self.checkDateTypesForNewDataset(df);
# new row
lLastRow = df.tail(1).copy();
lNextTime = self.nextTime(df, 1)
lLastRow[self.mTime] = lNextTime
lLastRow[self.mSignal] = np.nan
if(self.mNormalizedTimeColumn in df.columns):
lLastRow[self.mNormalizedTimeColumn] = self.normalizeTime(lNextTime)
lLastRow[self.mRowNumberColumn] = lLastRow[self.mRowNumberColumn].max() + 1
# print(lLastRow.columns , df.columns)
assert(str(lLastRow.columns) == str(df.columns))
df = pd.concat([df, lLastRow], ignore_index=True, verify_integrity = True, sort=False);
if(self.mNormalizedTimeColumn not in df.columns):
df[self.mRowNumberColumn] = np.arange(0, df.shape[0]);
df[self.mNormalizedTimeColumn] = self.compute_normalized_date_column(df[self.mTime])
# print(df.tail());
return df;
def isPhysicalTime(self):
lHelper = dtfunc.cDateTime_Helper()
return lHelper.isPhysicalTime(self.mSignalFrame[self.mTime])
def analyzeSeasonals(self):
if(not self.isPhysicalTime()):
return;
lEstim = self.mSplit.getEstimPart(self.mSignalFrame);
lEstimTime = lEstim[self.mTime]
lHelper = dtfunc.cDateTime_Helper()
self.mResolution = lHelper.guess_time_resolution(lEstimTime);
def checkDateTypes(self):
# print(self.mSignalFrame.info());
type1 = self.mSignalFrame[self.mTime].dtype
if(type1.kind == 'O'):
raise tsutil.PyAF_Error('Invalid Time Column Type ' + self.mTime + '[' + str(type1) + ']');
def adaptTimeDeltaToTimeResolution(self):
if(not self.isPhysicalTime()):
return;
lHelper = dtfunc.cDateTime_Helper()
self.mTimeDelta = lHelper.adaptTimeDeltaToTimeResolution(self.mResolution , self.mTimeDelta);
def computeTimeDelta(self):
#print(self.mSignalFrame.columns);
# print(self.mSignalFrame[self.mTime].head());
lEstim = self.mSplit.getEstimPart(self.mSignalFrame)
lTimeBefore = lEstim[self.mTime].shift(1);
# lTimeBefore.fillna(self.mTimeMin, inplace=True)
N = lEstim.shape[0];
if(N == 1):
if(self.isPhysicalTime()):
self.mTimeDelta = np.timedelta64(1,'D');
else:
self.mTimeDelta = 1
return
#print(self.mSignal, self.mTime, N);
#print(lEstim[self.mTime].head());
#print(lTimeBefore.head());
lDiffs = lEstim[self.mTime][1:N] - lTimeBefore[1:N]
if(self.mOptions.mTimeDeltaComputationMethod == "USER"):
self.mTimeDelta = self.mOptions.mUserTimeDelta;
if(self.mOptions.mTimeDeltaComputationMethod == "AVG"):
self.mTimeDelta = np.mean(lDiffs);
type1 = self.mSignalFrame[self.mTime].dtype
if(type1.kind == 'i' or type1.kind == 'u'):
self.mTimeDelta = int(self.mTimeDelta)
if(self.mOptions.mTimeDeltaComputationMethod == "MODE"):
delta_counts = pd.DataFrame(lDiffs.value_counts());
self.mTimeDelta = delta_counts[self.mTime].argmax();
self.adaptTimeDeltaToTimeResolution();
def estimate(self):
#print(self.mSignalFrame.columns);
#print(self.mSignalFrame[self.mTime].head());
self.checkDateTypes();
self.mRowNumberColumn = "row_number"
self.mNormalizedTimeColumn = self.mTime + "_Normalized";
self.analyzeSeasonals();
lEstim = self.mSplit.getEstimPart(self.mSignalFrame)
self.mTimeMin = lEstim[self.mTime].min();
self.mTimeMax = lEstim[self.mTime].max();
if(self.isPhysicalTime()):
self.mTimeMin = np.datetime64(self.mTimeMin.to_pydatetime());
self.mTimeMax = np.datetime64(self.mTimeMax.to_pydatetime());
self.mTimeMinMaxDiff = self.mTimeMax - self.mTimeMin;
self.mEstimCount = lEstim.shape[0]
# print(self.mTimeMin, self.mTimeMax , self.mTimeMinMaxDiff , (self.mTimeMax - self.mTimeMin)/self.mTimeMinMaxDiff)
self.computeTimeDelta();
self.mSignalFrame[self.mNormalizedTimeColumn] = self.compute_normalized_date_column(self.mSignalFrame[self.mTime])
self.dump();
def dump(self):
time_info = self.info();
def compute_normalized_date_column(self, idate_column):
if(self.mEstimCount == 1):
return 0.0;
vf = np.vectorize(self.normalizeTime)
return vf(idate_column)
@tsutil.cMemoize
def normalizeTime(self , iTime):
if(self.mEstimCount == 1):
return 0.0;
output = ( iTime- self.mTimeMin) / self.mTimeMinMaxDiff
return output
def cast_to_time_dtype(self, iTimeValue):
lType1 = self.get_time_dtype();
lTimeValue = np.array([iTimeValue]).astype(lType1)[0];
return lTimeValue;
def nextTime(self, df, iSteps):
#print(df.tail(1)[self.mTime]);
lLastTime = df[self.mTime].values[-1]
if(self.isPhysicalTime()):
lLastTime = pd.Timestamp(lLastTime)
# print("NEXT_TIME" , lLastTime, iSteps, self.mTimeDelta);
lNextTime = lLastTime + iSteps * self.mTimeDelta;
lNextTime = self.cast_to_time_dtype(lNextTime.to_datetime64())
else:
lNextTime = lLastTime + iSteps * self.mTimeDelta;
lNextTime = self.cast_to_time_dtype(lNextTime)
return lNextTime;
| 38.323671
| 128
| 0.61969
|
d29b108dc29e22a5b005439b7bb8222054869c01
| 217
|
py
|
Python
|
contests/leetcode-b6/a.py
|
Nightwish-cn/my_leetcode
|
40f206e346f3f734fb28f52b9cde0e0041436973
|
[
"MIT"
] | 23
|
2020-03-30T05:44:56.000Z
|
2021-09-04T16:00:57.000Z
|
contests/leetcode-b6/a.py
|
Nightwish-cn/my_leetcode
|
40f206e346f3f734fb28f52b9cde0e0041436973
|
[
"MIT"
] | 1
|
2020-05-10T15:04:05.000Z
|
2020-06-14T01:21:44.000Z
|
contests/leetcode-b6/a.py
|
Nightwish-cn/my_leetcode
|
40f206e346f3f734fb28f52b9cde0e0041436973
|
[
"MIT"
] | 6
|
2020-03-30T05:45:04.000Z
|
2020-08-13T10:01:39.000Z
|
class Solution:
def isMajorityElement(self, nums: List[int], target: int) -> bool:
import collections
ct = collections.Counter(nums)
len1 = len(nums)
return ct[target] > (len1 // 2)
| 36.166667
| 70
| 0.608295
|
98650705531955eb233944ea7348fd17cff7d192
| 1,643
|
py
|
Python
|
scripts/gen_gaussian_cdt.py
|
banerjeeutsav/sapphire_sim
|
85b96ef353a6135c96835841bf539de7df086f43
|
[
"MIT"
] | 4
|
2020-03-09T06:05:27.000Z
|
2021-09-17T06:49:06.000Z
|
scripts/gen_gaussian_cdt.py
|
banerjeeutsav/sapphire_sim
|
85b96ef353a6135c96835841bf539de7df086f43
|
[
"MIT"
] | null | null | null |
scripts/gen_gaussian_cdt.py
|
banerjeeutsav/sapphire_sim
|
85b96ef353a6135c96835841bf539de7df086f43
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
###################################################################################################
#
# Python CDT Generator
#
# Author: Utsav Banerjee
# Last Modified: 12-Oct-2019
#
###################################################################################################
import sys, math, os, binascii, random
from decimal import *
####################################
# CDT for Discrete Gaussian Sampler
####################################
def gen_Gaussian_CDT(sigma, cdt_len, precision, cdt_file):
# Compute golden PMF
prob_golden = [0.0] * (cdt_len)
for i in range(cdt_len):
prob_golden[i] = Decimal(0.5 * (math.erf((i + 0.5) / (sigma * math.sqrt(2))) - math.erf((i - 0.5) / (sigma * math.sqrt(2)))))
# Compute quantized CDT
CDT = [0 for i in range(cdt_len)]
CDT[0] = prob_golden[0]
for i in range(1, cdt_len):
CDT[i] = CDT[i-1] + 2*prob_golden[i]
CDT = [int((CDT[i]) * (2 ** (precision-1))) for i in range(cdt_len)]
print("CDF_TABLE = %s" % CDT)
f = open(cdt_file, "w")
for i in range(cdt_len):
f.write("%d\n" % CDT[i])
f.close()
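# Example invocation (the parameter values below are purely illustrative, not taken from the original script):
#   python gen_gaussian_cdt.py 3.33 64 32 cdt_sigma3p33.txt
# This would write a 64-entry CDT with 32 bits of precision for sigma = 3.33 to cdt_sigma3p33.txt.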
if len(sys.argv) < 5:
print("ERROR: Incorrect arguments provided for CDT generator script")
print("Usage: python gen_gaussian_cdt.py <sigma> <cdt_len> <prec> <out_cdt_file_path>")
exit()
if int(sys.argv[2]) > 64:
print("ERROR: Length of CDT must not be greater than 64\n")
exit()
if int(sys.argv[3]) > 32:
print("ERROR: Precision of CDT must not be greater than 32\n")
exit()
gen_Gaussian_CDT(float(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), sys.argv[4])
| 31
| 133
| 0.517346
|
8cb1c444346e6900cfa2f763a5bc475d052e748d
| 591
|
py
|
Python
|
sequence_search/consumer/gunicorn.py
|
RNAcentral/sequence_search
|
e0319e384cc9dea017f165e2c4c5143ee232f9fd
|
[
"Apache-2.0"
] | 2
|
2019-02-13T16:33:46.000Z
|
2019-10-22T16:27:00.000Z
|
sequence_search/consumer/gunicorn.py
|
RNAcentral/sequence_search
|
e0319e384cc9dea017f165e2c4c5143ee232f9fd
|
[
"Apache-2.0"
] | 110
|
2019-02-15T15:06:05.000Z
|
2022-03-04T16:03:38.000Z
|
sequence_search/consumer/gunicorn.py
|
RNAcentral/sequence_search
|
e0319e384cc9dea017f165e2c4c5143ee232f9fd
|
[
"Apache-2.0"
] | 1
|
2021-06-30T21:39:35.000Z
|
2021-06-30T21:39:35.000Z
|
"""
This file allows you to serve your application using gunicorn. gunicorn is not installed by default
by the requirements file adev creates; you'll need to install it yourself and add it to requirements.txt.
To run the app using gunicorn, run the following in the terminal
pip install gunicorn
gunicorn app.gunicorn:app --worker-class aiohttp.worker.GunicornWebWorker
You could use a variant of the above with heroku (in the `Procfile`) or with Docker in the ENTRYPOINT statement.
"""
import asyncio
from .__main__ import create_app
loop = asyncio.get_event_loop()
app = create_app(loop)
| 31.105263
| 112
| 0.780034
|
7a2ad8795a36a75aa81e33835b6a5f873f8636db
| 3,824
|
py
|
Python
|
ogr/services/pagure/flag.py
|
shreyaspapi/ogr
|
176add79eeb7d71e765550da76c9cdc8aced9e92
|
[
"MIT"
] | null | null | null |
ogr/services/pagure/flag.py
|
shreyaspapi/ogr
|
176add79eeb7d71e765550da76c9cdc8aced9e92
|
[
"MIT"
] | null | null | null |
ogr/services/pagure/flag.py
|
shreyaspapi/ogr
|
176add79eeb7d71e765550da76c9cdc8aced9e92
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
import datetime
from typing import List, Dict, Any, Union
from ogr.abstract import CommitFlag, CommitStatus
from ogr.services import pagure as ogr_pagure
class PagureCommitFlag(CommitFlag):
_states = {
"pending": CommitStatus.pending,
"success": CommitStatus.success,
"failure": CommitStatus.failure,
"error": CommitStatus.error,
"canceled": CommitStatus.canceled,
}
def __str__(self) -> str:
return "Pagure" + super().__str__()
def _from_raw_commit_flag(self):
self.commit = self._raw_commit_flag["commit_hash"]
self.comment = self._raw_commit_flag["comment"]
self.state = self._state_from_str(self._raw_commit_flag["status"])
self.context = self._raw_commit_flag["username"]
self.url = self._raw_commit_flag["url"]
@staticmethod
def get(project: "ogr_pagure.PagureProject", commit: str) -> List["CommitFlag"]:
response = project._call_project_api("c", commit, "flag")
return [
PagureCommitFlag(raw_commit_flag=flag, project=project)
for flag in response["flags"]
]
@staticmethod
def set(
project: "ogr_pagure.PagureProject",
commit: str,
state: Union[CommitStatus, str],
target_url: str,
description: str,
context: str,
percent: int = None,
trim: bool = False,
uid: str = None,
) -> "CommitFlag":
if isinstance(state, str):
warnings.warn(
"Using the string representation of commit states, that will be removed in 0.14.0"
" (or 1.0.0 if it comes sooner). Please use CommitStatus enum instead. "
)
state = PagureCommitFlag._states[state]
if trim:
description = description[:140]
data: Dict[str, Any] = {
"username": context,
"comment": description,
"url": target_url,
"status": state.name,
}
if percent:
data["percent"] = percent
if uid:
data["uid"] = uid
response = project._call_project_api(
"c", commit, "flag", method="POST", data=data
)
return PagureCommitFlag(
project=project, raw_commit_flag=response["flag"], uid=response["uid"]
)
@property
def created(self) -> datetime.datetime:
return datetime.datetime.fromtimestamp(
int(self._raw_commit_flag["date_created"])
)
@property
def edited(self) -> datetime.datetime:
return datetime.datetime.fromtimestamp(
int(self._raw_commit_flag["date_updated"])
)
| 34.763636
| 98
| 0.649059
|
f5bcc397eb3bb9e1a885fe2cca431556d03d796e
| 2,160
|
py
|
Python
|
Hub/ecointeraction/classification/migrations/0001_initial.py
|
maximedaniel/ITAME
|
ca820337911695fa3625b32dcad5d87ff0b192d0
|
[
"MIT"
] | null | null | null |
Hub/ecointeraction/classification/migrations/0001_initial.py
|
maximedaniel/ITAME
|
ca820337911695fa3625b32dcad5d87ff0b192d0
|
[
"MIT"
] | 1
|
2021-06-10T23:19:09.000Z
|
2021-06-10T23:19:09.000Z
|
Hub/ecointeraction/classification/migrations/0001_initial.py
|
maximedaniel/ITAME
|
ca820337911695fa3625b32dcad5d87ff0b192d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-05-08 06:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Characteristic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Criterium',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=500)),
('characteristics', models.ManyToManyField(blank=True, to='classification.Characteristic')),
],
),
migrations.CreateModel(
name='Entity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=500)),
('criteria', models.ManyToManyField(blank=True, to='classification.Criterium')),
],
),
migrations.CreateModel(
name='InteractiveSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('reference', models.CharField(max_length=500)),
('abstract', models.CharField(max_length=500)),
],
),
migrations.AddField(
model_name='characteristic',
name='interactive_systems',
field=models.ManyToManyField(blank=True, to='classification.InteractiveSystem'),
),
]
| 37.894737
| 114
| 0.573611
|
5be2d7df3a468f3864608d93b60d4cb0f0bd1bda
| 3,305
|
py
|
Python
|
zcp/messaging.py
|
victormonteiro/zcp
|
a4d808c89c3296fe27a08da5eadcf88ae08418eb
|
[
"Apache-2.0"
] | 4
|
2017-05-11T07:34:34.000Z
|
2021-03-22T13:40:06.000Z
|
zcp/messaging.py
|
apolloliu/ZCP
|
646e72d44b4445eb4a81ccd67d44b71e1fb9ea66
|
[
"Apache-2.0"
] | 7
|
2017-05-02T14:18:27.000Z
|
2020-12-15T19:03:42.000Z
|
zcp/messaging.py
|
apolloliu/ZCP
|
646e72d44b4445eb4a81ccd67d44b71e1fb9ea66
|
[
"Apache-2.0"
] | 6
|
2017-05-11T07:34:11.000Z
|
2021-03-22T13:37:39.000Z
|
# Copyright 2017 EasyStack, Inc
# Authors: Hanxi Liu<apolloliuhx@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pika
import time
from zcp.common import conf
LOG = logging.getLogger(__name__)
cfg = conf.Conf()
hosts = cfg.read_option('os_rabbitmq', 'rabbit_hosts')
user = cfg.read_option('os_rabbitmq', 'rabbit_user')
passwd = cfg.read_option('os_rabbitmq', 'rabbit_pass')
port = cfg.read_option('os_rabbitmq', 'rabbit_port')
vh = cfg.read_option('os_rabbitmq', 'rabbit_virtual_host')
max_retries = int(cfg.read_option('os_rabbitmq', 'max_retries', -1))
retry_interval = int(cfg.read_option('os_rabbitmq', 'retry_interval', 5))
def connection():
connect = None
connection_state = False
attemps = 0
MAX_RETRIES = max_retries * len(hosts.split(','))
while True:
if connection_state:
break
try:
for host in hosts.split(','):
LOG.info("Connecting to Rabbitmq server %s..." % host)
connect = pika.BlockingConnection(pika.ConnectionParameters(
host=host,
port=int(port),
virtual_host=vh,
credentials=pika.PlainCredentials(user,
passwd)))
except Exception as e:
if max_retries < 0:
LOG.error('Unable to connect to the Rabbitmq cluster: '
'%(msg)s.Trying again in %(retry_interval)d '
'seconds,Continuing to retry to connect '
% {'msg': e,
'retry_interval': retry_interval})
time.sleep(retry_interval)
elif max_retries > 0 and attemps <= MAX_RETRIES:
LOG.error('Unable to connect to the Rabbitmq cluster: '
'%(msg)s.Trying again in %(retry_interval)d '
'seconds,max_retries time: %(max_retries)d,'
'retry times left:%(left)d'
% {'msg': e,
'retry_interval': retry_interval,
'max_retries': MAX_RETRIES,
'left': (MAX_RETRIES - attemps)})
attemps += 1
time.sleep(retry_interval)
else:
LOG.error('Unable to connect to the Rabbitmq cluster: '
'%(msg)s.' % {'msg': e})
raise
else:
connection_state = True
return connect
class MQConnection(object):
"""RabbitMQ connection class
"""
def __init__(self):
self.connection = connection()
def __call__(self):
self.connection = connection()
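# A minimal usage sketch (hypothetical, not part of the original module). MQConnection retries
# per the configured policy and exposes the underlying pika BlockingConnection.
def _mq_connection_usage_sketch():  # hypothetical helper, safe to remove
    mq = MQConnection()
    channel = mq.connection.channel()  # standard pika BlockingConnection API
    return channel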
| 37.988506
| 78
| 0.56944
|
dd99b9114aa90ae785efef8f2d974208550babb6
| 226
|
py
|
Python
|
change_inside/3/antes.py
|
parzibyte/ejemplos_vim
|
1329616bb2344d43e6e90c2a8c0f90ae1e3c52da
|
[
"MIT"
] | null | null | null |
change_inside/3/antes.py
|
parzibyte/ejemplos_vim
|
1329616bb2344d43e6e90c2a8c0f90ae1e3c52da
|
[
"MIT"
] | null | null | null |
change_inside/3/antes.py
|
parzibyte/ejemplos_vim
|
1329616bb2344d43e6e90c2a8c0f90ae1e3c52da
|
[
"MIT"
] | null | null | null |
"""
https://parzibyte.me/blog
"""
# Change the contents of the list so that it holds the numbers 1 through 3
lista = [12, 321, 321, 321, 3, 213, 21, 321, 321, 3,
213, 12, 4, 54, 5, 4, 6, 65, 6, 5346, 5436, 5346, 54]
| 32.285714
| 71
| 0.584071
|
891d4ed726e77fd986edee372d297804eaad447d
| 385
|
py
|
Python
|
protera_stability/engine/__init__.py
|
stepp1/protera-stability
|
62f70af00b9475a0b0aeba39fa6ae57f0bb25b34
|
[
"MIT"
] | 1
|
2021-11-05T02:14:31.000Z
|
2021-11-05T02:14:31.000Z
|
protera_stability/engine/__init__.py
|
stepp1/protera-stability
|
62f70af00b9475a0b0aeba39fa6ae57f0bb25b34
|
[
"MIT"
] | null | null | null |
protera_stability/engine/__init__.py
|
stepp1/protera-stability
|
62f70af00b9475a0b0aeba39fa6ae57f0bb25b34
|
[
"MIT"
] | null | null | null |
from protera_stability.engine.default import get_cfg, setup_train, DefaultTrainer
from protera_stability.engine.lightning_train import (
default_cbs,
DataModule,
LitProteins,
TrainingPl,
)
__all__ = [
"DataModule",
"DefaultTrainer",
"LitProteins",
"TrainingPl",
"default_cbs",
"get_cfg",
"setup_train",
]
assert __all__ == sorted(__all__)
| 19.25
| 81
| 0.698701
|
9a0fd48a2b7f4de3700428c18634fbf6f509ad07
| 533
|
py
|
Python
|
chatapp/models.py
|
steve-chen-nyc/django_chat_bot
|
8e943cc5eeff2bc87c09e59ff6c3c2a3f4eed187
|
[
"MIT"
] | null | null | null |
chatapp/models.py
|
steve-chen-nyc/django_chat_bot
|
8e943cc5eeff2bc87c09e59ff6c3c2a3f4eed187
|
[
"MIT"
] | 8
|
2019-12-04T23:05:32.000Z
|
2022-02-10T08:37:45.000Z
|
chatapp/models.py
|
steve-chen-nyc/django_chat_bot
|
8e943cc5eeff2bc87c09e59ff6c3c2a3f4eed187
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Client(models.Model):
name = models.CharField(max_length=100)
date_created = models.DateTimeField('date published')
def __str__(self):
return self.name
class Project(models.Model):
client = models.ForeignKey(Client, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
description = models.CharField(max_length=100)
date_created = models.DateTimeField('date published')
def __str__(self):
return self.name
| 28.052632
| 64
| 0.724203
|
bf68b38f44575495543aaa359ec1aaff4b839c68
| 54,677
|
py
|
Python
|
ticdat/utils.py
|
8135tao/ticdat
|
71aeddce4bfcdb0ab03ee4ee6ffed108bb010848
|
[
"BSD-2-Clause"
] | 15
|
2019-05-16T13:22:50.000Z
|
2022-02-18T08:07:10.000Z
|
ticdat/utils.py
|
qtbgo/ticdat
|
71aeddce4bfcdb0ab03ee4ee6ffed108bb010848
|
[
"BSD-2-Clause"
] | 86
|
2019-03-13T16:18:07.000Z
|
2022-02-07T22:13:15.000Z
|
ticdat/utils.py
|
qtbgo/ticdat
|
71aeddce4bfcdb0ab03ee4ee6ffed108bb010848
|
[
"BSD-2-Clause"
] | 9
|
2020-05-06T15:13:32.000Z
|
2022-01-26T15:30:44.000Z
|
"""
general utility module
PEP8
"""
from numbers import Number
from itertools import chain, combinations
from collections import defaultdict
import ticdat
import getopt
import sys
import os
from collections import namedtuple
import datetime as datetime_
try:
import dateutil, dateutil.parser
except:
dateutil = None
try:
import pandas as pd
from pandas import DataFrame
import numpy
except:
pd = DataFrame = numpy = None
try:
import ocp_ticdat_drm as drm
except:
drm = None
import inspect
def faster_df_apply(df, func, trip_wire_check=None):
"""
pandas.DataFrame.apply is rarely used because it is slow. It is slow because it creates a Series for each row
of the DataFrame, and passes this Series to the function. faster_df_apply creates a dict for each row of the
DataFrame instead, and as a result is **much** faster.
See https://bit.ly/3xnLFld.
It's certainly possible newer versions of pandas will implement a more performant DataFrame.apply. The broader
point is, row-wise apply should not be discarded wholesale for performance reasons, as DataFrame.itertuples
    is reasonably fast.
:param df: a DataFrame
:param func: a function to apply to each row of the DataFrame. The function should accept a fieldname->data
dictionary as argument. func will be applied to each row of the DataFrame
:param trip_wire_check: optional. If provided, a function that will be passed each result returned by func.
                            trip_wire_check can either return a falsey value, or a replacement for func to be applied
                            to the remainder of the DataFrame.
:return: a pandas Series with the same index as df and the values of calling func on each row dict.
"""
verify(DataFrame and isinstance(df, DataFrame), "df argument needs to be a DataFrame")
verify(callable(func), "func needs to be a function")
verify(not trip_wire_check or callable(trip_wire_check), "trip_wire_check needs to None, or a function")
cols = list(df.columns)
data, index = [], []
for row in df.itertuples(index=True):
row_dict = {f:v for f,v in zip(cols, row[1:])}
data.append(func(row_dict))
index.append(row[0])
if trip_wire_check:
new_func = trip_wire_check(data[-1])
if new_func:
func = new_func
trip_wire_check = None
# will default to float for empty Series, like original pandas
return pd.Series(data, index=index, **({"dtype": numpy.float64} if not data else {}))
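# A minimal usage sketch of faster_df_apply (illustrative only -- the DataFrame, the column
# names and the row function below are hypothetical, not part of ticdat itself).
def _faster_df_apply_usage_sketch():  # hypothetical helper, safe to remove
    example_df = DataFrame({"Quantity": [2, 3], "Unit Price": [10.0, 4.5]})
    # each row is passed to the lambda as a plain dict keyed by column name
    return faster_df_apply(example_df, lambda row: row["Quantity"] * row["Unit Price"])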
def dat_restricted(table_list):
'''
Decorator factory used to decorate action functions (or solve function) to restrict the access to the
tables in the input_schema
:param table_list: A list of tables that are a subset input_schema.all_tables
:return: A decorator that can be applied to a function to fine-tune how ticdat controls its access to the
input_schema
Example usage
@dat_restricted(['table_one', 'table_five'])
def some_action(dat):
# the action
ticdat will pass a dat object that only has table_one and table_five as attributes. If a dat object
is returned back for writing, any attributes other than table_one, table_five will be ignored.
Note that the input_schema is not known by this decorator_factory, and thus table_list can't be sanity checked
at the time the function is decorated. ticdat will sanity check the table_list when the decorated function is
used by ticdat.standard_main (the Enframe-ticdat code will also perform an equivalent check, as will any ticdat
supporting platform).
As the function will be decorated with a dat_restricted attribute, a programmer is allowed to avoid the decorator
factory and simply do the following instead.
def some_action(dat):
# the action
some_action.dat_restricted = table_list
Although this will work the same, you are encouraged to use the dat_restricted decorator factory for better
readability.
dat_restricted can be used to decorate the solve function, in which case standard_main and Enframe will do the
expected thing and pass a dat object that is restricted to table_list.
'''
verify(containerish(table_list) and table_list and all(isinstance(_, str) for _ in table_list),
"table_list needs to be a non-empty container of strings")
def dat_restricted_decorator(func): # no need to use functools.wraps since not actually wrapping.
func.dat_restricted = tuple(table_list)
return func
return dat_restricted_decorator
def sln_restricted(table_list):
'''
Decorator factory used to decorate action functions (or solve function) to restrict the access to the
tables in the solution_schema
:param table_list: A list of tables that are a subset solution_schema.all_tables
:return: A decorator that can be applied to a function to fine-tune how ticdat controls its access to the
solution_schema
Example usage
@sln_restricted(['table_one', 'table_five'])
def some_action(sln):
# the action
ticdat will pass a sln object that only has table_one and table_five as attributes. If {"sln":sln}
is returned back for writing, any sln attributes other than table_one, table_five will be ignored.
Note that the solution_schema is not known by this decorator_factory, and thus table_list can't be sanity checked
at the time the function is decorated. ticdat will sanity check the table_list when the decorated function is
used by ticdat.standard_main (the Enframe-ticdat code will also perform an equivalent check, as will any ticdat
supporting platform).
As the function will be decorated with a sln_restricted attribute, a programmer is allowed to avoid the decorator
factory and simply do the following instead.
def some_action(sln):
# the action
some_action.sln_restricted = table_list
Although this will work the same, you are encouraged to use the sln_restricted decorator factory for better
readability.
sln_restricted can be used to decorate the solve function, in which case standard_main and Enframe will do the
expected thing and handle only the table_list attributes of the returned sln object.
'''
verify(containerish(table_list) and table_list and all(isinstance(_, str) for _ in table_list),
"table_list needs to be a non-empty container of strings")
def sln_restricted_decorator(func): # no need to use functools.wraps since not actually wrapping.
func.sln_restricted = tuple(table_list)
return func
return sln_restricted_decorator
def clone_a_anchillary_info_schema(schema, table_restrictions):
'''
:param schema: the result of calling _.schema(include_ancillary_info=True) when _ is a
TicDatFactory or PanDatFactory
:param table_restrictions: None (indicating a simple clone) or a sublist of the tables in schema.
    :return: a clone of schema, except with the tables outside of table_restrictions removed (unless
table_restrictions is None, in which case schema is returned).
'''
if table_restrictions is None:
return schema
verify(containerish(table_restrictions) and table_restrictions and
all(isinstance(_, str) for _ in table_restrictions), "table_restrictions needs to be a container of strings")
verify(dictish(schema) and set(table_restrictions).issubset(schema.get("tables_fields", [])),
"table_restrictions needs to be a subset of schema['tables_fields']")
rtn = {}
for k, v in schema.items():
if k in ["tables_fields", "default_values", "data_types"]:
rtn[k] = {_k:_v for _k, _v in v.items() if _k in table_restrictions}
elif k == "foreign_keys":
rtn[k] = tuple(fk for fk in v if set(fk[:2]).issubset(table_restrictions))
elif k == "parameters":
rtn[k] = v if k in table_restrictions else {}
else:
assert k in {"infinity_io_flag", "xlsx_trailing_empty_rows"}, f"{k} is unexpected part of schema"
rtn[k] = v
return rtn
def dateutil_adjuster(x):
if isinstance(x, datetime_.datetime):
return x
    # note that pd.Timestamp tends to create NaT from falsey values; this is ok so long as you check for null using
    # pd.isnull. Also, pd.Timestamp can do weird things making Timestamps from numbers, so not enabling that.
def _try_to_timestamp(y):
if pd and not numericish(y):
rtn = safe_apply(pd.Timestamp)(y)
if rtn is not None:
return rtn
if dateutil:
return safe_apply(dateutil.parser.parse)(y)
rtn = _try_to_timestamp(x)
if rtn is not None:
return rtn
if not numericish(x):
return _try_to_timestamp(str(x))
def acceptable_default(v) :
return numericish(v) or stringish(v) or (v is None)
def all_fields(tpdf, tbl):
assert tbl in tpdf.all_tables
return tpdf.primary_key_fields.get(tbl, ()) + tpdf.data_fields.get(tbl, ())
# can I get away with ordering this consistently with the function? hopefully I can!
class TypeDictionary(namedtuple("TypeDictionary",
("number_allowed", "inclusive_min", "inclusive_max", "min",
"max", "must_be_int", "strings_allowed", "nullable", "datetime"))):
def valid_data(self, data):
if (pd and pd.isnull(data)) or (data is None):
return bool(self.nullable)
if self.datetime:
return isinstance(data, datetime_.datetime) or dateutil_adjuster(data) is not None
if numericish(data):
if not self.number_allowed:
return False
if (data < self.min) or (data > self.max):
return False
if (not self.inclusive_min) and (data == self.min):
return False
if (not self.inclusive_max) and (data == self.max):
return False
if (self.must_be_int) and (safe_apply(int)(data) != data) and \
not (data == self.max == float("inf") and self.inclusive_max):
return False
return True
if stringish(data):
if self.strings_allowed == "*":
return True
assert containerish(self.strings_allowed)
return data in self.strings_allowed
return False
@staticmethod
def safe_creator(number_allowed, inclusive_min, inclusive_max, min, max,
must_be_int, strings_allowed, nullable, datetime=False):
verify(dateutil or pd or not datetime,
"dateutil or pandas needs to be installed in order to use datetime data type")
if datetime:
return TypeDictionary(number_allowed=False, strings_allowed=(), nullable=bool(nullable),
min=0, max=float("inf"), inclusive_min=True, inclusive_max=True, must_be_int=False,
datetime=True)
verify((strings_allowed == '*') or
(containerish(strings_allowed) and all(stringish(x) for x in strings_allowed)),
"""The strings_allowed argument should be a container of strings, or the single '*' character.""")
if containerish(strings_allowed):
strings_allowed = tuple(strings_allowed) # defensive copy
if number_allowed:
verify(numericish(max), "max should be numeric")
verify(numericish(min), "min should be numeric")
verify(max >= min, "max cannot be smaller than min")
return TypeDictionary(number_allowed=True, strings_allowed=strings_allowed, nullable=bool(nullable),
min=min, max=max, inclusive_min=bool(inclusive_min),inclusive_max=bool(inclusive_max),
must_be_int=bool(must_be_int), datetime=False)
return TypeDictionary(number_allowed=False, strings_allowed=strings_allowed, nullable=bool(nullable),
min=0, max=float("inf"), inclusive_min=True, inclusive_max=True, must_be_int=False,
datetime=False)
class ForeignKey(namedtuple("ForeignKey", ("native_table", "foreign_table", "mapping", "cardinality"))) :
def nativefields(self):
return (self.mapping.native_field,) if type(self.mapping) is ForeignKeyMapping \
else tuple(_.native_field for _ in self.mapping)
def foreigntonativemapping(self):
if type(self.mapping) is ForeignKeyMapping : # simple field fk
return {self.mapping.foreign_field:self.mapping.native_field}
else: # compound foreign key
return {_.foreign_field:_.native_field for _ in self.mapping}
def nativetoforeignmapping(self):
return {v:k for k,v in self.foreigntonativemapping().items()}
ForeignKeyMapping = namedtuple("FKMapping", ("native_field", "foreign_field"))
# likely replace this with some sort of sys.platform call that makes a good guess
development_deployed_environment = False
def _clone_to_restricted_as_needed(function, schema, name):
if not hasattr(function, name):
return schema, set()
restricted = getattr(function, name)
verify(containerish(restricted) and restricted and
all(isinstance(_, str) for _ in restricted), f"{name} needs to be a container of strings")
verify(set(restricted).issubset(schema.all_tables), f"{restricted} needs to be a subset of {schema.all_tables}")
if set(restricted) == set(schema.all_tables):
return schema, set()
return schema.clone(table_restrictions=restricted), set(restricted)
def standard_main(input_schema, solution_schema, solve, case_space_table_names=False):
"""
provides standardized command line functionality for a ticdat solve engine
:param input_schema: a TicDatFactory or PanDatFactory defining the input schema
:param solution_schema: a TicDatFactory or PanDatFactory defining the output schema
:param solve: a function that takes a input_schema.TicDat object and
returns a solution_schema.TicDat object
    :param case_space_table_names: passed through to any TicDatFactory/PanDatFactory write functions that have
case_space_table_names as an argument. Will also pass through to
case_space_sheet_names for Excel writers.
boolean - make best guesses how to add spaces and upper case characters to
table names when writing to the file system.
:return: None
Implements a command line signature of
"python engine_file.py --input <input_file_or_dir> --output <output_file_or_dir>"
For the input/output command line arguments.
--> endings in ".xls" or ".xlsx" imply reading/writing Excel files
--> endings in ".mdb" or ".accdb" imply reading/writing Access files (TicDatFactory only)
--> ending in ".db" imply reading/writing SQLite database files
--> ending in ".sql" imply reading/writing SQLite text files rendered in
schema-less SQL statements (TicDatFactory only)
--> ending in ".json" imply reading/writing .json files
--> otherwise, the assumption is that an input/output directory is being specified,
which will be used for reading/writing .csv files.
(Recall that .csv format is implemented as one-csv-file-per-table, so an entire
model will be stored in a directory containing a series of .csv files)
Defaults are input.xlsx, output.xlsx
"""
# See EnframeOfflineHandler for details for how to configure the enframe.json file
verify(all(isinstance(_, ticdat.TicDatFactory) for _ in (input_schema, solution_schema)) or
all(isinstance(_, ticdat.PanDatFactory) for _ in (input_schema, solution_schema)),
"input_schema and solution_schema both need to be TicDatFactory (or PanDatFactory) objects")
verify(callable(solve), "solve needs to be a function")
_args = inspect.getfullargspec(solve).args
verify(_args, "solve needs at least one argument")
create_routine = "create_pan_dat"
if all(isinstance(_, ticdat.TicDatFactory) for _ in (input_schema, solution_schema)):
create_routine = "create_tic_dat"
file_name = sys.argv[0]
def usage():
print ("python %s --help --input <input file or dir> --output <output file or dir>"%file_name +
" --enframe <enframe_config.json> --action <action_function>")
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:e:a:", ["help", "input=", "output=", "enframe=", "action="])
except getopt.GetoptError as err:
print (str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
input_file, output_file, enframe_config, enframe_handler, action_name = "input.xlsx", "output.xlsx", "", None, None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
input_file = a
elif o in ("-o", "--output"):
output_file = a
elif o in ("-e", "--enframe"):
enframe_config = a
elif o in ("-a", "--action"):
action_name = a
else:
verify(False, "unhandled option")
original_input_schema = input_schema
if action_name:
module = sys.modules[solve.__module__.split('.')[0]]
verify(hasattr(module, action_name), f"{action_name} is not an attribute of the module")
action_func = getattr(module, action_name)
verify(callable(action_func), f"{action_name} is not callable")
action_func_args = inspect.getfullargspec(action_func).args
verify({"dat", "sln"}.intersection(action_func_args),
f"{action_name} needs at least one of 'dat', 'sln' as arguments")
input_schema, input_restrictions = _clone_to_restricted_as_needed(action_func, input_schema, "dat_restricted")
solution_schema, solution_restrictions = _clone_to_restricted_as_needed(action_func, solution_schema,
"sln_restricted")
else:
input_schema, input_restrictions = _clone_to_restricted_as_needed(solve, input_schema, "dat_restricted")
solution_schema, solution_restrictions = _clone_to_restricted_as_needed(solve, solution_schema,
"sln_restricted")
if enframe_config:
enframe_handler = make_enframe_offline_handler(enframe_config, input_schema, solution_schema,
solve if not action_name else action_func)
if enframe_handler.solve_type.lower() == "Copy Input to Postgres".lower():
input_schema, input_restrictions = original_input_schema, set()
if "solve" in enframe_handler.solve_type.lower() and solution_restrictions:
print("\nNote - only the following subset of tables will be written to the local Enframe DB\n" +
str(solution_restrictions) + "\n")
verify(enframe_handler, "-e/--enframe command line functionality requires additional Enframe specific package")
if enframe_handler.solve_type == "Proxy Enframe Solve":
if action_name:
enframe_handler.perform_action_with_function()
else:
enframe_handler.proxy_enframe_solve()
print(f"Enframe proxy solve executed with {enframe_config}" +
(f" and action {action_name}" if action_name else ""))
return
recognized_extensions = (".json", ".xls", ".xlsx", ".db")
if create_routine == "create_tic_dat":
recognized_extensions += (".sql", ".mdb", ".accdb")
file_or_dir = lambda f: "file" if any(f.endswith(_) for _ in recognized_extensions) else "directory"
if not (os.path.exists(input_file)):
print("%s is not a valid input file or directory"%input_file)
return
print("input data from %s %s"%(file_or_dir(input_file), input_file))
dat = _get_dat_object(tdf=input_schema, create_routine=create_routine, file_path=input_file,
file_or_directory=file_or_dir(input_file),
check_for_dups=create_routine == "create_tic_dat")
if enframe_handler:
enframe_handler.copy_input_dat(dat)
print(f"Input data copied from {input_file} to the postgres DB defined by {enframe_config}")
if enframe_handler.solve_type == "Copy Input to Postgres and Solve":
if action_name:
enframe_handler.perform_action_with_function()
else:
enframe_handler.proxy_enframe_solve()
print(f"Enframe proxy solve executed with {enframe_config}" +
(f" and action {action_name}" if action_name else ""))
return
print("output %s %s"%(file_or_dir(output_file), output_file))
write_func, write_kwargs = _get_write_function_and_kwargs(tdf=solution_schema, file_path=output_file,
file_or_directory=file_or_dir(output_file),
case_space_table_names=case_space_table_names)
if not action_name:
sln = solve(dat)
verify(not (sln is not None and safe_apply(bool)(sln) is None),
"The solve (or action) function should return either a TicDat/PanDat object (for success), " +
"or something falsey (to indicate failure)")
if sln:
print("%s output %s %s"%("Overwriting" if os.path.exists(output_file) else "Creating",
file_or_dir(output_file), output_file))
write_func(sln, output_file, **write_kwargs)
else:
print("No solution was created!")
return
print("solution data from %s %s"%(file_or_dir(output_file), output_file))
kwargs = {}
if "dat" in action_func_args:
kwargs["dat"] = dat
if "sln" in action_func_args:
sln = _get_dat_object(tdf=solution_schema, create_routine=create_routine, file_path=output_file,
file_or_directory=file_or_dir(output_file),
check_for_dups=create_routine == "create_tic_dat")
kwargs["sln"] = sln
rtn = action_func(**kwargs)
def quickie_good_obj(dat, tdf):
return all(hasattr(dat, t) for t in tdf.all_tables)
def dat_write(dat):
w_func, w_kwargs = _get_write_function_and_kwargs(tdf=input_schema, file_path=input_file,
file_or_directory=file_or_dir(input_file),
case_space_table_names=case_space_table_names)
print("%s input %s %s" % ("Overwriting" if os.path.exists(input_file) else "Creating",
file_or_dir(input_file), input_file))
w_func(dat, input_file, **w_kwargs)
if rtn:
if isinstance(rtn, dict):
verify({"dat", "sln"}.intersection(rtn), "The returned dict is missing both 'dat' and 'sln' keys")
if "dat" in rtn:
verify(quickie_good_obj(rtn["dat"], input_schema), "rtn['dat'] fails sanity check")
dat_write(rtn["dat"])
if "sln" in rtn:
verify(quickie_good_obj(rtn["sln"], solution_schema), "rtn['sln'] fails sanity check")
print("%s output %s %s" % ("Overwriting" if os.path.exists(output_file) else "Creating",
file_or_dir(output_file), output_file))
write_func(rtn["sln"], output_file, **write_kwargs)
else:
verify(quickie_good_obj(rtn, input_schema), "rtn fails sanity check")
dat_write(rtn)
else:
print(f"{action_func} failed to return anything!")
def _get_dat_object(tdf, create_routine, file_path, file_or_directory, check_for_dups):
def inner_f():
if os.path.isfile(file_path) and file_or_directory == "file":
if file_path.endswith(".json"):
assert not (check_for_dups and tdf.json.find_duplicates(file_path)), "duplicate rows found"
return getattr(tdf.json, create_routine)(file_path)
if file_path.endswith(".xls") or file_path.endswith(".xlsx"):
assert not (check_for_dups and tdf.xls.find_duplicates(file_path)), "duplicate rows found"
return getattr(tdf.xls, create_routine)(file_path)
if file_path.endswith(".db"):
assert not (check_for_dups and tdf.sql.find_duplicates(file_path)), "duplicate rows found"
return getattr(tdf.sql, create_routine)(file_path)
if file_path.endswith(".sql"):
# no way to check a .sql file for duplications
return tdf.sql.create_tic_dat_from_sql(file_path) # only TicDat objects handle .sql files
if file_path.endswith(".mdb") or file_path.endswith(".accdb"):
assert not (check_for_dups and tdf.mdb.find_duplicates(file_path)), "duplicate rows found"
return tdf.mdb.create_tic_dat(file_path)
elif os.path.isdir(file_path) and file_or_directory == "directory":
assert not (check_for_dups and tdf.csv.find_duplicates(file_path)), "duplicate rows found"
return getattr(tdf.csv, create_routine)(file_path)
dat = inner_f()
verify(dat, f"Failed to read from and/or recognize {file_path}{_extra_input_file_check_str(file_path)}")
return dat
def _get_write_function_and_kwargs(tdf, file_path, file_or_directory, case_space_table_names):
write_func = None
if file_or_directory == "file":
if file_path.endswith(".json"):
write_func = tdf.json.write_file
if file_path.endswith(".xls") or file_path.endswith(".xlsx"):
write_func = tdf.xls.write_file
if file_path.endswith(".db"):
write_func = getattr(tdf.sql, "write_db_data", getattr(tdf.sql, "write_file", None))
if file_path.endswith(".sql"):
write_func = tdf.sql.write_sql_file
if file_path.endswith(".mdb") or file_path.endswith(".accdb"):
write_func = tdf.mdb.write_file
else:
write_func = tdf.csv.write_directory
verify(write_func, f"Unable to resolve write function for {file_path}")
kwargs = {"case_space_table_names": case_space_table_names, "case_space_sheet_names": case_space_table_names,
"allow_overwrite": True}
kwargs = {k: v for k, v in kwargs.items() if k in inspect.getfullargspec(write_func).args}
return write_func, kwargs
def _extra_input_file_check_str(input_file):
if os.path.isfile(input_file) and input_file.endswith(".csv"):
return "\nTo load data from .csv files, pass the directory containing the .csv files as the " +\
"command line argument."
return ""
def make_enframe_offline_handler(enframe_config, input_schema, solution_schema, core_func):
try:
from framework_utils.ticdat_deployer import EnframeOfflineHandler
except:
try:
from enframe_offline_handler import EnframeOfflineHandler
except:
EnframeOfflineHandler = None
if EnframeOfflineHandler:
return EnframeOfflineHandler(enframe_config, input_schema, solution_schema, core_func)
def verify(b, msg) :
"""
raise a TicDatError exception if the boolean condition is False
:param b: boolean condition.
:param msg: string argument to the TicDatError construction
:return:
"""
if not b :
raise TicDatError(msg)
try:
import gurobipy as gu
verify(set(gu.tuplelist(((1,2), (2,3),(3,2))).select("*", 2))
== {(1, 2), (3, 2)}, "")
except:
gu = None
# Our experience was that for a production license the following needed to be truthy, but when running unit tests
# with a development license, it needed to be disabled. See test_kehaar for example.
gurobi_env_explicit_creation_enabled = True
def gurobi_env(*args, **kwargs):
"""
Return an object that can be passed to gurobipy.Model() as the env argument.
On an ordinary Python installation, just returns None
Useful for Gurobi licensing/DRM issues.
:return: An object that can be passed to gurobipy.Model as the env argument
"""
verify(gu, "gurobipy is not installed")
if drm:
return drm.gurobi_env()
if gurobi_env_explicit_creation_enabled:
return gu.Env()
try:
import docplex.mp.progress as cplexprogress
except:
cplexprogress = None
def ampl_format(mod_str, **kwargs):
"""
Return a formatted version of mod_str, using substitutions from kwargs.
The substitutions are identified by doubled-braces ('{{' and '}}').
Very similar to str.format, except single braces are left unmolested and double-braces
are used to identify substitutions. This allows AMPL mod code to be more readable
to AMPL developers.
:param mod_str: the string that has doubled-braced substitutions entries.
:param kwargs: Named arguments map from substitution-entry label to value.
:return: A copy of mod_str with the substitutions performed.
"""
verify(stringish(mod_str), "mod_str argument should be a string")
left, right = ["_ticdat_ampl_format_%s_"%_ for _ in ["[", "]"]]
for _ in [left, right]:
verify(_ not in mod_str, "The %s string cannot be a sub-string of mod_str"%_)
rtn = mod_str.replace("{{", left).replace("}}", right)
rtn = rtn.replace("{", "{{").replace("}", "}}")
rtn = rtn.replace(left, "{").replace(right, "}")
return rtn.format(**kwargs)
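# A small illustrative sketch of ampl_format (the AMPL fragment below is made up): doubled
# braces mark substitutions, while ordinary AMPL braces pass through untouched.
def _ampl_format_usage_sketch():  # hypothetical helper, safe to remove
    mod = "set WAREHOUSES; param capacity {WAREHOUSES} >= {{min_capacity}};"
    return ampl_format(mod, min_capacity=0)
    # returns "set WAREHOUSES; param capacity {WAREHOUSES} >= 0;"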
def dict_overlay(d1, d2):
rtn = dict(d1)
for k,v in d2.items():
rtn[k] = v
return rtn
def create_duplicate_focused_tdf(tdf):
primary_key_fields = {k:v for k,v in tdf.primary_key_fields.items() if v}
if primary_key_fields:
return ticdat.TicDatFactory(**{k:[[],v] for k,v in primary_key_fields.items()})
def find_duplicates(td, tdf_for_dups):
assert tdf_for_dups.good_tic_dat_object(td)
assert not any(tdf_for_dups.primary_key_fields.values())
assert not tdf_for_dups.generator_tables
rtn = {t:defaultdict(int) for t in tdf_for_dups.primary_key_fields}
for t,flds in list(tdf_for_dups.data_fields.items()):
tbl = getattr(td, t)
for row in tbl:
k = tuple(row[f] for f in flds)
k = k[0] if len(k)==1 else k
rtn[t][k] += 1
rtn[t] = {k:v for k,v in rtn[t].items() if v > 1}
if not rtn[t]:
del(rtn[t])
return rtn
def find_duplicates_from_dict_ticdat(tdf, dict_ticdat):
assert isinstance(tdf, ticdat.TicDatFactory)
assert dictish(dict_ticdat) and all(map(stringish, dict_ticdat)) and \
all(map(containerish, dict_ticdat.values()))
primary_key_fields = {k:v for k,v in tdf.primary_key_fields.items() if v}
if primary_key_fields:
old_schema = {k:v for k,v in tdf.schema().items() if k in primary_key_fields}
all_data_tdf = ticdat.TicDatFactory(**{t:[[], pks+dfs]
for t,(pks,dfs) in old_schema.items()})
td = all_data_tdf.TicDat(**{k:v for k,v in dict_ticdat.items()
if k in primary_key_fields})
rtn = {t:defaultdict(int) for t in primary_key_fields}
for t,flds in list(primary_key_fields.items()):
tbl = getattr(td, t)
for row in tbl:
k = tuple(row[f] for f in flds)
k = k[0] if len(k)==1 else k
rtn[t][k] += 1
rtn[t] = {k:v for k,v in rtn[t].items() if v > 1}
if not rtn[t]:
del(rtn[t])
return rtn
def find_case_space_duplicates(tdf):
"""
Finds fields that are case space duplicates
:param tdf: A TicDatFactory defining the schema
:return: A dictionary with the keys being tables that have case space duplicates
"""
schema = tdf.schema()
tables_with_case_insensitive_dups = {}
for table in schema:
fields = set(schema[table][0]).union(schema[table][1])
case_insensitive_fields = set(map(lambda k: k.lower().replace(" ", "_"), fields))
if len(fields) != len(case_insensitive_fields):
tables_with_case_insensitive_dups[table] = fields
return tables_with_case_insensitive_dups
def case_space_to_pretty(str_):
if not str_:
return str_
str_ = list(str_[0].upper() + str_[1:])
for i in range(len(str_)):
if str_[i] == "_":
str_[i] = " "
if i + 1 < len(str_):
str_[i + 1] = str_[i + 1].upper()
return "".join(str_)
def change_fields_with_reserved_keywords(tdf, reserved_keywords, undo=False):
tdf_schema = tdf.schema()
mapping = {}
for table, fields in tdf_schema.items():
for fields_list in [fields[0], fields[1]]:
for findex in range(len(fields_list)):
original_field = fields_list[findex]
if not undo:
verify(not fields_list[findex].startswith('_'),
("Field names cannot start with '_', in table %s : " +
"field is %s") % (table, fields_list[findex]))
if fields_list[findex].lower() in reserved_keywords:
fields_list[findex] = '_' + fields_list[findex]
else:
if fields_list[findex].startswith('_'):
fields_list[findex] = fields_list[findex][1:]
mapping[table,original_field] = fields_list[findex]
rtn = ticdat.TicDatFactory(**tdf_schema)
for (table, original_field),new_field in mapping.items():
if original_field in tdf.default_values.get(table, ()):
rtn.set_default_value(table, new_field,
tdf.default_values[table][original_field])
if original_field in tdf.data_types.get(table, ()):
rtn.set_data_type(table, new_field,
*(tdf.data_types[table][original_field]))
if hasattr(tdf,'opl_prepend'):
rtn.opl_prepend = tdf.opl_prepend
if hasattr(tdf,'ampl_prepend'):
rtn.ampl_prepend = tdf.ampl_prepend
return rtn
def create_generic_free(td, tdf):
assert tdf.good_tic_dat_object(td)
if not tdf.generic_tables:
return td, tdf
sch = {k:v for k,v in tdf.schema().items() if k not in tdf.generic_tables}
for t in tdf.generic_tables:
if len(getattr(td, t)):
sch[t] = [[],list(getattr(td, t).columns)]
rtn_tdf = ticdat.TicDatFactory(**sch)
return rtn_tdf.TicDat(**{t:getattr(td, t) for t in rtn_tdf.all_tables}), rtn_tdf
class Slicer(object):
"""
Object to perform multi-index slicing over an index sequence
"""
def __init__(self, iter_of_iters):
"""
Construct a multi-index Slicer object
:param iter_of_iters An iterable of iterables. Usually a list of lists, or a list
of tuples. Each inner iterable must be the same size. The "*" string has a special
flag meaning and cannot be a member of any of the inner iterables.
Slicer is fairly similar to gurobipy.tuplelist, and will try to use tuplelist for improved performance
whenever possible. One key difference is Slicer can accommodate tuples that themselves contain tuples (or
        really any hashable), whereas tuplelist should only be used with tuples that themselves contain only primitives.
"""
verify(hasattr(iter_of_iters, "__iter__"), "need an iterator of iterators")
copied = tuple(iter_of_iters)
verify(all(hasattr(_, "__iter__") for _ in copied), "need iterator of iterators")
self._indicies = tuple(map(tuple, copied))
if self._indicies:
verify(min(map(len, self._indicies)) == max(map(len, self._indicies)),
"each inner iterator needs to have the same number of elements")
verify(not any("*" in _ for _ in self._indicies),
"The '*' character cannot itself be used as an index")
self._gu = None
if gu and not any(any(map(containerish, _)) for _ in self._indicies):
self._gu = gu.tuplelist(self._indicies)
self._indicies = None
self.clear()
def slice(self, *args):
"""
Perform a multi-index slice. (Not to be confused with the native Python slice)
:param *args a series of index values or '*'. The latter means 'match every value'
:return: a list of tuples which match args.
:caveat will run faster if gurobipy is available and tuplelist can accommodate the interior iterables
"""
if not (self._indicies or self._gu):
return []
verify(len(args) == len((self._indicies or self._gu)[0]), "inconsistent number of elements")
if self._gu:
return self._gu.select(*args)
wildcards = tuple(i for i,x in enumerate(args) if x == "*")
fixedposns = tuple(i for i in range(len(args)) if i not in wildcards)
def fa(t):
return tuple(t[i] for i in fixedposns)
if wildcards not in self._archived_slicings:
for indx in self._indicies:
self._archived_slicings[wildcards][fa(indx)].append(indx)
return list(self._archived_slicings[wildcards][fa(args)])
def clear(self):
"""
        reduce memory overhead by clearing out any archived slicing.
this is a no-op if gurobipy is available
:return:
"""
self._archived_slicings = defaultdict(lambda : defaultdict(list))
def _forceguout(self):
if self._gu:
self._indicies = tuple(map(tuple, self._gu))
self._gu = None
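# Hedged usage sketch (added for illustration; the index tuples are invented and this
# helper is not part of the original module). It shows the wildcard semantics described
# in the Slicer docstring above.
def _slicer_usage_example():
    s = Slicer([("cars", "chicago", 2021), ("cars", "boston", 2021), ("trucks", "chicago", 2022)])
    assert len(s.slice("cars", "*", 2021)) == 2          # '*' matches every value in that position
    assert s.slice("*", "boston", "*") == [("cars", "boston", 2021)]
    assert s.slice("trucks", "chicago", 2022) == [("trucks", "chicago", 2022)]
    s.clear()                                            # drops cached slicings when gurobipy isn't used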
def do_it(g): # just walks through everything in a gen - I like the syntax this enables
for x in g :
pass
def all_underscore_replacements(s):
rtn = []
underscore_positions = [i for i,c in enumerate(s) if c == "_"]
for indexsets in chain.from_iterable(
combinations(list(underscore_positions), r)
for r in range(len(list(underscore_positions))+1)):
s_ = str(s)
for i in indexsets:
s_ = s_[:i] + " " + s_[i+1:]
rtn.append(s_)
return rtn
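# Hedged illustration (added, not part of the original module): all_underscore_replacements
# enumerates every way of swapping a subset of underscores for spaces.
def _underscore_replacements_example():
    assert sorted(all_underscore_replacements("dual_value")) == sorted(["dual_value", "dual value"])
    assert len(all_underscore_replacements("a_b_c")) == 4  # two underscores -> 2**2 variants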
def all_subsets(my_set):
return [set(subset) for l in range(len(my_set)+1) for subset in combinations(my_set, l)]
class TicDatError(Exception) :
pass
def debug_break():
import ipdb; ipdb.set_trace()
def safe_apply(f) :
def _rtn (*args, **kwargs) :
try :
return f(*args, **kwargs)
except :
return None
return _rtn
def dictish(x): return all(hasattr(x, _) for _ in
("__getitem__", "keys", "values", "items", "__contains__", "__len__"))
def stringish(x): return all(hasattr(x, _) for _ in ("lower", "upper", "strip"))
def containerish(x): return all(hasattr(x, _) for _ in ("__iter__", "__len__", "__contains__")) \
and not stringish(x)
def generatorish(x): return all(hasattr(x, _) for _ in ("__iter__", "next")) \
and not (containerish(x) or dictish(x))
def numericish(x) : return isinstance(x, Number) and not isinstance(x, bool)
def lupish(x) : return containerish(x) and hasattr(x, "__getitem__") and not dictish(x)
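# Hedged illustration (added): quick sanity checks for the duck-typing predicates above.
def _predicate_examples():
    assert dictish({}) and not dictish([])
    assert stringish("abc") and not stringish(3)
    assert containerish([1, 2]) and not containerish("abc")  # strings are deliberately excluded
    assert numericish(1.5) and not numericish(True)          # bools are deliberately excluded
    assert lupish((1, 2)) and not lupish({"a": 1})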
def baseConverter(number, base):
if number < base:
return [number]
rtn = []
power = base
while power * base <= number:
power *= base
while power >= base :
        rtn.append(number // power)  # floor division keeps the digits integral on Python 3
        number -= power * (number // power)
        power //= base
rtn.append(number%base)
return rtn
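# Hedged illustration (added): with the floor division above, baseConverter returns the
# digits of `number` in the requested base, most significant digit first.
def _base_converter_example():
    assert baseConverter(10, 2) == [1, 0, 1, 0]
    assert baseConverter(255, 16) == [15, 15]
    assert baseConverter(7, 10) == [7]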
def freezable_factory(baseClass, freezeAttr, alwaysEditable = None) :
alwaysEditable = alwaysEditable or set()
class _Freezeable(baseClass) :
def __setattr__(self, key, value):
if key in alwaysEditable or not getattr(self, freezeAttr, False):
return super(_Freezeable, self).__setattr__(key, value)
raise TicDatError("can't set attributes to a frozen " + self.__class__.__name__)
def __delattr__(self, item):
if not getattr(self, freezeAttr, False):
return super(_Freezeable, self).__delattr__(item)
raise TicDatError("can't del attributes to a frozen " + self.__class__.__name__)
return _Freezeable
_FreezableDictBase = freezable_factory(dict, "_attributesFrozen")
class FreezeableDict(_FreezableDictBase) :
def __setattr__(self, key, value):
if key == "_dataFrozen" and value :
return super(_FreezableDictBase, self).__setattr__(key, value)
return super(FreezeableDict, self).__setattr__(key, value)
def __setitem__(self, key, value):
if not getattr(self, "_dataFrozen", False) :
return super(FreezeableDict, self).__setitem__(key, value)
raise TicDatError("Can't edit a frozen " + self.__class__.__name__)
def __delitem__(self, key):
if not getattr(self, "_dataFrozen", False) :
return super(FreezeableDict, self).__delitem__(key)
raise TicDatError("Can't edit a frozen " + self.__class__.__name__)
def update(self, *args, **kwargs) :
if not getattr(self, "_dataFrozen", False) :
return super(FreezeableDict, self).update(*args, **kwargs)
raise TicDatError("Can't edit a frozen " + self.__class__.__name__)
def pop(self, *args, **kwargs) :
if not getattr(self, "_dataFrozen", False) :
return super(FreezeableDict, self).pop(*args, **kwargs)
raise TicDatError("Can't edit a frozen " + self.__class__.__name__)
class FrozenDict(FreezeableDict) :
def __init__(self, *args, **kwargs):
super(FrozenDict, self).__init__(*args, **kwargs)
self._dataFrozen = True # need to do first, obviously
self._attributesFrozen = True
def deep_freeze(x) :
if stringish(x) or not hasattr(x, "__contains__") :
return x
if hasattr(x, "keys") and hasattr(x, "values") :
return FrozenDict({deep_freeze(k) : deep_freeze(v) for k,v in x.items()})
if hasattr(x, "__getitem__") :
return tuple(map(deep_freeze, x))
return frozenset(map(deep_freeze,x))
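# Hedged illustration (added): deep_freeze recursively converts nested containers into
# FrozenDict / tuple / frozenset so the result can no longer be mutated.
def _deep_freeze_example():
    frozen = deep_freeze({"a": [1, 2], "b": {"c": 3}})
    assert frozen["a"] == (1, 2) and frozen["b"]["c"] == 3
    try:
        frozen["a"] = "oops"
    except TicDatError:
        pass  # expected: FrozenDict rejects edits once _dataFrozen is set
    else:
        raise AssertionError("FrozenDict should have rejected the edit")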
def td_row_factory(table, key_field_names, data_field_names, default_values={}):
assert dictish(default_values) and set(default_values).issubset(data_field_names)
assert not set(key_field_names).intersection(data_field_names)
if not data_field_names:
# need a freezeable dict not a frozen dict here so can still link foreign keys
def makefreezeabledict(x=()) :
verify(containerish(x) and len(x) == 0, "Attempting to add non-empty data to %s"%table)
return FreezeableDict()
return makefreezeabledict
fieldtoindex = {x:data_field_names.index(x) for x in data_field_names}
indextofield = {v:k for k,v in fieldtoindex.items()}
class TicDatDataRow(freezable_factory(object, "_attributesFrozen")) :
def __init__(self, x):
            # since ticdat targets numerical analysis, 0 is a good default default value
self._data = [0] * len(fieldtoindex)
if dictish(x) :
verify(set(x.keys()).issubset(fieldtoindex),
"Applying inappropriate data field names to %s"%table)
for f,i in fieldtoindex.items():
if f in default_values :
self._data[i] = default_values[f]
for f,_d in x.items():
self[f] = _d
elif containerish(x) :
verify(len(x) == len(self), "%s requires each row to have %s data values"%
(table, len(self)))
for i in range(len(self)):
self._data[i] = x[i]
else:
verify(len(self) ==1, "%s requires each row to have %s data values"%
(table, len(self)))
self._data[0] = x
def __getitem__(self, item):
try :
return self._data[fieldtoindex[item]]
except :
raise TicDatError("Key error : %s not data field name for table %s"% (item, table))
def __setitem__(self, key, value):
verify(key in fieldtoindex, "Key error : %s not data field name for table %s"%
(key, table))
if getattr(self, "_dataFrozen", False) :
raise TicDatError("Can't edit a frozen TicDatDataRow")
self._data[fieldtoindex[key]] = value
def keys(self):
return tuple(indextofield[i] for i in range(len(self)))
def values(self):
return tuple(self._data)
def items(self):
return zip(self.keys(), self.values())
def __contains__(self, item):
return item in fieldtoindex
def __iter__(self):
return iter(fieldtoindex)
def __len__(self):
return len(self._data)
def __repr__(self):
return "_td:" + {k:v for k,v in self.items()}.__repr__()
assert dictish(TicDatDataRow)
return TicDatDataRow
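# Hedged illustration (added; the table and field names are invented): td_row_factory
# builds a lightweight row class whose data fields default to 0 (or to default_values)
# and can be read and written by field name.
def _td_row_factory_example():
    row_factory = td_row_factory("shipments", ("warehouse", "customer"), ("cost", "distance"), {"cost": 1})
    row = row_factory({"distance": 7.5})
    assert row["cost"] == 1 and row["distance"] == 7.5
    assert dict(row.items()) == {"cost": 1, "distance": 7.5}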
class Sloc(object):
"""
A utility class for the slicing on pandas Series.
Works just like .loc, except doesn't exception out when
encountering an empty slice.
**All** credit for this class goes to the inimitable IL.
https://github.com/pydata/pandas/issues/10695
"""
def __init__(self, s):
"""
In general there is no need to create this object explicitly.
TicDatFactory.copy_to_pandas can create them for each of your
data columns, or you can use the add_sloc utility function.
:param s: a Series object.
:return:
"""
verify(pd, "pandas needs to be installed in order to enable pandas functionality")
# as of this writing, the DataFrame doesn't handle references like df[:,"item"] correctly
verify(isinstance(s, pd.Series), "sloc only implemented for Series")
self._s = s
def __getitem__(self, key):
try:
return self._s.loc[key]
except Exception as e:
if containerish(key) and any(isinstance(k, slice) and
(k.start == k.step == k.stop == None) for k in key):
return pd.Series([], dtype=numpy.float64)
raise e
@staticmethod
def add_sloc(s):
"""
        adds an .sloc attribute to the series or to every column of the data frame
:param s: either a series or a data frame
:return: s if .sloc could be added, None otherwise
"""
verify(pd, "pandas needs to be installed in order to enable pandas functionality")
if isinstance(s.index, pd.MultiIndex) :
# sloc functionality really makes sense only for a MultiIndex
if isinstance(s, pd.DataFrame):
# adding sloc just to the columns of the DataFrame and not to the DataFrame itself.
for c in s.columns:
Sloc.add_sloc(s[c])
else:
s.sloc = Sloc(s)
return s
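# Hedged usage sketch (added; requires pandas and uses invented data): Sloc.add_sloc
# attaches an .sloc accessor to a MultiIndex Series so that slices over missing labels
# return an empty Series instead of raising.
def _sloc_example():
    import pandas as pd
    series = pd.Series(
        [20.0, 10.0],
        index=pd.MultiIndex.from_tuples([("cars", "boston"), ("cars", "chicago")])).sort_index()
    Sloc.add_sloc(series)
    assert len(series.sloc[("cars", slice(None))]) == 2
    assert len(series.sloc[("planes", slice(None))]) == 0  # empty result, not a KeyError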
class LogFile(object) :
"""
Utility class for writing log files.
Also enables writing on-the-fly tables into log files.
"""
def __init__(self, path):
self._f = open(path, "w") if path else None
def write(self, *args, **kwargs):
self._f.write(*args, **kwargs) if self._f else None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
        self._f.close() if self._f else None
def log_table(self, table_name, seq, formatter = lambda _ : "%s"%_,
max_write = 10) :
"""
Writes a table to the log file. Extremely useful functionality for
on the fly errors, warnings and diagnostics.
        :param table_name: the name to be given to the logged table
:param seq: An iterable of iterables. The first iterable
lists the field names for the table. The remaining iterables
list the column values for each row. The outer iterable
is thus of length num_rows + 1, while each of the inner
iterables are of length num_cols.
:param formatter: a function used to turn column entries into strings
:param max_write: the maximum number of table entries to write
to the actual log file.
:return:
"""
verify(containerish(seq) and all(map(containerish, seq)),
"seq needs to be container of containers")
verify(len(seq) >= 1, "seq missing initial header row")
verify(max(map(len, seq)) == min(map(len, seq)),
"each row of seq needs to be the same length as the header row")
self.write("Table %s:\n"%table_name)
if len(seq[0]) <= 2:
ljust = 30
elif len(seq[0]) == 3:
ljust = 25
elif len(seq[0]) == 4:
ljust = 20
else:
ljust = 18
if len(seq) - 1 > max_write:
self.write("(Showing first %s entries out of %s in total)\n"
%(max_write, len(seq)-1))
for row in list(seq)[:max_write+1]:
self.write("".join(formatter(_).ljust(ljust) for _ in row) + "\n")
self.write("\n")
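# Hedged usage sketch (added; the file name and table contents are invented): log_table
# expects the header row first, then one inner iterable per data row.
def _log_file_example(path="diagnostics.log"):
    with LogFile(path) as log:
        log.write("data integrity problems\n")
        log.log_table("bad_rows", [("table", "row", "issue"),
                                   ("shipments", 12, "negative quantity"),
                                   ("shipments", 40, "missing destination")])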
class Progress(object):
"""
Utility class for indicating progress.
"""
def __init__(self, quiet = False):
self._quiet = quiet
def numerical_progress(self, theme, progress):
"""
indicate generic progress
:param theme: string describing the type of progress being advanced
:param progress: numerical indicator to the degree of progress advanced
:return: False if GUI indicates solve should gracefully finish, True otherwise
"""
        verify(stringish(theme), "theme needs to be string")
verify(numericish(progress), "progress needs to be numerical")
if not self._quiet:
print("%s:%s"%(theme.ljust(40), "{:.5f}".format(progress)))
return True
def mip_progress(self, theme, lower_bound, upper_bound):
"""
indicate progress towards solving a MIP via converging upper and lower bounds
:param theme: string describing the type of MIP solve underway
:param lower_bound: the best current lower bound to the MIP objective
:param upper_bound: the best current upper bound to the MIP objective
:return: False if GUI indicates solve should gracefully finish, True otherwise
"""
        verify(stringish(theme), "theme needs to be string")
verify(all(map(numericish, (lower_bound, upper_bound))),
"lower_bound, upper_bound need to be numeric")
verify(lower_bound - abs(lower_bound) * .00001 <= upper_bound,
"lower_bound can't be bigger than upper_bound")
if not self._quiet:
print("%s:%s:%s"%(theme.ljust(30), "{:.5f}".format(lower_bound).ljust(20),
"{:.5f}".format(upper_bound)))
return True
def gurobi_call_back_factory(self, theme, model) :
"""
Allow a Gurobi model to call mip_progress. **Only for minimize**
:param theme: string describing the type of MIP solve underway
:param model: a Gurobi model (or ticdat.Model.core_model)
:return: a call_back function that can be passed to Model.optimize
"""
verify(gu, "gurobipy is not installed and properly licensed")
def rtn(gu_model, where) :
assert gu_model is model
if where == gu.GRB.callback.MIP:
ub = model.cbGet(gu.GRB.callback.MIP_OBJBST)
lb = model.cbGet(gu.GRB.callback.MIP_OBJBND)
keep_going = self.mip_progress(theme, lb, ub)
if not keep_going :
model.terminate()
return rtn
def add_cplex_listener(self, theme, model):
'''
Allow a CPLEX model to call mip_progress. **Only for minimize**
:param theme: short descriptive string
:param model: cplex.Model object (or ticdat.Model.core_model)
:return:
'''
verify(cplexprogress, "docplex is not installed")
super_self = self
class MyListener(cplexprogress.ProgressListener):
def notify_progress(self, progress_data):
# this is assuming a minimization problem.
ub = float("inf") if progress_data.current_objective is None else progress_data.current_objective
keep_going = super_self.mip_progress(theme, progress_data.best_bound, ub)
if not keep_going:
self.abort()
model.add_progress_listener(MyListener())
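# Hedged illustration (added): the default Progress simply prints; solver hooks such as
# gurobi_call_back_factory and add_cplex_listener route bound updates into mip_progress.
def _progress_example():
    progress = Progress()
    assert progress.numerical_progress("rows read", 1000)
    assert progress.mip_progress("fleet model", lower_bound=90.5, upper_bound=100.0)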
EPSILON = 1e-05
def per_error(x1, x2) :
x1 = float(x1) if numericish(x1) else x1
x2 = float(x2) if numericish(x2) else x2
if (x1 < 0) and (x2 < 0) :
return per_error(-x1, -x2)
if x1 == float("inf") :
return 0 if (x2 == float("inf")) else x1
SMALL_NOT_ZERO = 1e-10
assert(EPSILON>SMALL_NOT_ZERO)
abs1 = abs(x1)
abs2 = abs(x2)
# is it safe to divide by the bigger absolute value
if max(abs1, abs2) > SMALL_NOT_ZERO:
rtn = ((max(x1, x2) - min(x1, x2)) / max(abs1, abs2))
return rtn
return 0
def nearly_same(x1, x2, epsilon) :
return per_error(x1, x2) < epsilon
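# Hedged illustration (added): per_error is a symmetric relative difference, and
# nearly_same compares it against a tolerance such as EPSILON.
def _nearly_same_example():
    assert per_error(100.0, 50.0) == 0.5
    assert nearly_same(100.0, 100.0000001, EPSILON)
    assert not nearly_same(1.0, 2.0, EPSILON)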
RowPredicateInfo = namedtuple("RowPredicateInfo", ["predicate", "predicate_kwargs_maker",
"predicate_failure_response"])
def does_new_fk_complete_circle(native_tbl, foreign_tbl, tdf):
fks = defaultdict(set)
for fk in tdf.foreign_keys:
fks[fk.native_table].add(fk)
rtn = []
def process_table(t, already_seen):
if t == native_tbl:
rtn[:] = [True]
elif t not in already_seen:
for fk in fks.get(t, ()):
process_table(fk.foreign_table, already_seen + [t])
process_table(foreign_tbl, [])
return bool(rtn)
| 47.586597 | 120 | 0.6363 |
c4b5032e3213115418558bba443da468d6be2b2f | 555 | py | Python | ex106.py | ClovisA-Dev/ex-anteriores-Python | 61fa7e41033267db1e057d08180c015f30695a83 | ["MIT"] | null | null | null | ex106.py | ClovisA-Dev/ex-anteriores-Python | 61fa7e41033267db1e057d08180c015f30695a83 | ["MIT"] | null | null | null | ex106.py | ClovisA-Dev/ex-anteriores-Python | 61fa7e41033267db1e057d08180c015f30695a83 | ["MIT"] | null | null | null |
from time import sleep
def mini_sistema():
sleep(1)
print('\033[7;32;40m~'*30)
print('\033[7;32;40mSISTEMA DE AJUDA PyHELP')
print('\033[7;32;40m~' * 30)
print('\033[m')
while True:
funcao = str(input('Função ou Biblioteca > '))
if funcao != 'fim':
sleep(1)
print('\033[0;30;45m')
help(funcao)
print('\033[m')
sleep(1)
if funcao.upper() == 'FIM':
sleep(1)
print('\033[0;30;41mATÉ LOGO!')
break
mini_sistema()
| 24.130435 | 54 | 0.49009 |
9d1988e35027ded61acd00933744f6b7278d6e9a | 1,446 | py | Python | api/app/schemas/theq/csr_schema.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | ["Apache-2.0"] | 1 | 2019-10-04T23:30:14.000Z | 2019-10-04T23:30:14.000Z | api/app/schemas/theq/csr_schema.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | ["Apache-2.0"] | 59 | 2018-06-27T02:39:35.000Z | 2019-06-20T20:36:09.000Z | api/app/schemas/theq/csr_schema.py | sumesh-aot/queue-management | d8de45c2d94c1a557c8f8d207d73a067709d5abb | ["Apache-2.0"] | 2 | 2018-05-21T21:30:22.000Z | 2018-05-23T11:46:43.000Z |
'''Copyright 2018 Province of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
import toastedmarshmallow
from marshmallow import fields
from app.models.theq import CSR
from app.schemas.theq import CSRStateSchema, OfficeSchema, RoleSchema
from qsystem import ma
class CSRSchema(ma.ModelSchema):
class Meta:
model = CSR
jit = toastedmarshmallow.Jit
exclude = ('periods',)
csr_id = fields.Int()
username = fields.Str()
office_id = fields.Int()
role_id = fields.Int()
receptionist_ind = fields.Int()
deleted = fields.DateTime()
csr_state_id = fields.Int()
counter_id = fields.Int()
csr_state = fields.Nested(CSRStateSchema(exclude=('csrs',)))
office = fields.Nested(OfficeSchema())
role = fields.Nested(RoleSchema(exclude=('roles',)))
ita_designate = fields.Int()
pesticide_designate = fields.Int()
finance_designate = fields.Int()
liaison_designate = fields.Int()
| 32.863636 | 72 | 0.733057 |
d215c1ffb497fe9cb0827e2e0aec7a383842e411 | 5,287 | py | Python | geopandas/io/file.py | BoBednar/geopandas | f89a6e34de2b32c1e2160f0c079b7e50067304eb | ["BSD-3-Clause"] | 1 | 2021-02-18T20:52:14.000Z | 2021-02-18T20:52:14.000Z | geopandas/io/file.py | BoBednar/geopandas | f89a6e34de2b32c1e2160f0c079b7e50067304eb | ["BSD-3-Clause"] | null | null | null | geopandas/io/file.py | BoBednar/geopandas | f89a6e34de2b32c1e2160f0c079b7e50067304eb | ["BSD-3-Clause"] | 1 | 2018-12-16T22:57:23.000Z | 2018-12-16T22:57:23.000Z |
import os
import fiona
import numpy as np
import six
from geopandas import GeoDataFrame, GeoSeries
# Adapted from pandas.io.common
if six.PY3:
from urllib.request import urlopen as _urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import uses_relative, uses_netloc, uses_params
else:
from urllib2 import urlopen as _urlopen
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
def _is_url(url):
"""Check to see if *url* has a valid protocol."""
try:
return parse_url(url).scheme in _VALID_URLS
except:
return False
def read_file(filename, bbox=None, **kwargs):
"""
Returns a GeoDataFrame from a file or URL.
Parameters
----------
filename: str
Either the absolute or relative path to the file or URL to
be opened.
bbox : tuple | GeoDataFrame or GeoSeries, default None
Filter features by given bounding box, GeoSeries, or GeoDataFrame.
CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
**kwargs:
Keyword args to be passed to the `open` or `BytesCollection` method
in the fiona library when opening the file. For more information on
possible keywords, type:
``import fiona; help(fiona.open)``
Examples
--------
>>> df = geopandas.read_file("nybb.shp")
Returns
-------
geodataframe : GeoDataFrame
"""
if _is_url(filename):
req = _urlopen(filename)
path_or_bytes = req.read()
reader = fiona.BytesCollection
else:
path_or_bytes = filename
reader = fiona.open
with reader(path_or_bytes, **kwargs) as features:
crs = features.crs
if bbox is not None:
if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
bbox = tuple(bbox.to_crs(crs).total_bounds)
assert len(bbox) == 4
f_filt = features.filter(bbox=bbox)
else:
f_filt = features
columns = list(features.meta["schema"]["properties"]) + ["geometry"]
gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
return gdf
def to_file(df, filename, driver="ESRI Shapefile", schema=None,
**kwargs):
"""
Write this GeoDataFrame to an OGR data source
A dictionary of supported OGR providers is available via:
>>> import fiona
>>> fiona.supported_drivers
Parameters
----------
df : GeoDataFrame to be written
filename : string
File path or file handle to write to.
driver : string, default 'ESRI Shapefile'
The OGR format driver used to write the vector file.
schema : dict, default None
If specified, the schema dictionary is passed to Fiona to
better control how the file is written. If None, GeoPandas
will determine the schema based on each column's dtype
The *kwargs* are passed to fiona.open and can be used to write
to multi-layer data, store data within archives (zip files), etc.
"""
if schema is None:
schema = infer_schema(df)
filename = os.path.abspath(os.path.expanduser(filename))
with fiona.drivers():
with fiona.open(filename, 'w', driver=driver, crs=df.crs,
schema=schema, **kwargs) as colxn:
colxn.writerecords(df.iterfeatures())
def infer_schema(df):
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
def convert_type(column, in_type):
if in_type == object:
return 'str'
out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
if out_type == 'long':
out_type = 'int'
if out_type == 'bool':
raise ValueError('column "{}" is boolean type, '.format(column) +
'which is unsupported in file writing. '
'Consider casting the column to int type.')
return out_type
properties = OrderedDict([
(col, convert_type(col, _type)) for col, _type in
zip(df.columns, df.dtypes) if col != df._geometry_column_name
])
if df.empty:
raise ValueError("Cannot write empty DataFrame to file.")
geom_type = _common_geom_type(df)
if not geom_type:
raise ValueError("Geometry column cannot contain mutiple "
"geometry types when writing to file.")
schema = {'geometry': geom_type, 'properties': properties}
return schema
def _common_geom_type(df):
# Need to check geom_types before we write to file...
# Some (most?) providers expect a single geometry type:
# Point, LineString, or Polygon
geom_types = df.geometry.geom_type.unique()
from os.path import commonprefix
# use reversed geom types and commonprefix to find the common suffix,
# then reverse the result to get back to a geom type
geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]
if not geom_type:
return None
if df.geometry.has_z.any():
geom_type = "3D " + geom_type
return geom_type
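# Hedged usage sketch (added for illustration; "neighborhoods.shp", "subset.shp" and the
# bbox values are invented placeholders): round-trips a GeoDataFrame through read_file,
# infer_schema and to_file.
def _geopandas_io_example():
    df = read_file("neighborhoods.shp", bbox=(-74.05, 40.60, -73.85, 40.80))
    print(infer_schema(df))  # e.g. {'geometry': 'Polygon', 'properties': OrderedDict([...])}
    to_file(df, "subset.shp", driver="ESRI Shapefile")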
| 31.470238 | 77 | 0.643087 |
86d427dc700c37b8b007ba42d8258e0de8ba1bd4 | 2,065 | py | Python | FakeNews/fakenewsFE/test.py | arkochatterjee/X-check-fake-news | 7636e0785134def088a30fd34167236a647ed457 | ["MIT"] | 4 | 2020-02-08T16:08:58.000Z | 2021-01-21T18:17:54.000Z | FakeNews/fakenewsFE/test.py | arkochatterjee/X-check-fake-news | 7636e0785134def088a30fd34167236a647ed457 | ["MIT"] | null | null | null | FakeNews/fakenewsFE/test.py | arkochatterjee/X-check-fake-news | 7636e0785134def088a30fd34167236a647ed457 | ["MIT"] | 1 | 2018-10-13T05:14:25.000Z | 2018-10-13T05:14:25.000Z |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import matplotlib.cm as cmap
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score
def predict(filename):
df=pd.read_csv('C:\\Users\\Niladri Shekhar Dutt\\Desktop\\IET-FE\\FakeNews\\fakenewsFE\\fake_or_real_news.csv')
y = df.label
df.drop("label", axis=1)
X_train, X_test, y_train, y_test = train_test_split(df['text'], y, test_size=0.5, random_state=53)
count_vectorizer = CountVectorizer(stop_words='english')
count_train = count_vectorizer.fit_transform(X_train)
tfidf_vectorizer = TfidfVectorizer(stop_words='english', max_df=0.7)
tfidf_train = tfidf_vectorizer.fit_transform(X_train)
print(tfidf_vectorizer.get_feature_names()[-10:])
print(count_vectorizer.get_feature_names()[:10])
count_df = pd.DataFrame(count_train.A, columns=count_vectorizer.get_feature_names())
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vectorizer.get_feature_names())
difference = set(count_df.columns) - set(tfidf_df.columns)
set()
print(count_df.equals(tfidf_df))
count_df.head()
tfidf_df.head()
linear_clf = PassiveAggressiveClassifier(n_iter=50)
    linear_clf.fit(tfidf_train, y_train)
a=pd.read_csv(filename,encoding='latin1')
X_test=a['text']
count_test = count_vectorizer.transform(X_test)
tfidf_test = tfidf_vectorizer.transform(X_test)
pred=linear_clf.predict(tfidf_test)
probs=linear_clf.decision_function(tfidf_test)
probs=(probs+1.0)/2.0
print(probs)
flag=True
for i in probs:
if(i>(0.25)):
flag=True
else:
flag=False
print(flag)
return (probs[0]*100)
| 18.114035 | 115 | 0.723487 |
82f7051042c55ec9fd2e1b42210de20fc226dfdd | 1,448 | py | Python | gff2exonpos.py | sotuamax/gff_script | 6c7ef3c87281ca44ed8ea36ab9cb6cc29ddfee4c | ["MIT"] | null | null | null | gff2exonpos.py | sotuamax/gff_script | 6c7ef3c87281ca44ed8ea36ab9cb6cc29ddfee4c | ["MIT"] | null | null | null | gff2exonpos.py | sotuamax/gff_script | 6c7ef3c87281ca44ed8ea36ab9cb6cc29ddfee4c | ["MIT"] | null | null | null |
import argparse
import pysam
def args_parser():
'''parser the argument from terminal command'''
parser=argparse.ArgumentParser(prog = "PROG", formatter_class = argparse.RawDescriptionHelpFormatter, description=" \n\
Usage: python gtf_info.py -gff <gff> -O <output> ")
parser.add_argument("-gff", "--gff", help = "gff annotation file (sorted and indexed). ")
parser.add_argument("-O", "--output", help="output prefix. ")
args = parser.parse_args()
return args
def attributes_Parent(attribute):
'''parse attribute field and return gene id '''
exon_parent = attribute.split("Parent")[-1].split(";")[0].split(":")[-1]
return exon_parent
def parse_gff(args):
'''parse gtf for gene location. '''
gff = args.gff
output = args.output
gff_df = pysam.TabixFile(gff, parser = pysam.asGTF(), threads = 2)
###
gene = open(output + ".txt", "w")
gene.write("transcript\tcontig\tstart\tend\tstrand\n")
###
for i in gff_df.fetch():
if i.feature == "exon":
atb = i.attributes
transcript = attributes_Parent(atb)
tig = i.contig
st = i.start + 1
ed = i.end
strand = i.strand
gene.write(f"{transcript}\t{tig}\t{st}\t{ed}\t{strand}\n")
gene.close()
def main():
args = args_parser()
parse_gff(args)
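# Hedged illustration (added): attributes_Parent pulls the transcript id out of a GFF3
# attribute string, and the command line below mirrors the usage string in args_parser
# (the file names are placeholders).
#
#   python gff2exonpos.py -gff annotation.gff3.gz -O exon_positions
#
def _attributes_parent_example():
    assert attributes_Parent("ID=exon1;Parent=transcript:TX001;rank=1") == "TX001"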
##############
### Run it ###
##############
if __name__ == "__main__":
main()
| 30.808511 | 123 | 0.593232 |
c1db957292abd50b0ee8931490ba8d8e8ffb8edd | 2,427 | py | Python | pcdet/models/detectors/pointpillar.py | CSL-KU/OpenPCDet | 2c5fca0da1521add4b40e6cdfe75d02d4285b83f | ["Apache-2.0"] | null | null | null | pcdet/models/detectors/pointpillar.py | CSL-KU/OpenPCDet | 2c5fca0da1521add4b40e6cdfe75d02d4285b83f | ["Apache-2.0"] | null | null | null | pcdet/models/detectors/pointpillar.py | CSL-KU/OpenPCDet | 2c5fca0da1521add4b40e6cdfe75d02d4285b83f | ["Apache-2.0"] | null | null | null |
from .detector3d_template import Detector3DTemplate
import torch
#128 151 178
class PointPillar(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
self.vfe, self.map_to_bev, self.backbone_2d, self.dense_head = self.module_list
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(0)
self.update_time_dict( {
'VFE': [], #'PillarFeatureNet': [],
'MapToBEV': [], #'PillarScatter': [],
'RPN-finalize': [],
'RPN-total': [],
'Post-RPN': [],
'PostProcess': [],})
def forward(self, batch_dict):
#for cur_module in self.module_list:
# batch_dict = cur_module(batch_dict)
#self.measure_time_start('VFE')
batch_dict = self.vfe(batch_dict)
#self.measure_time_end('VFE')
#self.measure_time_start('MapToBEV')
batch_dict = self.map_to_bev(batch_dict)
#self.measure_time_end('MapToBEV')
self.measure_time_end('Pre-RPN')
self.measure_time_start('RPN-total')
batch_dict = self.backbone_2d(batch_dict)
self.measure_time_end('RPN-total')
self.measure_time_start('Post-RPN')
self.measure_time_start('RPN-finalize')
batch_dict = self.dense_head(batch_dict)
self.measure_time_end('RPN-finalize')
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
self.measure_time_end('Post-RPN')
return ret_dict, tb_dict, disp_dict
else:
self.measure_time_start('PostProcess')
pred_dicts, recall_dicts = self.post_processing(batch_dict, False)
self.measure_time_end('PostProcess')
for dd in pred_dicts:
for k,v in dd.items():
dd[k] = v.cpu()
self.measure_time_end('Post-RPN')
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss_rpn, tb_dict = self.dense_head.get_loss()
tb_dict = {
'loss_rpn': loss_rpn.item(),
**tb_dict
}
loss = loss_rpn
return loss, tb_dict, disp_dict
| 33.246575 | 87 | 0.591265 |
fe3cc81b1e01244175856a76d0366ae37703fe8b | 11,662 | py | Python | tlstool.py | somerovi/tlstool | 95db6264fcc5d63ae7ec8f177d2b0a7b79fe1ae8 | ["MIT"] | null | null | null | tlstool.py | somerovi/tlstool | 95db6264fcc5d63ae7ec8f177d2b0a7b79fe1ae8 | ["MIT"] | null | null | null | tlstool.py | somerovi/tlstool | 95db6264fcc5d63ae7ec8f177d2b0a7b79fe1ae8 | ["MIT"] | null | null | null |
import argparse
import logging
import os
import subprocess
import sys
import jinja2
import yaml
CMD = "/usr/bin/openssl"
base_path = os.path.dirname(os.path.realpath(__file__))
VERSION = "1"
logger = logging.getLogger()
def openssl(args, input=None, env=None):
process = subprocess.Popen(
[CMD] + args,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
stdout, stderr = [
s.decode("utf-8").strip() for s in process.communicate(input=input)
]
logger.debug(f"{stdout} {stderr}")
if process.returncode != 0:
raise Exception(f"Error[{process.returncode}]:\n{stdout}\n\n{stderr}")
return stdout
def generate_openssl_config(name, root_dir, conf, templates_dir="."):
tpl_name = conf.get("tpl_name")
if tpl_name:
searchpath = conf.get("tpl_path", templates_dir)
template = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=searchpath)
).get_template(tpl_name)
params = {"name": name, "root_dir": root_dir}
fpath = conf.get("path", os.path.join(root_dir, f"{name}.conf"))
create_file_if_not_exists(fpath, template.render(**params).encode("utf-8"))
conf["path"] = fpath
def create_file_if_not_exists(fpath, data=None):
if not os.path.exists(fpath):
with open(fpath, "wb") as fobj:
if data:
try:
data = data.encode("utf-8")
except AttributeError:
pass
fobj.write(data)
def initialize_directories(root_dir, serial=None, clrnumber=None):
dirs = dict(
(directory, os.path.join(root_dir, directory))
for directory in ["certs", "crl", "requests", "newcerts", "private"]
)
os.makedirs(root_dir, exist_ok=True)
for directory, path in dirs.items():
os.makedirs(path, exist_ok=True)
os.chmod(dirs["private"], 0o700)
create_file_if_not_exists(os.path.join(root_dir, "index.txt"))
create_file_if_not_exists(os.path.join(root_dir, "index.txt.attr"))
create_file_if_not_exists(os.path.join(root_dir, "serial"), serial)
create_file_if_not_exists(os.path.join(root_dir, "crlnumber"), clrnumber)
return dirs
def create_private_key(name, private_dir, settings):
fpath = os.path.join(private_dir, f"{name}.key.pem")
if not os.path.exists(fpath):
cipher = settings.get("cipher", "aes256")
numbits = settings.get("numbits", 4096)
password = settings.get("password")
args = ["genrsa", f"-{cipher}", "-out", fpath]
args = args + (["-passout", f"pass:{password}"] if password else [])
args = args + [f"{numbits}"]
openssl(args)
os.chmod(fpath, 0o400)
logger.debug(f"Created private key {fpath}")
return fpath
def create_certificate(name, conf_file, certs_dir, key_file, key_password, settings):
fpath = os.path.join(certs_dir, f"{name}.cert.pem")
if not os.path.exists(fpath):
subject = settings["subject"]
cipher = settings.get("cipher", "sha256")
days = settings.get("days", "7300")
extensions = settings.get("extensions")
args = ["req", "-new", "-x509", "-config", conf_file, "-key", key_file]
args = args + [
"-out",
fpath,
"-days",
f"{days}",
f"-{cipher}",
"-subj",
subject,
]
args = args + (["-passin", f"pass:{key_password}"] if key_password else [])
args = args + (["-extensions", extensions] if extensions else [])
openssl(args)
os.chmod(fpath, 0o444)
logger.debug(openssl(["x509", "-noout", "-text", "-in", fpath]))
logger.debug(f"Created certifcate {fpath}")
return fpath
def create_certificate_request(
name, conf_file, requests_dir, key_file, key_password, settings
):
fpath = os.path.join(requests_dir, f"{name}.csr.pem")
if not os.path.exists(fpath):
subject = settings["subject"]
cipher = settings.get("cipher", "sha256")
args = ["req", "-config", conf_file, "-new", f"-{cipher}", "-key", key_file]
args = args + ["-out", fpath, "-subj", subject]
args = args + (["-passin", f"pass:{key_password}"] if key_password else [])
openssl(args)
logger.debug(f"Created Certificate Signing Request {fpath}")
return fpath
def create_signed_certificate(
name,
ca_conf_file,
ca_cert_file,
ca_key_password,
certs_dir,
requests_file,
settings,
):
fpath = os.path.join(certs_dir, f"{name}.cert.pem")
if not os.path.exists(fpath):
cipher = settings["cipher"]
days = settings["days"]
extensions = settings["extensions"]
args = ["ca", "-batch", "-config", ca_conf_file]
args = args + ["-extensions", extensions, "-days", f"{days}"]
args = args + (
["-passin", f"pass:{ca_key_password}"] if ca_key_password else []
)
args = args + ["-notext", "-md", cipher, "-in", requests_file, "-out", fpath]
openssl(args)
os.chmod(fpath, 0o444)
openssl(["x509", "-noout", "-text", "-in", fpath])
logger.debug(f"Signed certificate {fpath} created")
return fpath
def create_chain_certificate(name, certs_dir, cert_file, ca_or_chain_cert_file):
fpath = os.path.join(certs_dir, f"{name}-chain.cert.pem")
if not os.path.exists(fpath):
with open(fpath, "wb") as fobj:
for cf in [cert_file, ca_or_chain_cert_file]:
with open(cf, "rb") as infile:
fobj.write(infile.read())
os.chmod(fpath, 0o444)
logger.debug(f"Chain file {fpath} created")
return fpath
def verify(cert_file, ca_or_chain_cert_file, int_cert_file=None):
args = ["verify", "-CAfile", ca_or_chain_cert_file]
args = args + (['-untrusted', int_cert_file] if int_cert_file else [])
args = args + [cert_file]
openssl(args)
class Exporter:
@classmethod
def pfx(
cls,
name,
certs_dir,
cert_file,
key_file,
key_password,
pfx_password,
ca_cert_file=None,
):
fpath = os.path.join(certs_dir, f"{name}.pfx")
if not os.path.exists(fpath):
args = ["pkcs12", "-export"]
args = args + ["-in", cert_file, "-inkey", key_file, "-out", fpath]
args = args + (["-passin", f"pass:{key_password}"] if key_password else [])
args = args + (["-passout", f"pass:{pfx_password}"] if pfx_password else [])
args = args + (["-certfile", ca_cert_file] if ca_cert_file else [])
openssl(args)
logger.debug(f"Exported certificate to PFX format: {fpath}")
@classmethod
def der(cls, name, certs_dir, cert_file, key_file, key_password, ca_cert_file=None):
"""
openssl x509 -outform der -in certificate.pem -out certificate.der
"""
@classmethod
def pkc8(
cls, name, certs_dir, cert_file, key_file, key_password, ca_cert_file=None
):
"""
openssl pkcs8 -topk8 -inform PEM -outform DER -in filename -out filename -nocrypt
"""
def build(configuration, templates_dir):
version = configuration.pop("version")
logger.debug(f"conf version: {version}")
for cert, settings in configuration.items():
logger.debug(f"Generating {cert} Certificate")
settings.setdefault("keys", {})
settings.setdefault("certs", {})
settings.setdefault("chains", {})
settings.setdefault("requests", {})
settings["dirs"] = initialize_directories(
settings["root_dir"],
serial=settings.get("serial"),
clrnumber=settings.get("clrnumber"),
)
generate_openssl_config(
cert, settings["root_dir"], settings["conf"], templates_dir
)
if settings.get("key"):
settings["keys"][cert] = create_private_key(
cert, settings["dirs"]["private"], settings["key"]
)
if settings.get("cert"):
settings["certs"][cert] = create_certificate(
cert,
settings["conf"]["path"],
settings["dirs"]["certs"],
settings["keys"][cert],
settings["key"].get("password"),
settings["cert"],
)
if settings.get("csr"):
settings["requests"][cert] = create_certificate_request(
cert,
settings["conf"]["path"],
settings["dirs"]["requests"],
settings["keys"][cert],
settings["key"].get("password"),
settings["csr"],
)
if settings.get("from"):
ca = settings["from"]["ca"]
settings["certs"][cert] = create_signed_certificate(
cert,
configuration[ca]["conf"]["path"],
configuration[ca]["certs"][ca],
configuration[ca]["key"].get("password"),
settings["dirs"]["certs"],
settings["requests"][cert],
settings["from"],
)
verify(
settings["certs"][cert],
configuration[ca]["chains"].get(ca, configuration[ca]["certs"][ca])
)
if settings.get("bundle"):
ca = settings["from"]["ca"]
settings["chains"][cert] = create_chain_certificate(
cert,
settings["dirs"]["certs"],
settings["certs"][cert],
configuration[ca]["chains"].get(ca, configuration[ca]["certs"][ca]),
)
verify(
settings["certs"][cert],
settings["chains"][cert]
)
if settings.get("export"):
for ext in settings["export"]:
try:
ca_or_chain_cert_file = None
if settings.get("from"):
ca = settings["from"]["ca"]
ca_or_chain_cert_file = configuration[ca]["chains"].get(
ca, configuration[ca]["certs"][ca]
)
exporter = getattr(Exporter, ext)
exporter(
cert,
settings["dirs"]["certs"],
settings["certs"][cert],
settings["keys"][cert],
settings["key"].get("password"),
settings["export"][ext].get("password"),
ca_or_chain_cert_file,
)
except AttributeError:
logger.error(f"Unsupported export format: ext")
def cli():
parser = argparse.ArgumentParser(
description="TLSTool simplifies generating TLS certs"
)
parser.add_argument("-c", "--conf", type=str, help="TLSTool config file")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose")
parser.add_argument(
"-t",
"--templates-dir",
type=str,
help="Specify folder where openssl config templates are stored.",
default="./templates",
)
args = parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level)
with open(args.conf) as fobj:
conf = yaml.load(fobj, Loader=yaml.Loader)
build(conf, args.templates_dir)
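# Hedged illustration (added, not part of the original tool): a minimal configuration
# sketch for build(), inferred from the keys it reads (root_dir, conf, key, cert, csr,
# from, bundle, export). All paths, template names, subjects and passwords below are
# invented placeholders.
EXAMPLE_CONFIG_YAML = """
version: "1"
root-ca:
  root_dir: ./pki/root
  conf: {tpl_name: openssl-root.conf.j2}
  key: {cipher: aes256, numbits: 4096, password: changeme}
  cert: {subject: "/CN=Example Root CA", days: 7300, extensions: v3_ca}
web-server:
  root_dir: ./pki/web
  conf: {tpl_name: openssl-server.conf.j2}
  key: {numbits: 2048, password: changeme}
  csr: {subject: "/CN=www.example.test"}
  from: {ca: root-ca, cipher: sha256, days: 825, extensions: server_cert}
  bundle: true
  export:
    pfx: {password: exportme}
"""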
if __name__ == "__main__":
cli()
| 33.32 | 89 | 0.56131 |
9408bd8bbce8c06abc5124b282df699d38a7194b | 6,672 | py | Python | hyfed-server/hyfed_server/serializer/hyfed_serializers.py | TUM-AIMED/hyfed | 48c7ee0dda92ebb70cc985dc4c0eedb7403dc823 | ["Apache-2.0"] | 11 | 2021-04-13T12:11:16.000Z | 2022-03-21T11:45:07.000Z | hyfed-server/hyfed_server/serializer/hyfed_serializers.py | AnneHartebrodt/hyfed-pca | 57c009d17d00524f216d57f4fd3fb8732c3fccce | ["Apache-2.0"] | null | null | null | hyfed-server/hyfed_server/serializer/hyfed_serializers.py | AnneHartebrodt/hyfed-pca | 57c009d17d00524f216d57f4fd3fb8732c3fccce | ["Apache-2.0"] | 4 | 2021-04-04T12:17:03.000Z | 2021-05-25T11:11:20.000Z |
"""
Serializer classes to serialize the models
Copyright 2021 Julian Matschinske, Reza NasiriGerdeh, and Reihaneh TorkzadehMahani. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from hyfed_server.models import UserModel
from hyfed_server.model.hyfed_models import TokenModel, HyFedProjectModel, TimerModel, TrafficModel
# ############### Serializer classes to serve WEBAPP requests ####################
class UserSerializer(serializers.ModelSerializer):
"""
Serializes the user information
"""
password = serializers.CharField(write_only=True)
def create(self, validated_data):
"""
Create a user instance (account) when the user signs up
"""
user = super(UserSerializer, self).create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class Meta:
# TODO: remove attributes not needed
model = UserModel
fields = ('id', 'username', 'first_name', 'last_name', 'email', 'password', 'date_joined')
write_only_fields = ('password',)
read_only_fields = ('id', 'date_joined',)
class HyFedProjectSerializer(serializers.ModelSerializer):
"""
Serializes the HyFed project model fields to WebApp and client
"""
id = serializers.SerializerMethodField()
roles = serializers.SerializerMethodField()
coordinator = serializers.SerializerMethodField()
# runtime stats
client_computation = serializers.SerializerMethodField()
client_network_send = serializers.SerializerMethodField()
client_network_receive = serializers.SerializerMethodField()
client_idle = serializers.SerializerMethodField()
compensator_computation = serializers.SerializerMethodField()
compensator_network_send = serializers.SerializerMethodField()
server_computation = serializers.SerializerMethodField()
runtime_total = serializers.SerializerMethodField()
# traffic stats between components
client_server = serializers.SerializerMethodField()
server_client = serializers.SerializerMethodField()
client_compensator = serializers.SerializerMethodField()
compensator_server = serializers.SerializerMethodField()
traffic_total = serializers.SerializerMethodField()
def get_id(self, instance):
""" Convert id from UUID type to string """
return str(instance.id)
def get_coordinator(self, instance):
""" Get the username of the coordinator """
return instance.coordinator.username
def get_roles(self, instance):
"""
Get the role(s) (coordinator|participant|both) of the user
"""
roles = []
try:
if instance.coordinator == self.context['request'].user:
roles.append('coordinator')
if self.context['request'].user in UserModel.objects.filter(projects__project=instance).all():
roles.append('participant')
return roles
except:
return ['-']
# functions to get the client/compensator/server times
def get_client_computation(self, instance):
return instance.timer.client_computation
def get_client_network_send(self, instance):
return instance.timer.client_network_send
def get_client_network_receive(self, instance):
return instance.timer.client_network_receive
def get_client_idle(self, instance):
return instance.timer.client_idle
def get_compensator_computation(self, instance):
return instance.timer.compensator_computation
def get_compensator_network_send(self, instance):
return instance.timer.compensator_network_send
def get_server_computation(self, instance):
return instance.timer.server_computation
def get_runtime_total(self, instance):
return instance.timer.runtime_total
def get_client_server(self, instance):
return instance.traffic.client_server
def get_server_client(self, instance):
return instance.traffic.server_client
def get_client_compensator(self, instance):
return instance.traffic.client_compensator
def get_compensator_server(self, instance):
return instance.traffic.compensator_server
def get_traffic_total(self, instance):
return instance.traffic.traffic_total
class Meta:
model = HyFedProjectModel
fields = ('id', 'coordinator', 'tool', 'algorithm', 'name', 'description', 'status', 'step', 'comm_round',
'roles', 'created_at', 'client_computation', 'client_network_send', 'client_network_receive', 'client_idle',
'compensator_computation', 'compensator_network_send', 'server_computation', 'runtime_total',
'client_server', 'server_client', 'client_compensator', 'compensator_server', 'traffic_total')
read_only_fields = ('id', 'created_at',)
class TokenSerializer(serializers.ModelSerializer):
"""
Serializes the token with customized fields
"""
username = serializers.SerializerMethodField()
roles = serializers.SerializerMethodField()
def get_username(self, instance):
try:
return instance.participant.username
except:
return "-"
def get_roles(self, instance):
roles = []
try:
if instance.participant.id == instance.project.coordinator.id:
roles.append('coordinator')
roles.append('participant')
else:
roles.append('participant')
return roles
except:
return ['-']
class Meta:
model = TokenModel
fields = ('id', 'username', 'roles')
class TimerSerializer(serializers.ModelSerializer):
class Meta:
model = TimerModel
fields = ('id', 'computation', 'network_send', 'network_receive', 'idle', 'aggregation')
class TrafficSerializer(serializers.ModelSerializer):
class Meta:
model = TrafficModel
fields = ('id', 'client_server', 'server_client')
| 34.215385 | 126 | 0.685701 |
554a9570e7697c667bd8fbdc4a4807fdddaed091 | 13,208 | py | Python | maro/simulator/core.py | jreynolds01/maro | a61968b9e53ab83e248ec258dd8ea7c05c56bf4c | ["MIT"] | 1 | 2021-08-17T17:10:02.000Z | 2021-08-17T17:10:02.000Z | maro/simulator/core.py | afelipeg/maro | 69e212703e49adc6eee0741c12dee06c30c0c82e | ["MIT"] | null | null | null | maro/simulator/core.py | afelipeg/maro | 69e212703e49adc6eee0741c12dee06c30c0c82e | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
from importlib import import_module
from inspect import getmembers, isclass
from typing import List
from maro.backends.frame import FrameBase, SnapshotList
from maro.data_lib.dump_csv_converter import DumpConverter
from maro.event_buffer import EventBuffer, EventState
from maro.streamit import streamit
from maro.utils.exception.simulator_exception import BusinessEngineNotFoundError
from .abs_core import AbsEnv, DecisionMode
from .scenarios.abs_business_engine import AbsBusinessEngine
from .utils import seed as sim_seed
from .utils.common import tick_to_frame_index
class Env(AbsEnv):
"""Default environment implementation using generator.
Args:
scenario (str): Scenario name under maro/simulator/scenarios folder.
topology (str): Topology name under specified scenario folder.
If it points to an existing folder, the corresponding topology will be used for the built-in scenario.
start_tick (int): Start tick of the scenario, usually used for pre-processed data streaming.
durations (int): Duration ticks of this environment from start_tick.
snapshot_resolution (int): How many ticks will take a snapshot.
max_snapshots(int): Max in-memory snapshot number.
            When the number of dumped snapshots reaches the limit, the oldest one will be overwritten by the new one.
None means keeping all snapshots in memory. Defaults to None.
business_engine_cls (type): Class of business engine. If specified, use it to construct the be instance,
or search internally by scenario.
disable_finished_events (bool): Disable finished events list, with this set to True, EventBuffer will
re-use finished event object, this reduce event object number.
record_finished_events (bool): If record finished events into csv file, default is False.
record_file_path (str): Where to save the recording file, only work if record_finished_events is True.
options (dict): Additional parameters passed to business engine.
"""
def __init__(
self, scenario: str = None, topology: str = None,
start_tick: int = 0, durations: int = 100, snapshot_resolution: int = 1, max_snapshots: int = None,
decision_mode: DecisionMode = DecisionMode.Sequential,
business_engine_cls: type = None, disable_finished_events: bool = False,
record_finished_events: bool = False,
record_file_path: str = None,
options: dict = {}
):
super().__init__(
scenario, topology, start_tick, durations,
snapshot_resolution, max_snapshots, decision_mode, business_engine_cls,
disable_finished_events, options
)
self._name = f'{self._scenario}:{self._topology}' if business_engine_cls is None \
else business_engine_cls.__name__
self._business_engine: AbsBusinessEngine = None
self._event_buffer = EventBuffer(disable_finished_events, record_finished_events, record_file_path)
# decision_events array for dump.
self._decision_events = []
# The generator used to push the simulator forward.
self._simulate_generator = self._simulate()
# Initialize the business engine.
self._init_business_engine()
if "enable-dump-snapshot" in self._additional_options:
parent_path = self._additional_options["enable-dump-snapshot"]
self._converter = DumpConverter(parent_path, self._business_engine._scenario_name)
self._converter.reset_folder_path()
self._streamit_episode = 0
def step(self, action):
"""Push the environment to next step with action.
Args:
action (Action): Action(s) from agent.
Returns:
tuple: a tuple of (metrics, decision event, is_done).
"""
try:
metrics, decision_event, _is_done = self._simulate_generator.send(action)
except StopIteration:
return None, None, True
return metrics, decision_event, _is_done
def dump(self):
"""Dump environment for restore.
NOTE:
Not implemented.
"""
return
def reset(self):
"""Reset environment."""
self._tick = self._start_tick
self._simulate_generator.close()
self._simulate_generator = self._simulate()
self._event_buffer.reset()
if ("enable-dump-snapshot" in self._additional_options) and (self._business_engine._frame is not None):
dump_folder = self._converter.get_new_snapshot_folder()
self._business_engine._frame.dump(dump_folder)
self._converter.start_processing(self.configs)
self._converter.dump_descsion_events(self._decision_events, self._start_tick, self._snapshot_resolution)
self._business_engine.dump(dump_folder)
self._decision_events.clear()
self._business_engine.reset()
@property
def configs(self) -> dict:
"""dict: Configurations of current environment."""
return self._business_engine.configs
@property
def summary(self) -> dict:
"""dict: Summary about current simulator, including node details and mappings."""
return {
"node_mapping": self._business_engine.get_node_mapping(),
"node_detail": self.current_frame.get_node_info(),
"event_payload": self._business_engine.get_event_payload_detail(),
}
@property
def name(self) -> str:
"""str: Name of current environment."""
return self._name
@property
def current_frame(self) -> FrameBase:
"""Frame: Frame of current environment."""
return self._business_engine.frame
@property
def tick(self) -> int:
"""int: Current tick of environment."""
return self._tick
@property
def frame_index(self) -> int:
"""int: Frame index in snapshot list for current tick."""
return tick_to_frame_index(self._start_tick, self._tick, self._snapshot_resolution)
@property
def snapshot_list(self) -> SnapshotList:
"""SnapshotList: A snapshot list containing all the snapshots of frame at each dump point.
NOTE: Due to different environment configurations, the resolution of the snapshot may be different.
"""
return self._business_engine.snapshots
@property
def agent_idx_list(self) -> List[int]:
"""List[int]: Agent index list that related to this environment."""
return self._business_engine.get_agent_idx_list()
def set_seed(self, seed: int):
"""Set random seed used by simulator.
NOTE:
This will not set seed for Python random or other packages' seed, such as NumPy.
Args:
seed (int): Seed to set.
"""
if seed is not None:
sim_seed(seed)
@property
def metrics(self) -> dict:
"""Some statistics information provided by business engine.
Returns:
dict: Dictionary of metrics, content and format is determined by business engine.
"""
return self._business_engine.get_metrics()
def get_finished_events(self):
"""List[Event]: All events finished so far."""
return self._event_buffer.get_finished_events()
def get_pending_events(self, tick):
"""Pending events at certain tick.
Args:
tick (int): Specified tick to query.
"""
return self._event_buffer.get_pending_events(tick)
def _init_business_engine(self):
"""Initialize business engine object.
NOTE:
1. For built-in scenarios, they will always under "maro/simulator/scenarios" folder.
2. For external scenarios, the business engine instance is built with the loaded business engine class.
"""
max_tick = self._start_tick + self._durations
if self._business_engine_cls is not None:
business_class = self._business_engine_cls
else:
# Combine the business engine import path.
business_class_path = f'maro.simulator.scenarios.{self._scenario}.business_engine'
# Load the module to find business engine for that scenario.
business_module = import_module(business_class_path)
business_class = None
for _, obj in getmembers(business_module, isclass):
if issubclass(obj, AbsBusinessEngine) and obj != AbsBusinessEngine:
# We find it.
business_class = obj
break
if business_class is None:
raise BusinessEngineNotFoundError()
self._business_engine = business_class(
event_buffer=self._event_buffer,
topology=self._topology,
start_tick=self._start_tick,
max_tick=max_tick,
snapshot_resolution=self._snapshot_resolution,
max_snapshots=self._max_snapshots,
additional_options=self._additional_options
)
def _simulate(self):
"""This is the generator to wrap each episode process."""
is_end_tick = False
self._streamit_episode += 1
streamit.episode(self._streamit_episode)
while True:
# Ask business engine to do thing for this tick, such as generating and pushing events.
# We do not push events now.
streamit.tick(self._tick)
self._business_engine.step(self._tick)
while True:
# Keep processing events, until no more events in this tick.
pending_events = self._event_buffer.execute(self._tick)
# Processing pending events.
pending_event_length: int = len(pending_events)
if pending_event_length == 0:
# We have processed all the event of current tick, lets go for next tick.
break
# Insert snapshot before each action.
self._business_engine.frame.take_snapshot(self.frame_index)
# Append source event id to decision events, to support sequential action in joint mode.
decision_events = [event.payload for event in pending_events]
decision_events = decision_events[0] if self._decision_mode == DecisionMode.Sequential \
else decision_events
# Yield current state first, and waiting for action.
actions = yield self._business_engine.get_metrics(), decision_events, False
# archive decision events.
self._decision_events.append(decision_events)
if actions is None:
# Make business engine easy to work.
actions = []
if actions is not None and not isinstance(actions, Iterable):
actions = [actions]
if self._decision_mode == DecisionMode.Sequential:
# Generate a new atom event first.
action_event = self._event_buffer.gen_action_event(self._tick, actions)
# NOTE: decision event always be a CascadeEvent
# We just append the action into sub event of first pending cascade event.
pending_events[0].state = EventState.EXECUTING
pending_events[0].add_immediate_event(action_event, is_head=True)
else:
# For joint mode, we will assign actions from beginning to end.
# Then mark others pending events to finished if not sequential action mode.
for i, pending_event in enumerate(pending_events):
if i >= len(actions):
if self._decision_mode == DecisionMode.Joint:
# Ignore following pending events that have no action matched.
pending_event.state = EventState.FINISHED
else:
# Set the state as executing, so event buffer will not pop them again.
# Then insert the action to it.
action = actions[i]
pending_event.state = EventState.EXECUTING
action_event = self._event_buffer.gen_action_event(self._tick, action)
pending_event.add_immediate_event(action_event, is_head=True)
# Check the end tick of the simulation to decide if we should end the simulation.
is_end_tick = self._business_engine.post_step(self._tick)
if is_end_tick:
break
self._tick += 1
# Make sure we have no missing data.
if (self._tick + 1) % self._snapshot_resolution != 0:
self._business_engine.frame.take_snapshot(self.frame_index)
# The end.
yield self._business_engine.get_metrics(), None, True
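# Hedged usage sketch (added for illustration): the typical interaction loop with Env as
# described in step()/reset() above. The scenario and topology names are placeholders
# and depend on which scenarios ship with the installed maro package.
def _env_interaction_example():
    env = Env(scenario="cim", topology="toy.4p_ssdd_l0.0", start_tick=0, durations=100)
    metrics, decision_event, is_done = env.step(None)
    while not is_done:
        action = None  # a real agent would build an action from decision_event here
        metrics, decision_event, is_done = env.step(action)
    print(env.metrics)
    env.reset()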
| 39.663664 | 116 | 0.639915 |
7bf457d331102b0615696a1c3aa48ea65ea3a25d | 30,108 | py | Python | Lib/distutils/msvc9compiler.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | ["bzip2-1.0.6"] | null | null | null | Lib/distutils/msvc9compiler.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | ["bzip2-1.0.6"] | null | null | null | Lib/distutils/msvc9compiler.py | hashiqizaizai/hashiqizaizai.github.io | 7217400802f6b944dfd1e29d4b00d268957ff769 | ["bzip2-1.0.6"] | null | null | null |
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id: msvc9compiler.py 86440 2010-11-12 22:27:28Z eric.araujo $"
import os
import subprocess
import sys
import re
from distutils.errors import (DistutilsExecError, DistutilsPlatformError,
CompileError, LibError, LinkError)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
HKEYS = (_winreg.HKEY_USERS,
_winreg.HKEY_CURRENT_USER,
_winreg.HKEY_LOCAL_MACHINE,
_winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Wow6432Node\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
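# Hedged illustration (not part of the original module): Reg.get_value() walks
# the HKEYS roots and returns the first match, with value names lowercased by
# read_values(). find_vcvarsall() below uses it like this:
#     productdir = Reg.get_value(r"%s\Setup\VC" % (VS_BASE % 9.0), "productdir")
# A KeyError is raised when the value is found under none of the roots.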
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
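# Worked example of the arithmetic above: a sys.version containing "MSC v.1500"
# gives s = "1500", so majorVersion = int("15") - 6 = 9 and
# minorVersion = int("0") / 10.0 = 0.0, i.e. get_build_version() returns 9.0
# (the compiler shipped with Visual Studio 2008).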
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
At first it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
# trying Express edition
if productdir is None:
vsbase = VSEXPRESS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
log.debug("Unable to find productdir in registry")
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
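# Hedged illustration (not part of the original module): on a machine with
# VS 2008 installed, query_vcvarsall(9.0, "x86") runs "vcvarsall.bat x86 & set"
# in a subprocess and returns a dict keyed by the `interesting` names, e.g.
#     {"include": "...", "lib": "...", "libpath": "...", "path": "..."}
# with duplicate PATH entries removed by removeDuplicates().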
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
# take care to only use strings in the environment.
self.__paths = vc_env['path'].encode('mbcs').split(os.pathsep)
os.environ['lib'] = vc_env['lib'].encode('mbcs')
os.environ['include'] = vc_env['include'].encode('mbcs')
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
# Embedded manifests are recommended - see MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can embed it later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
if target_desc == CCompiler.EXECUTABLE:
mfid = 1
else:
mfid = 2
self._remove_visual_c_ref(temp_manifest)
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
temp_manifest, out_arg])
except DistutilsExecError, msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
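# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal end-to-end illustration of the pieces above on a Windows machine
# with a matching Visual Studio 2008 install and a Python 2 interpreter (the
# module uses _winreg and the old `except Error, msg` syntax). "example.c" is a
# hypothetical source file:
#
#     compiler = MSVCCompiler()
#     compiler.initialize("win32")        # runs vcvarsall.bat, resolves cl.exe
#     objs = compiler.compile(["example.c"], output_dir="build")
#     compiler.create_static_lib(objs, "example", output_dir="build")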
avg_line_length: 39.356863 | max_line_length: 100 | alphanum_fraction: 0.543543

hexsha: ec75048c7672b94151aaf117b189b20137af7e31 | size: 3295 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: ecs/batch-app/main.py
max_stars/max_issues/max_forks_repo_name: avcaliani/aws-app
max_stars/max_issues/max_forks_repo_head_hexsha: d2c2db1f427f049842e867bd65c8bce180071a40
max_stars/max_issues/max_forks_repo_licenses: ["MIT"]
star, issue, and fork counts and event datetimes: null
import json
import logging as log
from argparse import ArgumentParser
from datetime import datetime
from os import environ
from random import choice
from time import sleep
from uuid import uuid4
import requests
from boto3 import Session
def init_log():
log.basicConfig(
format='[%(asctime)s][%(name)s][%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=log.INFO
)
def init_session():
return Session(
aws_access_key_id=environ.get('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=environ.get('AWS_SECRET_ACCESS_KEY'),
region_name=environ.get('AWS_REGION'),
)
def get_args():
parser = ArgumentParser(description='Job - Chuck Norris')
parser.add_argument('-p', dest='pipeline', required=True, choices=['extract', 'show'], help='Job Pipeline')
parser.add_argument('-b', dest='bucket', default='nth-dev-datalake', help='S3 Bucket')
parser.add_argument('-o', dest='output_path', default='sandbox/jokes', help='Output path inside bucket.')
parser.add_argument('--api-url', default='https://api.chucknorris.io/jokes/random', help='API URL')
parser.add_argument('--api-sleep', default=1, type=int, help='API Request Interval (Seconds)')
parser.add_argument('--api-requests', default=10, type=int, help='How many requests?')
return parser.parse_args()
def save(session, bucket, path, data):
if data:
time = datetime.utcnow().strftime('%Y%m%d%H%M%S')
file_path = f'{path}/{time}-{uuid4()}.json'
log.info(f'Writing file "s3://{bucket}/{file_path}"')
client = session.client('s3')
client.put_object(
Body=bytes(json.dumps(data, ensure_ascii=False), 'utf8'),
Bucket=bucket,
Key=file_path
)
def request(url):
data = requests.get(url).json()
if data:
data['created_at'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
log.info(f'Response Data: {data}')
return data
def exec_extraction(args):
session = init_session()
how_many = args.api_requests
log.info(f'Starting {how_many} executions...')
for i in range(how_many):
count = f'({i + 1}/{how_many})'
log.info(f'{count} Requesting data...')
data = request(args.api_url)
log.info(f'{count} Saving data...')
save(session, args.bucket, args.output_path, data)
log.info(f'{count} Done!')
sleep(args.api_sleep)
def exec_show(args):
session = init_session()
client = session.client('s3')
log.info(f'Listing bucket "s3://{args.bucket}/{args.output_path}"')
files = client.list_objects(Bucket=args.bucket, Prefix=args.output_path).get('Contents')
log.info(f'{len(files)} files found!')
random_joke = choice(files)
data = client.get_object(Bucket=args.bucket, Key=random_joke.get('Key'))
data = json.loads(data['Body'].read().decode('utf8'))
log.info(f'Joke: "{data.get("value")}"')
log.info(f'Created At: "{data.get("created_at")}"')
def main(args):
log.info(f'Env: {environ.get("APP_ENV", "unknown")}')
log.info(f'Args: {args}')
if args.pipeline.strip().lower() == 'extract':
exec_extraction(args)
else:
exec_show(args)
if __name__ == '__main__':
init_log()
main(get_args())
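# --- Hedged usage sketch (not part of the original script) -------------------
# init_session() reads AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION
# from the environment, and the -p flag selects the pipeline. Example
# invocations (bucket and path values are illustrative only):
#
#     python main.py -p extract -b my-bucket -o sandbox/jokes --api-requests 5
#     python main.py -p show -b my-bucket -o sandbox/jokes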
avg_line_length: 31.380952 | max_line_length: 111 | alphanum_fraction: 0.641882

hexsha: 12d577e733da19f394298d9384d421fa9b84f54c | size: 15688 | ext: py | lang: Python
max_stars/max_issues/max_forks_repo_path: src/sqlfluff/dialects/dialect_hive.py
max_stars/max_issues/max_forks_repo_name: DipeshCS/sqlfluff
max_stars/max_issues/max_forks_repo_head_hexsha: ca3eb7f037ca68a969c17d844949f947be94a300
max_stars/max_issues/max_forks_repo_licenses: ["MIT"]
star, issue, and fork counts and event datetimes: null
"""The Hive dialect."""
from sqlfluff.core.parser import (
BaseSegment,
Sequence,
Ref,
OneOf,
Bracketed,
Delimited,
StartsWith,
NamedParser,
SymbolSegment,
StringParser,
OptionallyBracketed,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment
from sqlfluff.dialects.dialect_hive_keywords import (
RESERVED_KEYWORDS,
UNRESERVED_KEYWORDS,
)
ansi_dialect = load_raw_dialect("ansi")
hive_dialect = ansi_dialect.copy_as("hive")
# Clear ANSI Keywords and add all Hive keywords
# Clearing is commented out for now, as some ANSI keywords are still needed
# for statements imported from the ANSI dialect to work
# hive_dialect.sets("unreserved_keywords").clear()
hive_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
# hive_dialect.sets("reserved_keywords").clear()
hive_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
hive_dialect.sets("angle_bracket_pairs").update(
[
("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False),
]
)
hive_dialect.add(
DoubleQuotedLiteralSegment=NamedParser(
"double_quote",
CodeSegment,
name="quoted_literal",
type="literal",
trim_chars=('"',),
),
SingleOrDoubleQuotedLiteralGrammar=OneOf(
Ref("QuotedLiteralSegment"), Ref("DoubleQuotedLiteralSegment")
),
StartAngleBracketSegment=StringParser(
"<", SymbolSegment, name="start_angle_bracket", type="start_angle_bracket"
),
EndAngleBracketSegment=StringParser(
">", SymbolSegment, name="end_angle_bracket", type="end_angle_bracket"
),
JsonfileKeywordSegment=StringParser(
"JSONFILE", KeywordSegment, name="json_file", type="file_format"
),
RcfileKeywordSegment=StringParser(
"RCFILE", KeywordSegment, name="rc_file", type="file_format"
),
SequencefileKeywordSegment=StringParser(
"SEQUENCEFILE", KeywordSegment, name="sequence_file", type="file_format"
),
TextfileKeywordSegment=StringParser(
"TEXTFILE", KeywordSegment, name="text_file", type="file_format"
),
LocationGrammar=Sequence("LOCATION", Ref("SingleOrDoubleQuotedLiteralGrammar")),
PropertyGrammar=Sequence(
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("EqualsSegment"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
),
BracketedPropertyListGrammar=Bracketed(Delimited(Ref("PropertyGrammar"))),
TablePropertiesGrammar=Sequence(
"TBLPROPERTIES", Ref("BracketedPropertyListGrammar")
),
SerdePropertiesGrammar=Sequence(
"WITH", "SERDEPROPERTIES", Ref("BracketedPropertyListGrammar")
),
TerminatedByGrammar=Sequence("TERMINATED", "BY", Ref("QuotedLiteralSegment")),
FileFormatGrammar=OneOf(
"SEQUENCEFILE",
"TEXTFILE",
"RCFILE",
"ORC",
"PARQUET",
"AVRO",
"JSONFILE",
Sequence(
"INPUTFORMAT",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
"OUTPUTFORMAT",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
),
),
StoredAsGrammar=Sequence("STORED", "AS", Ref("FileFormatGrammar")),
StoredByGrammar=Sequence(
"STORED",
"BY",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("SerdePropertiesGrammar", optional=True),
),
StorageFormatGrammar=OneOf(
Sequence(
Ref("RowFormatClauseSegment", optional=True),
Ref("StoredAsGrammar", optional=True),
),
Ref("StoredByGrammar"),
),
CommentGrammar=Sequence("COMMENT", Ref("SingleOrDoubleQuotedLiteralGrammar")),
PartitionSpecGrammar=Sequence(
"PARTITION",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("EqualsSegment"),
Ref("LiteralGrammar"),
)
)
),
),
)
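# Illustrative Hive snippets (not part of the original file) of the shapes the
# grammars above are written to match:
#   LocationGrammar       -> LOCATION 's3://bucket/path'
#   StoredAsGrammar       -> STORED AS PARQUET
#   TerminatedByGrammar   -> TERMINATED BY ','
#   PartitionSpecGrammar  -> PARTITION (ds='2021-01-01', country='US')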
# https://cwiki.apache.org/confluence/display/hive/languagemanual+joins
hive_dialect.replace(
JoinKeywords=Sequence(Sequence("SEMI", optional=True), "JOIN"),
)
@hive_dialect.segment(replace=True)
class CreateDatabaseStatementSegment(BaseSegment):
"""A `CREATE DATABASE` statement."""
type = "create_database_statement"
match_grammar = Sequence(
"CREATE",
OneOf("DATABASE", "SCHEMA"),
Ref("IfNotExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
Ref("CommentGrammar", optional=True),
Ref("LocationGrammar", optional=True),
Sequence(
"MANAGEDLOCATION", Ref("SingleOrDoubleQuotedLiteralGrammar"), optional=True
),
Sequence(
"WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True
),
)
@hive_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement."""
type = "create_table_statement"
match_grammar = StartsWith(
Sequence(
"CREATE",
Ref.keyword("EXTERNAL", optional=True),
Ref.keyword("TEMPORARY", optional=True),
"TABLE",
)
)
parse_grammar = Sequence(
"CREATE",
Ref.keyword("EXTERNAL", optional=True),
Ref.keyword("TEMPORARY", optional=True),
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
OneOf(
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
OneOf(
# TODO: support all constraints
Ref("TableConstraintSegment", optional=True),
Sequence(
Ref("ColumnDefinitionSegment"),
Ref("CommentGrammar", optional=True),
),
),
bracket_pairs_set="angle_bracket_pairs",
),
optional=True,
),
Ref("CommentGrammar", optional=True),
# `STORED AS` can be called before or after the additional table properties below
Ref("StoredAsGrammar", optional=True),
Sequence(
"PARTITIONED",
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnDefinitionSegment"),
Ref("CommentGrammar", optional=True),
),
),
),
optional=True,
),
Sequence(
"CLUSTERED",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
Sequence(
"SORTED",
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
OneOf("ASC", "DESC", optional=True),
)
)
),
optional=True,
),
"INTO",
Ref("NumericLiteralSegment"),
"BUCKETS",
optional=True,
),
# Second occurrence of `STORED AS`, to match when it appears after the clauses above
Ref("StoredAsGrammar", optional=True),
Ref("SkewedByClauseSegment", optional=True),
Ref("StorageFormatGrammar", optional=True),
Ref("LocationGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
Ref("CommentGrammar", optional=True),
Sequence(
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
optional=True,
),
),
# Create like syntax
Sequence(
"LIKE",
Ref("TableReferenceSegment"),
Ref("LocationGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
),
),
)
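# An illustrative Hive statement (not part of the original file) of the shape
# this parse_grammar targets:
#
#   CREATE EXTERNAL TABLE IF NOT EXISTS db.events (
#       id INT COMMENT 'event id',
#       payload STRING
#   )
#   COMMENT 'raw events'
#   PARTITIONED BY (ds STRING)
#   STORED AS PARQUET
#   LOCATION 's3://bucket/events'
#   TBLPROPERTIES ('external.table.purge'='true')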
@hive_dialect.segment()
class PrimitiveTypeSegment(BaseSegment):
"""Primitive data types."""
type = "primitive_type"
match_grammar = OneOf(
"TINYINT",
"SMALLINT",
"INT",
"BIGINT",
"BOOLEAN",
"FLOAT",
Sequence("DOUBLE", Ref.keyword("PRECISION", optional=True)),
"STRING",
"BINARY",
"TIMESTAMP",
Sequence(
"DECIMAL",
Bracketed(
Ref("NumericLiteralSegment"),
Ref("CommaSegment"),
Ref("NumericLiteralSegment"),
optional=True,
),
),
"DATE",
"VARCHAR",
"CHAR",
)
@hive_dialect.segment(replace=True)
class DatatypeSegment(BaseSegment):
"""Data types."""
type = "data_type"
match_grammar = OneOf(
Ref("PrimitiveTypeSegment"),
Sequence(
"ARRAY",
Bracketed(
Ref("DatatypeSegment"),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"MAP",
Bracketed(
Sequence(
Ref("PrimitiveTypeSegment"),
Ref("CommaSegment"),
Ref("DatatypeSegment"),
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"STRUCT",
Bracketed(
Delimited(
Sequence(
Ref("NakedIdentifierSegment"),
Ref("ColonSegment"),
Ref("DatatypeSegment"),
Ref("CommentGrammar", optional=True),
),
bracket_pairs_set="angle_bracket_pairs",
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"UNIONTYPE",
Bracketed(
Delimited(
Ref("DatatypeSegment"), bracket_pairs_set="angle_bracket_pairs"
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
)
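# Illustrative Hive type expressions (not part of the original file) covered by
# DatatypeSegment above:
#   ARRAY<STRING>
#   MAP<STRING, INT>
#   STRUCT<name: STRING, age: INT>
#   UNIONTYPE<INT, DOUBLE, ARRAY<STRING>>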
@hive_dialect.segment()
class SkewedByClauseSegment(BaseSegment):
"""`SKEWED BY` clause in a CREATE / ALTER statement."""
type = "skewed_by_clause"
match_grammar = Sequence(
"SKEWED",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
"ON",
Bracketed(
Delimited(
OneOf(
Ref("LiteralGrammar"), Bracketed(Delimited(Ref("LiteralGrammar")))
)
)
),
Sequence("STORED", "AS", "DIRECTORIES", optional=True),
)
@hive_dialect.segment()
class RowFormatClauseSegment(BaseSegment):
"""`ROW FORMAT` clause in a CREATE statement."""
type = "row_format_clause"
match_grammar = Sequence(
"ROW",
"FORMAT",
OneOf(
Sequence(
"DELIMITED",
Sequence(
"FIELDS",
Ref("TerminatedByGrammar"),
Sequence(
"ESCAPED", "BY", Ref("QuotedLiteralSegment"), optional=True
),
optional=True,
),
Sequence(
"COLLECTION", "ITEMS", Ref("TerminatedByGrammar"), optional=True
),
Sequence("MAP", "KEYS", Ref("TerminatedByGrammar"), optional=True),
Sequence("LINES", Ref("TerminatedByGrammar"), optional=True),
Sequence(
"NULL", "DEFINED", "AS", Ref("QuotedLiteralSegment"), optional=True
),
),
Sequence(
"SERDE",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("SerdePropertiesGrammar", optional=True),
),
),
)
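# An illustrative clause (not part of the original file) matched by
# RowFormatClauseSegment above:
#   ROW FORMAT DELIMITED
#     FIELDS TERMINATED BY ',' ESCAPED BY '\\'
#     COLLECTION ITEMS TERMINATED BY '|'
#     MAP KEYS TERMINATED BY ':'
#     LINES TERMINATED BY '\n'
#     NULL DEFINED AS 'NULL'
# or, using a SerDe:
#   ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'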
@hive_dialect.segment()
class AlterDatabaseStatementSegment(BaseSegment):
"""An `ALTER DATABASE/SCHEMA` statement."""
type = "alter_database_statement"
match_grammar = Sequence(
"ALTER",
OneOf("DATABASE", "SCHEMA"),
Ref("DatabaseReferenceSegment"),
"SET",
OneOf(
Sequence("DBPROPERTIES", Ref("BracketedPropertyListGrammar")),
Sequence(
"OWNER",
OneOf("USER", "ROLE"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
),
Ref("LocationGrammar"),
Sequence("MANAGEDLOCATION", Ref("SingleOrDoubleQuotedLiteralGrammar")),
),
)
@hive_dialect.segment(replace=True)
class DropStatementSegment(BaseSegment):
"""A `DROP` statement."""
type = "drop_statement"
match_grammar = StartsWith("DROP")
parse_grammar = OneOf(
Ref("DropDatabaseStatementSegment"),
Ref("DropTableStatementSegment"),
# TODO: add other drops
)
@hive_dialect.segment()
class DropDatabaseStatementSegment(BaseSegment):
"""A `DROP DATEBASE/SCHEMA` statement."""
type = "drop_table_statement"
match_grammar = Sequence(
"DROP",
OneOf("DATABASE", "SCHEMA"),
Ref("IfExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
OneOf("RESTRICT", "CASCADE", optional=True),
)
@hive_dialect.segment()
class DropTableStatementSegment(BaseSegment):
"""A `DROP TABLE` statement."""
type = "drop_table_statement"
match_grammar = Sequence(
"DROP",
"TABLE",
Ref("IfExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
Ref.keyword("PURGE", optional=True),
)
@hive_dialect.segment(replace=True)
class TruncateStatementSegment(BaseSegment):
"""`TRUNCATE TABLE` statement."""
type = "truncate_table"
match_grammar = StartsWith("TRUNCATE")
parse_grammar = Sequence(
"TRUNCATE",
Ref.keyword("TABLE", optional=True),
Ref("TableReferenceSegment"),
Ref("PartitionSpecGrammar", optional=True),
)
@hive_dialect.segment(replace=True)
class UseStatementSegment(BaseSegment):
"""An `USE` statement."""
type = "use_statement"
match_grammar = Sequence(
"USE",
Ref("DatabaseReferenceSegment"),
)
@hive_dialect.segment(replace=True)
class StatementSegment(ansi_dialect.get_segment("StatementSegment")): # type: ignore
"""Overriding StatementSegment to allow for additional segment parsing."""
parse_grammar = ansi_dialect.get_segment("StatementSegment").parse_grammar.copy(
insert=[Ref("AlterDatabaseStatementSegment")],
remove=[
Ref("TransactionStatementSegment"),
Ref("CreateSchemaStatementSegment"),
Ref("SetSchemaStatementSegment"),
Ref("DropSchemaStatementSegment"),
Ref("CreateExtensionStatementSegment"),
Ref("CreateModelStatementSegment"),
Ref("DropModelStatementSegment"),
],
)
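# --- Hedged usage sketch (not part of the original file) ---------------------
# Assuming this dialect is registered under the name "hive" and that this
# sqlfluff version exposes the simple lint() API, linting a Hive statement
# might look like:
#
#     import sqlfluff
#     violations = sqlfluff.lint("CREATE TABLE t (a INT)", dialect="hive")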
avg_line_length: 31.003953 | max_line_length: 97 | alphanum_fraction: 0.5436