Dataset schema (column : type, value range):
hexsha : string (len 40) | size : int64 (3 – 1.03M) | ext : string (10 classes) | lang : string (1 class)
max_stars_repo_path : string (len 3–972) | max_stars_repo_name : string (len 6–130) | max_stars_repo_head_hexsha : string (len 40–78) | max_stars_repo_licenses : list (len 1–10)
max_stars_count : int64 (1 – 191k, nullable) | max_stars_repo_stars_event_min_datetime : string (len 24, nullable) | max_stars_repo_stars_event_max_datetime : string (len 24, nullable)
max_issues_repo_path : string (len 3–972) | max_issues_repo_name : string (len 6–130) | max_issues_repo_head_hexsha : string (len 40–78) | max_issues_repo_licenses : list (len 1–10)
max_issues_count : int64 (1 – 116k, nullable) | max_issues_repo_issues_event_min_datetime : string (len 24, nullable) | max_issues_repo_issues_event_max_datetime : string (len 24, nullable)
max_forks_repo_path : string (len 3–972) | max_forks_repo_name : string (len 6–130) | max_forks_repo_head_hexsha : string (len 40–78) | max_forks_repo_licenses : list (len 1–10)
max_forks_count : int64 (1 – 105k, nullable) | max_forks_repo_forks_event_min_datetime : string (len 24, nullable) | max_forks_repo_forks_event_max_datetime : string (len 24, nullable)
content : string (len 3 – 1.03M) | avg_line_length : float64 (1.13 – 941k) | max_line_length : int64 (2 – 941k) | alphanum_fraction : float64 (0 – 1)
hexsha: 2c78b7efdd194e516ae812f551f6e716de7980d0 | size: 2,462 | ext: py | lang: Python
max_stars: utils/exact_shapley.py | marcoancona/DASP @ cbd82b36443199f11fa04ecb0322fa68f5505b2c | licenses ["MIT"] | count 47 | 2019-04-29T19:14:10.000Z → 2022-02-28T13:37:21.000Z
max_issues: utils/exact_shapley.py | marcoancona/DASP @ cbd82b36443199f11fa04ecb0322fa68f5505b2c | licenses ["MIT"] | count 5 | 2019-06-05T13:41:12.000Z → 2022-03-08T05:31:02.000Z
max_forks: utils/exact_shapley.py | marcoancona/DASP @ cbd82b36443199f11fa04ecb0322fa68f5505b2c | licenses ["MIT"] | count 13 | 2019-06-07T12:07:18.000Z → 2021-12-20T02:02:20.000Z
content:
import numpy as np
from itertools import chain, combinations
import scipy.special
fact = scipy.special.factorial
def f_max(inputs):
return np.max(inputs)
def f_linear_relu(x, w, b):
y = np.sum(x*w, -1) + b
return np.maximum(0, y)
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
xs = list(iterable)
# note we return an iterator rather than a list
return chain.from_iterable(combinations(xs, n) for n in range(len(xs) + 1))
def vec_bin_array(arr, m):
"""
Arguments:
arr: Numpy array of positive integers
m: Number of bits of each integer to retain
Returns a copy of arr with every element replaced with a bit vector.
Bits encoded as int8's.
"""
to_str_func = np.vectorize(lambda x: np.binary_repr(x).zfill(m))
strs = to_str_func(arr)
ret = np.zeros(list(arr.shape) + [m], dtype=np.int8)
for bit_ix in range(0, m):
fetch_bit_func = np.vectorize(lambda x: x[bit_ix] == '1')
ret[...,bit_ix] = fetch_bit_func(strs).astype("int8")
return ret
def compute_shapley(inputs, f, baseline=None):
if baseline is None:
baseline = np.zeros_like(inputs)
results = np.zeros(inputs.shape)
n = inputs.shape[0]
assert inputs.shape == (n,), inputs.shape
    # Create the powerset binary mask over the remaining n-1 players, shape (2**(n-1), n-1)
    # Note: the column at position `index` is excluded here and re-inserted in the loop below
mask = vec_bin_array(np.arange(2 ** (n-1)), n-1)
assert mask.shape == (2**(n-1), n-1)
# assert mask.shape == (2**(n-1), n-1), 'Mask shape does not match'
coeff = (fact(mask.sum(1)) * fact(n - mask.sum(1) - 1)) / fact(n)
for index in range(n):
# Copy mask and set the current player active
mask_wo_index = np.insert(mask, index, np.zeros(2 ** (n-1)), axis=1)
mask_wi_index = np.insert(mask, index, np.ones(2 ** (n-1)), axis=1)
# print(mask_wo_index.shape)
assert mask_wo_index.shape == (2 ** (n - 1), n), 'Mask shape does not match'
assert np.max(mask_wo_index) == 1, np.max(mask_wo_index)
assert np.min(mask_wo_index) == 0, np.min(mask_wo_index)
run_wo_i = f(inputs * mask_wo_index + baseline * (1-mask_wo_index)) # run all masks at once
run_wi_i = f(inputs * mask_wi_index + baseline * (1-mask_wi_index)) # run all masks at once
r = (run_wi_i - run_wo_i) * coeff
results[index] = r.sum()
return results
avg_line_length: 34.194444 | max_line_length: 100 | alphanum_fraction: 0.623071
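The compute_shapley routine above enumerates every coalition S of the remaining players and weights each marginal contribution f(S ∪ {i}) - f(S) by |S|!(n-|S|-1)!/n!. A minimal usage sketch follows; the inputs, weights, and efficiency check are illustrative additions, not part of the original file, and they assume the definitions from utils/exact_shapley.py above.

x = np.array([1.0, 2.0, 3.0])
w = np.array([0.5, 1.0, 0.25])
f = lambda masked: f_linear_relu(masked, w, 0.1)  # evaluates a whole batch of masked inputs at once
phi = compute_shapley(x, f)
# Efficiency property of Shapley values: the attributions should sum to
# f(x) - f(baseline) = ReLU(3.35) - ReLU(0.1) = 3.25.
print(phi, phi.sum(), f(x) - f(np.zeros_like(x)))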
hexsha: d5466d0d6ae28f217e320e69a50f86d00f66d62a | size: 20,515 | ext: py | lang: Python
max_stars: adafruit_progressbar/__init__.py | lesamouraipourpre/Adafruit_CircuitPython_ProgressBar @ 17f1290869db1cd8beb03e7640635eb4d9483734 | licenses ["Unlicense", "MIT-0", "MIT"] | count 5 | 2020-04-14T20:50:30.000Z → 2021-10-19T18:49:46.000Z
max_issues: adafruit_progressbar/__init__.py | lesamouraipourpre/Adafruit_CircuitPython_ProgressBar @ 17f1290869db1cd8beb03e7640635eb4d9483734 | licenses ["Unlicense", "MIT-0", "MIT"] | count 25 | 2020-07-17T03:05:56.000Z → 2022-01-06T16:00:51.000Z
max_forks: adafruit_progressbar/__init__.py | lesamouraipourpre/Adafruit_CircuitPython_ProgressBar @ 17f1290869db1cd8beb03e7640635eb4d9483734 | licenses ["Unlicense", "MIT-0", "MIT"] | count 10 | 2020-01-07T20:14:56.000Z → 2021-11-14T20:34:43.000Z
content:
# SPDX-FileCopyrightText: 2020 Brent Rubell for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_progressbar`
================================================================================
Dynamic progress bar widget for CircuitPython displays
* Author(s): Brent Rubell and Hugo Dahl
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
# imports
try:
from typing import Tuple, Union, List
except ImportError:
pass # No harm if the module isn't located
import displayio
class ProgressBarBase(displayio.TileGrid):
"""The base class for dynamic progress bar widgets.
:param position: The coordinates (x, y) of the top left corner
:type position: Tuple[int, int]
:param size: The size (width, height) of the progress bar
:type size: Tuple[int, int]
:param bar_color: The color of the bar representing the value. This can
be a hexadecimal value for color (0x224466).
Default: 0x00FF00 (Solid green)
:type bar_color: int
:param border_color: The color of the border around the progress bar. This
can be a hexadecimal value for color (0x4488BB).
Default: 0xFFFFFF (White)
:type border_color: int
:param fill_color: The colour of the bar representing the remainder of the
value. i.e. if the current value is 42%, the 42 value
is represented by the bar_color parameter. The remainder,
58%, will be displayed in this color. This can also
be a hexadecimal value for color (0xEE7755).
Default: 0x000000 (Black)
:type fill_color: int
    :param margin_size: The size of the margin between the border of the widget
                        and the bar representing the value, in pixels.
                        Default: 1
    :type margin_size: int
    :param value_range: Specify the range of allowed values for which the progress
                        should be displayed. When setting the "value" property,
                        this range is the one against which its progression will be determined.
                        Default: (0, 100)
    :type value_range: Tuple[int, int] or Tuple[float, float]
"""
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(
self,
position: Tuple[int, int],
size: Tuple[int, int],
value: Union[int, float] = 0,
bar_color=0x00FF00,
border_color=0xFFFFFF,
fill_color=0x000000,
border_thickness: int = 1,
margin_size: int = 1,
value_range: Union[Tuple[int, int], Tuple[float, float]] = (0, 100),
) -> None:
if value_range[0] >= value_range[1]:
raise ValueError("The minimum value must be less than the maximum value")
if size[0] <= 0 or size[1] <= 0:
raise ValueError("The width and the height must be greater than zero")
if not value_range[0] <= value <= value_range[1]:
raise ValueError(
"The starting value must be within the range of minimum to maximum"
)
_edge_size = 2 * margin_size + 2 * border_thickness
if _edge_size >= size[0]:
raise ValueError(
"The size of the borders and margins combined must be "
"less than the width of the widget"
)
if _edge_size >= size[1]:
raise ValueError(
"The size of the borders and margins combined must be "
"less than the height of the widget"
)
self._progress = 0.0
self._widget_size = size
self._position = position
self._bitmap = displayio.Bitmap(size[0], size[1], 3)
self._palette = displayio.Palette(3)
self._border_thickness = border_thickness
self._margin_size = margin_size
self._range = value_range
self._progress = 0.0
self._old_value = self.minimum
self._value = self.minimum
self.fill = fill_color
self.bar_color = bar_color
self.border_color = border_color
# Setup value and old_value to handle the change to the new
# initial value later.
self._value = self.minimum
self._old_value = self.minimum
super().__init__(
self._bitmap,
pixel_shader=self._palette,
x=self._position[0],
y=self._position[1],
)
self._draw_outline()
self.value = value
# _bitmap: displayio.Bitmap # The bitmap used for the bar/value
# _position: (int, int) # The (x,y) coordinates of the top-left corner
# _widget_size: (int, int) # The dimensions of the progress bar
# _palette: displayio.Palette(3) # The palette to be used
# _progress: float # The value to represent, between 0.0 and 100.0
# _border_thickness: int # The thickness of the border around the control, in pixels
# _margin_size: bool # Whether we should display a margin between
# the border and the value/bar
# # The minimum and maximum values we can represent
# _range: (int, int) or (float, float)
# Color palette index to property mapping:
# 0: Bar fill color
# 1: Border color
# 2: Background fill color
@property
    def widget_size(self) -> Tuple[int, int]:
        """The size at the outer edge of the control, returned as a tuple (width, height)
        :rtype: Tuple[int, int]
        """
        return self._widget_size
    @property
    def widget_width(self) -> int:
        """The total width of the widget, in pixels. Includes the border and margin.
        :rtype: int
        """
        return self.widget_size[0]
@property
def border_thickness(self) -> int:
"""Gets the currently configured thickness of the border (in pixels)
:rtype: int
"""
return self._border_thickness
@property
def widget_height(self) -> int:
"""The total height of the widget, in pixels. Includes the border and margin.
:rtype: int
"""
return self.widget_size[1]
@property
def border_color(self) -> int:
"""Returns the currently configured value for the color of the
outline (border) of the widget.
:rtype: int
"""
return self._border_color
@border_color.setter
def border_color(self, color: Union[int, Tuple[int, int, int]]) -> None:
"""Sets the color of the border of the widget. Set it to 'None'
if a border should still be part of the widget but not displayed.
:param color: The color to be used for the border
        :type color: int/None/Tuple[int, int, int]
:rtype: None
"""
if not (isinstance(color, int) or color is None):
raise TypeError("A color must be represented by a integer value")
self._border_color = color
if color is None:
self._palette[1] = 0x00
self._palette.make_transparent(1)
else:
self._palette[1] = color
self._palette.make_opaque(1)
@property
def fill(self) -> int:
"""The fill of the progress bar. Can be a hex value for a color or ``None`` for
transparent.
:rtype: int
"""
return self._fill_color
@fill.setter
def fill(self, color: Union[int, Tuple[int, int, int]]) -> None:
"""Sets the fill of the progress bar. Can be a hex value for a color or ``None`` for
transparent.
:param color: The color to use for the widget's background
:type color: int/None/Tuple[int, int, int]
"""
self._fill_color = color
if color is None:
self._palette[0] = 0x00
self._palette.make_transparent(0)
else:
self._palette[0] = color
self._palette.make_opaque(0)
@property
def bar_color(self) -> int:
"""The color of the bar's fill
:rtype: int/None
"""
return self._bar_color
@bar_color.setter
def bar_color(self, color: Union[int, Tuple[int, int, int]]) -> None:
"""Sets the color of the bar
:param color: The color to use for the bar
:type color: int/None/Tuple[int, int, int]
:rtype: None
"""
self._bar_color = color
if color is None:
self._palette[2] = 0x00
self._palette.make_transparent(2)
else:
self._palette[2] = color
self._palette.make_opaque(2)
@property
def value(self) -> Union[int, float]:
"""
The current value of the control, used to determine its progress/ratio
:rtype: int/float
"""
return self._value
@value.setter
def value(self, value: Union[int, float]) -> None:
"""Sets the current value of the progress within the min-max range
:param value: The new value for the progress status
:type value: int/float
:rtype: None
"""
if not isinstance(value, (int, float)):
raise TypeError("The value to set must be either an integer or a float")
if not self.minimum <= value <= self.maximum:
raise ValueError(
f"The value must be between minimum ({self.minimum}) and maximum ({self.maximum})"
)
# Save off the previous value, so we can pass it in the
# call to "Render"
self._old_value = self._value
self._value = value
# Convert value to float since we may be dealing with
# integer types, and we can't work with integer division
# to get a ratio (position) of "value" within range.
self._set_progress(self.get_value_ratio(value))
@property
def progress(self) -> float:
"""Gets the current displayed value of the widget.
:return: The current progress ratio
:rtype: float
"""
return self._progress
@progress.setter
def progress(self, value: float) -> None:
"""Sets the current displayed value of the widget. This will update the
`value` property to an approximation based on the allowed range. The calculation
used to determine the approximate value is
`((self.minimum + (self.maximum - self.minimum)) * progress)`.
For the most accurate representation of a given value, it is recommended to set the
property "value" to the desired value.
Example: If the range for the widget is 0-10, setting a progress value of "35"
will result in `value` being "3.5", since 3.5 is the 35% value of the range between
0 and 10. The value determined from this method makes no assumptions or checks based on
the type of the "value" field.
:param value: The new value which should be displayed by the progress
bar. Must be between 0.0-100.0
:type value: float
:rtype: None
"""
if not isinstance(value, (float, int)):
raise TypeError("'progress' must be an int or a float")
if not 0.0 <= value <= 100.0:
raise ValueError("'progress' must be between 0 and 100")
self.value = (self.minimum + (self.maximum - self.minimum)) * (value * 0.01)
# Bit of a hack to be able to work around the shim "ProgressBar" class
# to be able to handle values as it used to.
def _set_progress(self, value: float) -> None:
"""Sets the value for the underlying variable _progress, then
calls self.render() with the appropriate values.
:param value: The value to which self.progress should be set
:type value: float
:rtype: None
"""
self._progress = round(value, 4)
self._render(self._old_value, self._value, value)
@property
def range(self) -> Tuple[Union[int, float], Union[int, float]]:
"""The range which can be handled as a Tuple(min,max)
:rtype: Tuple(int/float, int/float)
"""
return self._range
@property
def minimum(self) -> Union[int, float]:
"""The minimum (lowest) value which can be displayed
:rtype: int/float
"""
return self.range[0]
@property
def maximum(self) -> Union[int, float]:
"""The maximum (highest) value which can be displayed
:rtype: int/float
"""
return self.range[1]
def _draw_outline(self) -> None:
"""Draws the outline (border) of the progressbar, with a thickness value
from self.border_thickness.
        :rtype: None
"""
stroke = self.border_thickness
# draw outline rectangle
for _w in range(self.widget_width):
for line in range(stroke):
self._bitmap[_w, line] = 1
self._bitmap[_w, self.widget_height - 1 - line] = 1
for _h in range(self.widget_height):
for line in range(stroke):
self._bitmap[line, _h] = 1
self._bitmap[self.widget_width - 1 - line, _h] = 1
def fill_width(self) -> int:
"""Returns the amount of horizontal space within the widget
which can be used for value display. This is typically the
width of the widget as defined, minus any visually reserved space.
:rtype: int
"""
return self.widget_width - self._get_fill_border_size()
def fill_height(self) -> int:
"""Returns the amount of vertical space within the widget
which can be used for value display. This is typically the
width of the widget as defined, minus any visually reserved
space.
:rtype: int
"""
return self.widget_height - self._get_fill_border_size()
def _get_fill_border_size(self) -> int:
"""Determines any visual space reserved for the widget
based on the defined border thickness, and whether a margin
should be placed between the border and the bar.
        The value is calculated as (2 x border_thickness) plus
        (2 x margin_size), using the margin_size value set when the
        widget was created.
:rtype: int
"""
return (2 * self.border_thickness) + (2 * self.margin_size)
@property
def margin_size(self) -> int:
"""Returns the size of the margin on a single side of the display
        :rtype: int
"""
return self._margin_size
@margin_size.setter
def margin_size(self, value: int) -> None:
"""Sets the new size of the margin to be used between the border
(if displayed) and the value bar.
:param value: The new size of the margin between the border
and value bar on all sides of the widget.
:type value: int
:rtype: None
"""
if not isinstance(value, int):
raise TypeError("The margin size must be an integer")
margin_spacing = (2 * value) + (2 * self._border_thickness)
if margin_spacing >= self.widget_width:
raise ValueError(
"The size of the borders and margins combined can total the same or more"
"than the widget's width."
)
if margin_spacing >= self.widget_height:
raise ValueError(
"The size of the borders and margins combined can total the same or more"
"than the widget's height."
)
self._margin_size = value
self._set_progress(self._progress) # For a render pass
def get_value_ratio(self, value: Union[int, float]) -> float:
"""Gets the ratio (percentage) of a given value within the
range of self.minimum and self.maximum.
        :param value: The value for which the ratio should be calculated
:type value: int/float
:return: The ratio of value:range
:rtype: float
"""
if self.maximum == self.minimum:
return 0.0
return (float(value) - self.minimum) / (self.maximum - self.minimum)
@classmethod
def _get_value_sizes(cls, _old_ratio: float, _new_ratio: float) -> Tuple[int, int]:
return 0, 0
@classmethod
def _get_max_fill_size(cls) -> int:
return 0
def _get_ratios(
self, _old_value: Union[int, float], _new_value: Union[int, float]
) -> Tuple[float, float]:
return self.get_value_ratio(_old_value), self.get_value_ratio(_new_value)
def _adjust_size_for_range_limits(
self, _new_value_size: int, _new_value: Union[int, float]
) -> int:
# If we have *ANY* value other than "zero" (minimum), we should
# have at least one element showing
if _new_value_size == 0 and _new_value > self.minimum:
_new_value_size = 1
# Conversely, if we have *ANY* value other than 100% (maximum),
# we should NOT show a full bar.
if _new_value_size == self._get_max_fill_size() and _new_value < self.maximum:
_new_value_size -= 1
return _new_value_size
def _get_sizes_min_max(self) -> Tuple[int, int]:
return 0, min(self.fill_width(), self.fill_height())
@classmethod
def _invert_fill_direction(cls) -> bool:
return False
def _get_horizontal_fill(
self, _start: int, _end: int, _incr: int
) -> Tuple[int, int, int]:
return 0, self.fill_width(), 1 # Subclass must return values
def _get_vertical_fill(
self, _start: int, _end: int, _incr: int
) -> Tuple[int, int, int]:
return 0, self.fill_height(), 1 # Subclass must return values
# pylint: disable=too-many-locals
def _render(
self,
_old_value: Union[int, float],
_new_value: Union[int, float],
_progress_value: float,
) -> None:
"""
Does the work of actually creating the graphical representation of
the value (percentage, aka "progress") to be displayed.
:param _old_value: The previously displayed value
:type _old_value: int/float
:param _new_value: The new value to display
:type _new_value: int/float
:param _progress_value: The value to display, as a percentage, represented
by a float from 0.0 to 1.0 (0% to 100%)
:type _progress_value: float
:rtype: None
"""
_prev_ratio, _new_ratio = self._get_ratios(_old_value, _new_value)
_old_value_size, _new_value_size = self._get_value_sizes(
_prev_ratio, _new_ratio
)
# Adjusts for edge cases, such as 0-width non-zero value, or 100% width
# non-maximum values
_new_value_size = self._adjust_size_for_range_limits(
_new_value_size, _new_value
)
# Default values for increasing value
_color = 2
_incr = 1
_start = max(_old_value_size, 0)
_end = max(_new_value_size, 0)
if _old_value_size >= _new_value_size:
# Override defaults to be decreasing
_color = 0 # Clear
_incr = -1 # Iterate range downward
_start = max(_old_value_size, 0) - 1
_end = max(_new_value_size, 0) - 1
# If we're setting to minimum, make sure we're clearing by
# starting one "bar" further
if _new_value == self.minimum:
_start += 1
_render_offset = self.margin_size + self.border_thickness
vert_start, vert_end, vert_incr = self._get_vertical_fill(_start, _end, _incr)
horiz_start, horiz_end, horiz_incr = self._get_horizontal_fill(
_start, _end, _incr
)
vert_start += _render_offset
vert_end += _render_offset
horiz_start += _render_offset
horiz_end += _render_offset
for vertical_position in range(vert_start, vert_end, vert_incr):
for horizontal_position in range(horiz_start, horiz_end, horiz_incr):
self._bitmap[horizontal_position, vertical_position] = _color
avg_line_length: 34.248748 | max_line_length: 98 | alphanum_fraction: 0.601852
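As a quick check of the geometry encoded above: the widget reserves 2 x border_thickness + 2 x margin_size pixels on each axis (_get_fill_border_size), and get_value_ratio() maps a value linearly into [0, 1]. The helper below is a hypothetical, display-free sketch of that arithmetic only; it is not part of the Adafruit API.

def fill_pixels(value, minimum, maximum, widget_width, border_thickness=1, margin_size=1):
    # Mirrors get_value_ratio() and _get_fill_border_size() from the class above.
    ratio = (float(value) - minimum) / (maximum - minimum)
    fill_span = widget_width - (2 * border_thickness + 2 * margin_size)
    return round(ratio * fill_span)

# A 100px-wide bar with a 1px border and 1px margin leaves 96px for the fill;
# a value of 25 in the default (0, 100) range fills 24 of them.
assert fill_pixels(25, 0, 100, 100) == 24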
hexsha: 1f4d2ac36f75b79a5ff4f6b2051e73a0fa2de684 | size: 721 | ext: py | lang: Python
max_stars: main.py | MCAR43/PyAudit @ e0bea458648deb29df0ef3bdad210a1cc6de8f71 | licenses ["Beerware"] | count null | null → null
max_issues: main.py | MCAR43/PyAudit @ e0bea458648deb29df0ef3bdad210a1cc6de8f71 | licenses ["Beerware"] | count null | null → null
max_forks: main.py | MCAR43/PyAudit @ e0bea458648deb29df0ef3bdad210a1cc6de8f71 | licenses ["Beerware"] | count null | null → null
content:
#!/usr/bin/python3
import argparse
import sys
from app.parser.device import Device
def parseInputArgs(argv):
parser = argparse.ArgumentParser(description="Python Cisco CIS Auditor")
parser.add_argument('config', help='Path to the config file')
parser.add_argument('benchmark', help='Path to the benchmark JSON you wish to execute')
    args = parser.parse_args(argv)  # parse the argv passed in rather than ignoring it
return args
def exec(inputArgs):
config_file = ""
parsed_args = parseInputArgs(inputArgs)
if parsed_args.config:
deviceParser = Device(parsed_args.config, parsed_args.benchmark)
deviceParser.performAudit()
deviceParser.output()
def main():
exec(sys.argv[1:])
if __name__ == "__main__":
main()
avg_line_length: 26.703704 | max_line_length: 91 | alphanum_fraction: 0.710125
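A hypothetical invocation of the script above; the two positional arguments are the ones declared in parseInputArgs, and the paths are placeholders only.

# Shell equivalent: python3 main.py path/to/device-config.txt path/to/cis-benchmark.json
exec(["path/to/device-config.txt", "path/to/cis-benchmark.json"])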
hexsha: 7e78045b706a75c8c1f558dceea88a6091775dad | size: 454 | ext: py | lang: Python
max_stars: pydis_site/apps/home/migrations/0002_auto_now_on_repository_metadata.py | Robin5605/site @ 81aa42aa748cb228d7a09e6cf6b211484b654496 | licenses ["MIT"] | count 700 | 2018-11-17T15:56:51.000Z → 2022-03-30T22:53:17.000Z
max_issues: pydis_site/apps/home/migrations/0002_auto_now_on_repository_metadata.py | foxy4096/site @ 63b464b57ea0824570879f24baaaca6fd80393ee | licenses ["MIT"] | count 542 | 2018-11-17T13:39:42.000Z → 2022-03-31T11:24:00.000Z
max_forks: pydis_site/apps/home/migrations/0002_auto_now_on_repository_metadata.py | foxy4096/site @ 63b464b57ea0824570879f24baaaca6fd80393ee | licenses ["MIT"] | count 178 | 2018-11-21T09:06:56.000Z → 2022-03-31T07:43:28.000Z
content:
# Generated by Django 3.0.11 on 2020-12-21 22:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='repositorymetadata',
name='last_updated',
field=models.DateTimeField(auto_now=True, help_text='The date and time this data was last fetched.'),
),
]
avg_line_length: 23.894737 | max_line_length: 113 | alphanum_fraction: 0.627753
hexsha: f064cbd4dbb855d6f69c4a471c771d48487ec55b | size: 2,171 | ext: py | lang: Python
max_stars: desktop/libs/libopenid/src/libopenid/views.py | kokosing/hue @ 2307f5379a35aae9be871e836432e6f45138b3d9 | licenses ["Apache-2.0"] | count 3 | 2018-01-29T14:16:02.000Z → 2019-02-05T21:33:05.000Z
max_issues: desktop/libs/libopenid/src/libopenid/views.py | zks888/hue @ 93a8c370713e70b216c428caa2f75185ef809deb | licenses ["Apache-2.0"] | count 4 | 2021-03-11T04:02:00.000Z → 2022-03-27T08:31:56.000Z
max_forks: desktop/libs/libopenid/src/libopenid/views.py | zks888/hue @ 93a8c370713e70b216c428caa2f75185ef809deb | licenses ["Apache-2.0"] | count 2 | 2019-12-05T17:24:36.000Z → 2021-11-22T21:21:32.000Z
content:
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django_openid_auth.views import login_begin as django_login_begin, login_complete
from desktop.lib.django_util import render
from django.core import urlresolvers
from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
import libopenid.conf
from libopenid.backend import OpenIDBackend
from libopenid.forms import OpenIDLoginFormExt
__all__ = ['login_begin', 'login_complete']
def login_begin(request):
redirect_to = request.GET.get('next', '/')
is_first_login_ever = OpenIDBackend.is_first_login_ever()
request.session.set_test_cookie()
openid_url = getattr(settings, 'OPENID_SSO_SERVER_URL', None)
identity_url_prefix = getattr(settings, 'OPENID_IDENTITY_URL_PREFIX', None)
    # Case of a centralized server endpoint: handle the GET request directly
if openid_url is not None:
if request.method == 'GET':
return render_to_response('openid-login.html', {
'action': urlresolvers.reverse('openid-login'),
'next': redirect_to,
'first_login_ever': is_first_login_ever,
'hide_field': True
}, context_instance=RequestContext(request))
return django_login_begin(request, template_name='openid-login.html', form_class = OpenIDLoginFormExt)
setattr(login_begin, 'login_notrequired', True)
setattr(login_complete, 'login_notrequired', True)
avg_line_length: 36.183333 | max_line_length: 104 | alphanum_fraction: 0.771994
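login_begin above only takes the centralized-endpoint branch when OPENID_SSO_SERVER_URL is set; otherwise it defers to django_openid_auth's login_begin. A sketch of the two Django settings it reads via getattr follows; the values are placeholders, not Hue's actual configuration.

# settings.py fragment (illustrative values only)
OPENID_SSO_SERVER_URL = "https://openid.example.com/server"     # enables the GET short-circuit above
OPENID_IDENTITY_URL_PREFIX = "https://openid.example.com/id/"   # optional identity URL prefix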
hexsha: d468f29a5dbe27fc6f84b9ad1831d10ade9eb944 | size: 2,196 | ext: py | lang: Python
max_stars: aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/ReportTerrorismJobResultRequest.py | liumihust/aliyun-openapi-python-sdk @ c7b5dd4befae4b9c59181654289f9272531207ef | licenses ["Apache-2.0"] | count 1 | 2019-12-23T12:36:43.000Z → 2019-12-23T12:36:43.000Z
max_issues: aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/ReportTerrorismJobResultRequest.py | liumihust/aliyun-openapi-python-sdk @ c7b5dd4befae4b9c59181654289f9272531207ef | licenses ["Apache-2.0"] | count null | null → null
max_forks: aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/ReportTerrorismJobResultRequest.py | liumihust/aliyun-openapi-python-sdk @ c7b5dd4befae4b9c59181654289f9272531207ef | licenses ["Apache-2.0"] | count 1 | 2021-02-23T11:27:54.000Z → 2021-02-23T11:27:54.000Z
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ReportTerrorismJobResultRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'ReportTerrorismJobResult','mts')
def get_JobId(self):
return self.get_query_params().get('JobId')
def set_JobId(self,JobId):
self.add_query_param('JobId',JobId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_Label(self):
return self.get_query_params().get('Label')
def set_Label(self,Label):
self.add_query_param('Label',Label)
def get_Detail(self):
return self.get_query_params().get('Detail')
def set_Detail(self,Detail):
self.add_query_param('Detail',Detail)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
avg_line_length: 33.272727 | max_line_length: 83 | alphanum_fraction: 0.764117
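A minimal sketch of how a request class like this is typically dispatched with the core SDK client. The credentials, region, and parameter values are placeholders, and the snippet is an editor-added illustration rather than part of the original file.

from aliyunsdkcore.client import AcsClient
from aliyunsdkmts.request.v20140618.ReportTerrorismJobResultRequest import ReportTerrorismJobResultRequest

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
request = ReportTerrorismJobResultRequest()
request.set_JobId("<job-id>")
request.set_Label("<label>")
response = client.do_action_with_exception(request)  # raw API response bytes
print(response)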
hexsha: 9c1bcd16332cae19eee426f22272016e10031410 | size: 48,851 | ext: py | lang: Python
max_stars: made.py | var-skip/var-skip @ 0b19994466cd2a690ed8bfeefd78dbf36ea85410 | licenses ["Apache-2.0"] | count 5 | 2020-07-15T13:37:23.000Z → 2021-05-03T07:25:12.000Z
max_issues: made.py | var-skip/var-skip @ 0b19994466cd2a690ed8bfeefd78dbf36ea85410 | licenses ["Apache-2.0"] | count null | null → null
max_forks: made.py | var-skip/var-skip @ 0b19994466cd2a690ed8bfeefd78dbf36ea85410 | licenses ["Apache-2.0"] | count 3 | 2020-07-15T18:22:43.000Z → 2021-03-18T14:30:08.000Z
content:
import time
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
# try:
# from typing_extensions import Final
# except:
# # If you don't have `typing_extensions` installed, you can use a
# # polyfill from `torch.jit`.
# from torch.jit import Final
# from typing import Optional
# This is a generic wrapper for any driver function you want to time
def time_this(f):
def timed_wrapper(*args, **kw):
start_time = time.time()
result = f(*args, **kw)
end_time = time.time()
# Time taken = end_time - start_time
print('| func:%r took: %2.4f seconds |' % \
(f.__name__, end_time - start_time))
# print('| func:%r args:[%r, %r] took: %2.4f seconds |' % \
# (f.__name__, args, kw, end_time - start_time))
return result
return timed_wrapper
class MaskedLinear(nn.Linear):
""" same as Linear except has a configurable mask on the weights """
# masked_weight: Optional[torch.Tensor]
def __init__(self,
in_features,
out_features,
bias=True,
condition_on_ordering=False):
super().__init__(in_features, out_features, bias)
self.register_buffer('mask', torch.ones(out_features, in_features))
self.condition_ordering_linear = None
if condition_on_ordering:
self.condition_ordering_linear = nn.Linear(in_features,
out_features,
bias=False)
self.masked_weight = None
def set_mask(self, mask):
"""Accepts a mask of shape [in_features, out_features]."""
self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
def set_cached_mask(self, mask):
self.mask.data.copy_(mask)
def get_cached_mask(self):
return self.mask.clone().detach()
def forward(self, input):
if self.masked_weight is None:
mw = self.mask * self.weight
out = F.linear(input, mw, self.bias)
# NOTE: this tied-weight variant has much higher error.
# if self.condition_ordering_linear is None:
# return out
# return out + F.linear(torch.ones_like(input), mw)
else:
# ~17% speedup for Prog Sampling.
out = F.linear(input, self.masked_weight, self.bias)
if self.condition_ordering_linear is None:
return out
return out + F.linear(torch.ones_like(input),
self.mask * self.condition_ordering_linear.weight)
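# --- Editor's illustrative sketch, not part of the original made.py. ---
# set_mask() takes a [in_features, out_features] array, so the zero below cuts
# the connection from input feature 1 to output unit 0; after masking, output
# unit 0 depends only on x[:, 0]. The helper name is hypothetical.
def _demo_masked_linear():
    layer = MaskedLinear(2, 2, bias=False)
    layer.set_mask(np.array([[1, 1],
                             [0, 1]]))
    x = torch.tensor([[1.0, 10.0]])
    return layer(x)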
class MaskedResidualBlock(nn.Module):
def __init__(self,
in_features,
out_features,
activation,
condition_on_ordering=False):
assert in_features == out_features, [in_features, out_features]
super().__init__()
self.layers = nn.ModuleList()
self.layers.append(
MaskedLinear(in_features,
out_features,
bias=True,
condition_on_ordering=condition_on_ordering))
self.layers.append(
MaskedLinear(in_features,
out_features,
bias=True,
condition_on_ordering=condition_on_ordering))
self.activation = activation
def set_mask(self, mask):
self.layers[0].set_mask(mask)
self.layers[1].set_mask(mask)
def set_cached_mask(self, mask):
# They have the same mask.
self.layers[0].mask.copy_(mask)
self.layers[1].mask.copy_(mask)
def get_cached_mask(self):
return self.layers[0].mask.clone().detach()
def forward(self, input):
out = input
out = self.activation(out)
out = self.layers[0](out)
out = self.activation(out)
out = self.layers[1](out)
return input + out
# class MADE(torch.jit.ScriptModule):
class MADE(nn.Module):
def __init__(
self,
nin,
hidden_sizes,
nout,
num_masks=1,
natural_ordering=True,
input_bins=None,
activation=nn.ReLU,
do_direct_io_connections=False,
input_encoding=None,
direct_io_bias=True, # True for backward-compat of checkpoints
output_encoding="one_hot",
embed_size=32,
input_no_emb_if_leq=True,
embs_tied=False,
residual_connections=False,
dropout_p=0,
fixed_dropout_p=False,
factor_table=None,
seed=11123,
fixed_ordering=None,
per_row_dropout_p=False,
prefix_dropout=False,
disable_learnable_unk=False,
):
"""MADE.
Args:
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively
parameterize some kind of 1D distribution. note: if nout is e.g. 2x
larger than nin (perhaps the mean and std), then the first nin will
be all the means and the second nin will be stds. i.e. output
dimensions depend on the same input dimensions in "chunks" and
should be carefully decoded downstream appropriately. the output of
running the tests for this file makes this a bit more clear with
examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use
random permutations
input_bins: classes each input var can take on, e.g., [5, 2]
means input x1 has values in {0, ..., 4} and x2 in {0, 1}.
"""
super().__init__()
self.nin = nin
if num_masks > 1:
# Double the weights, so need to reduce the size to be fair.
hidden_sizes = [int(h // 2**0.5) for h in hidden_sizes]
print("Auto reducing MO hidden sizes to", hidden_sizes, num_masks)
# None: feed inputs as-is, no encoding applied. Each column thus
# occupies 1 slot in the input layer. For testing only.
assert input_encoding in [
None, "one_hot", "two_level", "binary", "binary_100p", "embed"
]
self.input_encoding = input_encoding
assert output_encoding in ["one_hot", "bits", "embed"]
self.embed_size = self.emb_dim = embed_size
self.output_encoding = output_encoding
self.activation = activation
self.nout = nout
self.per_row_dropout_p = per_row_dropout_p
self.prefix_dropout = prefix_dropout
print("per row dropout", self.per_row_dropout_p)
print("prefix dropout", self.prefix_dropout)
self.hidden_sizes = hidden_sizes
self.input_bins = input_bins
self.input_no_emb_if_leq = input_no_emb_if_leq
self.do_direct_io_connections = do_direct_io_connections
self.embs_tied = embs_tied
self.dropout_p = dropout_p
if self.prefix_dropout or self.per_row_dropout_p:
assert self.dropout_p
self.fixed_dropout_p = fixed_dropout_p
self.factor_table = factor_table
self.residual_connections = residual_connections
self.disable_learnable_unk = disable_learnable_unk
self.num_masks = num_masks
if nout > nin:
# nout must be integer multiple of nin; or we're given more info.
assert nout % nin == 0 or input_bins is not None
self.fixed_ordering = fixed_ordering
if fixed_ordering is not None:
assert num_masks == 1
print('** Fixed ordering {} supplied, ignoring natural_ordering'.
format(fixed_ordering))
assert self.input_bins is not None
encoded_bins = list(
map(self._get_output_encoded_dist_size, self.input_bins))
self.input_bins_encoded = list(
map(self._get_input_encoded_dist_size, self.input_bins))
self.input_bins_encoded_cumsum = np.cumsum(self.input_bins_encoded)
hs = [nin] + hidden_sizes + [sum(encoded_bins)]
# print('hs={}, nin={}, hiddens={}, encoded_bins={}'.format(
# hs, nin, hidden_sizes, encoded_bins))
print('encoded_bins (output)', encoded_bins)
print('encoded_bins (input)', self.input_bins_encoded)
# define a simple MLP neural net
self.net = []
for h0, h1 in zip(hs, hs[1:]):
if residual_connections:
if h0 == h1:
self.net.extend([
MaskedResidualBlock(
h0,
h1,
activation=activation(inplace=False),
condition_on_ordering=self.num_masks > 1)
])
else:
self.net.extend([
MaskedLinear(h0,
h1,
condition_on_ordering=self.num_masks > 1),
])
else:
self.net.extend([
MaskedLinear(h0,
h1,
condition_on_ordering=self.num_masks > 1),
activation(inplace=True),
])
if not residual_connections:
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net)
if self.input_encoding is not None:
# Input layer should be changed.
assert self.input_bins is not None
input_size = 0
for i, dist_size in enumerate(self.input_bins):
input_size += self._get_input_encoded_dist_size(dist_size)
new_layer0 = MaskedLinear(input_size,
self.net[0].out_features,
condition_on_ordering=self.num_masks > 1)
self.net[0] = new_layer0
if self.input_encoding == "embed":
self.embedding_networks = nn.ModuleList()
if not self.embs_tied:
self.embedding_networks_out = nn.ModuleList()
for i, dist_size in enumerate(self.input_bins):
if dist_size <= self.embed_size and self.input_no_emb_if_leq:
embed = embed2 = None
else:
embed = nn.Embedding(dist_size, self.embed_size)
embed2 = nn.Embedding(dist_size, self.embed_size)
self.embedding_networks.append(embed)
self.embedding_networks_out.append(embed2)
else:
for i, dist_size in enumerate(self.input_bins):
if dist_size <= self.embed_size and self.input_no_emb_if_leq:
embed = None
else:
embed = nn.Embedding(dist_size, self.embed_size)
self.embedding_networks.append(embed)
# Learnable [MASK] representation.
if self.dropout_p:
self.unk_embeddings = nn.ParameterList()
print('Disable learnable?', disable_learnable_unk)
for i, dist_size in enumerate(self.input_bins):
self.unk_embeddings.append(
nn.Parameter(torch.zeros(1, self.input_bins_encoded[i]),
requires_grad=not disable_learnable_unk))
# seeds for orders/connectivities of the model ensemble
self.natural_ordering = natural_ordering
self.num_masks = num_masks
self.seed = seed if seed is not None else 11123 # for cycling through num_masks orderings
print('self.seed', self.seed)
self.direct_io_layer = None
self.logit_indices = np.cumsum(encoded_bins)
self.m = {}
self.cached_masks = {}
self.update_masks() # builds the initial self.m connectivity
# note, we could also precompute the masks and cache them, but this
# could get memory expensive for large number of masks.
# Logit indices for the columns.
self.orderings = [self.m[-1]]
# Optimization: cache some values needed in EncodeInput().
self.bin_as_onehot_shifts = None
def _build_or_update_direct_io(self):
assert self.nout > self.nin and self.input_bins is not None
direct_nin = self.net[0].in_features
direct_nout = self.net[-1].out_features
if self.direct_io_layer is None:
self.direct_io_layer = MaskedLinear(
direct_nin,
direct_nout,
condition_on_ordering=self.num_masks > 1)
mask = np.zeros((direct_nout, direct_nin), dtype=np.uint8)
print('in _build_or_update_direct_io(), self.m[-1]', self.m[-1])
# Inverse: ord_idx -> natural idx.
inv_ordering = [None] * self.nin
for natural_idx in range(self.nin):
inv_ordering[self.m[-1][natural_idx]] = natural_idx
for ord_i in range(self.nin):
nat_i = inv_ordering[ord_i]
# x_(nat_i) in the input occupies range [inp_l, inp_r).
inp_l = 0 if nat_i == 0 else self.input_bins_encoded_cumsum[nat_i -
1]
inp_r = self.input_bins_encoded_cumsum[nat_i]
assert inp_l < inp_r
for ord_j in range(ord_i + 1, self.nin):
nat_j = inv_ordering[ord_j]
# Output x_(nat_j) should connect to input x_(nat_i); it
# occupies range [out_l, out_r) in the output.
out_l = 0 if nat_j == 0 else self.logit_indices[nat_j - 1]
out_r = self.logit_indices[nat_j]
assert out_l < out_r
# print('setting mask[{}:{}, {}:{}]'.format(
# out_l, out_r, inp_l, inp_r))
mask[out_l:out_r, inp_l:inp_r] = 1
# print('do_direct_io_connections mask', mask)
# print('mask', mask)
mask = mask.T
self.direct_io_layer.set_mask(mask)
def _get_input_encoded_dist_size(self, dist_size):
if self.input_encoding == "two_level":
dist_size += 1 + dist_size // 10
elif self.input_encoding == "embed":
if self.input_no_emb_if_leq:
dist_size = min(dist_size, self.embed_size)
else:
dist_size = self.embed_size
elif self.input_encoding == "one_hot":
pass
# if dist_size <= 2:
# dist_size = 1 # don't one-hot encode binary vals
elif self.input_encoding == "binary":
dist_size = max(1, int(np.ceil(np.log2(dist_size))))
elif self.input_encoding == "binary_100p":
if dist_size > 100:
dist_size = max(1, int(np.ceil(np.log2(dist_size))))
elif self.input_encoding is None:
return 1
else:
assert False, self.input_encoding
return dist_size
def _get_output_encoded_dist_size(self, dist_size):
if self.output_encoding == "two_level":
dist_size += 1 + dist_size // 10
elif self.output_encoding == "embed":
if self.input_no_emb_if_leq:
dist_size = min(dist_size, self.embed_size)
else:
dist_size = self.embed_size
elif self.output_encoding == "one_hot":
pass
# if dist_size <= 2:
# dist_size = 1 # don't one-hot encode binary vals
elif self.output_encoding == "binary":
dist_size = max(1, int(np.ceil(np.log2(dist_size))))
elif self.output_encoding == "binary_100p":
if dist_size > 100:
dist_size = max(1, int(np.ceil(np.log2(dist_size))))
return dist_size
def update_masks(self, invoke_order=None):
"""Update m() for all layers and change masks correspondingly.
No-op if "self.num_masks" is 1.
"""
if self.m and self.num_masks == 1:
# FIXME
# assert np.array_equal(invoke_order,
# self.m[-1]), 'invoke={} curr={}'.format(
# invoke_order, self.m[-1])
return # only a single seed, skip for efficiency
L = len(self.hidden_sizes)
layers = [
l for l in self.net if isinstance(l, MaskedLinear) or
isinstance(l, MaskedResidualBlock)
]
### Precedence of several params determining ordering:
#
# invoke_order
# orderings
# fixed_ordering
# natural_ordering
#
# from high precedence to low.
# For multi-order models, we associate RNG seeds with orderings as
# follows:
# orderings = [ o0, o1, o2, ... ]
# seeds = [ 0, 1, 2, ... ]
# This must be consistent across training & inference.
if invoke_order is not None:
# Inference path.
found = False
for i in range(len(self.orderings)):
if np.array_equal(self.orderings[i], invoke_order):
found = True
break
if not found:
print("WARNING: eval on order not trained on", invoke_order)
assert found, 'specified={}, avail={}'.format(
invoke_order, self.orderings)
# print('found, order i=', i)
if self.seed == (i + 1) % self.num_masks and np.array_equal(
self.m[-1], invoke_order):
# During querying, after a multi-order model is configured to
# take a specific ordering, it can be used to do multiple
# forward passes per query.
return
self.seed = i
rng = np.random.RandomState(self.seed)
self.m[-1] = np.asarray(invoke_order)
# print('looking up seed in cached masks:', self.seed)
if self.seed in self.cached_masks:
masks, direct_io_mask = self.cached_masks[self.seed]
assert len(layers) == len(masks), (len(layers), len(masks))
for l, m in zip(layers, masks):
l.set_cached_mask(m)
if self.do_direct_io_connections:
assert direct_io_mask is not None
self.direct_io_layer.set_cached_mask(direct_io_mask)
self.seed = (self.seed + 1) % self.num_masks
# print('found, updated seed to', self.seed)
return # Early return
curr_seed = self.seed
self.seed = (self.seed + 1) % self.num_masks
elif hasattr(self, 'orderings'):
# Training path: cycle through the special orderings.
rng = np.random.RandomState(self.seed)
assert 0 <= self.seed and self.seed < len(self.orderings)
self.m[-1] = self.orderings[self.seed]
if self.seed in self.cached_masks:
masks, direct_io_mask = self.cached_masks[self.seed]
assert len(layers) == len(masks), (len(layers), len(masks))
for l, m in zip(layers, masks):
l.set_cached_mask(m)
if self.do_direct_io_connections:
assert direct_io_mask is not None
self.direct_io_layer.set_cached_mask(direct_io_mask)
# print('using cached masks for seed', self.seed)
self.seed = (self.seed + 1) % self.num_masks
return # Early return
print('constructing masks with seed', self.seed, 'self.m[-1]',
self.m[-1])
curr_seed = self.seed
self.seed = (self.seed + 1) % self.num_masks
else:
# Train-time initial construction: either single-order, or
# .orderings has not been assigned yet.
rng = np.random.RandomState(self.seed)
self.seed = (self.seed + 1) % self.num_masks
self.m[-1] = np.arange(
self.nin) if self.natural_ordering else rng.permutation(
self.nin)
if self.fixed_ordering is not None:
self.m[-1] = np.asarray(self.fixed_ordering)
if self.nin > 1:
for l in range(L):
if self.residual_connections:
# sequential assignment for ResMade: https://arxiv.org/pdf/1904.05626.pdf
# FIXME: this seems incorrect since it's [1, ncols).
self.m[l] = np.array([
(k - 1) % (self.nin - 1)
# [(k - 1) % (self.nin - 1) + 1
for k in range(self.hidden_sizes[l])
])
else:
# Samples from [0, ncols - 1).
self.m[l] = rng.randint(self.m[l - 1].min(),
self.nin - 1,
size=self.hidden_sizes[l])
else:
# This should result in first layer's masks == 0.
# So output units are disconnected to any inputs.
for l in range(L):
self.m[l] = np.asarray([-1] * self.hidden_sizes[l])
# print('ordering', self.m[-1])
# print('self.m', self.m)
# construct the mask matrices
masks = [self.m[l - 1][:, None] <= self.m[l][None, :] for l in range(L)]
masks.append(self.m[L - 1][:, None] < self.m[-1][None, :])
if self.nout > self.nin:
# Last layer's mask needs to be changed.
if self.input_bins is None:
k = int(self.nout / self.nin)
# replicate the mask across the other outputs
# so [x1, x2, ..., xn], ..., [x1, x2, ..., xn].
masks[-1] = np.concatenate([masks[-1]] * k, axis=1)
else:
# [x1, ..., x1], ..., [xn, ..., xn] where the i-th list has
# input_bins[i - 1] many elements (multiplicity, # of classes).
mask = np.asarray([])
for k in range(masks[-1].shape[0]):
tmp_mask = []
for idx, x in enumerate(zip(masks[-1][k], self.input_bins)):
mval, nbins = x[0], self._get_output_encoded_dist_size(
x[1])
tmp_mask.extend([mval] * nbins)
tmp_mask = np.asarray(tmp_mask)
if k == 0:
mask = tmp_mask
else:
mask = np.vstack([mask, tmp_mask])
masks[-1] = mask
if self.input_encoding is not None:
# Input layer's mask should be changed.
assert self.input_bins is not None
# [nin, hidden].
mask0 = masks[0]
new_mask0 = []
for i, dist_size in enumerate(self.input_bins):
dist_size = self._get_input_encoded_dist_size(dist_size)
# [dist size, hidden]
new_mask0.append(
np.concatenate([mask0[i].reshape(1, -1)] * dist_size,
axis=0))
# [sum(dist size), hidden]
new_mask0 = np.vstack(new_mask0)
masks[0] = new_mask0
assert len(layers) == len(masks), (len(layers), len(masks))
for l, m in zip(layers, masks):
l.set_mask(m)
dio_mask = None
if self.do_direct_io_connections:
self._build_or_update_direct_io()
dio_mask = self.direct_io_layer.get_cached_mask()
# Cache.
if hasattr(self, 'orderings'):
print('caching masks for seed', curr_seed)
masks = [l.get_cached_mask() for l in layers]
print('signatures:', [m.sum() for m in masks]
+ [dio_mask.sum() if dio_mask is not None else 0])
assert curr_seed not in self.cached_masks
self.cached_masks[curr_seed] = (masks, dio_mask)
def name(self):
n = 'made'
if self.residual_connections:
n += '-resmade'
n += '-hidden' + '_'.join(str(h) for h in self.hidden_sizes)
n += '-emb' + str(self.embed_size)
if self.num_masks > 1:
n += '-{}masks'.format(self.num_masks)
if not self.natural_ordering:
n += '-nonNatural'
n += ('-no' if not self.do_direct_io_connections else '-') + 'directIo'
n += '-{}In{}Out'.format(self.input_encoding, self.output_encoding)
n += '-embsTied' if self.embs_tied else '-embsNotTied'
if self.input_no_emb_if_leq:
n += '-inputNoEmbIfLeq'
if self.dropout_p:
n += '-dropout'
if self.disable_learnable_unk:
n += '-nolearnableUnk'
else:
n += '-learnableUnk'
if self.fixed_dropout_p:
n += '-fixedDropout{:.2f}'.format(self.dropout_p)
return n
def get_unk(self, i):
if self.disable_learnable_unk:
return torch.zeros_like(self.unk_embeddings[i].detach())
else:
return self.unk_embeddings[i]
# @torch.jit.script
def Embed(self, data, natural_col=None, out=None):
if data is None:
if out is None:
return self.get_unk(natural_col)
out.copy_(self.get_unk(natural_col))
return out
bs = data.size()[0]
y_embed = []
data = data.long()
if natural_col is not None:
# Fast path only for inference. One col.
coli_dom_size = self.input_bins[natural_col]
# Embed?
if coli_dom_size > self.embed_size or not self.input_no_emb_if_leq:
res = self.embedding_networks[natural_col](data.view(-1,))
if out is not None:
out.copy_(res)
return out
return res
else:
if out is None:
out = torch.zeros(bs, coli_dom_size, device=data.device)
out.scatter_(1, data, 1)
return out
else:
if self.per_row_dropout_p == 1 or self.per_row_dropout_p is True:
row_dropout_probs = torch.rand(bs, device=data.device)
elif self.per_row_dropout_p == 2:
# Also per row masking, but makes more sense (draw num masked
# tokens first). In [0, 1).
row_dropout_probs = torch.randint(
0, self.nin, (bs,), device=data.device).float() / self.nin
row_dropout_lim = torch.rand(bs, device=data.device) * len(
self.input_bins)
for i, coli_dom_size in enumerate(self.input_bins):
# Wildcard column? use -1 as special token.
# Inference pass only (see estimators.py).
not_skip = data[:, i] >= 0
data_col = torch.clamp(data[:, i], 0)
# Embed?
if coli_dom_size > self.embed_size or not self.input_no_emb_if_leq:
# assert not self.dropout_p, "not implemented"
col_i_embs = self.embedding_networks[i](data_col)
if not self.dropout_p:
y_embed.append(col_i_embs)
else:
dropped_repr = self.get_unk(i)
# During training, non-dropped 1's are scaled by
# 1/(1-p), so we clamp back to 1.
def dropout_p():
if self.fixed_dropout_p:
return self.dropout_p
return 1. - np.random.randint(
1, self.nin + 1) * 1. / self.nin
batch_mask = torch.clamp(
torch.dropout(
torch.ones(bs, 1, device=data.device),
p=dropout_p(),
# np.random.randint(5, 12) * 1. / self.nin,
train=self.training),
0,
1)
if self.training and self.per_row_dropout_p:
# 1 means original repr, 0 means use masked repr.
batch_mask = (
torch.rand(bs, device=data.device) >=
row_dropout_probs).float().unsqueeze(1)
elif self.training and self.prefix_dropout:
batch_mask = (i * torch.ones(bs, device=data.device)
>
row_dropout_lim).float().unsqueeze(1)
elif not self.training:
batch_mask = not_skip.float().unsqueeze(1)
y_embed.append(batch_mask * col_i_embs +
(1. - batch_mask) * dropped_repr)
else:
y_onehot = torch.zeros(bs,
coli_dom_size,
device=data.device)
y_onehot.scatter_(1, data_col.view(-1, 1), 1)
if self.dropout_p:
dropped_repr = self.get_unk(i)
if self.factor_table and self.factor_table.columns[
i].factor_id:
pass # use prev col's batch mask
else:
# During training, non-dropped 1's are scaled by
# 1/(1-p), so we clamp back to 1.
def dropout_p():
if self.fixed_dropout_p:
return self.dropout_p
return 1. - np.random.randint(
1, self.nin + 1) * 1. / self.nin
batch_mask = torch.clamp(
torch.dropout(
torch.ones(bs, 1, device=data.device),
# p=self.dropout_p,
p=dropout_p(),
# np.random.randint(5, 12) * 1. / self.nin,
train=self.training),
0,
1)
if self.training and self.per_row_dropout_p:
# 1 means original repr, 0 means use masked repr.
batch_mask = (
torch.rand(bs, device=data.device) >=
row_dropout_probs).float().unsqueeze(1)
elif self.training and self.prefix_dropout:
batch_mask = (
i * torch.ones(bs, device=data.device) >
row_dropout_lim).float().unsqueeze(1)
elif not self.training:
batch_mask = not_skip.float().unsqueeze(1)
y_embed.append(batch_mask * y_onehot +
(1. - batch_mask) * dropped_repr)
else:
y_embed.append(y_onehot)
return torch.cat(y_embed, 1)
def ToOneHot(self, data):
assert not self.dropout_p, "not implemented"
bs = data.size()[0]
y_onehots = []
data = data.long()
for i, coli_dom_size in enumerate(self.input_bins):
if coli_dom_size <= 2:
y_onehots.append(data[:, i].view(-1, 1).float())
else:
y_onehot = torch.zeros(bs, coli_dom_size, device=data.device)
y_onehot.scatter_(1, data[:, i].view(-1, 1), 1)
y_onehots.append(y_onehot)
# [bs, sum(dist size)]
return torch.cat(y_onehots, 1)
def ToBinaryAsOneHot(self, data, threshold=0, natural_col=None, out=None):
if data is None:
if out is None:
return self.get_unk(natural_col)
out.copy_(self.get_unk(natural_col))
return out
bs = data.size()[0]
data = data.long()
# print('data.device', data.device)
if self.bin_as_onehot_shifts is None:
# This caching gives very sizable gains.
self.bin_as_onehot_shifts = [None] * self.nin
const_one = torch.ones([], dtype=torch.long, device=data.device)
for i, coli_dom_size in enumerate(self.input_bins):
# Max with 1 to guard against cols with 1 distinct val.
one_hot_dims = max(1, int(np.ceil(np.log2(coli_dom_size))))
self.bin_as_onehot_shifts[i] = const_one << torch.arange(
one_hot_dims, device=data.device)
# print('data.device', data.device, 'const_one', const_one.device,
# 'bin_as_onehot_shifts', self.bin_as_onehot_shifts[0].device)
if natural_col is None:
# Train path.
assert out is None
y_onehots = [None] * self.nin
if self.per_row_dropout_p == 1 or self.per_row_dropout_p is True:
row_dropout_probs = torch.rand(bs, device=data.device)
elif self.per_row_dropout_p == 2:
# Also per row masking, but makes more sense (draw num masked
# tokens first). In [0, 1).
row_dropout_probs = torch.randint(
0, self.nin, (bs,), device=data.device).float() / self.nin
row_dropout_lim = torch.rand(bs, device=data.device) * len(
self.input_bins)
for i, coli_dom_size in enumerate(self.input_bins):
if coli_dom_size > threshold:
# Bit shift in PyTorch + GPU is 27% faster than np.
# data_np = data[:, i].view(-1, 1)
data_np = data.narrow(1, i, 1)
# print(data_np.device, self.bin_as_onehot_shifts[i].device)
binaries = (data_np & self.bin_as_onehot_shifts[i]) > 0
y_onehots[i] = binaries
if self.dropout_p:
dropped_repr = self.get_unk(i)
# During training, non-dropped 1's are scaled by
# 1/(1-p), so we clamp back to 1.
def dropout_p():
if self.fixed_dropout_p:
return self.dropout_p
return 1. - np.random.randint(
1, self.nin + 1) * 1. / self.nin
batch_mask = torch.clamp(
torch.dropout(
torch.ones(bs, 1, device=data.device),
# p=self.dropout_p,
p=dropout_p(),
# np.random.randint(5, 12) * 1. / self.nin,
train=self.training),
0,
1) #.to(torch.int8, non_blocking=True, copy=False)
if self.training and self.per_row_dropout_p:
batch_mask = (
torch.rand(bs, device=data.device) >=
row_dropout_probs).float().unsqueeze(1)
elif self.training and self.prefix_dropout:
batch_mask = (i * torch.ones(bs, device=data.device)
>
row_dropout_lim).float().unsqueeze(1)
binaries = binaries.to(torch.float32,
non_blocking=True,
copy=False)
# print(batch_mask.dtype, binaries.dtype, dropped_repr.dtype)
# assert False
y_onehots[i] = batch_mask * binaries + (
1. - batch_mask) * dropped_repr
else:
# encode as plain one-hot
y_onehot = torch.zeros(bs,
coli_dom_size,
device=data.device)
y_onehot.scatter_(1, data[:, i].view(-1, 1), 1)
y_onehots[i] = y_onehot
# [bs, sum(log2(dist size))]
res = torch.cat(y_onehots, 1)
return res.to(torch.float32, non_blocking=True, copy=False)
else:
# Inference path.
natural_idx = natural_col
coli_dom_size = self.input_bins[natural_idx]
# skip = data is None #data[0, 0] < 0
# if skip:
# if out is None:
# return self.unk_embeddings[natural_idx]
# out.copy_(self.unk_embeddings[natural_idx])
# return out
if coli_dom_size > threshold:
# Bit shift in PyTorch + GPU is 27% faster than np.
# data_np = data[:, i].view(-1, 1)
data_np = data #.narrow(1, 0, 1)
# print(data_np.device, self.bin_as_onehot_shifts[i].device)
if out is None:
res = (data_np & self.bin_as_onehot_shifts[natural_idx]) > 0
return res.to(torch.float32, non_blocking=True, copy=False)
else:
out.copy_(
(data_np & self.bin_as_onehot_shifts[natural_idx]) > 0)
return out
else:
assert False, 'inference'
# encode as plain one-hot
if out is None:
y_onehot = torch.zeros(bs,
coli_dom_size,
device=data.device)
y_onehot.scatter_(
1,
data, #data[:, i].view(-1, 1),
1)
res = y_onehot
return res.to(torch.float32, non_blocking=True, copy=False)
out.scatter_(1, data, 1)
return out
def ToTwoLevel(self, data):
bs = data.size()[0]
y_onehots = []
data = data.long()
for i, coli_dom_size in enumerate(self.input_bins):
y_onehot = torch.zeros(bs, coli_dom_size, device=data.device)
y_onehot.scatter_(1, data[:, i].view(-1, 1), 1)
y_onehot = torch.dropout(y_onehot, p=0.3, train=self.training)
# add on one-hot encoding at coarser second-level
# e.g., for domain of 35, the 2nd level will have domain size of 4
second_level_dom_size = 1 + coli_dom_size // 10
y2_onehot = torch.zeros(bs,
second_level_dom_size,
device=data.device)
y2_onehot.scatter_(1, data[:, i].view(-1, 1) // 10, 1)
y_onehots.append(y_onehot)
y_onehots.append(y2_onehot)
# [bs, sum(dist size) + sum(2nd_level)]
return torch.cat(y_onehots, 1)
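    # A worked example of the two-level encoding above (numbers follow directly
    # from the code; the column value 27 is illustrative): for a column with
    # domain size 35, the first-level one-hot has width 35 and the second level
    # has width 1 + 35 // 10 == 4.  A value of 27 sets position 27 in the first
    # level and position 27 // 10 == 2 in the second, so the column contributes
    # 39 features to the concatenated output.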
# @time_this
def EncodeInput(self, data, natural_col=None, out=None):
""""Warning: this could take up a significant portion of a forward pass.
Args:
natural_col: if specified, 'data' has shape [N, 1] corresponding to
col-'natural-col'. Otherwise 'data' corresponds to all cols.
out: if specified, assign results into this Tensor storage.
"""
if self.input_encoding == "binary":
# TODO: try out=out see if it helps dmv11
return self.ToBinaryAsOneHot(data, natural_col=natural_col, out=out)
elif self.input_encoding == "embed":
return self.Embed(data, natural_col=natural_col, out=out)
elif self.input_encoding is None:
return data
elif self.input_encoding == "one_hot":
return self.ToOneHot(data)
elif self.input_encoding == "two_level":
return self.ToTwoLevel(data)
elif self.input_encoding == "binary_100p":
return self.ToBinaryAsOneHot(data, threshold=100)
else:
assert False, self.input_encoding
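    # A hedged sketch of the two call patterns described in EncodeInput's
    # docstring (the tensor name `xb` and column index 3 are illustrative):
    #   enc_all = model.EncodeInput(xb)                         # xb: [N, ncols]
    #   enc_col = model.EncodeInput(xb[:, 3:4], natural_col=3)  # one column, [N, 1]
    # The second form matches the single-column inference path handled above.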
# @torch.jit.script_method
# @torch.jit.ignore
def forward(self, x, skip_prefix=[]):
"""Calculates unnormalized logits.
If self.input_bins is not specified, the output units are ordered as:
[x1, x2, ..., xn], ..., [x1, x2, ..., xn].
So they can be reshaped as thus and passed to a cross entropy loss:
out.view(-1, model.nout // model.nin, model.nin)
Otherwise, they are ordered as:
[x1, ..., x1], ..., [xn, ..., xn]
And they can't be reshaped directly.
Args:
x: [bs, ncols].
"""
if skip_prefix:
assert len(skip_prefix) == x.shape[0], (len(skip_prefix), x.shape)
for i, n in enumerate(skip_prefix):
x[i][:n] = -1
x = self.EncodeInput(x)
if self.direct_io_layer is not None:
residual = self.direct_io_layer(x)
return self.net(x) + residual
return self.net(x)
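    # A hedged usage sketch for the ordering described in forward()'s docstring,
    # covering the case where input_bins is None (`xb` and `targets` are
    # illustrative names; `F` is torch.nn.functional as used elsewhere here):
    #   logits = model(xb)                                     # [bs, nout]
    #   per_col = logits.view(-1, model.nout // model.nin, model.nin)
    #   loss = F.cross_entropy(per_col, targets)               # targets: [bs, nin]
    # When input_bins is set, use logits_for_col() / nll() below instead.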
# @time_this
# @torch.jit.export
def forward_with_encoded_input(self, x):
if self.direct_io_layer is not None:
residual = self.direct_io_layer(x)
return self.net(x) + residual
return self.net(x)
    # FIXME: be careful about this... if we add this, inference on old MADE ckpts seems broken.
def do_forward(self, x, ordering):
"""Performs forward pass, invoking a specified ordering."""
self.update_masks(invoke_order=ordering)
if self.direct_io_layer is not None:
residual = self.direct_io_layer(x)
return self.net(x) + residual
return self.net(x)
def logits_for_col(self, idx, logits):
"""Returns the logits (vector) corresponding to log p(x_i | x_(<i)).
Args:
idx: int, in natural (table) ordering.
logits: [batch size, hidden] where hidden can either be sum(dom
sizes), or emb_dims.
Returns:
logits_for_col: [batch size, domain size for column idx].
"""
assert self.input_bins is not None
if idx == 0:
# print('slicing out', self.logit_indices[0])
logits_for_var = logits[:, :self.logit_indices[0]]
else:
# print('slicing out', self.logit_indices[idx - 1], 'and', self.logit_indices[idx])
            logits_for_var = logits[:, self.logit_indices[idx - 1]:
                                       self.logit_indices[idx]]
if self.output_encoding != 'embed':
return logits_for_var
if self.embs_tied:
embed = self.embedding_networks[idx]
else:
# some ckpts do not tie weights....
embed = self.embedding_networks_out[idx]
if embed is None:
# Can be None for small domain size columns.
return logits_for_var
# Otherwise, dot with embedding matrix to get the true logits.
# [bs, emb] * [emb, dom size for idx]
return torch.matmul(
logits_for_var,
# embed.weight.t().to(torch.float32)
embed.weight.t())
# * torch.rsqrt(torch.tensor(self.embed_size, dtype=torch.float))
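    # A hedged walk-through of the slicing in logits_for_col(), assuming a
    # one-hot style output where each column's slice width equals its domain
    # size (so logit_indices holds cumulative widths).  With input_bins = [2, 5]:
    #   logit_indices == [2, 7]
    #   logits_for_col(0, logits) -> logits[:, :2]
    #   logits_for_col(1, logits) -> logits[:, 2:7]
    # With output_encoding == 'embed', the slice is additionally multiplied by
    # the column's output embedding matrix to recover domain-sized logits.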
def HasMaterializedOutput(self, natural_idx):
return self.input_bins[natural_idx] < 1e6
def nll(self, logits, data):
"""Calculates -log p(data), given logits (the conditionals).
Args:
logits: [batch size, hidden] where hidden can either be sum(dom
sizes), or emb_dims.
data: [batch size, nin].
Returns:
nll: [batch size].
"""
if data.dtype != torch.long:
data = data.long()
nll = torch.zeros(logits.size()[0], device=logits.device)
for i in range(self.nin):
if self.HasMaterializedOutput(i):
logits_i = self.logits_for_col(i, logits)
nll += F.cross_entropy(logits_i, data[:, i], reduction='none')
else:
# assert False
# Discretized MoL.
mixture_params_i = self.logits_for_col
return nll
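    # A hedged training-step sketch tying forward() and nll() together
    # (`xb` and `opt` are illustrative names; assumes all columns have
    # materialized outputs):
    #   logits = model(xb)                    # xb: [bs, nin] integer columns
    #   loss = model.nll(logits, xb).mean()
    #   loss.backward(); opt.step(); opt.zero_grad()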
def sample(self, num=1, device=None):
assert self.natural_ordering
with torch.no_grad():
sampled = torch.zeros((num, self.nin), device=device)
if self.nout > self.nin:
if self.input_bins is None:
assert num == 1, 'implement me'
# Softmax on discrete classes.
for i in range(self.nin):
logits = self.forward(sampled)
l = logits[0].view(self.nout // self.nin, self.nin)
l = torch.softmax(l[:, i], 0)
sampled[0, i] = torch.multinomial(l, 1)
else:
indices = np.cumsum(self.input_bins)
for i in range(self.nin):
logits = self.forward(sampled)
if i > 0:
scores_for_i = logits[:, indices[i - 1]:indices[i]]
else:
scores_for_i = logits[:, :indices[0]]
s = torch.multinomial(torch.softmax(scores_for_i, -1),
1)
sampled[:, i] = s.view(-1,)
else:
assert num == 1, 'implement me'
# Binary variables.
for i in range(self.nin):
logits = self.forward(sampled)
p = torch.sigmoid(logits[0, i])
# Turn on the pixel with probability p.
sampled[0, i] = 1 if np.random.rand() < p else 0
return sampled
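    # A hedged usage sketch (device string illustrative; natural_ordering=True
    # is required, as asserted inside sample()):
    #   rows = model.sample(num=16, device='cpu')   # -> [16, nin] column values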
if __name__ == '__main__':
# Checks for the autoregressive property.
rng = np.random.RandomState(14)
# (nin, hiddens, nout, input_bins, direct_io)
configs_with_input_bins = [
# (4, [32, 512], 122 * 4, [122] * 4, False),
(2, [10], 2 + 5, [2, 5], False),
(2, [10, 30], 2 + 5, [2, 5], False),
(3, [6], 2 + 2 + 2, [2, 2, 2], False),
(3, [4, 4], 2 + 1 + 2, [2, 1, 2], False),
(4, [16, 8, 16], 2 + 3 + 1 + 2, [2, 3, 1, 2], False),
(2, [10], 2 + 5, [2, 5], True),
(2, [10, 30], 2 + 5, [2, 5], True),
(3, [6], 2 + 2 + 2, [2, 2, 2], True),
(3, [4, 4], 2 + 1 + 2, [2, 1, 2], True),
(4, [16, 8, 16], 2 + 3 + 1 + 2, [2, 3, 1, 2], True),
]
for nin, hiddens, nout, input_bins, direct_io in configs_with_input_bins:
print(nin, hiddens, nout, input_bins, direct_io, '...', end='')
model = MADE(nin,
hiddens,
nout,
input_bins=input_bins,
natural_ordering=True,
do_direct_io_connections=direct_io)
model.eval()
print(model)
for k in range(nout):
inp = torch.tensor(rng.rand(1, nin).astype(np.float32),
requires_grad=True)
loss = model(inp)
l = loss[0, k]
l.backward()
depends = (inp.grad[0].numpy() != 0).astype(
np.uint8) # is there a gradient on the input for this k
            depends_ix = np.where(depends)[0].astype(np.int32)  # indexes where the gradient is nonzero
var_idx = np.argmax(k < np.cumsum(input_bins))
prev_idxs = np.arange(var_idx).astype(np.int32)
# Asserts that k depends only on < var_idx.
print('depends', depends_ix, 'prev_idxs', prev_idxs)
assert len(torch.nonzero(inp.grad[0, var_idx:])) == 0
print('ok')
| 41.646206
| 98
| 0.513234
|
b02b8a84c5c9c793a224678fd7450679afcf7aff
| 55,132
|
py
|
Python
|
scipy/stats/tests/test_hypotests.py
|
khavernathy/scipy
|
f09a01721a3859240a8b69f42df8a45508da86d7
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/stats/tests/test_hypotests.py
|
khavernathy/scipy
|
f09a01721a3859240a8b69f42df8a45508da86d7
|
[
"BSD-3-Clause"
] | 2
|
2015-01-06T19:51:42.000Z
|
2015-12-04T21:54:44.000Z
|
scipy/stats/tests/test_hypotests.py
|
khavernathy/scipy
|
f09a01721a3859240a8b69f42df8a45508da86d7
|
[
"BSD-3-Clause"
] | 1
|
2021-12-12T12:01:36.000Z
|
2021-12-12T12:01:36.000Z
|
from __future__ import division, print_function, absolute_import
from itertools import product
import numpy as np
import pytest
from numpy.testing import (assert_, assert_equal, assert_allclose,
assert_almost_equal) # avoid new uses
from pytest import raises as assert_raises
import scipy.stats as stats
from scipy.stats import distributions
from scipy.stats._hypotests import (epps_singleton_2samp, cramervonmises,
_cdf_cvm, cramervonmises_2samp,
_pval_cvm_2samp_exact, barnard_exact,
boschloo_exact)
from scipy.stats._mannwhitneyu import mannwhitneyu, _mwu_state
from .common_tests import check_named_results
class TestEppsSingleton:
def test_statistic_1(self):
# first example in Goerg & Kaiser, also in original paper of
# Epps & Singleton. Note: values do not match exactly, the
# value of the interquartile range varies depending on how
# quantiles are computed
x = np.array([-0.35, 2.55, 1.73, 0.73, 0.35,
2.69, 0.46, -0.94, -0.37, 12.07])
y = np.array([-1.15, -0.15, 2.48, 3.25, 3.71,
4.29, 5.00, 7.74, 8.38, 8.60])
w, p = epps_singleton_2samp(x, y)
assert_almost_equal(w, 15.14, decimal=1)
assert_almost_equal(p, 0.00442, decimal=3)
def test_statistic_2(self):
# second example in Goerg & Kaiser, again not a perfect match
x = np.array((0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 5, 5, 5, 5, 6, 10,
10, 10, 10))
y = np.array((10, 4, 0, 5, 10, 10, 0, 5, 6, 7, 10, 3, 1, 7, 0, 8, 1,
5, 8, 10))
w, p = epps_singleton_2samp(x, y)
assert_allclose(w, 8.900, atol=0.001)
assert_almost_equal(p, 0.06364, decimal=3)
def test_epps_singleton_array_like(self):
np.random.seed(1234)
x, y = np.arange(30), np.arange(28)
w1, p1 = epps_singleton_2samp(list(x), list(y))
w2, p2 = epps_singleton_2samp(tuple(x), tuple(y))
w3, p3 = epps_singleton_2samp(x, y)
assert_(w1 == w2 == w3)
assert_(p1 == p2 == p3)
def test_epps_singleton_size(self):
# raise error if less than 5 elements
x, y = (1, 2, 3, 4), np.arange(10)
assert_raises(ValueError, epps_singleton_2samp, x, y)
def test_epps_singleton_nonfinite(self):
# raise error if there are non-finite values
x, y = (1, 2, 3, 4, 5, np.inf), np.arange(10)
assert_raises(ValueError, epps_singleton_2samp, x, y)
x, y = np.arange(10), (1, 2, 3, 4, 5, np.nan)
assert_raises(ValueError, epps_singleton_2samp, x, y)
def test_epps_singleton_1d_input(self):
x = np.arange(100).reshape(-1, 1)
assert_raises(ValueError, epps_singleton_2samp, x, x)
def test_names(self):
x, y = np.arange(20), np.arange(30)
res = epps_singleton_2samp(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestCvm:
# the expected values of the cdfs are taken from Table 1 in
# Csorgo / Faraway: The Exact and Asymptotic Distribution of
# Cramér-von Mises Statistics, 1996.
def test_cdf_4(self):
assert_allclose(
_cdf_cvm([0.02983, 0.04111, 0.12331, 0.94251], 4),
[0.01, 0.05, 0.5, 0.999],
atol=1e-4)
def test_cdf_10(self):
assert_allclose(
_cdf_cvm([0.02657, 0.03830, 0.12068, 0.56643], 10),
[0.01, 0.05, 0.5, 0.975],
atol=1e-4)
def test_cdf_1000(self):
assert_allclose(
_cdf_cvm([0.02481, 0.03658, 0.11889, 1.16120], 1000),
[0.01, 0.05, 0.5, 0.999],
atol=1e-4)
def test_cdf_inf(self):
assert_allclose(
_cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204]),
[0.01, 0.05, 0.5, 0.999],
atol=1e-4)
def test_cdf_support(self):
# cdf has support on [1/(12*n), n/3]
assert_equal(_cdf_cvm([1/(12*533), 533/3], 533), [0, 1])
assert_equal(_cdf_cvm([1/(12*(27 + 1)), (27 + 1)/3], 27), [0, 1])
def test_cdf_large_n(self):
# test that asymptotic cdf and cdf for large samples are close
assert_allclose(
_cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100], 10000),
_cdf_cvm([0.02480, 0.03656, 0.11888, 1.16204, 100]),
atol=1e-4)
def test_large_x(self):
# for large values of x and n, the series used to compute the cdf
# converges slowly.
        # this leads to a bug in the R package goftest and in the MAPLE code
        # that is the basis of the implementation in scipy
# note: cdf = 1 for x >= 1000/3 and n = 1000
assert_(0.99999 < _cdf_cvm(333.3, 1000) < 1.0)
assert_(0.99999 < _cdf_cvm(333.3) < 1.0)
def test_low_p(self):
# _cdf_cvm can return values larger than 1. In that case, we just
# return a p-value of zero.
n = 12
res = cramervonmises(np.ones(n)*0.8, 'norm')
assert_(_cdf_cvm(res.statistic, n) > 1.0)
assert_equal(res.pvalue, 0)
def test_invalid_input(self):
x = np.arange(10).reshape((2, 5))
assert_raises(ValueError, cramervonmises, x, "norm")
assert_raises(ValueError, cramervonmises, [1.5], "norm")
assert_raises(ValueError, cramervonmises, (), "norm")
def test_values_R(self):
# compared against R package goftest, version 1.1.1
# goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6), "pnorm")
res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm")
assert_allclose(res.statistic, 0.288156, atol=1e-6)
assert_allclose(res.pvalue, 0.1453465, atol=1e-6)
# goftest::cvm.test(c(-1.7, 2, 0, 1.3, 4, 0.1, 0.6),
# "pnorm", mean = 3, sd = 1.5)
res = cramervonmises([-1.7, 2, 0, 1.3, 4, 0.1, 0.6], "norm", (3, 1.5))
assert_allclose(res.statistic, 0.9426685, atol=1e-6)
assert_allclose(res.pvalue, 0.002026417, atol=1e-6)
# goftest::cvm.test(c(1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5), "pexp")
res = cramervonmises([1, 2, 5, 1.4, 0.14, 11, 13, 0.9, 7.5], "expon")
assert_allclose(res.statistic, 0.8421854, atol=1e-6)
assert_allclose(res.pvalue, 0.004433406, atol=1e-6)
def test_callable_cdf(self):
x, args = np.arange(5), (1.4, 0.7)
r1 = cramervonmises(x, distributions.expon.cdf)
r2 = cramervonmises(x, "expon")
assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
r1 = cramervonmises(x, distributions.beta.cdf, args)
r2 = cramervonmises(x, "beta", args)
assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
class TestMannWhitneyU:
    # All magic numbers are from R wilcox.test unless otherwise specified
# https://rdrr.io/r/stats/wilcox.test.html
# --- Test Input Validation ---
def test_input_validation(self):
x = np.array([1, 2]) # generic, valid inputs
y = np.array([3, 4])
with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
mannwhitneyu([], y)
with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
mannwhitneyu(x, [])
with assert_raises(ValueError, match="`x` and `y` must not contain"):
mannwhitneyu([np.nan, 2], y)
with assert_raises(ValueError, match="`use_continuity` must be one"):
mannwhitneyu(x, y, use_continuity='ekki')
with assert_raises(ValueError, match="`alternative` must be one of"):
mannwhitneyu(x, y, alternative='ekki')
with assert_raises(ValueError, match="`axis` must be an integer"):
mannwhitneyu(x, y, axis=1.5)
with assert_raises(ValueError, match="`method` must be one of"):
mannwhitneyu(x, y, method='ekki')
def test_auto(self):
# Test that default method ('auto') chooses intended method
np.random.seed(1)
n = 8 # threshold to switch from exact to asymptotic
# both inputs are smaller than threshold; should use exact
x = np.random.rand(n-1)
y = np.random.rand(n-1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# one input is smaller than threshold; should use exact
x = np.random.rand(n-1)
y = np.random.rand(n+1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# other input is smaller than threshold; should use exact
auto = mannwhitneyu(y, x)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# both inputs are larger than threshold; should use asymptotic
x = np.random.rand(n+1)
y = np.random.rand(n+1)
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
# both inputs are smaller than threshold, but there is a tie
# should use asymptotic
x = np.random.rand(n-1)
y = np.random.rand(n-1)
y[3] = x[3]
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
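    # In short, the behaviour exercised by the assertions above (stated from
    # these tests, not from reading the implementation): method='auto' matches
    # method='exact' when at least one sample has fewer than 8 observations and
    # there are no ties, and matches method='asymptotic' otherwise.  E.g.:
    #   mannwhitneyu(np.random.rand(5), np.random.rand(20))    # exact path
    #   mannwhitneyu(np.random.rand(20), np.random.rand(20))   # asymptotic path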
# --- Test Basic Functionality ---
x = [210.052110, 110.190630, 307.918612]
y = [436.08811482466416, 416.37397329768191, 179.96975939463582,
197.8118754228619, 34.038757281225756, 138.54220550921517,
128.7769351470246, 265.92721427951852, 275.6617533155341,
592.34083395416258, 448.73177590617018, 300.61495185038905,
187.97508449019588]
# This test was written for mann_whitney_u in gh-4933.
# Originally, the p-values for alternatives were swapped;
# this has been corrected and the tests have been refactored for
# compactness, but otherwise the tests are unchanged.
# R code for comparison, e.g.:
# options(digits = 16)
# x = c(210.052110, 110.190630, 307.918612)
# y = c(436.08811482466416, 416.37397329768191, 179.96975939463582,
# 197.8118754228619, 34.038757281225756, 138.54220550921517,
# 128.7769351470246, 265.92721427951852, 275.6617533155341,
# 592.34083395416258, 448.73177590617018, 300.61495185038905,
# 187.97508449019588)
# wilcox.test(x, y, alternative="g", exact=TRUE)
cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"},
(16, 0.6865041817876)],
[{"alternative": 'less', "method": "asymptotic"},
(16, 0.3432520908938)],
[{"alternative": 'greater', "method": "asymptotic"},
(16, 0.7047591913255)],
[{"alternative": 'two-sided', "method": "exact"},
(16, 0.7035714285714)],
[{"alternative": 'less', "method": "exact"},
(16, 0.3517857142857)],
[{"alternative": 'greater', "method": "exact"},
(16, 0.6946428571429)]]
@pytest.mark.parametrize(("kwds", "expected"), cases_basic)
def test_basic(self, kwds, expected):
res = mannwhitneyu(self.x, self.y, **kwds)
assert_allclose(res, expected)
cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True},
(23, 0.6865041817876)],
[{"alternative": 'less', "use_continuity": True},
(23, 0.7047591913255)],
[{"alternative": 'greater', "use_continuity": True},
(23, 0.3432520908938)],
[{"alternative": 'two-sided', "use_continuity": False},
(23, 0.6377328900502)],
[{"alternative": 'less', "use_continuity": False},
(23, 0.6811335549749)],
[{"alternative": 'greater', "use_continuity": False},
(23, 0.3188664450251)]]
@pytest.mark.parametrize(("kwds", "expected"), cases_continuity)
def test_continuity(self, kwds, expected):
# When x and y are interchanged, less and greater p-values should
# swap (compare to above). This wouldn't happen if the continuity
# correction were applied in the wrong direction. Note that less and
# greater p-values do not sum to 1 when continuity correction is on,
# which is what we'd expect. Also check that results match R when
# continuity correction is turned off.
# Note that method='asymptotic' -> exact=FALSE
# and use_continuity=False -> correct=FALSE, e.g.:
# wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE)
res = mannwhitneyu(self.y, self.x, method='asymptotic', **kwds)
assert_allclose(res, expected)
def test_tie_correct(self):
# Test tie correction against R's wilcox.test
# options(digits = 16)
# x = c(1, 2, 3, 4)
# y = c(1, 2, 3, 4, 5)
# wilcox.test(x, y, exact=FALSE)
x = [1, 2, 3, 4]
y0 = np.array([1, 2, 3, 4, 5])
dy = np.array([0, 1, 0, 1, 0])*0.01
dy2 = np.array([0, 0, 1, 0, 0])*0.01
y = [y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01]
res = mannwhitneyu(x, y, axis=-1, method="asymptotic")
U_expected = [10, 9, 8.5, 8, 7.5, 7, 6]
p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439,
0.6197963884941, 0.5368784563079, 0.3912672792826]
assert_equal(res.statistic, U_expected)
assert_allclose(res.pvalue, p_expected)
# --- Test Exact Distribution of U ---
# These are tabulated values of the CDF of the exact distribution of
# the test statistic from pg 52 of reference [1] (Mann-Whitney Original)
pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6],
3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]}
pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6],
3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571],
4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]}
pm5 = {1: [0.167, 0.333, 0.5, 0.667],
2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571],
3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607],
4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143,
0.206, 0.278, 0.365, 0.452, 0.548],
5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111,
0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]}
pm6 = {1: [0.143, 0.286, 0.428, 0.571],
2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571],
3: [0.012, 0.024, 0.048, 0.083, 0.131,
0.19, 0.274, 0.357, 0.452, 0.548],
4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129,
0.176, 0.238, 0.305, 0.381, 0.457, 0.543], # the last element
# of the previous list, 0.543, has been modified from 0.545;
# I assume it was a typo
5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089,
0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535],
6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047,
0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350,
0.409, 0.469, 0.531]}
def test_exact_distribution(self):
# I considered parametrize. I decided against it.
p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6}
for n, table in p_tables.items():
for m, p in table.items():
# check p-value against table
u = np.arange(0, len(p))
assert_allclose(_mwu_state.cdf(k=u, m=m, n=n), p, atol=1e-3)
# check identity CDF + SF - PMF = 1
# ( In this implementation, SF(U) includes PMF(U) )
u2 = np.arange(0, m*n+1)
assert_allclose(_mwu_state.cdf(k=u2, m=m, n=n)
+ _mwu_state.sf(k=u2, m=m, n=n)
- _mwu_state.pmf(k=u2, m=m, n=n), 1)
# check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U)
pmf = _mwu_state.pmf(k=u2, m=m, n=n)
assert_allclose(pmf, pmf[::-1])
# check symmetry w.r.t. interchange of m, n
pmf2 = _mwu_state.pmf(k=u2, m=n, n=m)
assert_allclose(pmf, pmf2)
def test_asymptotic_behavior(self):
np.random.seed(0)
# for small samples, the asymptotic test is not very accurate
x = np.random.rand(5)
y = np.random.rand(5)
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert np.abs(res1.pvalue - res2.pvalue) > 1e-2
# for large samples, they agree reasonably well
x = np.random.rand(40)
y = np.random.rand(40)
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert np.abs(res1.pvalue - res2.pvalue) < 1e-3
# --- Test Corner Cases ---
def test_exact_U_equals_mean(self):
# Test U == m*n/2 with exact method
# Without special treatment, two-sided p-value > 1 because both
# one-sided p-values are > 0.5
res_l = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="less",
method="exact")
res_g = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="greater",
method="exact")
assert_equal(res_l.pvalue, res_g.pvalue)
assert res_l.pvalue > 0.5
res = mannwhitneyu([1, 2, 3], [1.5, 2.5], alternative="two-sided",
method="exact")
assert_equal(res, (3, 1))
# U == m*n/2 for asymptotic case tested in test_gh_2118
# The reason it's tricky for the asymptotic test has to do with
# continuity correction.
cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"},
(0, 1)],
[{"alternative": 'less', "method": "asymptotic"},
(0, 0.5)],
[{"alternative": 'greater', "method": "asymptotic"},
(0, 0.977249868052)],
[{"alternative": 'two-sided', "method": "exact"}, (0, 1)],
[{"alternative": 'less', "method": "exact"}, (0, 0.5)],
[{"alternative": 'greater', "method": "exact"}, (0, 1)]]
@pytest.mark.parametrize(("kwds", "result"), cases_scalar)
def test_scalar_data(self, kwds, result):
# just making sure scalars work
assert_allclose(mannwhitneyu(1, 2, **kwds), result)
def test_equal_scalar_data(self):
# when two scalars are equal, there is an -0.5/0 in the asymptotic
# approximation. R gives pvalue=1.0 for alternatives 'less' and
# 'greater' but NA for 'two-sided'. I don't see why, so I don't
# see a need for a special case to match that behavior.
assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1))
assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1))
# without continuity correction, this becomes 0/0, which really
# is undefined
assert_equal(mannwhitneyu(1, 1, method="asymptotic",
use_continuity=False), (0.5, np.nan))
# --- Test Enhancements / Bug Reports ---
@pytest.mark.parametrize("method", ["asymptotic", "exact"])
def test_gh_12837_11113(self, method):
# Test that behavior for broadcastable nd arrays is appropriate:
# output shape is correct and all values are equal to when the test
# is performed on one pair of samples at a time.
# Tests that gh-12837 and gh-11113 (requests for n-d input)
# are resolved
np.random.seed(0)
# arrays are broadcastable except for axis = -3
axis = -3
m, n = 7, 10 # sample sizes
x = np.random.rand(m, 3, 8)
y = np.random.rand(6, n, 1, 8) + 0.1
res = mannwhitneyu(x, y, method=method, axis=axis)
shape = (6, 3, 8) # appropriate shape of outputs, given inputs
assert(res.pvalue.shape == shape)
assert(res.statistic.shape == shape)
# move axis of test to end for simplicity
x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1)
x = x[None, ...] # give x a zeroth dimension
assert(x.ndim == y.ndim)
x = np.broadcast_to(x, shape + (m,))
y = np.broadcast_to(y, shape + (n,))
assert(x.shape[:-1] == shape)
assert(y.shape[:-1] == shape)
# loop over pairs of samples
statistics = np.zeros(shape)
pvalues = np.zeros(shape)
for indices in product(*[range(i) for i in shape]):
xi = x[indices]
yi = y[indices]
temp = mannwhitneyu(xi, yi, method=method)
statistics[indices] = temp.statistic
pvalues[indices] = temp.pvalue
np.testing.assert_equal(res.pvalue, pvalues)
np.testing.assert_equal(res.statistic, statistics)
def test_gh_11355(self):
# Test for correct behavior with NaN/Inf in input
x = [1, 2, 3, 4]
y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5]
res1 = mannwhitneyu(x, y)
# Inf is not a problem. This is a rank test, and it's the largest value
y[4] = np.inf
res2 = mannwhitneyu(x, y)
assert_equal(res1.statistic, res2.statistic)
assert_equal(res1.pvalue, res2.pvalue)
# NaNs should raise an error. No nan_policy for now.
y[4] = np.nan
with assert_raises(ValueError, match="`x` and `y` must not contain"):
mannwhitneyu(x, y)
cases_11355 = [([1, 2, 3, 4],
[3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
10, 0.1297704873477),
([1, 2, 3, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
8.5, 0.08735617507695),
([1, 2, np.inf, 4],
[3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
17.5, 0.5988856695752),
([1, 2, np.inf, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
16, 0.4687165824462),
([1, np.inf, np.inf, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
24.5, 0.7912517950119)]
@pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355)
def test_gh_11355b(self, x, y, statistic, pvalue):
# Test for correct behavior with NaN/Inf in input
res = mannwhitneyu(x, y, method='asymptotic')
assert_allclose(res.statistic, statistic, atol=1e-12)
assert_allclose(res.pvalue, pvalue, atol=1e-12)
cases_9184 = [[True, "less", "asymptotic", 0.900775348204],
[True, "greater", "asymptotic", 0.1223118025635],
[True, "two-sided", "asymptotic", 0.244623605127],
[False, "less", "asymptotic", 0.8896643190401],
[False, "greater", "asymptotic", 0.1103356809599],
[False, "two-sided", "asymptotic", 0.2206713619198],
[True, "less", "exact", 0.8967698967699],
[True, "greater", "exact", 0.1272061272061],
[True, "two-sided", "exact", 0.2544122544123]]
@pytest.mark.parametrize(("use_continuity", "alternative",
"method", "pvalue_exp"), cases_9184)
def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp):
# gh-9184 might be considered a doc-only bug. Please see the
# documentation to confirm that mannwhitneyu correctly notes
# that the output statistic is that of the first sample (x). In any
# case, check the case provided there against output from R.
# R code:
# options(digits=16)
# x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
# y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
# wilcox.test(x, y, alternative = "less", exact = FALSE)
# wilcox.test(x, y, alternative = "greater", exact = FALSE)
# wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
# wilcox.test(x, y, alternative = "less", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "greater", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "less", exact = TRUE)
# wilcox.test(x, y, alternative = "greater", exact = TRUE)
# wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
statistic_exp = 35
x = (0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
y = (1.15, 0.88, 0.90, 0.74, 1.21)
res = mannwhitneyu(x, y, use_continuity=use_continuity,
alternative=alternative, method=method)
assert_equal(res.statistic, statistic_exp)
assert_allclose(res.pvalue, pvalue_exp)
def test_gh_6897(self):
# Test for correct behavior with empty input
with assert_raises(ValueError, match="`x` and `y` must be of nonzero"):
mannwhitneyu([], [])
def test_gh_4067(self):
# Test for correct behavior with all NaN input
a = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
b = np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
with assert_raises(ValueError, match="`x` and `y` must not contain"):
mannwhitneyu(a, b)
# All cases checked against R wilcox.test, e.g.
# options(digits=16)
# x = c(1, 2, 3)
# y = c(1.5, 2.5)
# wilcox.test(x, y, exact=FALSE, alternative='less')
cases_2118 = [[[1, 2, 3], [1.5, 2.5], "greater", (3, 0.6135850036578)],
[[1, 2, 3], [1.5, 2.5], "less", (3, 0.6135850036578)],
[[1, 2, 3], [1.5, 2.5], "two-sided", (3, 1.0)],
[[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
[[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
[[1, 2, 3], [2], "two-sided", (1.5, 1)],
[[1, 2], [1, 2], "greater", (2, 0.667497228949)],
[[1, 2], [1, 2], "less", (2, 0.667497228949)],
[[1, 2], [1, 2], "two-sided", (2, 1)]]
@pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
def test_gh_2118(self, x, y, alternative, expected):
# test cases in which U == m*n/2 when method is asymptotic
# applying continuity correction could result in p-value > 1
res = mannwhitneyu(x, y, use_continuity=True, alternative=alternative,
method="asymptotic")
assert_allclose(res, expected, rtol=1e-12)
class TestSomersD:
def test_like_kendalltau(self):
# All tests correspond with one in test_stats.py `test_kendalltau`
# case without ties, con-dis equal zero
x = [5, 2, 1, 3, 6, 4, 7, 8]
y = [5, 2, 6, 3, 1, 8, 7, 4]
# Cross-check with result from SAS FREQ:
expected = (0.000000000000000, 1.000000000000000)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# case without ties, con-dis equal zero
x = [0, 5, 2, 1, 3, 6, 4, 7, 8]
y = [5, 2, 0, 6, 3, 1, 8, 7, 4]
# Cross-check with result from SAS FREQ:
expected = (0.000000000000000, 1.000000000000000)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# case without ties, con-dis close to zero
x = [5, 2, 1, 3, 6, 4, 7]
y = [5, 2, 6, 3, 1, 7, 4]
# Cross-check with result from SAS FREQ:
expected = (-0.142857142857140, 0.630326953157670)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# simple case without ties
x = np.arange(10)
y = np.arange(10)
# Cross-check with result from SAS FREQ:
# SAS p value is not provided.
expected = (1.000000000000000, 0)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# swap a couple values and a couple more
x = np.arange(10)
y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9])
# Cross-check with result from SAS FREQ:
expected = (0.911111111111110, 0.000000000000000)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# same in opposite direction
x = np.arange(10)
y = np.arange(10)[::-1]
# Cross-check with result from SAS FREQ:
# SAS p value is not provided.
expected = (-1.000000000000000, 0)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# swap a couple values and a couple more
x = np.arange(10)
y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0])
# Cross-check with result from SAS FREQ:
expected = (-0.9111111111111111, 0.000000000000000)
res = stats.somersd(x, y)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# with some ties
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
# Cross-check with result from SAS FREQ:
expected = (-0.500000000000000, 0.304901788178780)
res = stats.somersd(x1, x2)
assert_allclose(res.statistic, expected[0], atol=1e-15)
assert_allclose(res.pvalue, expected[1], atol=1e-15)
# with only ties in one or both inputs
# SAS will not produce an output for these:
# NOTE: No statistics are computed for x * y because x has fewer
# than 2 nonmissing levels.
# WARNING: No OUTPUT data set is produced for this table because a
# row or column variable has fewer than 2 nonmissing levels and no
# statistics are computed.
res = stats.somersd([2, 2, 2], [2, 2, 2])
assert_allclose(res.statistic, np.nan)
assert_allclose(res.pvalue, np.nan)
res = stats.somersd([2, 0, 2], [2, 2, 2])
assert_allclose(res.statistic, np.nan)
assert_allclose(res.pvalue, np.nan)
res = stats.somersd([2, 2, 2], [2, 0, 2])
assert_allclose(res.statistic, np.nan)
assert_allclose(res.pvalue, np.nan)
res = stats.somersd([0], [0])
assert_allclose(res.statistic, np.nan)
assert_allclose(res.pvalue, np.nan)
# empty arrays provided as input
res = stats.somersd([], [])
assert_allclose(res.statistic, np.nan)
assert_allclose(res.pvalue, np.nan)
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.somersd, x, y)
def test_asymmetry(self):
# test that somersd is asymmetric w.r.t. input order and that
# convention is as described: first input is row variable & independent
# data is from Wikipedia:
# https://en.wikipedia.org/wiki/Somers%27_D
        # but currently that example contradicts itself - it says X is
        # independent yet takes D_XY
x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2,
2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
# Cross-check with result from SAS FREQ:
d_cr = 0.272727272727270
d_rc = 0.342857142857140
p = 0.092891940883700 # same p-value for either direction
res = stats.somersd(x, y)
assert_allclose(res.statistic, d_cr, atol=1e-15)
assert_allclose(res.pvalue, p, atol=1e-4)
assert_equal(res.table.shape, (3, 2))
res = stats.somersd(y, x)
assert_allclose(res.statistic, d_rc, atol=1e-15)
assert_allclose(res.pvalue, p, atol=1e-15)
assert_equal(res.table.shape, (2, 3))
def test_somers_original(self):
# test against Somers' original paper [1]
# Table 5A
# Somers' convention was column IV
table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]])
# Our convention (and that of SAS FREQ) is row IV
table = table.T
dyx = 129/340
assert_allclose(stats.somersd(table).statistic, dyx)
# table 7A - d_yx = 1
table = np.array([[25, 0], [85, 0], [0, 30]])
dxy, dyx = 3300/5425, 3300/3300
assert_allclose(stats.somersd(table).statistic, dxy)
assert_allclose(stats.somersd(table.T).statistic, dyx)
# table 7B - d_yx < 0
table = np.array([[25, 0], [0, 30], [85, 0]])
dyx = -1800/3300
assert_allclose(stats.somersd(table.T).statistic, dyx)
def test_contingency_table_with_zero_rows_cols(self):
# test that zero rows/cols in contingency table don't affect result
N = 100
shape = 4, 6
size = np.prod(shape)
np.random.seed(0)
s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
res = stats.somersd(s)
s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0)
res2 = stats.somersd(s2)
s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1)
res3 = stats.somersd(s3)
s4 = np.insert(s2, 2, np.zeros(shape[0]+1), axis=1)
res4 = stats.somersd(s4)
# Cross-check with result from SAS FREQ:
assert_allclose(res.statistic, -0.116981132075470, atol=1e-15)
assert_allclose(res.statistic, res2.statistic)
assert_allclose(res.statistic, res3.statistic)
assert_allclose(res.statistic, res4.statistic)
assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15)
assert_allclose(res.pvalue, res2.pvalue)
assert_allclose(res.pvalue, res3.pvalue)
assert_allclose(res.pvalue, res4.pvalue)
def test_invalid_contingency_tables(self):
N = 100
shape = 4, 6
size = np.prod(shape)
np.random.seed(0)
# start with a valid contingency table
s = stats.multinomial.rvs(N, p=np.ones(size)/size).reshape(shape)
s5 = s - 2
message = "All elements of the contingency table must be non-negative"
with assert_raises(ValueError, match=message):
stats.somersd(s5)
s6 = s + 0.01
message = "All elements of the contingency table must be integer"
with assert_raises(ValueError, match=message):
stats.somersd(s6)
message = ("At least two elements of the contingency "
"table must be nonzero.")
with assert_raises(ValueError, match=message):
stats.somersd([[]])
with assert_raises(ValueError, match=message):
stats.somersd([[1]])
s7 = np.zeros((3, 3))
with assert_raises(ValueError, match=message):
stats.somersd(s7)
s7[0, 1] = 1
with assert_raises(ValueError, match=message):
stats.somersd(s7)
def test_only_ranks_matter(self):
# only ranks of input data should matter
x = [1, 2, 3]
x2 = [-1, 2.1, np.inf]
y = [3, 2, 1]
y2 = [0, -0.5, -np.inf]
res = stats.somersd(x, y)
res2 = stats.somersd(x2, y2)
assert_equal(res.statistic, res2.statistic)
assert_equal(res.pvalue, res2.pvalue)
def test_contingency_table_return(self):
# check that contingency table is returned
x = np.arange(10)
y = np.arange(10)
res = stats.somersd(x, y)
assert_equal(res.table, np.eye(10))
def test_somersd_alternative(self):
# Test alternative parameter, asymptotic method (due to tie)
# Based on scipy.stats.test_stats.TestCorrSpearman2::test_alternative
x1 = [1, 2, 3, 4, 5]
x2 = [5, 6, 7, 8, 7]
# strong positive correlation
expected = stats.somersd(x1, x2, alternative="two-sided")
assert expected.statistic > 0
# rank correlation > 0 -> large "less" p-value
res = stats.somersd(x1, x2, alternative="less")
assert_equal(res.statistic, expected.statistic)
assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))
# rank correlation > 0 -> small "greater" p-value
res = stats.somersd(x1, x2, alternative="greater")
assert_equal(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue / 2)
# reverse the direction of rank correlation
x2.reverse()
# strong negative correlation
expected = stats.somersd(x1, x2, alternative="two-sided")
assert expected.statistic < 0
# rank correlation < 0 -> large "greater" p-value
res = stats.somersd(x1, x2, alternative="greater")
assert_equal(res.statistic, expected.statistic)
assert_allclose(res.pvalue, 1 - (expected.pvalue / 2))
# rank correlation < 0 -> small "less" p-value
res = stats.somersd(x1, x2, alternative="less")
assert_equal(res.statistic, expected.statistic)
assert_allclose(res.pvalue, expected.pvalue / 2)
with pytest.raises(ValueError, match="alternative must be 'less'..."):
stats.somersd(x1, x2, alternative="ekki-ekki")
@pytest.mark.parametrize("positive_correlation", (False, True))
def test_somersd_perfect_correlation(self, positive_correlation):
# Before the addition of `alternative`, perfect correlation was
# treated as a special case. Now it is treated like any other case, but
# make sure there are no divide by zero warnings or associated errors
x1 = np.arange(10)
x2 = x1 if positive_correlation else np.flip(x1)
expected_statistic = 1 if positive_correlation else -1
# perfect correlation -> small "two-sided" p-value (0)
res = stats.somersd(x1, x2, alternative="two-sided")
assert res.statistic == expected_statistic
assert res.pvalue == 0
# rank correlation > 0 -> large "less" p-value (1)
res = stats.somersd(x1, x2, alternative="less")
assert res.statistic == expected_statistic
assert res.pvalue == (1 if positive_correlation else 0)
# rank correlation > 0 -> small "greater" p-value (0)
res = stats.somersd(x1, x2, alternative="greater")
assert res.statistic == expected_statistic
assert res.pvalue == (0 if positive_correlation else 1)
class TestBarnardExact:
"""Some tests to show that barnard_exact() works correctly."""
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (3.555406779643, 0.000362832367)),
([[100, 2], [1000, 5]], (-1.776382925679, 0.135126970878)),
([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
([[5, 1], [10, 10]], (1.449486150679, 0.156277546306)),
([[5, 15], [20, 20]], (-1.851640199545, 0.066363501421)),
([[5, 16], [20, 25]], (-1.609639949352, 0.116984852192)),
([[10, 5], [10, 1]], (-1.449486150679, 0.177536588915)),
([[5, 0], [1, 4]], (2.581988897472, 0.013671875000)),
([[0, 1], [3, 2]], (-1.095445115010, 0.509667991877)),
([[0, 2], [6, 4]], (-1.549193338483, 0.197019618792)),
([[2, 7], [8, 2]], (-2.518474945157, 0.019210815430)),
],
)
def test_precise(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
barnard.test(43, 40, 10, 39, dp=1e-6, pooled=TRUE)
```
"""
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected)
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (3.920362887717, 0.000289470662)),
([[100, 2], [1000, 5]], (-1.139432816087, 0.950272080594)),
([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
([[5, 1], [10, 10]], (1.622375939458, 0.150599922226)),
([[5, 15], [20, 20]], (-1.974771239528, 0.063038448651)),
([[5, 16], [20, 25]], (-1.722122973346, 0.133329494287)),
([[10, 5], [10, 1]], (-1.765469659009, 0.250566655215)),
([[5, 0], [1, 4]], (5.477225575052, 0.007812500000)),
([[0, 1], [3, 2]], (-1.224744871392, 0.509667991877)),
([[0, 2], [6, 4]], (-1.732050807569, 0.197019618792)),
([[2, 7], [8, 2]], (-3.079373904042, 0.020172119141)),
],
)
def test_pooled_param(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
barnard.test(43, 40, 10, 39, dp=1e-6, pooled=FALSE)
```
"""
res = barnard_exact(input_sample, pooled=False)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected)
def test_raises(self):
        # test that we raise an error for an invalid number of nuisance points.
error_msg = (
"Number of points `n` must be strictly positive, found 0"
)
with assert_raises(ValueError, match=error_msg):
barnard_exact([[1, 2], [3, 4]], n=0)
# test we raise an error for wrong shape of input.
error_msg = "The input `table` must be of shape \\(2, 2\\)."
with assert_raises(ValueError, match=error_msg):
barnard_exact(np.arange(6).reshape(2, 3))
        # Test that all values must be nonnegative
error_msg = "All values in `table` must be nonnegative."
with assert_raises(ValueError, match=error_msg):
barnard_exact([[-1, 2], [3, 4]])
# Test value error on wrong alternative param
error_msg = (
"`alternative` should be one of {'two-sided', 'less', 'greater'},"
" found .*"
)
with assert_raises(ValueError, match=error_msg):
barnard_exact([[1, 2], [3, 4]], "not-correct")
@pytest.mark.parametrize(
"input_sample,expected",
[
([[0, 0], [4, 3]], (1.0, 0)),
],
)
def test_edge_cases(self, input_sample, expected):
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_equal(pvalue, expected[0])
assert_equal(statistic, expected[1])
@pytest.mark.parametrize(
"input_sample,expected",
[
([[0, 5], [0, 10]], (1.0, np.nan)),
([[5, 0], [10, 0]], (1.0, np.nan)),
],
)
def test_row_or_col_zero(self, input_sample, expected):
res = barnard_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_equal(pvalue, expected[0])
assert_equal(statistic, expected[1])
@pytest.mark.parametrize(
"input_sample,expected",
[
([[2, 7], [8, 2]], (-2.518474945157, 0.009886140845)),
([[7, 200], [300, 8]], (-21.320036698460, 0.0)),
([[21, 28], [1957, 6]], (-30.489638143953, 0.0)),
],
)
@pytest.mark.parametrize("alternative", ["greater", "less"])
def test_less_greater(self, input_sample, expected, alternative):
"""
"The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-6 :
```R
library(Barnard)
options(digits=10)
a = barnard.test(2, 7, 8, 2, dp=1e-6, pooled=TRUE)
a$p.value[1]
```
In this test, we are using the "one-sided" return value `a$p.value[1]`
to test our pvalue.
"""
expected_stat, less_pvalue_expect = expected
if alternative == "greater":
input_sample = np.array(input_sample)[:, ::-1]
expected_stat = -expected_stat
res = barnard_exact(input_sample, alternative=alternative)
statistic, pvalue = res.statistic, res.pvalue
assert_allclose(
[statistic, pvalue], [expected_stat, less_pvalue_expect], atol=1e-7
)
class TestBoschlooExact:
"""Some tests to show that boschloo_exact() works correctly."""
ATOL = 1e-7
@pytest.mark.parametrize(
"input_sample,expected",
[
([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
([[5, 1], [10, 10]], (0.9782609, 0.9450994)),
([[5, 16], [20, 25]], (0.08913823, 0.05827348)),
([[10, 5], [10, 1]], (0.1652174, 0.08565611)),
([[5, 0], [1, 4]], (1, 1)),
([[0, 1], [3, 2]], (0.5, 0.34375)),
([[2, 7], [8, 2]], (0.01852173, 0.009886142)),
([[7, 12], [8, 3]], (0.06406797, 0.03410916)),
([[10, 24], [25, 37]], (0.2009359, 0.1512882)),
],
)
def test_less(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-8 :
```R
library(Exact)
options(digits=10)
data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
a = exact.test(data, method="Boschloo", alternative="less",
tsmethod="central", np.interval=TRUE, beta=1e-8)
```
"""
res = boschloo_exact(input_sample, alternative="less")
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (0.0002875544, 0.0001615562)),
([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
([[5, 1], [10, 10]], (0.1652174, 0.09008534)),
([[5, 15], [20, 20]], (0.9849087, 0.9706997)),
([[5, 16], [20, 25]], (0.972349, 0.9524124)),
([[5, 0], [1, 4]], (0.02380952, 0.006865367)),
([[0, 1], [3, 2]], (1, 1)),
([[0, 2], [6, 4]], (1, 1)),
([[2, 7], [8, 2]], (0.9990149, 0.9918327)),
([[7, 12], [8, 3]], (0.9895302, 0.9771215)),
([[10, 24], [25, 37]], (0.9012936, 0.8633275)),
],
)
def test_greater(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-8 :
```R
library(Exact)
options(digits=10)
data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
a = exact.test(data, method="Boschloo", alternative="greater",
tsmethod="central", np.interval=TRUE, beta=1e-8)
```
"""
res = boschloo_exact(input_sample, alternative="greater")
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
@pytest.mark.parametrize(
"input_sample,expected",
[
([[43, 40], [10, 39]], (0.0002875544, 0.0003231115)),
([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
([[5, 1], [10, 10]], (0.1652174, 0.1801707)),
([[5, 16], [20, 25]], (0.08913823, 0.116547)),
([[5, 0], [1, 4]], (0.02380952, 0.01373073)),
([[0, 1], [3, 2]], (0.5, 0.6875)),
([[2, 7], [8, 2]], (0.01852173, 0.01977228)),
([[7, 12], [8, 3]], (0.06406797, 0.06821831)),
],
)
def test_two_sided(self, input_sample, expected):
"""The expected values have been generated by R, using a resolution
for the nuisance parameter of 1e-8 :
```R
library(Exact)
options(digits=10)
data <- matrix(c(43, 10, 40, 39), 2, 2, byrow=TRUE)
a = exact.test(data, method="Boschloo", alternative="two.sided",
tsmethod="central", np.interval=TRUE, beta=1e-8)
```
"""
res = boschloo_exact(input_sample, alternative="two-sided", n=64)
# Need n = 64 for python 32-bit
statistic, pvalue = res.statistic, res.pvalue
assert_allclose([statistic, pvalue], expected, atol=self.ATOL)
def test_raises(self):
        # test that we raise an error for an invalid number of nuisance points.
error_msg = (
"Number of points `n` must be strictly positive, found 0"
)
with assert_raises(ValueError, match=error_msg):
boschloo_exact([[1, 2], [3, 4]], n=0)
# test we raise an error for wrong shape of input.
error_msg = "The input `table` must be of shape \\(2, 2\\)."
with assert_raises(ValueError, match=error_msg):
boschloo_exact(np.arange(6).reshape(2, 3))
        # Test that all values must be nonnegative
error_msg = "All values in `table` must be nonnegative."
with assert_raises(ValueError, match=error_msg):
boschloo_exact([[-1, 2], [3, 4]])
# Test value error on wrong alternative param
error_msg = (
r"`alternative` should be one of \('two-sided', 'less', "
r"'greater'\), found .*"
)
with assert_raises(ValueError, match=error_msg):
boschloo_exact([[1, 2], [3, 4]], "not-correct")
@pytest.mark.parametrize(
"input_sample,expected",
[
([[0, 5], [0, 10]], (np.nan, np.nan)),
([[5, 0], [10, 0]], (np.nan, np.nan)),
],
)
def test_row_or_col_zero(self, input_sample, expected):
res = boschloo_exact(input_sample)
statistic, pvalue = res.statistic, res.pvalue
assert_equal(pvalue, expected[0])
assert_equal(statistic, expected[1])
class TestCvm_2samp:
def test_invalid_input(self):
x = np.arange(10).reshape((2, 5))
y = np.arange(5)
msg = 'The samples must be one-dimensional'
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp(x, y)
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp(y, x)
msg = 'x and y must contain at least two observations.'
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp([], y)
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp(y, [1])
msg = 'method must be either auto, exact or asymptotic'
with pytest.raises(ValueError, match=msg):
cramervonmises_2samp(y, y, 'xyz')
def test_list_input(self):
x = [2, 3, 4, 7, 6]
y = [0.2, 0.7, 12, 18]
r1 = cramervonmises_2samp(x, y)
r2 = cramervonmises_2samp(np.array(x), np.array(y))
assert_equal((r1.statistic, r1.pvalue), (r2.statistic, r2.pvalue))
def test_example_conover(self):
# Example 2 in Section 6.2 of W.J. Conover: Practical Nonparametric
# Statistics, 1971.
x = [7.6, 8.4, 8.6, 8.7, 9.3, 9.9, 10.1, 10.6, 11.2]
y = [5.2, 5.7, 5.9, 6.5, 6.8, 8.2, 9.1, 9.8, 10.8, 11.3, 11.5, 12.3,
12.5, 13.4, 14.6]
r = cramervonmises_2samp(x, y)
assert_allclose(r.statistic, 0.262, atol=1e-3)
assert_allclose(r.pvalue, 0.18, atol=1e-2)
@pytest.mark.parametrize('statistic, m, n, pval',
[(710, 5, 6, 48./462),
(1897, 7, 7, 117./1716),
(576, 4, 6, 2./210),
(1764, 6, 7, 2./1716)])
def test_exact_pvalue(self, statistic, m, n, pval):
# the exact values are taken from Anderson: On the distribution of the
# two-sample Cramer-von-Mises criterion, 1962.
# The values are taken from Table 2, 3, 4 and 5
assert_equal(_pval_cvm_2samp_exact(statistic, m, n), pval)
def test_large_sample(self):
# for large samples, the statistic U gets very large
# do a sanity check that p-value is not 0, 1 or nan
np.random.seed(4367)
x = distributions.norm.rvs(size=1000000)
y = distributions.norm.rvs(size=900000)
r = cramervonmises_2samp(x, y)
assert_(0 < r.pvalue < 1)
r = cramervonmises_2samp(x, y+0.1)
assert_(0 < r.pvalue < 1)
def test_exact_vs_asymptotic(self):
np.random.seed(0)
x = np.random.rand(7)
y = np.random.rand(8)
r1 = cramervonmises_2samp(x, y, method='exact')
r2 = cramervonmises_2samp(x, y, method='asymptotic')
assert_equal(r1.statistic, r2.statistic)
assert_allclose(r1.pvalue, r2.pvalue, atol=1e-2)
def test_method_auto(self):
x = np.arange(10)
y = [0.5, 4.7, 13.1]
r1 = cramervonmises_2samp(x, y, method='exact')
r2 = cramervonmises_2samp(x, y, method='auto')
assert_equal(r1.pvalue, r2.pvalue)
# switch to asymptotic if one sample has more than 10 observations
x = np.arange(11)
r1 = cramervonmises_2samp(x, y, method='asymptotic')
r2 = cramervonmises_2samp(x, y, method='auto')
assert_equal(r1.pvalue, r2.pvalue)
def test_same_input(self):
# make sure trivial edge case can be handled
# note that _cdf_cvm_inf(0) = nan. implementation avoids nan by
# returning pvalue=1 for very small values of the statistic
x = np.arange(15)
res = cramervonmises_2samp(x, x)
assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
# check exact p-value
res = cramervonmises_2samp(x[:4], x[:4])
assert_equal((res.statistic, res.pvalue), (0.0, 1.0))
| 42.540123
| 79
| 0.563085
|
004d6dbb27ae1d41434587989afce92811f1f943
| 798
|
py
|
Python
|
PHASEfilter/lib/constants/version.py
|
ibigen/PHASEfilter
|
669729f408b9c23d5db2ba72e74195b2228669da
|
[
"MIT"
] | null | null | null |
PHASEfilter/lib/constants/version.py
|
ibigen/PHASEfilter
|
669729f408b9c23d5db2ba72e74195b2228669da
|
[
"MIT"
] | null | null | null |
PHASEfilter/lib/constants/version.py
|
ibigen/PHASEfilter
|
669729f408b9c23d5db2ba72e74195b2228669da
|
[
"MIT"
] | null | null | null |
"""Version of PHASEfilter"""
# MAJOR.MINOR.MAINTENANCE numbering scheme, where the project author increments:
# MAJOR version when they make incompatible API changes,
# MINOR version when they add functionality in a backwards-compatible manner, and
# MAINTENANCE version when they make backwards-compatible bug fixes.
# 1.2.0.dev1 # Development release
# 1.2.0a1 # Alpha Release
# 1.2.0b1 # Beta Release
# 1.2.0rc1 # Release Candidate
# 1.2.0 # Final Release
# 1.2.0.post1 # Post Release
# 15.10 # Date based release
# 23 # Serial release
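# A hedged illustration of how the example forms above sort under PEP 440
# (uses the third-party `packaging` library, which this project does not
# necessarily depend on):
#   from packaging.version import Version
#   assert (Version("1.2.0.dev1") < Version("1.2.0a1") < Version("1.2.0b1")
#           < Version("1.2.0rc1") < Version("1.2.0") < Version("1.2.0.post1"))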
VERSION_package = "0.3.7" ### For pypi
VERSION_make_alignement = "0.3.3" ### version of this script
VERSION_phase_filter = "0.3.4"
VERSION_reference_statistics = "0.3.3"
VERSION_synchronize_genomes = "0.3.5"
| 36.272727
| 81
| 0.701754
|
b5a0305d06bc565cb667f83904986bca004f1a25
| 1,962
|
py
|
Python
|
tenth/tenth/apps/gathering/migrations/0001_initial.py
|
TanDeemo/Tenth
|
52f721d4433edfa336e989e6eeedd288d4e38674
|
[
"MIT"
] | null | null | null |
tenth/tenth/apps/gathering/migrations/0001_initial.py
|
TanDeemo/Tenth
|
52f721d4433edfa336e989e6eeedd288d4e38674
|
[
"MIT"
] | null | null | null |
tenth/tenth/apps/gathering/migrations/0001_initial.py
|
TanDeemo/Tenth
|
52f721d4433edfa336e989e6eeedd288d4e38674
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2020-08-10 21:30
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Gathering',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default=None, max_length=100, null=True, verbose_name='活动名称')),
('summary', models.TextField(default=None, null=True, verbose_name='活动简介')),
('detail', ckeditor_uploader.fields.RichTextUploadingField(default='', verbose_name='详细介绍')),
('address', models.CharField(default=None, max_length=100, null=True, verbose_name='举办地点')),
('sponsor', models.CharField(default=None, max_length=100, null=True, verbose_name='主办方')),
('image', models.ImageField(default=None, null=True, upload_to='', verbose_name='活动图片')),
('city', models.CharField(default=None, max_length=100, null=True, verbose_name='举办城市')),
('state', models.SmallIntegerField(choices=[(0, '不可见'), (1, '可见')], default=1, verbose_name='是否可见')),
('starttime', models.DateTimeField(null=True, verbose_name='开始时间')),
('endtime', models.DateTimeField(null=True, verbose_name='截止日期')),
('endrolltime', models.DateTimeField(null=True, verbose_name='报名截止日期')),
('users', models.ManyToManyField(related_name='gathers', to=settings.AUTH_USER_MODEL, verbose_name='参加者')),
],
options={
'verbose_name': '活动',
'verbose_name_plural': '活动',
'db_table': 'tb_gathering',
},
),
]
| 47.853659
| 123
| 0.611621
|
baadc73f57104c95c040ccdf6307a241c9a0cd0c
| 1,158
|
py
|
Python
|
python/__init__.py
|
HavoK-at/gr-shared_memory
|
e5ba535e5346c9806f052bed9038868da1bb8875
|
[
"MIT"
] | null | null | null |
python/__init__.py
|
HavoK-at/gr-shared_memory
|
e5ba535e5346c9806f052bed9038868da1bb8875
|
[
"MIT"
] | null | null | null |
python/__init__.py
|
HavoK-at/gr-shared_memory
|
e5ba535e5346c9806f052bed9038868da1bb8875
|
[
"MIT"
] | null | null | null |
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio SHARED_MEMORY module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the shared_memory namespace
try:
# this might fail if the module is python-only
from shared_memory_swig import *
except ImportError:
pass
# import any pure python here
#
| 33.085714
| 74
| 0.771157
|
739edbfdbcf4b4710f65751e1c7135cb51281432
| 17,156
|
py
|
Python
|
keras/legacy_tf_layers/normalization.py
|
shantanusharma/keras
|
662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938
|
[
"Apache-2.0"
] | 3
|
2021-03-15T05:32:36.000Z
|
2021-12-14T07:29:53.000Z
|
keras/legacy_tf_layers/normalization.py
|
bluemap19/keras
|
662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938
|
[
"Apache-2.0"
] | 1
|
2021-03-01T13:50:34.000Z
|
2021-03-01T13:50:34.000Z
|
keras/legacy_tf_layers/normalization.py
|
bluemap19/keras
|
662f6c5bb82b54d90ec8e863ac7a44c3b8c1b938
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import warnings
from keras.layers import normalization as keras_normalization
from keras.legacy_tf_layers import base
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.BatchNormalization'])
class BatchNormalization(keras_normalization.BatchNormalization, base.Layer):
"""Batch Normalization layer from (Ioffe et al., 2015).
Keras APIs handle BatchNormalization updates to the moving_mean and
moving_variance as part of their `fit()` and `evaluate()` loops. However, if a
custom training loop is used with an instance of `Model`, these updates need
to be explicitly included. Here's a simple example of how it can be done:
```python
# model is an instance of Model that contains BatchNormalization layer.
update_ops = model.get_updates_for(None) + model.get_updates_for(features)
train_op = optimizer.minimize(loss)
train_op = tf.group([train_op, update_ops])
```
Args:
axis: An `int` or list of `int`, the axis or axes that should be normalized,
typically the features axis/axes. For instance, after a `Conv2D` layer
with `data_format="channels_first"`, set `axis=1`. If a list of axes is
provided, each axis in `axis` will be normalized
simultaneously. Default is `-1` which uses the last axis. Note: when
using multi-axis batch norm, the `beta`, `gamma`, `moving_mean`, and
`moving_variance` variables are the same rank as the input Tensor,
      with dimension size 1 in all reduced (non-axis) dimensions.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
next layer is linear (also e.g. `nn.relu`), this can be disabled since the
scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected variable and must return the projected
variable (which must have the same shape). Constraints are not safe to use
when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra
variables during training. The inference is the same for either value of
this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction `(r,
d)` is used as `corrected_value = normalized_value * r + d`, with `r`
clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training and
should be neither too small (which would add noise) nor too large (which
would give stale estimates). Note that `momentum` is still applied to get
the means and variances for inference.
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized
value by up to 7% up or down, then shift the result by up to 0.1
(with independent scaling and bias for each feature but shared
across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
name: A string, the name of the layer.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing
Internal Covariate Shift:
[Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
Batch Renormalization - Towards Reducing Minibatch Dependence in
Batch-Normalized Models:
[Ioffe,
2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models)
([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf))
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=tf.compat.v1.zeros_initializer(),
gamma_initializer=tf.compat.v1.ones_initializer(),
moving_mean_initializer=tf.compat.v1.zeros_initializer(),
moving_variance_initializer=tf.compat.v1.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
trainable=True,
virtual_batch_size=None,
adjustment=None,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
**kwargs)
def call(self, inputs, training=False):
return super(BatchNormalization, self).call(inputs, training=training)
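For context, a minimal graph-mode usage sketch of this class; it assumes TF1-style execution via `tf.compat.v1` and an illustrative input shape, and is not taken from the Keras test suite. It mirrors the update-ops pattern described in the class docstring above.

```python
# Hedged sketch: build the layer in a TF1 graph and collect its moving-statistics
# update ops from the UPDATE_OPS collection (shapes are illustrative only).
import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()
x = tf1.placeholder(tf1.float32, [None, 32])                 # illustrative input
bn = tf1.layers.BatchNormalization(axis=-1, momentum=0.99)   # the class defined above
y = bn(x, training=True)
update_ops = tf1.get_collection(tf1.GraphKeys.UPDATE_OPS)    # moving mean/variance updates
```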
@tf_export(v1=['layers.batch_normalization'])
def batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=tf.compat.v1.zeros_initializer(),
gamma_initializer=tf.compat.v1.ones_initializer(),
moving_mean_initializer=tf.compat.v1.zeros_initializer(),
moving_variance_initializer=tf.compat.v1.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
training=False,
trainable=True,
name=None,
reuse=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None,
virtual_batch_size=None,
adjustment=None):
"""Functional interface for the batch normalization layer from_config(Ioffe et al., 2015).
Note: when training, the moving_mean and moving_variance need to be updated.
By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
need to be executed alongside the `train_op`. Also, be sure to add any
batch_normalization ops before getting the update_ops collection. Otherwise,
update_ops will be empty, and training/inference will not work properly. For
example:
```python
x_norm = tf.compat.v1.layers.batch_normalization(x, training=training)
# ...
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(loss)
train_op = tf.group([train_op, update_ops])
```
Args:
inputs: Tensor input.
axis: An `int`, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is not used. When the
next layer is linear (also e.g. `nn.relu`), this can be disabled since the
scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected variable and must return the projected
variable (which must have the same shape). Constraints are not safe to use
when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics). **NOTE**: make sure to set this
parameter correctly, or else your training/inference will not work
properly.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer by the same
name.
renorm: Whether to use Batch Renormalization (Ioffe, 2017). This adds extra
variables during training. The inference is the same for either value of
this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction `(r,
d)` is used as `corrected_value = normalized_value * r + d`, with `r`
clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training and
should be neither too small (which would add noise) nor too large (which
would give stale estimates). Note that `momentum` is still applied to get
the means and variances for inference.
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))` will scale the normalized
value by up to 7% up or down, then shift the result by up to 0.1
(with independent scaling and bias for each feature but shared
across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
References:
Batch Normalization - Accelerating Deep Network Training by Reducing
Internal Covariate Shift:
[Ioffe et al., 2015](http://proceedings.mlr.press/v37/ioffe15.html)
([pdf](http://proceedings.mlr.press/v37/ioffe15.pdf))
Batch Renormalization - Towards Reducing Minibatch Dependence in
Batch-Normalized Models:
[Ioffe,
2017](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models)
([pdf](http://papers.nips.cc/paper/6790-batch-renormalization-towards-reducing-minibatch-dependence-in-batch-normalized-models.pdf))
"""
warnings.warn(
'`tf.layers.batch_normalization` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.BatchNormalization` instead. '
'In particular, `tf.control_dependencies(tf.GraphKeys.UPDATE_OPS)` '
'should not be used (consult the `tf.keras.layers.BatchNormalization` '
'documentation).')
layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
beta_constraint=beta_constraint,
gamma_constraint=gamma_constraint,
renorm=renorm,
renorm_clipping=renorm_clipping,
renorm_momentum=renorm_momentum,
fused=fused,
trainable=trainable,
virtual_batch_size=virtual_batch_size,
adjustment=adjustment,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
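As the deprecation warning above recommends, new code should use the Keras layer directly. A brief TF2 sketch with illustrative shapes (not part of this module):

```python
# Hedged sketch of the recommended replacement.
import tensorflow as tf

layer = tf.keras.layers.BatchNormalization(momentum=0.99, epsilon=1e-3)
x = tf.random.normal([8, 32])                # illustrative batch
y_train = layer(x, training=True)            # updates moving_mean / moving_variance
y_infer = layer(x, training=False)           # normalizes with the moving statistics
```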
| 49.583815
| 138
| 0.688855
|
3c4e6e7785af14bf6431137a8efb29e654b65cbf
| 1,403
|
py
|
Python
|
show_weather.py
|
AMARTYA2020/Weather_Forecast
|
019c073c9801fe30c07f2e012e8f8d0e3fca2812
|
[
"MIT"
] | 2
|
2021-02-03T10:39:18.000Z
|
2021-02-18T12:56:06.000Z
|
show_weather.py
|
AMARTYA2020/Weather_Forecast
|
019c073c9801fe30c07f2e012e8f8d0e3fca2812
|
[
"MIT"
] | null | null | null |
show_weather.py
|
AMARTYA2020/Weather_Forecast
|
019c073c9801fe30c07f2e012e8f8d0e3fca2812
|
[
"MIT"
] | 1
|
2021-02-07T07:20:36.000Z
|
2021-02-07T07:20:36.000Z
|
import requests
import json
from datetime import datetime
api_key = 'cbb6b8a7a93022bfe48120a12e239d34'
city_name = input('\nEnter the city name : ').strip()
# api.openweathermap.org/data/2.5/weather?q={city name}&appid={API key}
complete_api_link = 'https://api.openweathermap.org/data/2.5/weather?q=' + city_name + '&appid=' + api_key
api_link = requests.get(complete_api_link)
api_data = api_link.json()
if api_data['cod'] == '404':
print(api_data['message'].capitalize())
a = input()
else:
weather_desc = api_data['weather'][0]['description']
temp_city = float(api_data['main']['temp']) - 273.15
humid = api_data['main']['humidity']
wind_spd = api_data['wind']['speed']
city_name = api_data['name']
city_id = api_data['id']
date_time = datetime.now().strftime("%d-%b-%Y | %I:%M:%S %p")
print('\n------------------------------------------------------------------------------------')
print('Weather stats for -> {} | City-id : {} | [{}]'.format(city_name, city_id, date_time))
print('------------------------------------------------------------------------------------\n')
    print('Current Temperature : {:.2f} deg Celsius'.format(temp_city))
    print('Weather Description : {}'.format(weather_desc))
    print('Wind Speed : {} m/s'.format(wind_spd))  # OpenWeatherMap reports wind speed in metres per second
print('Humidity : {} %\n'.format(humid))
b = input()
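A hedged variant of the request above: OpenWeatherMap also accepts a `units=metric` query parameter, which returns temperatures in Celsius and keeps wind speed in metres per second, so the manual Kelvin conversion is not needed. The city and API key below are placeholders.

```python
# Sketch only: same endpoint, metric units requested up front.
import requests

def fetch_weather_metric(city, api_key):
    """Return the current-weather JSON with temperatures already in Celsius."""
    url = 'https://api.openweathermap.org/data/2.5/weather'
    resp = requests.get(url, params={'q': city, 'appid': api_key, 'units': 'metric'})
    return resp.json()

# data = fetch_weather_metric('London', '<your-api-key>')  # placeholders
# data['main']['temp'] -> deg Celsius, data['wind']['speed'] -> metres per second
```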
| 40.085714
| 106
| 0.560941
|
438c0a61a62db2cc6815863a317abed4409c2025
| 589
|
py
|
Python
|
core/poc/struts2/make.py
|
YLingChuan/Wrath-Hemera
|
a5143f136107bd9f49e265b1e351585e07a02ad4
|
[
"MIT"
] | 1
|
2021-03-26T13:54:48.000Z
|
2021-03-26T13:54:48.000Z
|
core/poc/struts2/make.py
|
YLingChuan/Wrath-Hemera
|
a5143f136107bd9f49e265b1e351585e07a02ad4
|
[
"MIT"
] | null | null | null |
core/poc/struts2/make.py
|
YLingChuan/Wrath-Hemera
|
a5143f136107bd9f49e265b1e351585e07a02ad4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
#PATH
path = os.getcwd()
syscache = (path+'/cache')
struts2 = (path+'/core/poc/struts2')
os.system("cp %s/standard %s/final_execute"%(struts2,struts2))
with open(syscache + '/url.log', 'r') as f:
    target = f.read().strip()  # close the file and drop the trailing newline so it cannot break the sed commands below
print("[+] Target:",target)
port = input("[+] Please input port:")
print("[+] Port:",port)
os.system("sed -i 's/<url>/%s/g' %s/final_execute"%(target,struts2))
os.system("sed -i 's/<port>/%s/g' %s/final_execute"%(port,struts2))
os.system("sh %s/final_execute"%(struts2))
os.system("rm -rf %s/final_execute"%(struts2))
| 21.814815
| 68
| 0.646859
|
b29babe06c294b866763164ce25e9ce707157525
| 3,123
|
py
|
Python
|
model/cpn/ablation_study/pcontext.cpn.R101_v1c/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
model/cpn/ablation_study/pcontext.cpn.R101_v1c/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | 1
|
2021-06-08T20:36:43.000Z
|
2021-06-08T20:36:43.000Z
|
model/cpn/ablation_study/pcontext.cpn.R101_v1c/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
import time
import numpy as np
from easydict import EasyDict as edict
import argparse
import torch.utils.model_zoo as model_zoo
C = edict()
config = C
cfg = C
C.seed = 304
"""please config ROOT_dir and user when u first using"""
C.repo_name = 'TorchSeg'
C.abs_dir = osp.realpath(".")
C.this_dir = C.abs_dir.split(osp.sep)[-1]
C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
C.log_dir_link = osp.join(C.abs_dir, 'log')
C.snapshot_dir = osp.abspath(osp.join(C.log_dir, "snapshot"))
exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
C.log_file = C.log_dir + '/log_' + exp_time + '.log'
C.link_log_file = C.log_file + '/log_last.log'
C.val_log_file = C.log_dir + '/val_' + exp_time + '.log'
C.link_val_log_file = C.log_dir + '/val_last.log'
"""Data Dir and Weight Dir"""
C.img_root_folder = "/unsullied/sharefs/yuchangqian/Storage/Datasets/PASCAL-Context/"
C.gt_root_folder = "/unsullied/sharefs/yuchangqian/Storage/Datasets/PASCAL-Context/"
C.train_source = "/unsullied/sharefs/yuchangqian/Storage/Datasets/PASCAL-Context/config/train.txt"
C.eval_source = "/unsullied/sharefs/yuchangqian/Storage/Datasets/PASCAL-Context/config/val.txt"
# C.test_source = "/unsullied/sharefs/yuchangqian/Storage/Datasets/VOC2012_AUG/config/voc12_test.txt"
C.is_test = False
"""Path Config"""
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
add_path(osp.join(C.root_dir, 'furnace'))
from utils.pyt_utils import model_urls
"""Image Config"""
C.num_classes = 59
C.background = -1
C.image_mean = np.array([0.485, 0.456, 0.406]) # 0.485, 0.456, 0.406
C.image_std = np.array([0.229, 0.224, 0.225])
C.target_size = 512
C.image_height = 512
C.image_width = 512
C.num_train_imgs = 4998
C.num_eval_imgs = 5105
""" Settings for network, this would be different for each kind of model"""
C.fix_bias = True
C.fix_bn = False
C.sync_bn = True
C.bn_eps = 1e-5
C.bn_momentum = 0.1
C.loss_weight = None
C.pretrained_model = "/unsullied/sharefs/yuchangqian/Storage/model_zoo/pytorch_model/resnet101_v1c.pth"
"""Train Config"""
C.lr = 1e-2
C.lr_power = 0.9
C.momentum = 0.9
C.weight_decay = 1e-4
C.batch_size = 16 # 4 * C.num_gpu
C.nepochs = 80
C.niters_per_epoch = int(np.ceil(C.num_train_imgs / C.batch_size))  # true division so the ceiling can round up a partial batch
C.num_workers = 16
C.train_scale_array = [0.5, 0.75, 1, 1.5, 1.75, 2]
"""Eval Config"""
C.eval_iter = 30
C.eval_stride_rate = 2 / 3
C.eval_scale_array = [0.75, 1, 1.5, 1.75]
C.eval_flip = True
C.eval_base_size = 512
C.eval_crop_size = 512
"""Display Config"""
C.snapshot_iter = 1
C.record_info_iter = 20
C.display_iter = 50
def open_tensorboard():
pass
if __name__ == '__main__':
    print(config.nepochs)  # the config defines 'nepochs'; there is no 'epoch_num' attribute
parser = argparse.ArgumentParser()
parser.add_argument(
'-tb', '--tensorboard', default=False, action='store_true')
args = parser.parse_args()
if args.tensorboard:
open_tensorboard()
| 27.156522
| 103
| 0.722062
|
547661a734a80d0bba2de12eb0e0e682f930e6e0
| 1,083
|
py
|
Python
|
Awwards/urls.py
|
collinsbett29/Awwards
|
85fca973fccd5a84a4c8da6feaab8dd4452c1103
|
[
"MIT"
] | null | null | null |
Awwards/urls.py
|
collinsbett29/Awwards
|
85fca973fccd5a84a4c8da6feaab8dd4452c1103
|
[
"MIT"
] | 7
|
2020-02-12T03:20:53.000Z
|
2022-03-12T00:06:11.000Z
|
Awwards/urls.py
|
collinsbett29/Awwards
|
85fca973fccd5a84a4c8da6feaab8dd4452c1103
|
[
"MIT"
] | null | null | null |
"""awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('sites.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^api-token-auth/', obtain_auth_token)
]
| 41.653846
| 79
| 0.704524
|
4ce13aa9008e7d7b813e16b649e29852f0072ac5
| 387
|
py
|
Python
|
allauth/urls.py
|
Cairnica/django-allauth
|
43ddfc81f7fd06fc6502d425bf78833d5dbf49d0
|
[
"MIT"
] | null | null | null |
allauth/urls.py
|
Cairnica/django-allauth
|
43ddfc81f7fd06fc6502d425bf78833d5dbf49d0
|
[
"MIT"
] | null | null | null |
allauth/urls.py
|
Cairnica/django-allauth
|
43ddfc81f7fd06fc6502d425bf78833d5dbf49d0
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include, url
from allauth.socialaccount import providers
from . import app_settings
urlpatterns = [url(r'^', include('allauth.account.urls'))]
if app_settings.SOCIALACCOUNT_ENABLED:
urlpatterns += [url(r'^social/', include('allauth.socialaccount.urls'))]
for provider in providers.registry.get_list():
urlpatterns += provider.get_urlpatterns()
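For reference, a hedged sketch of how a project would typically mount these patterns from its root URLconf; the `accounts/` prefix is the conventional choice, not a requirement.

```python
# Project-level urls.py sketch (old-style url() to match the module above).
from django.conf.urls import include, url

urlpatterns = [
    url(r'^accounts/', include('allauth.urls')),
]
```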
| 25.8
| 76
| 0.75969
|
7145bfc83e25ff66cad105596a25c6d0786a48d3
| 9,908
|
py
|
Python
|
test/mitmproxy/addons/test_view.py
|
johnsoft/mitmproxy
|
d133b8baeeefa04fa6bdf43c39be822def013a6b
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_view.py
|
johnsoft/mitmproxy
|
d133b8baeeefa04fa6bdf43c39be822def013a6b
|
[
"MIT"
] | null | null | null |
test/mitmproxy/addons/test_view.py
|
johnsoft/mitmproxy
|
d133b8baeeefa04fa6bdf43c39be822def013a6b
|
[
"MIT"
] | 1
|
2020-11-07T08:54:29.000Z
|
2020-11-07T08:54:29.000Z
|
import pytest
from mitmproxy.test import tflow
from mitmproxy.addons import view
from mitmproxy import flowfilter
from mitmproxy import options
from mitmproxy.test import taddons
def tft(*, method="get", start=0):
f = tflow.tflow()
f.request.method = method
f.request.timestamp_start = start
return f
class Options(options.Options):
def __init__(
self,
*,
filter=None,
console_order=None,
console_order_reversed=False,
console_focus_follow=False,
**kwargs
):
self.filter = filter
self.console_order = console_order
self.console_order_reversed = console_order_reversed
self.console_focus_follow = console_focus_follow
super().__init__(**kwargs)
def test_order_refresh():
v = view.View()
sargs = []
def save(*args, **kwargs):
sargs.extend([args, kwargs])
v.sig_view_refresh.connect(save)
tf = tflow.tflow(resp=True)
with taddons.context(options=Options()) as tctx:
tctx.configure(v, console_order="time")
v.add(tf)
tf.request.timestamp_start = 1
assert not sargs
v.update(tf)
assert sargs
def test_order_generators():
v = view.View()
tf = tflow.tflow(resp=True)
rs = view.OrderRequestStart(v)
assert rs.generate(tf) == 0
rm = view.OrderRequestMethod(v)
assert rm.generate(tf) == tf.request.method
ru = view.OrderRequestURL(v)
assert ru.generate(tf) == tf.request.url
sz = view.OrderKeySize(v)
assert sz.generate(tf) == len(tf.request.raw_content) + len(tf.response.raw_content)
def test_simple():
v = view.View()
f = tft(start=1)
assert v.store_count() == 0
v.request(f)
assert list(v) == [f]
assert v.get_by_id(f.id)
assert not v.get_by_id("nonexistent")
# These all just call update
v.error(f)
v.response(f)
v.intercept(f)
v.resume(f)
v.kill(f)
assert list(v) == [f]
v.request(f)
assert list(v) == [f]
assert len(v._store) == 1
assert v.store_count() == 1
f2 = tft(start=3)
v.request(f2)
assert list(v) == [f, f2]
v.request(f2)
assert list(v) == [f, f2]
assert len(v._store) == 2
assert v.inbounds(0)
assert not v.inbounds(-1)
assert not v.inbounds(100)
f3 = tft(start=2)
v.request(f3)
assert list(v) == [f, f3, f2]
v.request(f3)
assert list(v) == [f, f3, f2]
assert len(v._store) == 3
f.marked = not f.marked
f2.marked = not f2.marked
v.clear_not_marked()
assert list(v) == [f, f2]
assert len(v) == 2
assert len(v._store) == 2
v.clear()
assert len(v) == 0
assert len(v._store) == 0
def test_filter():
v = view.View()
f = flowfilter.parse("~m get")
v.request(tft(method="get"))
v.request(tft(method="put"))
v.request(tft(method="get"))
v.request(tft(method="put"))
    assert len(v) == 4
v.set_filter(f)
assert [i.request.method for i in v] == ["GET", "GET"]
assert len(v._store) == 4
v.set_filter(None)
assert len(v) == 4
v.toggle_marked()
assert len(v) == 0
v.toggle_marked()
assert len(v) == 4
v[1].marked = True
v.toggle_marked()
assert len(v) == 1
assert v[0].marked
v.toggle_marked()
assert len(v) == 4
def test_order():
v = view.View()
with taddons.context(options=Options()) as tctx:
v.request(tft(method="get", start=1))
v.request(tft(method="put", start=2))
v.request(tft(method="get", start=3))
v.request(tft(method="put", start=4))
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
tctx.configure(v, console_order="method")
assert [i.request.method for i in v] == ["GET", "GET", "PUT", "PUT"]
v.set_reversed(True)
assert [i.request.method for i in v] == ["PUT", "PUT", "GET", "GET"]
tctx.configure(v, console_order="time")
assert [i.request.timestamp_start for i in v] == [4, 3, 2, 1]
v.set_reversed(False)
assert [i.request.timestamp_start for i in v] == [1, 2, 3, 4]
def test_reversed():
v = view.View()
v.request(tft(start=1))
v.request(tft(start=2))
v.request(tft(start=3))
v.set_reversed(True)
assert v[0].request.timestamp_start == 3
assert v[-1].request.timestamp_start == 1
assert v[2].request.timestamp_start == 1
with pytest.raises(IndexError):
v[5]
with pytest.raises(IndexError):
v[-5]
assert v._bisect(v[0]) == 1
assert v._bisect(v[2]) == 3
def test_update():
v = view.View()
flt = flowfilter.parse("~m get")
v.set_filter(flt)
f = tft(method="get")
v.request(f)
assert f in v
f.request.method = "put"
v.update(f)
assert f not in v
f.request.method = "get"
v.update(f)
assert f in v
v.update(f)
assert f in v
class Record:
def __init__(self):
self.calls = []
def __bool__(self):
return bool(self.calls)
def __repr__(self):
return repr(self.calls)
def __call__(self, *args, **kwargs):
self.calls.append((args, kwargs))
def test_signals():
v = view.View()
rec_add = Record()
rec_update = Record()
rec_remove = Record()
rec_refresh = Record()
def clearrec():
rec_add.calls = []
rec_update.calls = []
rec_remove.calls = []
rec_refresh.calls = []
v.sig_view_add.connect(rec_add)
v.sig_view_update.connect(rec_update)
v.sig_view_remove.connect(rec_remove)
v.sig_view_refresh.connect(rec_refresh)
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
# Simple add
v.add(tft())
assert rec_add
assert not any([rec_update, rec_remove, rec_refresh])
# Filter change triggers refresh
clearrec()
v.set_filter(flowfilter.parse("~m put"))
assert rec_refresh
assert not any([rec_update, rec_add, rec_remove])
v.set_filter(flowfilter.parse("~m get"))
# An update that results in a flow being added to the view
clearrec()
v[0].request.method = "PUT"
v.update(v[0])
assert rec_remove
assert not any([rec_update, rec_refresh, rec_add])
# An update that does not affect the view just sends update
v.set_filter(flowfilter.parse("~m put"))
clearrec()
v.update(v[0])
assert rec_update
assert not any([rec_remove, rec_refresh, rec_add])
# An update for a flow in state but not view does not do anything
f = v[0]
v.set_filter(flowfilter.parse("~m get"))
assert not len(v)
clearrec()
v.update(f)
assert not any([rec_add, rec_update, rec_remove, rec_refresh])
def test_focus_follow():
v = view.View()
with taddons.context(options=Options()) as tctx:
tctx.configure(v, console_focus_follow=True, filter="~m get")
v.add(tft(start=5))
assert v.focus.index == 0
v.add(tft(start=4))
assert v.focus.index == 0
assert v.focus.flow.request.timestamp_start == 4
v.add(tft(start=7))
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod = tft(method="put", start=6)
v.add(mod)
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 7
mod.request.method = "GET"
v.update(mod)
assert v.focus.index == 2
assert v.focus.flow.request.timestamp_start == 6
def test_focus():
# Special case - initialising with a view that already contains data
v = view.View()
v.add(tft())
f = view.Focus(v)
    assert f.index == 0
assert f.flow is v[0]
# Start empty
v = view.View()
f = view.Focus(v)
assert f.index is None
assert f.flow is None
v.add(tft(start=1))
assert f.index == 0
assert f.flow is v[0]
# Try to set to something not in view
with pytest.raises(ValueError):
f.__setattr__("flow", tft())
with pytest.raises(ValueError):
f.__setattr__("index", 99)
v.add(tft(start=0))
assert f.index == 1
assert f.flow is v[1]
v.add(tft(start=2))
assert f.index == 1
assert f.flow is v[1]
f.index = 0
assert f.index == 0
f.index = 1
v.remove(v[1])
assert f.index == 1
assert f.flow is v[1]
v.remove(v[1])
assert f.index == 0
assert f.flow is v[0]
v.remove(v[0])
assert f.index is None
assert f.flow is None
v.add(tft(method="get", start=0))
v.add(tft(method="get", start=1))
v.add(tft(method="put", start=2))
v.add(tft(method="get", start=3))
f.flow = v[2]
assert f.flow.request.method == "PUT"
filt = flowfilter.parse("~m get")
v.set_filter(filt)
assert f.index == 2
filt = flowfilter.parse("~m oink")
v.set_filter(filt)
assert f.index is None
def test_settings():
v = view.View()
f = tft()
with pytest.raises(KeyError):
v.settings[f]
v.add(f)
v.settings[f]["foo"] = "bar"
assert v.settings[f]["foo"] == "bar"
assert len(list(v.settings)) == 1
v.remove(f)
with pytest.raises(KeyError):
v.settings[f]
assert not v.settings.keys()
v.add(f)
v.settings[f]["foo"] = "bar"
assert v.settings.keys()
v.clear()
assert not v.settings.keys()
def test_configure():
v = view.View()
with taddons.context(options=Options()) as tctx:
tctx.configure(v, filter="~q")
with pytest.raises("invalid interception filter"):
tctx.configure(v, filter="~~")
tctx.configure(v, console_order="method")
with pytest.raises("unknown flow order"):
tctx.configure(v, console_order="no")
tctx.configure(v, console_order_reversed=True)
tctx.configure(v, console_order=None)
tctx.configure(v, console_focus_follow=True)
assert v.focus_follow
| 24.048544
| 88
| 0.603654
|
ccb44eda2f35164bdd20d1768a948c928fbd40eb
| 20,268
|
py
|
Python
|
tools/mo/openvino/tools/mo/back/preprocessing.py
|
artkuli/openvino
|
eb2fb5bf7df36ae55e3251816999b801ce053335
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/back/preprocessing.py
|
artkuli/openvino
|
eb2fb5bf7df36ae55e3251816999b801ce053335
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/back/preprocessing.py
|
tuxedcat/openvino
|
5939cb1b363ebb56b73c2ad95d8899961a084677
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging as log
from openvino.tools.mo.utils.error import Error
from openvino.tools.mo.utils.utils import refer_to_faq_msg
import numpy as np
from openvino.preprocess import PrePostProcessor # pylint: disable=no-name-in-module,import-error
# pylint: disable=no-name-in-module,import-error
from openvino.runtime import Model, Layout, PartialShape, layout_helpers
def update_mean_scale_to_dict(input_nodes: list, mean_scale_val, scale):
"""
Internal function. Updates mean/scale values from array to dictionary
:param: input_nodes Inputs of model
:param: mean_scale_val Parsed 'mean_scale_val' object from command line arguments
:param: scale Global scale factor for all inputs from --scale command line arguments
"""
if not isinstance(mean_scale_val, dict):
if len(mean_scale_val) != len(input_nodes):
raise Error('Numbers of inputs and mean/scale values do not match. ' + refer_to_faq_msg(61))
data = np.copy(mean_scale_val)
mean_scale_val = {}
for idx, node in enumerate(input_nodes):
names_list = list(node.get_tensor().get_names())
if not names_list:
continue
node_name = names_list[0]
mean_scale_val.update(
{
node_name: {
'mean': data[idx][0],
'scale': data[idx][1]
}
}
)
if scale:
for node in input_nodes:
names_list = list(node.get_tensor().get_names())
if not names_list:
continue
node_name = names_list[0]
old_val = mean_scale_val[node_name] if node_name in mean_scale_val else None
mean_scale_val.update(
{
node_name: {
'mean': old_val['mean'] if old_val and 'mean' in old_val else None,
'scale': scale
}
}
)
return mean_scale_val
def check_keys_valid(ov_function: Model, dict_to_validate: dict, search_outputs: bool):
"""
Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs
Throws if some key is not found
Throws if some different keys point to the same actual input/output
"""
nodes_used = {}
nodes = ov_function.inputs
if search_outputs:
nodes += ov_function.outputs
# We need to replace all node names from dict to tensor names
rename_dict = {}
# Find names for replacing
for name in dict_to_validate.keys():
for ov_node in nodes:
if name in ov_node.get_tensor().get_names():
break
elif name == ov_node.get_node().get_friendly_name():
assert len(ov_node.get_tensor().get_names()) > 0, 'Node must have at least one tensor name'
new_name = list(ov_node.get_tensor().get_names())[0]
rename_dict[name] = new_name
break
# Replace found node names with tensor names
for name, new_name in rename_dict.items():
assert name in dict_to_validate, 'Key {} is not in initial dict'.format(name)
assert new_name not in dict_to_validate, 'Key {} is already in initial dict'.format(new_name)
dict_to_validate[new_name] = dict_to_validate[name]
del dict_to_validate[name]
# validate the dict
for name in dict_to_validate.keys():
node_found = False
for ov_node in nodes:
if name in ov_node.get_tensor().get_names():
if ov_node in nodes_used:
raise Error('Key for {} and {} point to same model input/output.'
.format(name, nodes_used[ov_node]))
nodes_used[ov_node] = name
node_found = True
break
if not node_found:
if not search_outputs:
raise Error('Input with name {} wasn\'t found! {}'.format(name, refer_to_faq_msg(83)))
else:
raise Error('Input/Output with name {} wasn\'t found! {}'.format(name, refer_to_faq_msg(83)))
def update_layout_is_input_flag(ov_function: Model, layout_values: dict):
"""
Internal function: updates layout_values with flag whether each layout belongs to input or to output
"""
for name, layout_value in layout_values.items():
layout_value['is_input'] = False
for ov_input in ov_function.inputs:
if name in ov_input.get_tensor().get_names():
layout_value['is_input'] = True
break
return layout_values
def find_channels_dimension(shape: PartialShape, num_channels: int, name: str, layout_values):
"""
Internal function. Finds dimension index matching with expected channels number
Raises error if there is no candidates or number of candidates is > 1
:param: shape Parameter's partial shape
:param: num_channels Number of channels to find in shape
:param: name Parameter's name, used for Error-handling purposes
:param: layout_values Existing source/target layout items specified by user
:return: updated layout items with guessed layouts
"""
if shape.rank.is_dynamic:
raise Error('Can\'t determine channels dimension for dynamic shape for parameter {}.'
.format(name))
dim_idx_found = -1
for dim_idx in range(shape.rank.get_length()):
dim = shape.get_dimension(dim_idx)
if dim.is_static and dim.get_length() == num_channels:
if dim_idx_found >= 0:
raise Error('Can\'t determine channels dimension for {}. '
'Input shape is {}, needed channels {}. '
'Conflicting dimensions: {} and {}. Please specify layout manually.'
.format(name, shape, num_channels, dim_idx_found, dim_idx))
dim_idx_found = dim_idx
if dim_idx_found < 0:
raise Error('Can\'t determine channels dimension for {}. '
'Input shape is {}, needed channels {}'
.format(name, shape, num_channels))
# Restrict guessed channels index to particular position depending on tensor shape(3d, 4d, 5d)
if shape.rank.get_length() == 3:
# CHW or HWC, possible channels index is 0 or 2
if dim_idx_found != 0 and dim_idx_found != 2:
raise Error('Can\'t determine channels dimension for 3D input {} (CHW or HWC) with shape {}. '
'Please specify layout containing \'C\' channels manually.'.format(name, shape))
elif shape.rank.get_length() == 4:
# NCHW or NHWC, possible channels index is 1 or 3
if dim_idx_found != 1 and dim_idx_found != 3:
raise Error('Can\'t determine channels dimension for 4D input {} (NCHW or NHWC) with shape {}. '
'Please specify layout containing \'C\' channels manually.'.format(name, shape))
elif shape.rank.get_length() == 5:
# NCDHW or NDHWC, possible channels index is 1 or 4
if dim_idx_found != 1 and dim_idx_found != 4:
raise Error('Can\'t determine channels dimension for 5D input {} (NCDHW or NDHWC) with shape {}. '
'Please specify layout containing \'C\' channels manually.'.format(name, shape))
else:
raise Error('Can\'t determine channels dimension for {}D input {} with shape {}.'
'Please specify layout containing \'C\' channels manually.'
.format(shape.rank.get_length(), name, shape))
layout_str = "?" * shape.rank.get_length()
layout_str = layout_str[:dim_idx_found] + 'C' + layout_str[dim_idx_found+1:]
layout_values[name] = {
'source_layout': layout_str,
'target_layout': None,
'source_guessed': True,
'is_input': True
}
return layout_values
def guess_source_layouts_by_mean_scale(ov_function: Model, layout_values, mean_scale_values: dict):
"""
Internal function. Try to guess source layout for input by its shape and/or framework
:param: ov_function Original model
:param: layout_values Existing source/target layout items specified by user
:param: mean_scale_values Dictionary with mean/scale values defined for each argument
:return: updated layout items with guessed layouts
"""
for ms_name, mean_scale in mean_scale_values.items():
num_channels_mean = len(mean_scale['mean']) if mean_scale['mean'] is not None else 0
num_channels_scale = len(mean_scale['scale']) if hasattr(mean_scale['scale'], '__len__') else 0
        if num_channels_mean > 1 and \
                num_channels_scale > 1 and \
                num_channels_mean != num_channels_scale:  # compare values, not object identity
raise Error('Mean/Scale values for {} have different sizes: {} {}'
.format(ms_name, num_channels_mean, num_channels_scale))
need_guess_channels = num_channels_mean > 1 or num_channels_scale > 1
        if not need_guess_channels:  # scalar mean/scale values need no channels dimension guess
continue
num_channels = num_channels_mean if num_channels_mean > 1 else num_channels_scale
for i in range(0, len(ov_function.inputs)):
ov_input = ov_function.input(i)
if not ov_function.get_parameters()[i].layout.empty:
continue
if ms_name not in ov_input.get_tensor().get_names():
continue
layout_item = None
for name in ov_input.get_tensor().get_names():
if name in layout_values:
layout_item = layout_values[name]
break
if layout_item is not None:
# User specified some layout, skip guessing
continue
# Guess layout is applicable only when number of channels is '3'
if num_channels != 3:
raise Error('Can\'t determine channels dimension for {}. '
'When number of mean/scale values is {} (not 3), '
'please specify layout for input manually'.format(ms_name, num_channels))
layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(),
num_channels=num_channels,
name=ms_name,
layout_values=layout_values)
return layout_values
def check_suitable_for_reverse(layout: Layout, ov_input):
"""
Internal function. Checks if input with layout is suitable for reversing channels
:param: layout Existing source/target layout items specified by user
:param: ov_input Model's input
:return: True if reverse channels can be applied to input
"""
if not layout_helpers.has_channels(layout):
return False
if ov_input.get_partial_shape().rank.is_dynamic:
return False
c_idx = layout_helpers.channels_idx(layout)
rank = ov_input.get_partial_shape().rank.get_length()
if c_idx < 0:
c_idx += rank
if c_idx >= rank:
raise Error('Layout {} for input {} is inconsistent with shape {}'.format(
layout, ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()))
c_num = ov_input.get_partial_shape()[c_idx]
return c_num.is_dynamic or c_num.get_length() == 3
def guess_source_layouts_for_reverse_channels(ov_function: Model, layout_values):
"""
Internal function. Try to guess source layout for input by finding dimension with size=3 (RGB/BGR)
Additionally checks existing layouts and detects suitable inputs for reversing of input channels
:param: ov_function Original model
:param: layout_values Existing source/target layout items specified by user
:return: array with suitable parameters for reversing of input channels
"""
all_params = []
suitable_params = []
for i in range(0, len(ov_function.inputs)):
ov_input = ov_function.input(i)
param_info = [ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()]
all_params.append(param_info)
if not ov_function.get_parameters()[i].layout.empty:
if check_suitable_for_reverse(ov_function.get_parameters()[i].layout, ov_input):
suitable_params.append(param_info)
continue
layout_item = None
first_name = ov_input.get_tensor().get_any_name()
for name in ov_input.get_tensor().get_names():
if name in layout_values:
layout_item = layout_values[name]
break
if layout_item is not None:
# RIC transformation is applied before changing layout so only source_layout
# should be checked (even is target_layout is also provided)
if layout_item.get('source_layout'):
if check_suitable_for_reverse(Layout(layout_item['source_layout']), ov_input):
suitable_params.append(param_info)
continue
try:
layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(),
num_channels=3,
name=first_name,
layout_values=layout_values)
except Error as e:
log.debug('Reverse input channels guess did not succeed {}'.format(e))
else:
layout = layout_values[first_name].get('source_layout')
if layout and check_suitable_for_reverse(Layout(layout), ov_input):
suitable_params.append(param_info)
if not len(suitable_params):
raise Error('Network has {} inputs overall, but none of them are suitable for input channels reversing.\n'
'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}'.format(
len(all_params), all_params))
elif len(suitable_params) < len(all_params):
log.error('Network has {} inputs overall, but only {} of them are suitable for input channels reversing.\n'
'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic '
'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}\n'
'Suitable inputs {}'.format(len(all_params), len(suitable_params), all_params, suitable_params),
extra={'is_warning': True})
return suitable_params
def apply_preprocessing(ov_function: Model, argv: argparse.Namespace):
"""
Applies pre-processing of model inputs by adding appropriate operations
On return, 'ov_function' object will be updated
Expected 'argv.mean_scale_values' formats examples:
a) Dict: {'inputName': {'mean': [1., 2., 3.], 'scale': [2., 4., 8.]}}
    b) List: list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])),
                            (np.array([7., 8., 9.]), np.array([5., 6., 7.]))]))
Expected 'argv.layout_values' format examples:
a) Specific layouts for inputs and outputs
{ 'input1': {
'source_layout': 'nchw',
'target_layout': 'nhwc'
},
'output2': {
'source_layout': 'nhwc'
}
}
b) Layout for single input: {'': {'source_layout': 'nchw'}}
:param: ov_function OV function for applying mean/scale pre-processing
:param: argv Parsed command line arguments
"""
prep = PrePostProcessor(ov_function)
if 'mean_scale_values' in argv and argv.mean_scale_values:
mean_scale_values = argv.mean_scale_values
else:
mean_scale_values = {}
mean_scale_values = update_mean_scale_to_dict(input_nodes=ov_function.inputs,
mean_scale_val=mean_scale_values,
scale=argv.scale)
# On return, mean_scale_values is a dictionary with input names as key and mean/scale pair as value
# {'inputName': {'mean': [1., 2., 3.], 'scale': [2.]}}
layout_values = {}
if 'layout_values' in argv and argv.layout_values:
layout_values = argv.layout_values
if '' in layout_values:
if len(ov_function.inputs) > 1:
input_names = [list(ov_input.get_tensor().get_names())[0] for ov_input in ov_function.inputs]
raise Error('Layout without name can be specified for models with only one input, '
'but provided model has {} inputs: \'{}\'. '
'Please specify explicitly input/output name for --layout option'
.format(len(input_names), input_names))
layout_values = {
list(ov_function.input().get_tensor().get_names())[0]: {
'source_layout': layout_values[''].get('source_layout'),
'target_layout': layout_values[''].get('target_layout')
}
}
check_keys_valid(ov_function=ov_function, dict_to_validate=mean_scale_values, search_outputs=False)
check_keys_valid(ov_function=ov_function, dict_to_validate=layout_values, search_outputs=True)
layout_values = update_layout_is_input_flag(ov_function, layout_values)
layout_values = guess_source_layouts_by_mean_scale(ov_function, layout_values, mean_scale_values)
need_reverse = 'reverse_input_channels' in argv and argv.reverse_input_channels
suitable_params_ric = []
if need_reverse:
suitable_params_ric = guess_source_layouts_for_reverse_channels(ov_function=ov_function,
layout_values=layout_values)
for node_name, layout_value in layout_values.items():
if layout_value.get('source_layout'):
if layout_value.get('is_input'):
prep.input(node_name).model().set_layout(Layout(layout_value['source_layout']))
else:
prep.output(node_name).model().set_layout(Layout(layout_value['source_layout']))
if layout_value.get('target_layout'):
if layout_value.get('is_input'):
prep.input(node_name).tensor().set_layout(Layout(layout_value['target_layout']))
else:
prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout']))
# Apply reverse_input_channels
if need_reverse:
for name, _ in suitable_params_ric:
prep.input(name).preprocess().reverse_channels()
log.debug('reverse_input_channels pre-processing applied to {}'.format(name))
for node_name, node_mean_scale_values in mean_scale_values.items():
# Apply mean first, then scale
if node_mean_scale_values['mean'] is not None:
prep.input(node_name).preprocess().mean(node_mean_scale_values['mean'])
if node_mean_scale_values['scale'] is not None:
prep.input(node_name).preprocess().scale(node_mean_scale_values['scale'])
log.debug('Mean/Scale pre-processing applied to {}'.format(node_name))
# Apply pre-processing builder to a function
ov_function = prep.build()
    # Remove guessed layout values from ov_function (these values shall not be serialized to IR)
for node_name, layout_value in layout_values.items():
if layout_value.get('source_guessed') and \
not layout_value.get('target_layout'):
# search for parameter object
for idx, ov_input in enumerate(ov_function.inputs):
if node_name in ov_input.get_tensor().get_names():
log.debug('Clearing guessed layout {} for {}'
.format(layout_value['source_layout'], node_name))
ov_function.get_parameters()[idx].layout = Layout()
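A usage sketch for `apply_preprocessing`, assuming it is driven outside the Model Optimizer pipeline with a hand-built `argparse.Namespace`. The model path, input name, and mean/scale numbers are placeholders; in practice MO fills `argv` from its own command-line parser.

```python
# Hedged example; the Namespace fields mirror the attributes read above.
import argparse
from openvino.runtime import Core

core = Core()
ov_model = core.read_model('model.xml')          # placeholder model path

argv = argparse.Namespace(
    mean_scale_values={'input1': {'mean': [123.7, 116.3, 103.5],
                                  'scale': [58.4, 57.1, 57.4]}},   # placeholder stats for a placeholder input name
    scale=None,
    layout_values={'input1': {'source_layout': 'nchw', 'target_layout': None}},
    reverse_input_channels=False,
)
apply_preprocessing(ov_model, argv)              # updates ov_model in place, per the docstring above
```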
| 47.35514
| 120
| 0.626159
|
89059ef1c38bff2a4967f0b2e3ab7080ea74bf89
| 5,393
|
py
|
Python
|
openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0055.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0055.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/CONNECTORS/PYTHON/PYOG/Opengauss_Function_Connect_Python_Case0055.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : python驱动pyog
Case Name : openGauss模式连接数据库,定义数据类型
Description :
1.配置pg_hba入口
2.连接数据库
3.定义数据类型
3.1定义数据类型type_py_55
3.2建表插入数据,使用type_py_55类型
3.3重命名数据类型
3.4给数据类型增加一个新的属性
3.5使用数据类型
3.6删除type_py_55_new类型、表
4.断开连接
5.关闭pg_hba入口
Expect :
1.执行成功
2.连接成功,db.state返回'idle'
3.执行成功
3.1回显CREATE TYPE
3.2回显DROP TABLE.*CREATE TABLE.*INSERT 0 1
3.3回显ALTER TYPE
3.4回显ALTER TYPE
3.5回显INSERT 0 1
3.6回显DROP TABLE.*DROP TYPE
4.执行成功,db.state返回'closed'
5.执行成功
History :
"""
import os
import unittest
import py_opengauss
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class ConnPython55(unittest.TestCase):
def setUp(self):
self.pri_user = Node('PrimaryDbUser')
self.constant = Constant()
self.LOG = Logger()
self.type_name = 'type_py_55'
self.t_name = 't_py_55'
text = '----Opengauss_Function_Connect_Python_Case0055 start----'
self.LOG.info(text)
def test_conn(self):
text = '----step1: 配置pg_hba入口 expect: 成功----'
self.LOG.info(text)
host_cmd = "ifconfig -a|grep inet6 -a2|" \
"grep broadcast|awk '{print $2}'"
self.host = os.popen(host_cmd).readlines()[0].strip()
self.assertIsNotNone(self.host)
guc_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_guc reload -D {macro.DB_INSTANCE_PATH} ' \
f'-h "host {self.pri_user.db_name} {self.pri_user.db_user} ' \
f'{self.host}/32 sha256"'
self.LOG.info(guc_cmd)
guc_res = self.pri_user.sh(guc_cmd).result()
self.LOG.info(guc_res)
self.assertIn(self.constant.GSGUC_SUCCESS_MSG, guc_res,
'执行失败:' + text)
text = '----step2: 连接数据库 expect: 成功----'
self.LOG.info(text)
conn_info = f'opengauss://{self.pri_user.db_user}:' \
f'{self.pri_user.db_password}@{self.pri_user.db_host}:' \
f'{self.pri_user.db_port}/{self.pri_user.db_name}'
self.LOG.info(conn_info)
self.db = py_opengauss.open(conn_info)
self.assertEqual('idle', self.db.state, '执行失败:' + text)
text = '----step3: 定义数据类型 expect: 成功----'
self.LOG.info(text)
text = '----step3.1: 定义数据类型 expect: 成功----'
self.LOG.info(text)
cmd = f"create type {self.type_name} as (f1 int, f2 text);"
self.LOG.info(cmd)
sql_res = self.db.prepare(cmd).first()
self.LOG.info(sql_res)
self.assertEqual(sql_res, self.constant.CREATE_TYPE_SUCCESS_MSG,
'执行失败:' + text)
text = '----step3.2: 建表插入,使用person类型 expect: 成功----'
self.LOG.info(text)
cmd = f'''drop table if exists {self.t_name};
create table {self.t_name} (id int, properties {self.type_name});
insert into {self.t_name} values (1, (1, 'test_1'));'''
self.LOG.info(cmd)
sql_res = self.db.execute(cmd)
self.LOG.info(sql_res)
self.assertIsNone(sql_res, '执行失败:' + text)
text = '----step3.3: 重命名数据类型 expect: 成功----'
self.LOG.info(text)
cmd = f"alter type {self.type_name} rename to {self.type_name}_new;"
self.LOG.info(cmd)
sql_res = self.db.prepare(cmd).first()
self.LOG.info(sql_res)
self.assertEqual(sql_res, self.constant.ALTER_TYPE_SUCCESS_MSG,
'执行失败:' + text)
text = '----step3.4: 给数据类型增加一个新的属性 expect: 成功----'
self.LOG.info(text)
cmd = f"alter type {self.type_name}_new add attribute f3 int;"
self.LOG.info(cmd)
sql_res = self.db.prepare(cmd).first()
self.LOG.info(sql_res)
self.assertEqual(sql_res, self.constant.ALTER_TYPE_SUCCESS_MSG,
'执行失败:' + text)
text = '----step3.5: 使用数据类型 expect: 成功----'
self.LOG.info(text)
cmd = f"insert into {self.t_name} values (2, (2, 'test_2', 2));"
self.LOG.info(cmd)
sql_res = self.db.prepare(cmd).first()
self.LOG.info(sql_res)
self.assertEqual(sql_res, 1, '执行失败:' + text)
def tearDown(self):
text = '----run teardown----'
self.LOG.info(text)
text = '----step3.6: 删除类型转换,转换函数 expect: 成功----'
self.LOG.info(text)
cmd = f'''drop table t_py_55 cascade;
drop type type_py_55_new cascade;'''
self.LOG.info(cmd)
sql_res = self.db.execute(cmd)
text = '----step4: 断开连接 expect: 成功----'
self.LOG.info(text)
self.db.close()
self.assertIsNone(sql_res, '执行失败:' + text)
self.assertEqual('closed', self.db.state, '执行失败:' + text)
text = '----Opengauss_Function_Connect_Python_Case0055 end----'
self.LOG.info(text)
| 34.132911
| 84
| 0.603004
|
bed1d43919380ca58e5d6cdd88033fee919124e0
| 2,035
|
py
|
Python
|
day9.py
|
mmertama/advent-of-code-2021
|
21475bb7ab8f59b3173cf1e9440e07df63b70a3e
|
[
"MIT"
] | null | null | null |
day9.py
|
mmertama/advent-of-code-2021
|
21475bb7ab8f59b3173cf1e9440e07df63b70a3e
|
[
"MIT"
] | null | null | null |
day9.py
|
mmertama/advent-of-code-2021
|
21475bb7ab8f59b3173cf1e9440e07df63b70a3e
|
[
"MIT"
] | null | null | null |
example = '''2199943210
3987894921
9856789892
8767896789
9899965678'''
def get_height_map(data):
height_map = []
width = len(data[0])
height = len(data)
for ln in data:
height_map.append([int(x) for x in list(ln.rstrip())])
min_list = []
for y in range(height):
for x in range(width):
current = height_map[y][x]
m_count = 0
if y == 0 or height_map[y - 1][x] > current:
m_count += 1
if x == 0 or height_map[y][x - 1] > current:
m_count += 1
if y >= height - 1 or height_map[y + 1][x] > current:
m_count += 1
if x >= width - 1 or height_map[y][x + 1] > current:
m_count += 1
if m_count == 4:
min_list.append((x, y))
return height_map, min_list, width, height
def risk_level_sum(data):
height_map, min_list, _, _ = get_height_map(data)
risk_level = 0
for m in min_list:
risk_level += height_map[m[1]][m[0]] + 1
print("Risk level:", risk_level)
def get_basins(x, y, height_map, width, height):
current = height_map[y][x]
height_map[y][x] = current + 10
basins = [(x, y)]
if x > 0 and height_map[y][x - 1] < 9:
basins.extend(get_basins(x - 1, y, height_map, width, height))
if y > 0 and height_map[y - 1][x] < 9:
basins.extend(get_basins(x, y - 1, height_map, width, height))
if x < width - 1 and height_map[y][x + 1] < 9:
basins.extend(get_basins(x + 1, y, height_map, width, height))
if y < height - 1 and height_map[y + 1][x] < 9:
basins.extend(get_basins(x, y + 1, height_map, width, height))
return basins
def basin_top_mul(data):
height_map, min_list, width, height = get_height_map(data)
basins = []
for m in min_list:
basins.append(get_basins(m[0], m[1], height_map, width, height))
basins.sort(key=len, reverse=True)
top = len(basins[0]) * len(basins[1]) * len(basins[2])
print("Top 3", top)
| 30.833333
| 72
| 0.566585
|
ac00bedee79c8a25d1e0387f0b144429d59cd980
| 1,944
|
py
|
Python
|
backend/notebook-preprocess/slice.py
|
WatVis/EDAssistant
|
a4be2849a65abcf2f81f9c01a2172ec67aa38853
|
[
"BSD-3-Clause"
] | null | null | null |
backend/notebook-preprocess/slice.py
|
WatVis/EDAssistant
|
a4be2849a65abcf2f81f9c01a2172ec67aa38853
|
[
"BSD-3-Clause"
] | null | null | null |
backend/notebook-preprocess/slice.py
|
WatVis/EDAssistant
|
a4be2849a65abcf2f81f9c01a2172ec67aa38853
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import os
from os.path import join
import shutil
from tqdm import tqdm, trange
import sys
import multiprocessing.dummy as mp
notebooks_path = "../notebooks-full"
slice_output_path = "../notebooks-locset"
parse_output_path = "../notebooks-noMD"
devnull = open(os.devnull, 'w')
def sliceNotebooks(in_path, out_path, dir_name):
'''
slice notebooks into pieces based on data flow
'''
subprocess.run(["node", "./sliceNotebooks.js", in_path, out_path, dir_name], stdout=devnull, stderr=devnull)
def parseNotebooks(in_path, out_path, dir_name):
'''
Produce a flattened notebook.
'''
subprocess.run(["node", "./parseNotebooks.js", in_path, out_path, dir_name], stdout=devnull, stderr=devnull)
def mkdirIfNotExists(path):
if not os.path.exists(path):
os.mkdir(path)
def parse_dir(idx):
dir_name = dirnames[idx]
print("##########################")
print("parsing competition {}".format(dir_name))
_, _, filenames = next(os.walk(join(dirpath, dir_name)))
mkdirIfNotExists(join(slice_output_path, dir_name))
mkdirIfNotExists(join(parse_output_path, dir_name))
for idx in tqdm(range(len(filenames)), file=sys.stdout):
fname = filenames[idx]
f = fname.split('.')
if f[1] == 'csv':
shutil.copyfile(join(dirpath, dir_name, fname), join(slice_output_path, dir_name, fname))
elif f[1] == 'ipynb':
sliceNotebooks(join(dirpath, dir_name, fname), join(slice_output_path, dir_name), f[0])
parseNotebooks(join(dirpath, dir_name, fname), join(parse_output_path, dir_name), f[0])
else:
raise RuntimeError("unknown extension {}".format(f[1]))
if __name__=="__main__":
process_num = 6
mkdirIfNotExists(slice_output_path)
mkdirIfNotExists(parse_output_path)
dirpath, dirnames, _ = next(os.walk(notebooks_path))
p=mp.Pool(process_num)
p.map(parse_dir,trange(0,len(dirnames)))
p.close()
p.join()
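# Hypothetical serial run for debugging (not part of the original script), assuming
# the same ../notebooks-full layout and that the two node scripts are available:
#   dirpath, dirnames, _ = next(os.walk(notebooks_path))
#   mkdirIfNotExists(slice_output_path)
#   mkdirIfNotExists(parse_output_path)
#   parse_dir(0)   # process only the first competition directory, without the pool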
| 33.517241
| 111
| 0.686214
|
981262a6d2c6c2363c3226857f4ec633346996fb
| 352
|
py
|
Python
|
dark/__init__.py
|
UdoGi/dark-matter
|
3d49e89fa5e81f83144119f6216c5774176d203b
|
[
"MIT"
] | null | null | null |
dark/__init__.py
|
UdoGi/dark-matter
|
3d49e89fa5e81f83144119f6216c5774176d203b
|
[
"MIT"
] | null | null | null |
dark/__init__.py
|
UdoGi/dark-matter
|
3d49e89fa5e81f83144119f6216c5774176d203b
|
[
"MIT"
] | null | null | null |
import sys
if sys.version_info < (2, 7):
raise Exception('The dark matter code needs Python 2.7 or later.')
# Note that the version string must have the following format, otherwise it
# will not be found by the version() function in ../setup.py
#
# Remember to update ../CHANGELOG.md describing what's new in each version.
__version__ = '3.1.83'
| 32
| 75
| 0.727273
|
43e41f446733e5ff112391fe55950a0487249b62
| 1,971
|
py
|
Python
|
pdf_reader/app.py
|
fhightower/pdf-reader
|
caa6c562b823d67368d7188ddd5ccd7d584bd01c
|
[
"MIT"
] | null | null | null |
pdf_reader/app.py
|
fhightower/pdf-reader
|
caa6c562b823d67368d7188ddd5ccd7d584bd01c
|
[
"MIT"
] | null | null | null |
pdf_reader/app.py
|
fhightower/pdf-reader
|
caa6c562b823d67368d7188ddd5ccd7d584bd01c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
""" standard """
import sys
""" third-party """
""" custom """
from tcex import TcExLocal
"""
Copy this python script to your app base directory to use the tcex_local module.
Example Usage:
Install all the required libraries defined in setup.py. These will be the same packages
with the same version that will get *packaged* with the app.
./app.py --lib
Package the app for installation on the ThreatConnect platform. Optionally you can pass
the *--collection* flag that will bundle multiple apps into one "App Collection". The script
will also move the app package to another folder using the *--zip_out* flag.
./app.py --package
./app.py --package --collection
./app.py --package --collection --zip_out /opt/threatconnect/app/bundled/
Validate the application's install.json file. The validate command is automatically executed
when packaging an app. A configuration file name can be passed to the script using
the *--config* argument. By default the script will check the *install.json* file.
./app.py --validate
./app.py --validate --install_json myapp1.install.json
Run the script locally. The tcex_local module will use the tc.json file to generate
the CLI args required by the script. Typically an app would ship with a tc.json.template
file that provides example CLI args for the app to be run locally. The config file supports
multiple configuration for different test/use cases with the default case being named "default".
Use the *--test* arg to pass a selected test.
./app.py --run
./app.py --run --profile Test1
./app.py --run --config tcex.json --group MyGroup
"""
print('Python Version: {}.{}.{}'.format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro))
tcex_local = TcExLocal()
args = tcex_local.args
if args.lib:
tcex_local.gen_lib()
elif args.package:
tcex_local.package()
elif args.run:
tcex_local.run()
elif args.validate:
tcex_local.validate(args.install_json)
| 32.85
| 96
| 0.747336
|
469455752a4f2c33b87be5eeb4333bb67b4c750d
| 41,244
|
py
|
Python
|
tabs_extra.py
|
maheshwaghmare/TabsExtra
|
093ca19db5fb7129f23c5b98b8f87e941cda39e6
|
[
"MIT"
] | null | null | null |
tabs_extra.py
|
maheshwaghmare/TabsExtra
|
093ca19db5fb7129f23c5b98b8f87e941cda39e6
|
[
"MIT"
] | null | null | null |
tabs_extra.py
|
maheshwaghmare/TabsExtra
|
093ca19db5fb7129f23c5b98b8f87e941cda39e6
|
[
"MIT"
] | null | null | null |
"""
TabsExtra.
Copyright (c) 2014 - 2016 Isaac Muse <isaacmuse@gmail.com>
License: MIT
"""
import sublime_plugin
import sublime
import time
import sys
from TabsExtra import tab_menu
import os
import functools
from operator import itemgetter
import sublime_api
from urllib.parse import urljoin
from urllib.request import pathname2url
SETTINGS = "tabs_extra.sublime-settings"
LEFT = 0
RIGHT = 1
LAST = 2
LAST_ACTIVE = None
OVERRIDE_CONFIRM = '''TabsExtra will overwrite the entire "Tab Context.sublime-menu" file in "Packages/Default" with a new one. ST3 keeps an unmodified copy in the archive.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
RESTORE_CONFIRM = '''In ST3 TabsExtra will simply delete the override "Tab Context.sublime-menu" from "Packages/Default" to allow the archived menu to take effect.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
###############################
# Helpers
###############################
def log(msg, status=False):
"""Log message."""
string = str(msg)
print("TabsExtra: %s" % string)
if status:
sublime.status_message(string)
def debug(s):
"""Debug message."""
if sublime.load_settings(SETTINGS).get("debug", False):
log(s)
def sublime_format_path(pth):
"""Format path for sublime."""
import re
m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
if sublime.platform() == "windows" and m is not None:
pth = m.group(1) + "/" + m.group(2)
return pth.replace("\\", "/")
def is_persistent():
"""Check if sticky tabs should be persistent."""
return sublime.load_settings(SETTINGS).get("persistent_sticky", False)
def sort_on_load_save():
"""Sort on save."""
return sublime.load_settings(SETTINGS).get("sort_on_load_save", False)
def view_spawn_pos():
"""Where do new views get spawned."""
return sublime.load_settings(SETTINGS).get("spawn_view", "none")
def get_fallback_direction():
"""Get the focused tab fallback direction."""
mode = LEFT
value = sublime.load_settings(SETTINGS).get("fallback_focus", "left")
if value == "last_active":
mode = LAST
elif value == "right":
mode = RIGHT
return mode
def timestamp_view(window, sheet):
"""Timestamp view."""
global LAST_ACTIVE
view = window.active_view()
if view is None:
return
# Detect if this focus is due to the last active tab being moved
if (
LAST_ACTIVE is not None and
not LAST_ACTIVE.settings().get("tabs_extra_is_closed", False) and
LAST_ACTIVE.window() is None
):
# Flag last active tab as being moved
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
LAST_ACTIVE.settings().set("tabs_extra_moving", [window.id(), active_group])
# Skip if moving a tab
LAST_ACTIVE = None
allow = False
else:
allow = True
if allow:
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
# Add time stamp of last activation
view.settings().set('tabs_extra_last_activated', time.time())
# Track the tab's last position to help with focusing after a tab is moved
view.settings().set('tabs_extra_last_activated_sheet_index', active_index)
LAST_ACTIVE = view
debug("activated - %s" % view.file_name())
else:
debug("skipping - %s" % view.file_name())
def get_group_view(window, group, index):
"""Get the view at the given index in the given group."""
sheets = window.sheets_in_group(int(group))
sheet = sheets[index] if -1 < index < len(sheets) else None
view = sheet.view() if sheet is not None else None
return view
class Focus(object):
"""View focus handler."""
win = None
obj = None
@classmethod
def cancel(cls):
"""Cancel focus."""
cls.win = None
cls.obj = None
@classmethod
def defer(cls, win, obj):
"""Defer focus."""
if cls.win is None and cls.obj is None:
cls.win = win
cls.obj = obj
sublime.set_timeout(cls.on_focus, 100)
else:
cls.win = win
cls.obj = obj
@classmethod
def on_focus(cls):
"""On focus event."""
cls._focus()
@classmethod
def focus(cls, win, obj):
"""Set the win and obj before calling focus."""
cls.win = win
cls.obj = obj
cls._focus()
@classmethod
def _focus(cls):
"""Perform view focus."""
try:
if cls.win is not None and cls.obj is not None:
if isinstance(cls.obj, sublime.View):
cls.win.focus_view(cls.obj)
timestamp_view(cls.win, cls.obj)
elif isinstance(cls.obj, sublime.Sheet):
cls.win.focus_sheet(cls.obj)
timestamp_view(cls.win, cls.obj)
except Exception:
pass
cls.cancel()
###############################
# Sticky Tabs
###############################
class TabsExtraClearAllStickyCommand(sublime_plugin.WindowCommand):
"""Clear all sticy tabs."""
def run(self, group=-1, force=False):
"""Clear all tab sticky states of current active group."""
if group == -1:
group = self.window.active_group()
if group >= 0:
persistent = is_persistent()
views = self.window.views_in_group(int(group))
if not persistent or force:
for v in views:
v.settings().erase("tabs_extra_sticky")
def is_visible(self, group=-1, force=False):
"""Show command if any tabs in active group are sticky."""
if group == -1:
group = self.window.active_group()
marked = False
views = self.window.views_in_group(int(group))
for v in views:
if v.settings().get("tabs_extra_sticky", False):
marked = True
break
return marked
class TabsExtraToggleStickyCommand(sublime_plugin.WindowCommand):
"""Toggle sticky state for tab."""
def run(self, group=-1, index=-1):
"""Toggle a tabs sticky state."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
if not view.settings().get("tabs_extra_sticky", False):
view.settings().set("tabs_extra_sticky", True)
else:
view.settings().erase("tabs_extra_sticky")
def is_checked(self, group=-1, index=-1):
"""Show in menu whether the tab is sticky."""
checked = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
checked = view.settings().get("tabs_extra_sticky", False)
return checked
class TabsExtraSetStickyCommand(sublime_plugin.TextCommand):
"""Set sticky value for the tab."""
def run(self, edit, value):
"""Set the sticky command to the specific value."""
if self.is_enabled(value):
self.view.settings().set("tabs_extra_sticky", bool(value))
def is_enabled(self, value):
"""Check if sticky value is already set to desired value."""
enabled = False
if self.view is not None:
current_value = self.view.settings().get("tabs_extra_sticky", False)
if current_value != value:
enabled = True
return enabled
###############################
# Close
###############################
class TabsExtraCloseMenuCommand(sublime_plugin.WindowCommand):
"""Close tabs via a quick panel menu."""
close_types = [
("Close", "single"),
("Close Other Tabs", "other"),
("Close Tabs to Right", "right"),
("Close Tabs to Left", "left"),
("Close All Tabs", "all")
]
def run(self, mode="normal", close_type=None):
"""Run command."""
self.mode = mode
self.group = -1
self.index = -1
sheet = self.window.active_sheet()
if sheet is not None:
self.group, self.index = self.window.get_sheet_index(sheet)
if self.group != -1 and self.index != -1:
value = None
if close_type is not None:
index = 0
for ct in self.close_types:
if ct[1] == close_type:
value = index
index += 1
if value is None:
self.window.show_quick_panel(
[x[0] for x in self.close_types],
self.check_selection
)
else:
self.check_selection(value)
def check_selection(self, value):
"""Check the user's selection."""
if value != -1:
close_unsaved = True
unsaved_prompt = True
if self.mode == "skip_unsaved":
close_unsaved = False
if self.mode == "dismiss_unsaved":
unsaved_prompt = False
close_type = self.close_types[value][1]
self.window.run_command(
"tabs_extra_close",
{
"group": int(self.group),
"index": int(self.index),
"close_type": close_type,
"unsaved_prompt": unsaved_prompt,
"close_unsaved": close_unsaved
}
)
def is_enabled(self, mode="normal"):
"""Check if command is enabled."""
group = -1
index = -1
sheet = self.window.active_sheet()
if sheet is not None:
group, index = self.window.get_sheet_index(sheet)
return group != -1 and index != -1 and mode in ["normal", "skip_unsaved", "dismiss_unsaved"]
class TabsExtraCloseAllCommand(sublime_plugin.WindowCommand):
"""Close all tabs in the whole window."""
def run(self):
"""Close all tabs in window; not just the tabs in the active group."""
for group in range(0, self.window.num_groups()):
sheet = self.window.active_sheet_in_group(group)
if sheet is not None:
index = self.window.get_sheet_index(sheet)[1]
self.window.run_command("tabs_extra_close", {"close_type": "all", "group": group, "index": index})
class TabsExtraCloseCommand(sublime_plugin.WindowCommand):
"""Close tab command."""
def init(self, close_type, group, index):
"""
Determine which views will be targeted by the close command.
Also determine which tab states need to be cleaned up.
"""
self.persistent = is_persistent()
self.sheets = self.window.sheets_in_group(int(group))
assert(close_type in ["single", "left", "right", "other", "all"])
# Setup active index and group
active_sheet = self.window.active_sheet()
active_index = None
self.active_index = index
self.active_group = None
if active_sheet is not None:
active_group, active_index = self.window.get_sheet_index(active_sheet)
if group != active_group:
active_index = None
if active_index is not None:
self.active_index = active_index
# Compile a list of existing tabs with their timestamps
self.last_activated = []
if get_fallback_direction() == LAST:
for s in self.sheets:
v = s.view()
if v is not None:
last_activated = v.settings().get("tabs_extra_last_activated", None)
if last_activated is not None:
self.last_activated.append((last_activated, s))
else:
self.last_activated.append((0, s))
# Determine targeted sheets to close and sheets to cleanup
if close_type == "single":
self.targets = [self.sheets[index]]
self.cleanup = bool(len(self.sheets[:index] + self.sheets[index + 1:]))
elif close_type == "left":
self.targets = self.sheets[:index]
self.cleanup = bool(len(self.sheets[index:]))
elif close_type == "right":
self.targets = self.sheets[index + 1:]
self.cleanup = bool(len(self.sheets[:index + 1]))
elif close_type == "other":
self.targets = self.sheets[:index] + self.sheets[index + 1:]
self.cleanup = True
elif close_type == "all":
self.targets = self.sheets[:]
self.cleanup = False
def select_left(self, fallback=True):
"""Select tab to the left if the current active tab was closed."""
selected = False
for x in reversed(range(0, self.active_index)):
if self.window.get_sheet_index(self.sheets[x])[1] != -1:
Focus.defer(self.window, self.sheets[x])
selected = True
break
if fallback and not selected:
# Fallback to other direction
selected = self.select_left(False)
return selected
def select_right(self, fallback=True):
"""Select tab to the right if the current active tab was closed."""
selected = False
for x in range(self.active_index + 1, len(self.sheets)):
if self.window.get_sheet_index(self.sheets[x])[1] != -1:
Focus.defer(self.window, self.sheets[x])
selected = True
break
if fallback and not selected:
# Fallback to other direction
selected = self.select_right(False)
return selected
def select_last(self, fallback=True):
"""Select last activated tab if available."""
selected = False
self.last_activated = sorted(self.last_activated, key=lambda x: x[0])
if len(self.last_activated):
# Get most recent activated tab
for s in reversed(self.last_activated):
if self.window.get_sheet_index(s[1])[1] != -1:
Focus.defer(self.window, s[1])
selected = True
break
if fallback and not selected:
# Fallback left
selected = self.select_left(False)
if fallback and not selected:
# Fallback right
selected = self.select_right(False)
return selected
def select_view(self):
"""Select active tab, if available, or fallback to the left or right."""
selected = False
if self.active_index is not None:
fallback_mode = get_fallback_direction()
if self.window.get_sheet_index(self.sheets[self.active_index])[1] != -1:
self.window.focus_sheet(self.sheets[self.active_index])
selected = True
elif fallback_mode == LAST:
selected = self.select_last()
elif fallback_mode == RIGHT:
selected = self.select_right()
else:
selected = self.select_left()
return selected
def can_close(self, is_sticky, is_single):
"""Prompt user in certain scenarios if okay to close."""
is_okay = True
if is_sticky:
if is_single:
is_okay = sublime.ok_cancel_dialog(
"This is a sticky tab, are you sure you want to close?"
)
else:
is_okay = False
return is_okay
def run(
self, group=-1, index=-1,
close_type="single", unsaved_prompt=True, close_unsaved=True
):
"""Close the specified tabs and cleanup sticky states."""
TabsExtraListener.extra_command_call = True
if group >= 0 and index >= 0:
self.init(close_type, group, index)
if (
len(self.targets) and
not unsaved_prompt and
not all(not target.view().is_dirty() for target in self.targets) and
not sublime.ok_cancel_dialog(
"Are you sure you want to dismiss all targeted unsaved buffers?"
)
):
return
for s in self.targets:
v = s.view()
if v is not None:
if self.can_close(v.settings().get("tabs_extra_sticky", False), close_type == "single"):
if not self.persistent:
v.settings().erase("tabs_extra_sticky")
self.window.focus_view(v)
if not v.is_dirty() or close_unsaved:
if not unsaved_prompt:
v.set_scratch(True)
sublime_api.window_close_file(self.window.id(), v.id())
elif not self.persistent:
v.settings().erase("tabs_extra_sticky")
else:
self.window.focus_sheet(s)
self.window.run_command('close_file')
if not self.persistent and self.cleanup:
self.window.run_command("tabs_extra_clear_all_sticky", {"group": group})
self.select_view()
TabsExtraListener.extra_command_call = False
###############################
# Listener
###############################
class TabsExtraListener(sublime_plugin.EventListener):
"""Listener command to handle tab focus, closing, moving events."""
extra_command_call = False
def on_window_command(self, window, command_name, args):
"""Intercept and override specific close tab commands."""
extra_command_call = TabsExtraListener.extra_command_call
cmd = None
if args is None:
view = window.active_view()
if view is None:
return cmd
# Mark all actual file closes done from TabsExtra
# This helps us know when file close was called outside of TabsExtra commands
if extra_command_call and command_name == "close_file":
view.settings().set("tabs_extra_closing", True)
return cmd
command_name = "tabs_extra_close"
group, index = window.get_view_index(view)
args = {"group": group, "index": index}
if command_name in ["close_by_index", "close"]:
command_name = "tabs_extra_close"
args["close_type"] = "single"
cmd = (command_name, args)
elif command_name == "close_all":
command_name = "tabs_extra_close_all"
args = {}
cmd = (command_name, args)
elif command_name == "close_others_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "other"
cmd = (command_name, args)
elif command_name == "close_to_right_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "right"
cmd = (command_name, args)
return cmd
def on_load(self, view):
"""Handle load focus or spawining."""
Focus.cancel()
if sort_on_load_save():
if not self.on_sort(view):
view.settings().set('tabsextra_to_sort', True)
else:
self.on_spawn(view)
def on_spawn(self, view):
"""When a new view is spawned, postion the view per user's preference."""
window = view.window()
if window and window.get_view_index(view)[1] != -1:
loaded = view.settings().get("tabs_extra_spawned", False)
if not loaded:
sheet = window.active_sheet()
spawn = view_spawn_pos()
if spawn != "none":
sheets = window.sheets()
group, index = window.get_sheet_index(sheet)
last_group = None
last_index = None
if LAST_ACTIVE is not None:
for s in sheets:
v = s.view()
if v is not None and LAST_ACTIVE.id() == v.id():
last_group, last_index = window.get_sheet_index(s)
break
active_in_range = (
last_group is not None and
last_index is not None and
last_group == group
)
if spawn == "right":
group, index = window.get_sheet_index(sheets[-1])
window.set_sheet_index(sheet, group, index)
elif spawn == "left":
group, index = window.get_sheet_index(sheets[0])
window.set_sheet_index(sheet, group, index)
elif spawn == "active_right" and active_in_range:
window.set_sheet_index(sheet, group, last_index + 1)
elif spawn == "active_left" and active_in_range:
window.set_sheet_index(sheet, group, last_index)
view.settings().set("tabs_extra_spawned", True)
def on_post_save(self, view):
"""On save sorting."""
if sort_on_load_save():
self.on_sort(view)
def on_sort(self, view):
"""Sort views."""
sorted_views = False
window = view.window()
if window and window.get_view_index(view)[1] != -1:
cmd = sublime.load_settings(SETTINGS).get("sort_on_load_save_command", {})
module = str(cmd.get("module", ""))
reverse = bool(cmd.get("reverse", False))
if module != "":
window.run_command(
"tabs_extra_sort",
{"sort_by": module, "reverse": reverse}
)
sorted_views = True
return sorted_views
def on_pre_close(self, view):
"""
If a view is closing without being marked, we know it was done outside of TabsExtra.
Attach view and window info so we can focus the right view after close.
"""
Focus.cancel()
view.settings().set("tabs_extra_is_closed", True)
if not view.settings().get("tabs_extra_closing", False):
TabsExtraListener.extra_command_call = True
window = view.window()
if window is not None:
view.settings().set("tabs_extra_view_info", view.window().get_view_index(view))
view.settings().set("tabs_extra_window_info", view.window().id())
def on_close(self, view):
"""
Handle focusing the correct view in window group.
Close command was initiated outside of TabsExtra, so a focus is required.
"""
view_info = view.settings().get("tabs_extra_view_info", None)
window_info = view.settings().get("tabs_extra_window_info", None)
if view_info is not None and window_info is not None:
window = None
for w in sublime.windows():
if w.id() == window_info:
window = w
break
if window is not None:
self.select_tab(window, int(view_info[0]), view_info[1])
TabsExtraListener.extra_command_call = False
def on_activated(self, view):
"""
Timestamp each view when activated.
Detect if on_move event should be executed.
"""
if not TabsExtraListener.extra_command_call:
window = view.window()
if window is None:
return
s = window.active_sheet()
timestamp_view(window, s)
# Detect if tab was moved to a new group
# Run on_move event if it has.
moving = view.settings().get("tabs_extra_moving", None)
if moving is not None:
win_id, group_id = moving
window = view.window()
if window is None:
return
active_group = window.get_view_index(view)[0]
if window.id() != win_id or int(group_id) != int(active_group):
view.settings().erase("tabs_extra_moving")
last_index = view.settings().get('tabs_extra_last_activated_sheet_index', -1)
self.on_move(view, win_id, int(group_id), last_index)
elif sort_on_load_save() and view.settings().get('tabsextra_to_sort'):
view.settings().erase('tabsextra_to_sort')
self.on_sort(view)
def on_move(self, view, win_id, group_id, last_index):
"""Select the fallback tab in the group it was moved from."""
for w in sublime.windows():
if w.id() == win_id:
self.select_tab(w, group_id, last_index)
break
def select_tab(self, window, group_id, last_index):
"""Select the desired fallback tab."""
selected = False
sheets = window.sheets_in_group(group_id)
fallback_mode = get_fallback_direction()
if len(sheets) == 0:
return
if last_index >= 0:
if fallback_mode == LAST:
selected = self.select_last(sheets, window, last_index)
elif fallback_mode == RIGHT:
selected = self.select_right(sheets, window, last_index)
else:
selected = self.select_left(sheets, window, last_index)
return selected
def select_last(self, sheets, window, closed_index, fallback=True):
"""Ensure focus of last active view."""
selected = False
last_activated = []
for s in sheets:
v = s.view()
if v is not None:
last = v.settings().get("tabs_extra_last_activated", None)
if last is not None:
last_activated.append((last, s))
else:
last_activated.append((0, s))
last_activated = sorted(last_activated, key=lambda x: x[0])
if len(last_activated):
Focus.defer(window, last_activated[-1][1])
selected = True
if not selected and fallback:
selected = self.select_left(sheets, window, closed_index, False)
if not selected and fallback:
selected = self.select_right(sheets, window, closed_index, False)
return selected
def select_right(self, sheets, window, closed_index, fallback=True):
"""Ensure focus of view to the right of closed view."""
selected = False
if len(sheets) > closed_index:
Focus.defer(window, sheets[closed_index])
selected = True
if not selected and fallback:
selected = self.select_left(sheets, window, closed_index, False)
return selected
def select_left(self, sheets, window, closed_index, fallback=True):
"""Ensure focus of view to the left of closed view."""
selected = False
if len(sheets) >= closed_index:
Focus.defer(window, sheets[closed_index - 1])
selected = True
if not selected and fallback:
selected = self.select_right(sheets, window, closed_index, False)
return selected
###############################
# Wrappers
###############################
class TabsExtraViewWrapperCommand(sublime_plugin.WindowCommand):
"""Wrapper for for executing certain commands from the tab context menu."""
def run(self, command, group=-1, index=-1, args=None):
"""Wrap command in order to ensure view gets focused first."""
if args is None:
args = {}
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
self.window.run_command(command, args)
###############################
# File Management Commands
###############################
class TabsExtraDeleteCommand(sublime_plugin.WindowCommand):
"""Delete the file."""
def run(self, group=-1, index=-1):
"""Delete the tab's file."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
if sublime.ok_cancel_dialog("Delete %s?" % file_name, "Delete"):
if not view.close():
return
import Default.send2trash as send2trash
send2trash.send2trash(file_name)
def is_visible(self, group=-1, index=-1):
"""Check if command should be visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraDuplicateCommand(sublime_plugin.WindowCommand):
"""Duplicate tab."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"Duplicate:", file_name,
lambda x: self.on_done(file_name, x),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the tab duplication when the user is done with the input panel."""
new_path = os.path.dirname(new)
if os.path.exists(new_path) and os.path.isdir(new_path):
if not os.path.exists(new) or sublime.ok_cancel_dialog("Overwrite %s?" % new, "Replace"):
try:
with open(old, 'rb') as f:
text = f.read()
with open(new, 'wb') as f:
f.write(text)
self.window.open_file(new)
except Exception:
sublime.status_message("Unable to duplicate")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRenameCommand(sublime_plugin.WindowCommand):
"""Rename the tab's file."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
branch, leaf = os.path.split(file_name)
v = self.window.show_input_panel(
"New Name:", leaf,
functools.partial(self.on_done, file_name, branch),
None, None
)
name = os.path.splitext(leaf)[0]
v.sel().clear()
v.sel().add(sublime.Region(0, len(name)))
def on_done(self, old, branch, leaf):
"""Handle the renaming when user is done with the input panel."""
new = os.path.join(branch, leaf)
try:
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to rename")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraMoveCommand(sublime_plugin.WindowCommand):
"""Move the tab's file."""
def run(self, group=-1, index=-1):
"""Move the file in the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"New Location:", file_name,
functools.partial(self.on_done, file_name),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the moving when user is done with the input panel."""
try:
directory = os.path.dirname(new)
if not os.path.exists(directory):
os.makedirs(directory)
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to move")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRevertCommand(TabsExtraViewWrapperCommand):
"""Revert changes in file."""
def is_visible(self, command, group=-1, index=-1, args=None):
"""Determine if command should be visible in menu."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None:
enabled = view.is_dirty()
return enabled
class TabsExtraFileCommand(TabsExtraViewWrapperCommand):
"""Wrapper for file commands."""
def is_enabled(self, command, group=-1, index=-1, args=None):
"""Determine if command should be enabled."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
class TabsExtraFilePathCommand(sublime_plugin.WindowCommand):
"""Get file paths."""
def run(self, group=-1, index=-1, path_type='path'):
"""Run the command."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
view.run_command('copy_path')
pth = sublime.get_clipboard()
if path_type == 'name':
pth = os.path.basename(pth)
elif path_type == 'path_uri':
pth = urljoin('file:', pathname2url(pth))
sublime.set_clipboard(pth)
def is_enabled(self, group=-1, index=-1, path_type='path'):
"""Determine if command should be enabled."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
###############################
# Sort
###############################
class TabsExtraSortMenuCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self):
"""Using "sort_layout" setting, construct a quick panel sort menu."""
sort_layout = sublime.load_settings(SETTINGS).get("sort_layout", [])
if len(sort_layout):
self.sort_commands = []
sort_menu = []
for sort_entry in sort_layout:
caption = str(sort_entry.get("caption", ""))
module = str(sort_entry.get("module", ""))
reverse = bool(sort_entry.get("reverse", False))
if module != "":
self.sort_commands.append((module, reverse))
sort_menu.append(caption)
if len(sort_menu):
self.window.show_quick_panel(sort_menu, self.check_selection)
def check_selection(self, value):
"""Launch the selected sort command."""
if value != -1:
command = self.sort_commands[value]
self.window.run_command("tabs_extra_sort", {"sort_by": command[0], "reverse": command[1]})
class TabsExtraSortCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self, group=-1, sort_by=None, reverse=False):
"""Sort Tabs."""
if sort_by is not None:
if group == -1:
group = self.window.active_group()
self.group = group
self.reverse = reverse
views = self.window.views_in_group(int(group))
if len(views):
sort_module = self.get_sort_module(sort_by)
if sort_module is not None:
view_data = []
sort_module.run(views, view_data)
self.sort(view_data)
self.window.focus_view(self.window.active_view())
def sort(self, view_data):
"""Sort the views."""
indexes = tuple([x for x in range(0, len(view_data[0]) - 1)])
sorted_views = sorted(view_data, key=itemgetter(*indexes))
if self.reverse:
sorted_views = sorted_views[::-1]
if sorted_views != view_data:
for index in range(0, len(sorted_views)):
self.window.set_view_index(sorted_views[index][-1], self.group, index)
def get_sort_module(self, module_name):
"""Import the sort_by module."""
import imp
path_name = os.path.join("Packages", os.path.normpath(module_name.replace('.', '/')))
path_name += ".py"
module = imp.new_module(module_name)
sys.modules[module_name] = module
exec(
compile(
sublime.load_resource(sublime_format_path(path_name)),
module_name, 'exec'
),
sys.modules[module_name].__dict__
)
return module
###############################
# Menu Installation
###############################
class TabsExtraInstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Install TabsExtra menu overriding the default tab context menu."""
def run(self):
"""Install/upgrade the override tab menu."""
msg = OVERRIDE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.upgrade_override_menu()
class TabsExtraUninstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Uninstall the TabsExtra override menu."""
def run(self):
"""Uninstall the override tab menu."""
msg = RESTORE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.uninstall_override_menu()
class TabsExtraInstallMenuCommand(sublime_plugin.ApplicationCommand):
"""Install the TabsExtra menu by appending it to the existing tab context menu."""
def run(self):
"""Install/upgrade the standard tab menu."""
tab_menu.upgrade_default_menu()
###############################
# Plugin Loading
###############################
def plugin_loaded():
"""Handle plugin setup."""
win = sublime.active_window()
if win is not None:
sheet = win.active_sheet()
if sheet is not None:
timestamp_view(win, sheet)
| 34.542714
| 173
| 0.55482
|
bc533b486121cd78fe8d1cc13522902c417063ad
| 1,155
|
py
|
Python
|
Code/odooerp/odoo-8.0/openerp/addons/payment_buckaroo/controllers/main.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/payment_buckaroo/controllers/main.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | null | null | null |
Code/odooerp/odoo-8.0/openerp/addons/payment_buckaroo/controllers/main.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class BuckarooController(http.Controller):
_return_url = '/payment/buckaroo/return'
_cancel_url = '/payment/buckaroo/cancel'
_exception_url = '/payment/buckaroo/error'
_reject_url = '/payment/buckaroo/reject'
@http.route([
'/payment/buckaroo/return',
'/payment/buckaroo/cancel',
'/payment/buckaroo/error',
'/payment/buckaroo/reject',
], type='http', auth='none')
def buckaroo_return(self, **post):
""" Buckaroo."""
_logger.info('Buckaroo: entering form_feedback with post data %s', pprint.pformat(post)) # debug
request.registry['payment.transaction'].form_feedback(request.cr, SUPERUSER_ID, post, 'buckaroo', context=request.context)
post = dict((key.upper(), value) for key, value in post.items())
return_url = post.get('ADD_RETURNDATA') or '/'
return werkzeug.utils.redirect(return_url)
| 32.083333
| 130
| 0.683983
|
a4e28a70b2e59b0ac3cf7186f9a66620fd87e72a
| 12,685
|
py
|
Python
|
tape/__main__.py
|
cthoyt/TAPE
|
70d9364f6d67fd8b5574eed729af48d4f0694019
|
[
"MIT"
] | 2
|
2020-12-06T15:41:03.000Z
|
2021-06-18T10:25:23.000Z
|
tape/__main__.py
|
cthoyt/TAPE
|
70d9364f6d67fd8b5574eed729af48d4f0694019
|
[
"MIT"
] | null | null | null |
tape/__main__.py
|
cthoyt/TAPE
|
70d9364f6d67fd8b5574eed729af48d4f0694019
|
[
"MIT"
] | null | null | null |
from typing import List, Union, Sequence, Tuple, Optional
from glob import glob
from collections import defaultdict
import atexit
import os
import shutil
import pickle as pkl
import tensorflow as tf
from tape.tasks import TaskBuilder, Task, AbstractLanguageModelingTask
from tape.models import ModelBuilder
from tape.experiments import ProteinExperiment, training_params
import rinokeras as rk
import re
from datetime import datetime
from sacred import Experiment, Ingredient
from sacred.observers import FileStorageObserver
import numpy as np
from table_logger import TableLogger
gpu = Ingredient('gpu')
proteins = Experiment('Unsupervised Protein',
ingredients=[gpu, training_params] + ModelBuilder.hparams + TaskBuilder.params)
folder_name = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
logdir = os.environ.get('PROTEIN_LOGDIR', 'results')
if not os.path.isdir('results'):
os.mkdir('results')
proteins.observers.append(FileStorageObserver.create(os.path.join('results', folder_name)))
def filter_text(text):
pattern = re.compile(r"Epoch\s+\d+:")
text = '\n'.join(filter(lambda line: not pattern.match(line), text.split('\n')))
return text
proteins.captured_out_filter = filter_text
@gpu.config
def gpu_config():
"""Configure the gpu"""
device = 0 # noqa: F841
allow_growth = False # noqa: F841
@proteins.config
def config():
tasks = [] # noqa: F841
model = '' # noqa: F841
num_epochs = 100 # noqa: F841
load_from = None # noqa: F841
load_task_from = None # noqa: F841
patience = 10 # noqa: F841
freeze_embedding_weights = False # noqa: F841
data_folder = './data' # noqa: F841
max_sequence_length = 10000 # noqa: F841
add_cls_token = False # noqa: F841
debug = False # noqa: F841
save_outputs = False # noqa: F841
steps_per_epoch = 10000 # noqa: F841
datafile = '' # noqa: F841
assert len(tasks) > 0
assert model != ''
@gpu.capture
def setup_tensorflow(device: Union[str, int, Sequence[int], Sequence[str]], allow_growth: bool):
"""Setup tensorflow session according to gpu configuration.
Args:
device (Union[str, int, Sequence[int], Sequence[str]]): GPU or list of GPUs to run on
allow_growth (bool): Whether to capture all memory on gpu or grow as necessary
Returns:
sess (tf.Session): Tensorflow Session object as the default session
"""
if isinstance(device, int):
device = str(device)
elif isinstance(device, list):
device = ', '.join([str(d) for d in device])
elif not isinstance(device, str):
raise ValueError("Unrecognized device type. Expected int, str, or list. "
"Received {}.".format(type(device)))
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = device
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1" # disable tensorflow info logging
tf.logging.set_verbosity(tf.logging.WARN)
from tensorflow.python.platform import tf_logging
try:
# tensorflow == 1.13
tf_logging.get_logger().propagate = False
except AttributeError:
# tensorflow <= 1.12
tf_logging._get_logger().propagate = False
gpu_options = tf.GPUOptions(allow_growth=allow_growth)
conf = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
sess = tf.get_default_session()
if sess is None:
sess = tf.Session(config=conf)
sess.__enter__() # type: ignore
np.set_printoptions(suppress=True)
return sess
@proteins.capture
def get_data(task_list: List[Task],
boundaries: Tuple[List[int], List[int]],
data_folder: str,
max_sequence_length: int,
add_cls_token: bool) -> \
Tuple[tf.data.Dataset, tf.data.Dataset]:
datasets = [task.get_data(boundaries, data_folder, max_sequence_length, add_cls_token) for task in task_list]
train, valid = list(zip(*datasets))
if len(train) > 1:
train = tf.data.Dataset.zip(train)
valid = tf.data.Dataset.zip(valid)
else:
train = train[0]
valid = valid[0]
return train, valid
class MetricEvaluator:
def __init__(self, key_metric: str, maximize_or_minimize: Optional[str] = None):
self._key_metric = key_metric
if maximize_or_minimize is None:
maximize_or_minimize = 'max' if 'acc' in key_metric.lower() else 'min'
assert maximize_or_minimize in ['maximize', 'minimize', 'max', 'min']
self._best_value = float('inf') if maximize_or_minimize in ['minimize', 'min'] else float('-inf')
self._maximize_or_minimize = max if maximize_or_minimize in ['max', 'maximize'] else min
self._n_epochs_no_improvement = 0
self._epoch = 0
def _maybe_initialize_logger(self, metrics):
if not hasattr(self, '_logger'):
columns = ["Epoch"]
self._names = [name for name in metrics]
columns += ["T {}".format(name) for name in metrics]
columns += ["V {}".format(name) for name in metrics]
columns += ["Best {}".format(self._key_metric)]
columns += ["Time"]
self._logger = TableLogger(
columns=columns,
float_format='{:.3f}'.format,
default_colwidth=10)
def _is_better(self, test_metrics):
return self._maximize_or_minimize(
self._best_value, test_metrics[self._key_metric]) != self._best_value
def check_and_log_metric(self, train_metrics, test_metrics):
self._maybe_initialize_logger(train_metrics)
if self._is_better(test_metrics):
self._best_value = self._maximize_or_minimize(
self._best_value, test_metrics[self._key_metric])
self._n_epochs_no_improvement = 0
else:
self._n_epochs_no_improvement += 1
to_log = [self._epoch]
to_log += [train_metrics[name] for name in self._names]
to_log += [test_metrics[name] for name in self._names]
to_log += [self._best_value]
to_log += [str(round(train_metrics.runtime + test_metrics.runtime)) + 's']
self._logger(*tuple(to_log))
self._epoch += 1
@property
def was_improvement(self) -> bool:
return self._n_epochs_no_improvement == 0
@property
def n_epochs_no_improvement(self) -> int:
return self._n_epochs_no_improvement
@proteins.capture
def rename_directory(outdir: str, model, tasks):
if isinstance(tasks, str):
tasks = [tasks]
savedir, basedir = outdir.rsplit('/', 1)
new_outdir = os.path.join(savedir, '_'.join(tasks + [model, basedir]))
os.rename(outdir, new_outdir)
@proteins.capture
def cleanup_folders(outdir: str, model, tasks, debug):
if debug or not glob(os.path.join(outdir, '*.h5')):
shutil.rmtree(outdir)
else:
rename_directory(outdir, model, tasks)
def consolidate_data(outfile, include_hidden: bool = False):
with open(outfile, 'rb') as f:
outputs = pkl.load(f)
data = defaultdict(list) # type: ignore
for output in outputs:
output = output[0]
length = output['protein_length']
for key, protein_batch in output.items():
for protein_length, protein_data in zip(length, protein_batch):
if np.isscalar(protein_data):
data[key].append(protein_data)
elif protein_data.ndim == 1 and protein_data.dtype in [np.float32, np.float64]:
data[key].append(protein_data)
else:
data[key].append(protein_data[:protein_length])
data = dict(data)
if not include_hidden:
del data['encoder_output']
with open(outfile, 'wb') as f:
pkl.dump(data, f)
@proteins.command
def eval(_run, _config, tasks: Union[str, List[str]], model: str):
assert _config['load_task_from'] is not None
outdir = _run.observers[0].basedir
atexit.register(cleanup_folders, outdir, debug=True)
sess = setup_tensorflow()
if isinstance(tasks, str):
tasks = [tasks]
embedding_model = ModelBuilder.build_model(model)
task_list = TaskBuilder.build_tasks(tasks)
task_model = TaskBuilder.build_task_model(
embedding_model, task_list, _config['freeze_embedding_weights'])
experiment = ProteinExperiment(
task_model, task_list)
if not _config['datafile']:
_, valid_data = get_data(task_list, embedding_model.get_optimal_batch_sizes())
else:
datafile = _config['datafile'] if ',' not in _config['datafile'] else _config['datafile'].split(',')
valid_data = task_list[0].get_test_data(embedding_model.boundaries, datafile)
test_graph = rk.train.TestGraph.from_experiment(experiment, valid_data)
sess.run(tf.global_variables_initializer())
print('Model Parameters: {}'.format(embedding_model.count_params()))
print('Loading task weights from {}'.format(_config['load_task_from']))
rk.utils.load_distributed(
experiment.distribution_strategy, task_model, _config['load_task_from'])
task_dir = os.path.dirname(_config['load_task_from'])
outfile = os.path.join(task_dir, 'outputs.pkl')
print('Saving outputs to {}'.format(outfile))
test_metrics = test_graph.run_epoch(save_outputs=outfile)
print(test_metrics.get_average())
consolidate_data(outfile, include_hidden=True)
@proteins.automain
def main(_run, _config, tasks: Union[str, List[str]], model: str):
outdir = _run.observers[0].basedir
atexit.register(cleanup_folders, outdir)
sess = setup_tensorflow()
if isinstance(tasks, str):
tasks = [tasks]
embedding_model = ModelBuilder.build_model(model)
task_list = TaskBuilder.build_tasks(tasks)
task_model = TaskBuilder.build_task_model(
embedding_model, task_list, _config['freeze_embedding_weights'])
experiment = ProteinExperiment(task_model, task_list)
bounds, batch_sizes = embedding_model.get_optimal_batch_sizes()
batch_sizes = np.asarray(batch_sizes / len(tasks), np.int32)
batch_sizes[batch_sizes <= 0] = 1
train_data, valid_data = get_data(task_list, (bounds, batch_sizes))
if _config['steps_per_epoch'] != -1:
train_data = train_data.repeat()
train_graph = rk.train.TrainGraph.from_experiment(experiment, train_data)
test_graph = rk.train.TestGraph.from_experiment(experiment, valid_data)
sess.run(tf.global_variables_initializer())
print('Model Parameters: {}'.format(embedding_model.count_params()))
if _config['load_from'] is not None:
print('Loading weights from {}'.format(_config['load_from']))
rk.utils.load_distributed(
experiment.distribution_strategy, embedding_model, _config['load_from'])
if _config['load_task_from'] is not None:
print('Loading task weights from {}'.format(_config['load_task_from']))
rk.utils.load_distributed(
experiment.distribution_strategy, task_model, _config['load_task_from'])
evaluator = MetricEvaluator(task_list[0].key_metric)
train_graph.initialize()
for epoch in range(_config['num_epochs']):
train_metrics = train_graph.run_for_n_steps(_config['steps_per_epoch'], epoch_num=epoch)
outfile = os.path.join(outdir, 'outputs.pkl') if _config['save_outputs'] else None
test_metrics = test_graph.run_epoch(epoch_num=epoch, save_outputs=outfile)
if all(isinstance(task, AbstractLanguageModelingTask) for task in task_list):
with experiment.distribution_strategy.scope():
embedding_model.save_weights('{}/epoch_{}.h5'.format(outdir, epoch), overwrite=True)
evaluator.check_and_log_metric(train_metrics, test_metrics)
for name, value in train_metrics.items():
_run.log_scalar('train.{}'.format(name), value)
for name, value in test_metrics.items():
_run.log_scalar('valid.{}'.format(name), value)
_run.log_scalar('runtime', round(train_metrics.runtime + test_metrics.runtime))
if evaluator.was_improvement:
with experiment.distribution_strategy.scope():
embedding_model.save_weights('{}/best_weights.h5'.format(outdir), overwrite=True)
task_model.save_weights('{}/task_weights.h5'.format(outdir), overwrite=True)
else:
if evaluator.n_epochs_no_improvement >= _config['patience']:
print("Early stopping because no improvement in validation loss "
"for {} epochs\n".format(_config['patience']))
break
| 36.139601
| 113
| 0.671502
|
b20be4c7aded3df7419797a9c56f9b7e0993e946
| 8,321
|
py
|
Python
|
docs/conflib/release_mapping.py
|
trevor-vaughan/simp-doc
|
6c544cab47dc69fc5965a867ec22cb4a7101f007
|
[
"Apache-2.0"
] | 1
|
2015-10-29T19:10:46.000Z
|
2015-10-29T19:10:46.000Z
|
docs/conflib/release_mapping.py
|
jeannegreulich/simp-doc
|
838d36e72739ad3fb0511e0978e1590c4e1c8865
|
[
"Apache-2.0"
] | null | null | null |
docs/conflib/release_mapping.py
|
jeannegreulich/simp-doc
|
838d36e72739ad3fb0511e0978e1590c4e1c8865
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import base64
import copy
import glob
import json
import os
import re
import sys
import time
import urllib2
from textwrap import dedent
import yaml
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from conflib.constants import *
def github_api_get(url):
token = os.environ.get('GITHUB_API_KEY')
request = urllib2.Request(url)
request.add_header('User-Agent', 'Mozilla/55.0')
if token is not None:
request.add_header('Authorization','token %s' % token)
return urllib2.urlopen(request)
def get_version_map(simp_branch, local_simp_core_path, simp_github_api_base,
default_simp_branch, on_rtd):
""" Fetch the version map either from local disk or GitHub """
ver_map = {}
ver_mapper_name = 'release_mappings.yaml'
if not on_rtd:
# SIMP 6 and later
os_ver_mappers = glob.glob(
os.path.join(local_simp_core_path, 'build', 'distributions',
'*', '*', '*', ver_mapper_name)
)
# SIMP 4/5
if not os_ver_mappers:
os_ver_mappers = glob.glob(
os.path.join(local_simp_core_path, 'build', ver_mapper_name)
)
if os_ver_mappers:
for os_ver_mapper in os_ver_mappers:
with open(os_ver_mapper, 'r') as f:
__update_ver_map(ver_map, yaml.load(f.read()))
if on_rtd or not ver_map:
github_api_base = simp_github_api_base + '/simp-core/git/trees/'
if simp_branch:
branch_to_query = simp_branch
else:
branch_to_query = default_simp_branch
github_api_target = github_api_base + branch_to_query
github_opts = '?recursive=1'
# only try retrieving each API URL from github once, because API calls
# are rate limited
try:
# Grab the distribution tree
distro_json = json.load(github_api_get(github_api_target + github_opts))
release_mapping_targets = [x for x in distro_json['tree'] if (
x['path'] and re.search(r'release_mappings.yaml$', x['path'])
)]
for release_mapping_target in release_mapping_targets:
url = SIMP_GITHUB_RAW_BASE + '/simp-core/' + branch_to_query + '/' + release_mapping_target['path']
print("NOTICE: Downloading Version Mapper: " + url, file=sys.stderr)
for i in range(0, MAX_SIMP_URL_GET_ATTEMPTS):
try:
release_yaml_string = urllib2.urlopen(url).read()
release_yaml = yaml.load(release_yaml_string)
if isinstance(release_yaml, basestring):
# This is ugly...
# A string is returned when the release mapping file
# is actually a link. So, need to pull down the
# content of the link, instead.
parts = release_yaml.split('/')
partial_url = '/'.join(filter(lambda a: a != '..', parts))
for target in release_mapping_targets:
if partial_url in target['path']:
url = SIMP_GITHUB_RAW_BASE + '/simp-core/' + branch_to_query + \
'/' + target['path']
release_yaml_string = github_api_get(url).read()
release_yaml = yaml.load(release_yaml_string)
break
__update_ver_map(ver_map, release_yaml)
except urllib2.URLError as url_obj:
print('Error downloading ' + url, file=sys.stderr)
r = re.compile("^Status:")
print('Error status: ' + filter(r.match,url_obj.info().headers)[0])
time.sleep(1)
continue
break
except urllib2.URLError as url_obj:
print('Error downloading ' + github_api_target + github_opts, file=sys.stderr)
r = re.compile("^Status:")
print('Error status: ' + filter(r.match,url_obj.info().headers)[0])
return ver_map
def version_map_to_rst(full_version, version_family, ver_map):
""" Return a version of the version map that is suitable for printing. """
none_found_msg = '* No SIMP Mapping Data Found for "' + full_version + '"'
# Easy cop out
if not ver_map:
return none_found_msg
simp_release_list = __generate_version_list(full_version, version_family)
# Build the Release mapping table for insertion into the docs
release_mapping_list = []
ver_map_releases = ver_map.keys()
simp_release = full_version
if simp_release not in ver_map_releases:
for ver in simp_release_list:
if ver in ver_map_releases:
simp_release = ver
print("Warning: version mapper falling back to " + simp_release, file=sys.stderr)
else:
simp_release = None
if simp_release:
release_mapping_list.append('* **SIMP ' + simp_release + '**')
for os_key in sorted(ver_map[simp_release].keys()):
release_mapping_list.append("\n * **" + os_key + '**')
for i, iso in enumerate(ver_map[simp_release][os_key]['isos']):
release_mapping_list.append("\n * **ISO #" + str(i+1) + ":** " + iso['name'])
release_mapping_list.append(" * **Checksum:** " + iso['checksum'])
if not release_mapping_list:
release_mapping_list.append(none_found_msg)
# Trailing newline
release_mapping_list.append('')
return "\n".join(release_mapping_list)
def known_os_compatibility_rst(simp_version_dict,
local_simp_core_path=LOCAL_SIMP_CORE_PATH,
simp_github_api_base=SIMP_GITHUB_API_BASE,
default_simp_branch=DEFAULT_SIMP_BRANCH, on_rtd=ON_RTD):
""" Output the fully formatted OS Compatibility RST """
ver_map = get_version_map(simp_version_dict['simp_branch'],
local_simp_core_path, simp_github_api_base,
default_simp_branch, on_rtd)
os_compat_rst = """
Known OS Compatibility
----------------------
{0}
""".format(version_map_to_rst(simp_version_dict['full_version'],
simp_version_dict['version_family'], ver_map))
return dedent(os_compat_rst)
### Private Methods
def __update_ver_map(ver_map, data):
"""
Merge a parsed release_mappings.yaml document into ver_map so it can be
rendered in the OS compatibility list.
"""
simp_versions = sorted(data['simp_releases'].keys(), reverse=True)
for simp_version in simp_versions:
for flavor in data['simp_releases'][simp_version]['flavors'].keys():
isos = data['simp_releases'][simp_version]['flavors'][flavor]['isos']
os_key = flavor + ' ' + data['simp_releases'][simp_version]['flavors'][flavor]['os_version']
if not (isos and os_key):
continue
if not ver_map:
ver_map[simp_version] = {os_key: {'isos': isos}}
else:
if ver_map.get(simp_version):
if not ver_map[simp_version].get(os_key):
ver_map[simp_version][os_key] = {'isos': []}
else:
ver_map[simp_version] = {os_key: {'isos': []}}
for iso in isos:
if iso not in ver_map[simp_version][os_key]['isos']:
ver_map[simp_version][os_key]['isos'].append(iso)
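# Illustrative sketch only (the release names and ISO entries below are
# assumptions, not taken from a real release_mappings.yaml): after
# __update_ver_map runs, ver_map is expected to look roughly like
#
#   {'6.4.0': {'CentOS 7.0': {'isos': [{'name': 'CentOS-7-x86_64-DVD.iso',
#                                       'checksum': '<sha256>'}]}}}
#
# i.e. SIMP release -> "<flavor> <os_version>" -> list of ISO dicts, which is
# the structure that version_map_to_rst() walks when building the RST table.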
def __generate_version_list(full_version, version_family):
"""
Put together an ordered list that will provide a quick match for the
provided version
"""
    # From SIMP 6 on, full_version and version_family are sufficient.
    # For earlier versions, custom (odd) version families are needed.
version_list = [ full_version ]
version_list.extend(version_family)
if full_version.startswith('5'):
version_list.extend(['5.1.X']) # 5.1.X for a 5.2.2 or later
elif full_version.startswith('4'):
version_list.extend(['4.2.X']) # 4.2.X for a 4.3.2 or later
return version_list
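# Example (hypothetical inputs, assuming version_family is a list of family
# strings, shown only to illustrate the match ordering):
#
#   __generate_version_list('5.2.2', ['5.2.X', '5.X'])
#   -> ['5.2.2', '5.2.X', '5.X', '5.1.X']
#
# so the exact release is tried first, then its family entries, then the
# legacy '5.1.X' fallback noted in the comment above.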
| 37.481982
| 115
| 0.602331
|
677ebdaf24196214002686c2ceb286a637f3817d
| 1,625
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/verifications/views.py
|
amour-lee/MeiDuoProject
|
72bae3886d5db79f63725d3aa1a6b4bad294572e
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/verifications/views.py
|
amour-lee/MeiDuoProject
|
72bae3886d5db79f63725d3aa1a6b4bad294572e
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/verifications/views.py
|
amour-lee/MeiDuoProject
|
72bae3886d5db79f63725d3aa1a6b4bad294572e
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.views import APIView
import random, logging
from django_redis import get_redis_connection
from . import constants
from meiduo_mall.libs.yuntongxun.sms import CCP
from rest_framework.response import Response
from rest_framework import status
from celery_tasks.sms.tasks import send_sms_code
# Create your views here.
logger = logging.getLogger('django')
class SMSCodeView(APIView):
"""发送短信验证码"""
def get(self, request, mobile):
"""
GET /sms_codes/(?P<mobile>1[3-9]\d{9})/
"""
        # Create a connection object to redis
redis_conn = get_redis_connection('verify_codes')
        # First check whether this mobile number already requested an SMS within the last 60s
send_flag = redis_conn.get('send_flag_%s' % mobile)
if send_flag:
            # If send_flag is set, an SMS was already sent within the last 60s
return Response({'message': '频繁发送短信'}, status=status.HTTP_400_BAD_REQUEST)
        # Generate a 6-digit SMS verification code
sms_code = '%06d' % random.randint(0, 999999)
logger.info(sms_code)
        # Use a redis pipeline: queue multiple commands and execute them together to reduce round trips to redis and improve performance
pl = redis_conn.pipeline()
        # Save the SMS verification code to redis
pl.setex('sms_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)
        # Add a "recently sent" flag for this mobile number to redis
pl.setex('send_flag_%s' % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)
        # Execute the pipeline
pl.execute()
        # Send the SMS verification code through the RongLian (yuntongxun) cloud service
# CCP().send_template_sms(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES // 60], 1)
        # Send the SMS verification code asynchronously: calling delay() enqueues the celery task
# send_sms_code.delay(mobile, sms_code)
        # Return the result
return Response({'message': 'OK'})
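# Rough usage sketch (hypothetical client request, not part of this module):
# given the URL pattern in the docstring above, a call such as
#   GET /sms_codes/13800138000/
# returns {"message": "OK"} on success, while a repeat request within
# SEND_SMS_CODE_INTERVAL seconds is rejected with HTTP 400.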
| 28.508772
| 96
| 0.669538
|
b75f75b8c124c11ff7e017cbe5bc91fe84dfbd44
| 12,256
|
py
|
Python
|
build/lib/GML/AUTO_FEATURE_ENGINEERING/autofeatlight.py
|
mehrankamal/GML
|
ee36ba502293236bf90bf6056c76f106c8dad00c
|
[
"MIT"
] | 89
|
2020-02-29T15:23:34.000Z
|
2020-11-26T07:46:09.000Z
|
build/lib/GML/AUTO_FEATURE_ENGINEERING/autofeatlight.py
|
mehrankamal/GML
|
ee36ba502293236bf90bf6056c76f106c8dad00c
|
[
"MIT"
] | 8
|
2020-11-30T16:56:01.000Z
|
2021-05-13T09:29:18.000Z
|
build/lib/GML/AUTO_FEATURE_ENGINEERING/autofeatlight.py
|
mehrankamal/GML
|
ee36ba502293236bf90bf6056c76f106c8dad00c
|
[
"MIT"
] | 23
|
2020-11-30T14:06:54.000Z
|
2021-06-25T09:28:05.000Z
|
import sys
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.preprocessing import StandardScaler, PowerTransformer
def _check_features(df, corrthr=0.995, verbose=0):
"""
Identify features with zeros variance or a correlation of (almost) 1 to other features, i.e., useless features.
Inputs:
- df: pd.DataFrame with all features
- corrthr: threshold for correlations: if a feature has a higher pearson correlation to another feature it's
considered as redundant and ignored (float; default: 0.995)
- verbose: verbosity level (int; default: 0)
Returns:
- list of column names representing 'ok' features (numeric, non-zero variance, not redundant)
"""
# make sure all data is numeric
df = df.select_dtypes(include=np.number)
useless_cols = set()
# 1. identify features with zero variance
for c, v in df.var().items():
if pd.isna(v) or v <= sys.float_info.epsilon:
useless_cols.add(c)
# 2. identify redundant features (that have a correlation of ~1 with other features)
correlated_cols = defaultdict(set)
corrmat = df.corr().abs()
np.fill_diagonal(corrmat.values, 0)
for c, v in corrmat.unstack().sort_values(ascending=False).items():
if v < corrthr:
break
if (c[0] != c[1]) and (c[0] not in useless_cols):
correlated_cols[c[0]].add(c[1])
# keep the columns that eliminate the most correlated columns
for c in sorted(correlated_cols, key=lambda x: len(correlated_cols[x]), reverse=True):
# the first variable that is correlated with others adds its correlated variables to the set of useless cols
# since we check if a variable is in useless_cols, the correlated variables can't add the original variable
if c not in useless_cols:
useless_cols.update(correlated_cols[c])
# return list of columns that should be kept
if verbose:
print("[AutoFeatLight] %i columns identified as useless:" % len(useless_cols))
print(sorted(useless_cols))
return [c for c in df.columns if c not in useless_cols]
def _compute_additional_features(X, feature_names=None, compute_ratio=True, compute_product=True, verbose=0):
"""
Compute additional non-linear features from the original features (ratio or product of two features).
Inputs:
- X: np.array with data (n_datapoints x n_features)
- feature_names: optional list of column names to identify the features in X
- compute_ratio: bool (default: True), whether to compute ratios of features
- compute_product: bool (default: True), whether to compute products of features
- verbose: verbosity level (int; default: 0)
Returns:
- np.array (n_datapoints x n_additional_features) with newly computed features
- list with n_additional_features names describing the newly computed features
"""
# check how many new features we will compute
d = X.shape[1]
n = 0
if compute_ratio:
n += d*d - d
if compute_product:
n += (d*d - d)//2
if not n:
print("ERROR: call _compute_additional_features with compute_ratio and/or compute_product set to True")
return None, []
if not feature_names:
feature_names = ["x%i" % i for i in range(1, d+1)]
# compute new features
if verbose:
print("[AutoFeatLight] computing %i additional features from %i original features" % (n, d))
new_features = []
X_new = np.zeros((X.shape[0], n))
new_i = 0
if compute_ratio:
for i in range(d):
# compute 1/x1
with np.errstate(divide='ignore'):
x = 1/X[:, i]
# instead of dividing by 0 for some data points we just set the new feature to 0 there
x[np.invert(np.isfinite(x))] = 0.
for j in range(d):
if i != j:
# multiply with x2 to get x2/x1
X_new[:, new_i] = x * X[:, j]
new_features.append("%s / %s" % (feature_names[j], feature_names[i]))
new_i += 1
if compute_product:
for i in range(d):
for j in range(i+1, d):
X_new[:, new_i] = X[:, i] * X[:, j]
new_features.append("%s * %s" % (feature_names[i], feature_names[j]))
new_i += 1
assert new_i == n, "Internal Error in _compute_additional_features: new_i: %r, n: %r" % (new_i, n)
return X_new, new_features
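# Quick sanity check on the feature count above (illustrative, d = 3 assumed):
# ratios contribute d*d - d = 6 new columns (x2 / x1, x3 / x1, x1 / x2,
# x3 / x2, x1 / x3, x2 / x3) and products contribute (d*d - d)//2 = 3
# (x1 * x2, x1 * x3, x2 * x3), so n = 9 when both flags are enabled.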
class AutoFeatLight(BaseEstimator):
def __init__(
self,
compute_ratio=True,
compute_product=True,
scale=False,
power_transform=False,
corrthr=0.995,
corrthr_init=0.99999,
verbose=0,
):
"""
Basic Feature Engineering:
- remove useless features (zero variance or redundant)
- compute additional non-linear features (ratios and product of original features, i.e. x1/x2 and x1*x2)
- make all features more normally distributed (using the yeo-johnson power transform)
Inputs:
- compute_ratio: bool (default: True), whether to compute ratios of features
- compute_product: bool (default: True), whether to compute products of features
- scale: bool (default: False), rudimentary scaling of the data (only relevant if not computing the power_transform anyways)
- power_transform: bool (default: False), whether to use a power transform (yeo-johnson) to make all features more normally distributed
- corrthr: threshold for correlations: if a feature has a higher pearson correlation to another feature it's
considered as redundant and ignored (float; default: 0.995)
- corrthr_init: correlation threshold for initial features (before the feat eng step) (float; default: 0.99999)
- verbose: verbosity level (int; default: 0)
Attributes (after calling fit/fit_transform):
- features_: feature names of transformed features
- original_columns_: original columns of X when calling fit
- return_df_: whether fit was called with a dataframe in which case a df will be returned as well,
otherwise a numpy array
- good_cols_org_: list of good features from the original inputs
- scaler_: if scale: fitted standard scaler
- power_transformer_: if power_transform: fitted power transform
"""
self.compute_ratio = compute_ratio
self.compute_product = compute_product
self.scale = scale
self.power_transform = power_transform
self.corrthr_init = corrthr_init
self.corrthr = corrthr
self.verbose = verbose
def fit(self, X):
"""
WARNING: call fit_transform instead!
Inputs:
- X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
"""
if self.verbose:
print("[AutoFeatLight] Warning: This just calls fit_transform() but does not return the transformed dataframe.")
print("[AutoFeatLight] It is much more efficient to call fit_transform() instead of fit() and transform()!")
_ = self.fit_transform(X) # noqa
return self
def transform(self, X):
"""
Inputs:
- X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
Returns:
- new_X: new pandas dataframe or numpy array with additional/transformed features
"""
check_is_fitted(self, ["good_cols_org_"])
if not self.good_cols_org_:
if self.verbose > 0:
print("[AutoFeatLight] WARNING: No good features found; returning data unchanged.")
return X
if isinstance(X, pd.DataFrame):
# make sure all data is numeric or we'll get an error when checking X
X = X.select_dtypes(include=np.number)
df_index = X.index
else:
df_index = None
# check input
cols = list(X.columns) if isinstance(X, pd.DataFrame) else ["x%i" % i for i in range(1, X.shape[1]+1)]
X = check_array(X, force_all_finite="allow-nan")
        if cols != self.original_columns_:
raise ValueError("[AutoFeatLight] Not the same features as when calling fit.")
# sort out useless original columns
df = pd.DataFrame(X, columns=cols, index=df_index)[self.good_cols_org_]
if self.compute_ratio or self.compute_product:
# compute additional useful features
X_new, new_features = _compute_additional_features(df.to_numpy(), self.good_cols_org_, self.compute_ratio, self.compute_product, self.verbose)
df = pd.concat([df, pd.DataFrame(X_new, columns=new_features)], axis=1)
df = df[self.features_]
# scale/transform
if self.scale or self.power_transform:
X_new = self.scaler_.transform(df.to_numpy())
if self.power_transform:
X_new = self.power_transformer_.transform(X_new)
df = pd.DataFrame(X_new, columns=df.columns, index=df.index)
# return either dataframe or array
return df if self.return_df_ else df.to_numpy()
def fit_transform(self, X):
"""
Inputs:
- X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
Returns:
- new_X: new pandas dataframe or numpy array with additional/transformed features
"""
self.return_df_ = isinstance(X, pd.DataFrame)
if isinstance(X, pd.DataFrame):
# make sure all data is numeric or we'll get an error when checking X
X = X.select_dtypes(include=np.number)
df_index = X.index
else:
df_index = None
# store column names as they'll be lost in the other check
self.original_columns_ = list(X.columns) if isinstance(X, pd.DataFrame) else ["x%i" % i for i in range(1, X.shape[1]+1)]
# check input
X = check_array(X, force_all_finite="allow-nan")
# transform X into a dataframe (again)
df = pd.DataFrame(X, columns=self.original_columns_, index=df_index)
# see which of the original features are not completely useless
self.good_cols_org_ = _check_features(df, self.corrthr_init, self.verbose)
if not self.good_cols_org_:
if self.verbose > 0:
print("[AutoFeatLight] WARNING: No good features found; returning original features.")
return df if self.return_df_ else X
# compute additional features
df = df[self.good_cols_org_]
if self.compute_ratio or self.compute_product:
X_new, new_features = _compute_additional_features(df.to_numpy(), self.good_cols_org_, self.compute_ratio, self.compute_product, self.verbose)
# add new features to original dataframe
df = pd.concat([df, pd.DataFrame(X_new, columns=new_features, index=df_index)], axis=1)
# check again which of the features we should keep
self.features_ = _check_features(df, self.corrthr, self.verbose)
df = df[self.features_]
else:
self.features_ = self.good_cols_org_
if self.scale or self.power_transform:
# scale data to avoid errors due to large numbers
self.scaler_ = StandardScaler(with_mean=False)
X_new = self.scaler_.fit_transform(df.to_numpy())
if self.power_transform:
self.power_transformer_ = PowerTransformer(method='yeo-johnson', standardize=True)
X_new = self.power_transformer_.fit_transform(X_new)
df = pd.DataFrame(X_new, columns=df.columns, index=df.index)
if self.verbose > 0:
print("[AutoFeatLight] New data shape: %i x %i" % df.shape)
# return either dataframe or array
return df if self.return_df_ else df.to_numpy()
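# Minimal usage sketch (illustrative only; the random demo data and parameter
# choices below are assumptions, not part of the original module):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_df = pd.DataFrame(rng.rand(100, 3), columns=["a", "b", "c"])
    afl = AutoFeatLight(compute_ratio=True, compute_product=True, verbose=1)
    # fit_transform returns a DataFrame with the surviving original columns
    # plus the generated ratio/product features.
    print(afl.fit_transform(demo_df).shape)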
| 48.634921
| 154
| 0.639768
|
07e1fe0898e12966e20a3fd0b17889b7c7238d61
| 10,614
|
py
|
Python
|
devol/devol.py
|
eladmintzer/devol
|
73a0839b7164dde26a5e1f6c2eb692f7061947a5
|
[
"MIT"
] | null | null | null |
devol/devol.py
|
eladmintzer/devol
|
73a0839b7164dde26a5e1f6c2eb692f7061947a5
|
[
"MIT"
] | null | null | null |
devol/devol.py
|
eladmintzer/devol
|
73a0839b7164dde26a5e1f6c2eb692f7061947a5
|
[
"MIT"
] | null | null | null |
"""
Run a genetic algorithm to find an appropriate architecture for some image
classification task with Keras+TF.
To use, define a `GenomeHandler` defined in genomehandler.py. Then pass it, with
training data, to a DEvol instance to run the genetic algorithm. See the readme
for more detailed instructions.
"""
from __future__ import print_function
import random as rand
import csv
import operator
import gc
import os
from datetime import datetime
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import load_model
import tensorflow.keras.backend as K
from sklearn.metrics import log_loss
import numpy as np
if K.backend() == 'tensorflow':
import tensorflow as tf
__all__ = ['DEvol']
METRIC_OPS = [operator.__lt__, operator.__gt__]
METRIC_OBJECTIVES = [min, max]
class DEvol:
"""
Object which carries out genetic search and returns top performing model
upon completion.
"""
def __init__(self, genome_handler, data_path=""):
"""
Initialize a DEvol object which carries out the training and evaluation
of a genetic search.
Args:
genome_handler (GenomeHandler): the genome handler object defining
the restrictions for the architecture search space
data_path (str): the file which the genome encodings and metric data
will be stored in
"""
self.genome_handler = genome_handler
self.datafile = data_path or (datetime.now().ctime() + 'test_devol.csv')
self._bssf = -1
if os.path.isfile(data_path) and os.stat(data_path).st_size > 1:
            raise ValueError(('Non-empty file %s already exists. Please change '
                              'file path to prevent overwriting genome data.'
                              % data_path))
print("Genome encoding and metric data stored at", self.datafile, "\n")
with open(self.datafile, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
metric_cols = ["Val Loss", "Val Accuracy"]
genome = genome_handler.genome_representation() + metric_cols
writer.writerow(genome)
def set_objective(self, metric):
"""
Set the metric for optimization. Can also be done by passing to
`run`.
Args:
metric (str): either 'acc' to maximize classification accuracy, or
else 'loss' to minimize the loss function
"""
if metric == 'acc':
metric = 'accuracy'
if metric not in ['loss', 'accuracy']:
raise ValueError(('Invalid metric name {} provided - should be'
'"accuracy" or "loss"').format(metric))
self._metric = metric
self._objective = "max" if self._metric == "accuracy" else "min"
self._metric_index = 1 if self._metric == 'loss' else -1
self._metric_op = METRIC_OPS[self._objective == 'max']
self._metric_objective = METRIC_OBJECTIVES[self._objective == 'max']
def run(self, dataset, num_generations, pop_size, epochs, fitness=None,
metric='accuracy'):
"""
Run genetic search on dataset given number of generations and
population size
Args:
dataset : tuple or list of numpy arrays in form ((train_data,
train_labels), (validation_data, validation_labels))
num_generations (int): number of generations to search
pop_size (int): initial population size
epochs (int): epochs for each model eval, passed to keras model.fit
fitness (None, optional): scoring function to be applied to
population scores, will be called on a numpy array which is
a min/max scaled version of evaluated model metrics, so It
should accept a real number including 0. If left as default
just the min/max scaled values will be used.
metric (str, optional): must be "accuracy" or "loss" , defines what
to optimize during search
Returns:
keras model: best model found with weights
"""
self.set_objective(metric)
# If no validation data is given set it to None
if len(dataset) == 2:
(self.x_train, self.y_train), (self.x_test, self.y_test) = dataset
self.x_val = None
self.y_val = None
else:
(self.x_train, self.y_train), (self.x_test, self.y_test), (self.x_val, self.y_val) = dataset
# generate and evaluate initial population
members = self._generate_random_population(pop_size)
pop = self._evaluate_population(members,
epochs,
fitness,
0,
num_generations)
# evolve
for gen in range(1, num_generations):
members = self._reproduce(pop, gen)
pop = self._evaluate_population(members,
epochs,
fitness,
gen,
num_generations)
return load_model('best-model.h5')
def _reproduce(self, pop, gen):
members = []
# 95% of population from crossover
for _ in range(int(len(pop) * 0.95)):
members.append(self._crossover(pop.select(), pop.select()))
# best models survive automatically
members += pop.get_best(len(pop) - int(len(pop) * 0.95))
# randomly mutate
for imem, mem in enumerate(members):
members[imem] = self._mutate(mem, gen)
return members
def _evaluate(self, genome, epochs):
model = self.genome_handler.decode(genome)
loss, accuracy = None, None
fit_params = {
'x': self.x_train,
'y': self.y_train,
'validation_split': 0.1,
'epochs': epochs,
'verbose': 1,
'callbacks': [
EarlyStopping(monitor='val_loss', patience=1, verbose=1)
]
}
if self.x_val is not None:
fit_params['validation_data'] = (self.x_val, self.y_val)
try:
model.fit(**fit_params)
loss, accuracy = model.evaluate(self.x_test, self.y_test, verbose=0)
except Exception as e:
loss, accuracy = self._handle_broken_model(model, e)
self._record_stats(model, genome, loss, accuracy)
return model, loss, accuracy
def _record_stats(self, model, genome, loss, accuracy):
with open(self.datafile, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
row = list(genome) + [loss, accuracy]
writer.writerow(row)
met = loss if self._metric == 'loss' else accuracy
        # use equality comparisons: 'is' on int literals relies on interning
        if (self._bssf == -1 or
                self._metric_op(met, self._bssf) and
                accuracy != 0):
try:
os.remove('best-model.h5')
except OSError:
pass
self._bssf = met
model.save('best-model.h5')
def _handle_broken_model(self, model, error):
del model
n = self.genome_handler.n_classes
loss = log_loss(np.concatenate(([1], np.zeros(n - 1))), np.ones(n) / n)
accuracy = 1 / n
gc.collect()
if K.backend() == 'tensorflow':
K.clear_session()
tf.compat.v1.reset_default_graph()
print('An error occurred and the model could not train:')
print(error)
        print(('Model assigned poor score. Please ensure that your model '
               'constraints live within your computational resources.'))
return loss, accuracy
def _evaluate_population(self, members, epochs, fitness, igen, ngen):
fit = []
for imem, mem in enumerate(members):
self._print_evaluation(imem, len(members), igen, ngen)
res = self._evaluate(mem, epochs)
v = res[self._metric_index]
del res
fit.append(v)
fit = np.array(fit)
self._print_result(fit, igen)
return _Population(members, fit, fitness, obj=self._objective)
def _print_evaluation(self, imod, nmod, igen, ngen):
fstr = '\nmodel {0}/{1} - generation {2}/{3}:\n'
print(fstr.format(imod + 1, nmod, igen + 1, ngen))
def _generate_random_population(self, size):
return [self.genome_handler.generate() for _ in range(size)]
def _print_result(self, fitness, generation):
result_str = ('Generation {3}:\t\tbest {4}: {0:0.4f}\t\taverage:'
'{1:0.4f}\t\tstd: {2:0.4f}')
print(result_str.format(self._metric_objective(fitness),
np.mean(fitness),
np.std(fitness),
generation + 1, self._metric))
def _crossover(self, genome1, genome2):
cross_ind = rand.randint(0, len(genome1))
child = genome1[:cross_ind] + genome2[cross_ind:]
return child
def _mutate(self, genome, generation):
# increase mutations as program continues
num_mutations = max(3, generation // 4)
return self.genome_handler.mutate(genome, num_mutations)
class _Population(object):
def __len__(self):
return len(self.members)
def __init__(self, members, fitnesses, score, obj='max'):
self.members = members
scores = fitnesses - fitnesses.min()
if scores.max() > 0:
scores /= scores.max()
if obj == 'min':
scores = 1 - scores
if score:
self.scores = score(scores)
else:
self.scores = scores
self.s_fit = sum(self.scores)
def get_best(self, n):
combined = [(self.members[i], self.scores[i])
for i in range(len(self.members))]
        combined.sort(key=(lambda x: x[1]), reverse=True)
return [x[0] for x in combined[:n]]
def select(self):
dart = rand.uniform(0, self.s_fit)
sum_fits = 0
for i in range(len(self.members)):
sum_fits += self.scores[i]
if sum_fits >= dart:
return self.members[i]
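# Minimal sketch of how _Population is used internally (the member names and
# fitness values below are assumed, not taken from a real run): the fitnesses
# are min/max scaled into self.scores, and select() performs
# fitness-proportionate (roulette-wheel) selection over them.
#
#     pop = _Population(members=['g1', 'g2', 'g3'],
#                       fitnesses=np.array([0.2, 0.5, 0.9]),
#                       score=None, obj='max')
#     parent = pop.select()   # 'g3' is the most likely pick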
| 36.982578
| 104
| 0.573394
|
84122b70d7f0be23507a51695beb753d765a37a3
| 1,389
|
py
|
Python
|
scripts/pa_delete_scheduled_task.py
|
pythonanywhere/helper-scripts
|
a1f4b5c060b15515c71fa8cbfb65bb3409c8416a
|
[
"MIT"
] | 27
|
2017-09-13T20:44:03.000Z
|
2022-01-13T19:09:07.000Z
|
scripts/pa_delete_scheduled_task.py
|
pythonanywhere/helper-scripts
|
a1f4b5c060b15515c71fa8cbfb65bb3409c8416a
|
[
"MIT"
] | 30
|
2017-01-18T16:55:52.000Z
|
2022-02-14T16:15:15.000Z
|
scripts/pa_delete_scheduled_task.py
|
pythonanywhere/helper-scripts
|
a1f4b5c060b15515c71fa8cbfb65bb3409c8416a
|
[
"MIT"
] | 9
|
2017-09-26T19:01:53.000Z
|
2021-11-08T01:18:05.000Z
|
#!/usr/bin/python3.6
"""Delete scheduled task(s) by id or nuke'em all.
Usage:
pa_delete_scheduled_task.py id <num>...
pa_delete_scheduled_task.py nuke [--force]
Options:
-h, --help Prints this message
-f, --force Turns off user confirmation before deleting tasks
Note:
Task id <num> may be acquired with `pa_get_scheduled_tasks_list.py` script."""
from docopt import docopt
from pythonanywhere.scripts_commons import ScriptSchema, get_logger, get_task_from_id
from pythonanywhere.task import TaskList
def _delete_all(force):
if not force:
if input("This will irrevocably delete all your tasks, proceed? [y/N] ").lower() != "y":
return None
for task in TaskList().tasks:
task.delete_schedule()
def _delete_by_id(id_numbers):
for task_id in id_numbers:
task = get_task_from_id(task_id, no_exit=True)
task.delete_schedule()
def main(*, id_numbers, nuke, force):
get_logger(set_info=True)
if nuke:
_delete_all(force)
else:
_delete_by_id(id_numbers)
if __name__ == "__main__":
schema = ScriptSchema(
{"id": bool, "<num>": ScriptSchema.id_multi, "nuke": bool, "--force": ScriptSchema.boolean}
)
arguments = schema.validate_user_input(docopt(__doc__), conversions={"num": "id_numbers"})
arguments.pop("id")
main(**arguments)
| 26.207547
| 99
| 0.672426
|
5cd1adeed5715dd4d64af11661cdb36222e86ad6
| 3,492
|
py
|
Python
|
maths/fibonacci.py
|
MOHAK7488/Python-3-Master
|
36dbc5a62d7bfb12ddec27efb22dd7853e408fd0
|
[
"MIT"
] | null | null | null |
maths/fibonacci.py
|
MOHAK7488/Python-3-Master
|
36dbc5a62d7bfb12ddec27efb22dd7853e408fd0
|
[
"MIT"
] | null | null | null |
maths/fibonacci.py
|
MOHAK7488/Python-3-Master
|
36dbc5a62d7bfb12ddec27efb22dd7853e408fd0
|
[
"MIT"
] | null | null | null |
# fibonacci.py
"""
1. Calculates the iterative fibonacci sequence
2. Calculates the fibonacci sequence with a formula
a_n = [ Phi^n - (phi)^n ] / sqrt(5)
reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts.
<http://www.math.hmc.edu/funfacts>
"""
import math
import functools
import time
from decimal import getcontext, Decimal
getcontext().prec = 100
def timer_decorator(func):
@functools.wraps(func)
def timer_wrapper(*args, **kwargs):
        start = time.time()
        # run the wrapped function once and keep its result, so it is not
        # executed a second time just to produce the return value
        result = func(*args, **kwargs)
        end = time.time()
        if int(end - start) > 0:
            print(f"Run time for {func.__name__}: {(end - start):0.2f}s")
        else:
            print(f"Run time for {func.__name__}: {(end - start)*1000:0.2f}ms")
        return result
return timer_wrapper
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
class ValueTooLargeError(Error):
"""Raised when the input value is too large"""
class ValueTooSmallError(Error):
"""Raised when the input value is not greater than one"""
class ValueLessThanZero(Error):
"""Raised when the input value is less than zero"""
def _check_number_input(n, min_thresh, max_thresh=None):
"""
:param n: single integer
:type n: int
:param min_thresh: min threshold, single integer
:type min_thresh: int
:param max_thresh: max threshold, single integer
:type max_thresh: int
:return: boolean
"""
try:
if n >= min_thresh and max_thresh is None:
return True
elif min_thresh <= n <= max_thresh:
return True
elif n < 0:
raise ValueLessThanZero
elif n < min_thresh:
raise ValueTooSmallError
elif n > max_thresh:
raise ValueTooLargeError
except ValueLessThanZero:
print("Incorrect Input: number must not be less than 0")
except ValueTooSmallError:
print(
f"Incorrect Input: input number must be > {min_thresh} for the recursive "
"calculation"
)
except ValueTooLargeError:
print(
f"Incorrect Input: input number must be < {max_thresh} for the recursive "
"calculation"
)
return False
@timer_decorator
def fib_iterative(n):
"""
:param n: calculate Fibonacci to the nth integer
:type n:int
:return: Fibonacci sequence as a list
"""
n = int(n)
if _check_number_input(n, 2):
seq_out = [0, 1]
a, b = 0, 1
for _ in range(n - len(seq_out)):
a, b = b, a + b
seq_out.append(b)
return seq_out
@timer_decorator
def fib_formula(n):
"""
:param n: calculate Fibonacci to the nth integer
:type n:int
:return: Fibonacci sequence as a list
"""
seq_out = [0, 1]
n = int(n)
if _check_number_input(n, 2, 1000000):
sqrt = Decimal(math.sqrt(5))
phi_1 = Decimal(1 + sqrt) / Decimal(2)
phi_2 = Decimal(1 - sqrt) / Decimal(2)
for i in range(2, n):
temp_out = ((phi_1 ** Decimal(i)) - (phi_2 ** Decimal(i))) * (
Decimal(sqrt) ** Decimal(-1)
)
seq_out.append(int(temp_out))
return seq_out
if __name__ == "__main__":
num = 20
# print(f'{fib_recursive(num)}\n')
# print(f'{fib_iterative(num)}\n')
# print(f'{fib_formula(num)}\n')
fib_iterative(num)
fib_formula(num)
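    # For reference (assuming num = 20 as above): fib_iterative(20) returns
    # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,
    # 1597, 2584, 4181], and fib_formula(20) is expected to yield the same
    # values via Binet's formula (up to truncation in the final int() cast).
    # Each call also prints a one-line run-time report from timer_decorator.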
| 26.656489
| 86
| 0.601661
|
359ab6b5f0900d51b8199e6d2b001486d0d8df07
| 228
|
py
|
Python
|
mundo1/D14.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
mundo1/D14.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
mundo1/D14.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
#-*-coding:utf8;-*-
#qpy:console
print(5 * '-', 'CONVERSOR DE TEMPERATURA', 5 * '-')
print('\n')
c = float(input('Digite o valor em Graus Celsius: '))
f = 1.8 * c + 32
print('O valor de {}°C em Farenheit é {}°F.'.format(c, f))
| 25.333333
| 58
| 0.583333
|
17dc4b1d3e427440624f9dab678917134223f897
| 3,925
|
py
|
Python
|
ProtoToLua/protoToPb.py
|
Githaojiejie/proto2cs
|
b1e4308101d92bdf3269e4442a3e453905a336a7
|
[
"MIT"
] | null | null | null |
ProtoToLua/protoToPb.py
|
Githaojiejie/proto2cs
|
b1e4308101d92bdf3269e4442a3e453905a336a7
|
[
"MIT"
] | null | null | null |
ProtoToLua/protoToPb.py
|
Githaojiejie/proto2cs
|
b1e4308101d92bdf3269e4442a3e453905a336a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import configparser
import os
import os.path
import pprint
import re
class MsgInfo(object):
def __init__(self,msgid,msgname,comment):
self.msgid = msgid
self.msgname = msgname
self.comment = comment
msgid = ""
msgname = ""
comment = ""
class WrapFile:
fs = None
def __init__(self,real_file):
self.fs = real_file
def writelines(self,s):
self.fs.write(s + "\n")
def flush(self):
self.fs.flush()
def close(self):
self.fs.close()
def base_proto_to_pb(proto_path,dstdir):
os.chdir(proto_path)
pbfileList = []
for filename in os.listdir(proto_path):
curfile = proto_path + filename
if os.path.isfile(curfile):
houzhui = curfile[curfile.rfind('.'):]
if houzhui == '.proto':
file = filename[:filename.rfind('.')]
destname = dstdir + file + '.pb'
srcname = file + '.proto'
print(srcname)
pbfileList.append(file + '.pb')
cmd = "protoc --descriptor_set_out " + destname + " " + srcname
os.system(cmd)
return pbfileList
def ParsePbfile(fileList):
print("START GEN PbFilesDefine")
config=configparser.ConfigParser()
config.read("Config.ini")
luaPath = config.get("PATH","lua")
configFolderPath = luaPath + "Config\\"
fileDefine = configFolderPath + "PbFilesDefine.lua"
fs = WrapFile(open(fileDefine,"w+"))
fs.writelines("--Generated By ParsePbfile Do not Edit");
fs.writelines("local config = {");
for name in fileList:
fs.writelines("\t\"%s\","%(name));
fs.writelines("}");
fs.writelines("return config");
fs.flush();
fs.close();
def proto_to_pb():
begin_path = os.getcwd()
config=configparser.ConfigParser()
config.read("Config.ini")
proto_path = config.get("PATH","protocol")
dstdir = config.get("PATH","lua")
dstdir = dstdir + "Proto\\"
if not os.path.isabs(proto_path):
proto_path = begin_path + "\\" + proto_path
if not os.path.isabs(dstdir):
dstdir = begin_path + "\\" + dstdir
pbfileList =base_proto_to_pb(proto_path,dstdir)
os.chdir(begin_path)
ParsePbfile(pbfileList)
def ParseCommandProto():
config=configparser.ConfigParser()
config.read("Config.ini")
proto_path = config.get("PATH","protocol")
proto_path = proto_path + "Command.proto"
msgInfoList = []
protofile = open(proto_path,'r',encoding='UTF-8')
for line in protofile.readlines():
line = line.strip().rstrip();
print(line)
msgName = re.search("[a-zA-Z]+_[a-zA-Z]+",line)
if not msgName:
continue
msgId = re.search("[0-9]+",line)
splitstr = re.split(";[\s]*//",line);
comment = ""
if len(splitstr) > 1 :
comment = splitstr[1]
msgInfoList.append(MsgInfo(msgId.group(0),msgName.group(0),comment))
#print(comment,msgId.group(0))
#print(msgId.group(0),"=",msgName.group(0));
return msgInfoList
def ParseMsgIDMap(fs,msgidList):
fs.writelines("--Generated By msgid-gen-lua Do not Edit");
fs.writelines("local config = {");
for _msgDef in msgidList:
splitMsgname = _msgDef.msgname.split('_')
fs.writelines("\t[%s] = \"%s\",--%s"% (_msgDef.msgid, "ProtoCmd."+splitMsgname[0], _msgDef.comment));
fs.writelines("}");
fs.writelines("return config");
fs.flush();
fs.close();
def ParseMsgIDDefine(fs,msgidList):
fs.writelines("--Generated By msgid-gen-lua Do not Edit");
fs.writelines("local config = {");
for _msgDef in msgidList:
fs.writelines("\t%s = %s, --%s"%( _msgDef.msgname, _msgDef.msgid,_msgDef.comment));
fs.writelines("}");
fs.writelines("return config");
fs.flush();
fs.close();
def GenMsgID():
config=configparser.ConfigParser()
config.read("Config.ini")
luaPath = config.get("PATH","lua")
msgFolderPath = luaPath + "Config\\"
MsgIDDefine = msgFolderPath + "MsgIDDefine.lua"
MsgIDMap = msgFolderPath + "MsgIDMap.lua"
msgList = ParseCommandProto()
print("START GEN MsgIDDefine")
wf = WrapFile(open(MsgIDDefine,"w+"))
ParseMsgIDDefine(wf,msgList)
wf = WrapFile(open(MsgIDMap,"w+"))
ParseMsgIDMap(wf,msgList)
proto_to_pb()
GenMsgID()
| 25.160256
| 103
| 0.684076
|
bf83ed903938c3db7f26d148d3f788648faff435
| 863
|
py
|
Python
|
scripts/api/workflow_import.py
|
mjakubczak/galaxy
|
77c1d328d45c98881d5b310d3461d2b116363daf
|
[
"CC-BY-3.0"
] | 1
|
2019-11-03T11:45:43.000Z
|
2019-11-03T11:45:43.000Z
|
scripts/api/workflow_import.py
|
vazovn/galaxy
|
8b538f17feb17eba11b3025a2dfb3ba35414a78e
|
[
"CC-BY-3.0"
] | null | null | null |
scripts/api/workflow_import.py
|
vazovn/galaxy
|
8b538f17feb17eba11b3025a2dfb3ba35414a78e
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
"""
Import workflows from the command line.
Example calls:
python workflow_import.py <api_key> <galaxy_url> '/path/to/workflow/file [--add_to_menu]'
"""
from __future__ import print_function
import os
import sys
from common import submit
def main():
api_key = sys.argv[1]
api_base_url = sys.argv[2]
api_url = "%s/api/workflows" % api_base_url
try:
data = {}
data['installed_repository_file'] = sys.argv[3]
if len(sys.argv) > 4 and sys.argv[4] == "--add_to_menu":
data['add_to_menu'] = True
except IndexError:
print('usage: %s key galaxy_url workflow_file' % os.path.basename(sys.argv[0]))
sys.exit(1)
# print display( api_key, api_base_url + "/api/workflows" )
submit( api_key, api_url, data, return_formatted=False )
if __name__ == '__main__':
main()
| 26.96875
| 89
| 0.65701
|
78b7f0c95ae8faa2079023c004a01a2eaa630f38
| 819
|
py
|
Python
|
102_user_display_and_settings_module/main/migrations/0016_auto_20200313_0942.py
|
Ampel2Go/community
|
08759509287efef08218a4eb6e6e2b029b862b4a
|
[
"Apache-2.0"
] | 2
|
2020-08-08T15:38:08.000Z
|
2020-11-18T13:13:45.000Z
|
102_user_display_and_settings_module/main/migrations/0016_auto_20200313_0942.py
|
Ampel2Go/community
|
08759509287efef08218a4eb6e6e2b029b862b4a
|
[
"Apache-2.0"
] | 1
|
2021-09-22T19:46:49.000Z
|
2021-09-22T19:46:49.000Z
|
102_user_display_and_settings_module/main/migrations/0016_auto_20200313_0942.py
|
Ampel2Go/community
|
08759509287efef08218a4eb6e6e2b029b862b4a
|
[
"Apache-2.0"
] | 1
|
2022-02-16T09:52:31.000Z
|
2022-02-16T09:52:31.000Z
|
# Generated by Django 2.1.1 on 2020-03-13 09:42
import datetime
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('main', '0015_auto_20200312_1835'),
]
operations = [
migrations.AlterField(
model_name='wikiarticle',
name='body',
field=tinymce.models.HTMLField(default='body'),
),
migrations.AlterField(
model_name='wikiarticle',
name='date',
field=models.DateField(default=datetime.datetime(2020, 3, 13, 9, 42, 34, 808015)),
),
migrations.AlterField(
model_name='wikiarticle',
name='title_short',
field=models.CharField(default='title_short', max_length=27),
),
]
| 26.419355
| 94
| 0.593407
|
c9bcbfa19233933b87205af30bbf3926099311e6
| 181
|
py
|
Python
|
code_challenges/sum_matrix/sum_matrix.py
|
eugenemonnier/data-structures-and-algorithms
|
90177c2543e5a3e40a0bffe76ad2aeafba7574a6
|
[
"MIT"
] | null | null | null |
code_challenges/sum_matrix/sum_matrix.py
|
eugenemonnier/data-structures-and-algorithms
|
90177c2543e5a3e40a0bffe76ad2aeafba7574a6
|
[
"MIT"
] | null | null | null |
code_challenges/sum_matrix/sum_matrix.py
|
eugenemonnier/data-structures-and-algorithms
|
90177c2543e5a3e40a0bffe76ad2aeafba7574a6
|
[
"MIT"
] | null | null | null |
def sum_matrix(matrix):
result = list()
for idx, arr in enumerate(matrix):
result.append(0)
for val in arr:
result[idx] += val
return result
| 22.625
| 38
| 0.569061
|
a3982ab3e4bfef371c60899a2226f1f145df19a1
| 35,279
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_initializer.py
|
zhenlin-work/Paddle
|
ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b
|
[
"Apache-2.0"
] | 2
|
2018-12-27T07:13:55.000Z
|
2021-06-16T09:30:09.000Z
|
python/paddle/fluid/tests/unittests/test_initializer.py
|
zhenlin-work/Paddle
|
ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_initializer.py
|
zhenlin-work/Paddle
|
ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b
|
[
"Apache-2.0"
] | 1
|
2020-11-25T10:41:52.000Z
|
2020-11-25T10:41:52.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import math
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer
from paddle.fluid.core import VarDesc
DELTA = 0.00001
def check_cast_op(op):
return op.type == 'cast' and \
op.attr('in_dtype') == VarDesc.VarType.FP32 and \
op.attr('out_dtype') in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]
def output_hist(out):
hist, _ = np.histogram(out, range=(-1, 1))
hist = hist.astype("float32")
hist /= float(out.size)
prob = 0.1 * np.ones((10))
return hist, prob
class TestConstantInitializer(unittest.TestCase):
def test_calculate_gain(self):
self.assertEqual(paddle.nn.initializer.calculate_gain('sigmoid'), 1)
self.assertEqual(paddle.nn.initializer.calculate_gain('linear'), 1)
self.assertEqual(paddle.nn.initializer.calculate_gain('conv2d'), 1)
self.assertEqual(paddle.nn.initializer.calculate_gain('tanh'), 5.0 / 3)
self.assertEqual(
paddle.nn.initializer.calculate_gain('relu'), math.sqrt(2.0))
self.assertEqual(
paddle.nn.initializer.calculate_gain('leaky_relu', 1), 1)
self.assertEqual(paddle.nn.initializer.calculate_gain('selu'), 3.0 / 4)
def test_constant_initializer_default_value(self, dtype="float32"):
"""Test the constant initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 0.0, delta=DELTA)
return block
def test_constant_initializer(self, dtype="float32"):
"""Test constant initializer with supplied value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.ConstantInitializer(2.3))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'fill_constant')
self.assertAlmostEqual(init_op.attr('value'), 2.3, delta=DELTA)
return block
def test_constant_initializer_fp16(self):
"""Test constant initializer with float16
"""
block = self.test_constant_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_constant_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_constant_initializer_bf16(self):
"""Test constant initializer with bfloat16
No cast operator has been added here
"""
self.test_constant_initializer_default_value("uint16")
self.test_constant_initializer("uint16")
class TestUniformInitializer(unittest.TestCase):
def test_uniform_initializer_default_value(self, dtype="float32"):
"""Test the uniform initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer())
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -1.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 1.0, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
return block
def test_uniform_initializer_random_seed(self):
"""Test the uniform initializer with manually setting seed
"""
program = framework.Program()
program.random_seed = 123
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param1",
initializer=initializer.UniformInitializer())
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param2",
initializer=initializer.UniformInitializer(seed=456))
init_op = block.ops[1]
self.assertEqual(init_op.attr("seed"), 456)
init_op1 = block.ops[0]
self.assertEqual(init_op1.attr("seed"), 123)
def test_uniform_initializer(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, 3.1, 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
self.assertAlmostEqual(init_op.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 123)
return block
def test_uniform_initializer_two_op(self, dtype="float32"):
"""Test uniform initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for i in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.UniformInitializer(-4.2, float(i), 123))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op0 = block.ops[0]
self.assertEqual(init_op0.type, 'uniform_random')
self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA)
self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA)
self.assertEqual(init_op0.attr('seed'), 123)
return block
def test_uniform_initializer_fp16(self):
"""Test uniform initializer with float16
"""
block = self.test_uniform_initializer_default_value("float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer(dtype="float16")
self.assertTrue(check_cast_op(block.ops[1]))
block = self.test_uniform_initializer_two_op("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_uniform_initializer_bf16(self):
"""Test uniform initializer with bfloat16
No cast operator has been added here
"""
block = self.test_uniform_initializer_default_value("uint16")
block = self.test_uniform_initializer(dtype="uint16")
block = self.test_uniform_initializer_two_op("uint16")
class TestNormalInitializer(unittest.TestCase):
def test_normal_initializer_default_value(self):
"""Test the normal initializer with default value
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.NormalInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), 1.0, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_initializer(self, dtype="float32"):
"""Test normal initializer with supplied attributes
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.NormalInitializer(2.3, 1.9, 123))
num_ops = 2 if dtype in ["float16", "uint16"] else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
self.assertAlmostEqual(init_op.attr('mean'), 2.3, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), 1.9, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 123)
return block
def test_normal_initializer_fp16(self):
"""Test normal initializer with float16
"""
block = self.test_normal_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_normal_initializer_bf16(self):
"""Test normal initializer with bfloat16
"""
block = self.test_normal_initializer("uint16")
self.assertTrue(check_cast_op(block.ops[1]))
class TestXavierInitializer(unittest.TestCase):
def test_uniform_xavier_initializer(self):
"""Test Xavier initializer with uniform distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / (param.shape[0] + param.shape[1]))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_uniform_xavier_initializer_conv(self):
"""Test Xavier initializer with uniform distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
receptive_field_size = float(15 * 20)
limit = np.sqrt(6.0 / (
(param.shape[0] + param.shape[1]) * receptive_field_size))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_xavier_initializer(self):
"""Test Xavier initializer with normal distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
std = np.sqrt(2.0 / (param.shape[0] + param.shape[1]))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_xavier_initializer_conv(self):
"""Test Xavier initializer with normal distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
receptive_field_size = float(15 * 20)
std = np.sqrt(2.0 / (
(param.shape[0] + param.shape[1]) * receptive_field_size))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_xavier_initializer_supplied_arguments(self,
dtype="float32",
uniform=True):
"""Test the Xavier initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.XavierInitializer(
uniform=uniform, fan_in=12, fan_out=23, seed=134))
num_ops = 2 if (dtype == "float16" or (dtype == "uint16" and
not uniform)) else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
if uniform:
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / (12 + 23))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
else:
self.assertEqual(init_op.type, 'gaussian_random')
self.assertEqual(init_op.attr('seed'), 134)
return block
def test_xavier_initializer_fp16(self):
"""Test the Xavier initializer with float16
"""
block = self.test_xavier_initializer_supplied_arguments("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_xavier_initializer_bf16(self):
"""Test the Xavier initializer with bfloat16
"""
block_uniform = self.test_xavier_initializer_supplied_arguments(
"uint16")
self.assertEqual(len(block_uniform.ops), 1)
block_gaussian = self.test_xavier_initializer_supplied_arguments(
"uint16", False)
self.assertTrue(check_cast_op(block_gaussian.ops[1]))
class TestMSRAInitializer(unittest.TestCase):
def test_uniform_msra_initializer(self):
"""Test MSRA initializer with uniform distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / param.shape[0])
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_uniform_msra_initializer_conv(self):
"""Test MSRA initializer with uniform distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer())
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
receptive_field_size = float(15 * 20)
limit = np.sqrt(6.0 / (param.shape[1] * receptive_field_size))
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_msra_initializer(self):
"""Test MSRA initializer with normal distribution on
for matrix multiply.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
std = np.sqrt(2.0 / param.shape[0])
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_normal_msra_initializer_conv(self):
"""Test MSRA initializer with normal distribution on
for convolutions.
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
param = block.create_parameter(
dtype="float32",
shape=[5, 10, 15, 20],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(uniform=False))
self.assertEqual(len(block.ops), 1)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'gaussian_random')
receptive_field_size = float(15 * 20)
std = np.sqrt(2.0 / (param.shape[1] * receptive_field_size))
self.assertAlmostEqual(init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(init_op.attr('std'), std, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 0)
def test_msra_initializer_supplied_arguments(self, dtype="float32"):
"""Test the MSRA initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[5, 10],
lod_level=0,
name="param",
initializer=initializer.MSRAInitializer(
fan_in=12, seed=134))
num_ops = 2 if dtype == "float16" else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'uniform_random')
limit = np.sqrt(6.0 / 12)
self.assertAlmostEqual(init_op.attr('min'), -limit, delta=DELTA)
self.assertAlmostEqual(init_op.attr('max'), limit, delta=DELTA)
self.assertEqual(init_op.attr('seed'), 134)
return block
def test_msra_initializer_fp16(self):
"""Test the MSRA initializer with float16
"""
block = self.test_msra_initializer_supplied_arguments("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_msra_initializer_bf16(self):
"""Test the MSRA initializer with bfloat16
"""
block = self.test_msra_initializer_supplied_arguments("uint16")
class TestBilinearInitializer(unittest.TestCase):
def test_bilinear_initializer(self, dtype="float32"):
"""Test the bilinear initializer with supplied arguments
"""
program = framework.Program()
block = program.global_block()
for _ in range(2):
block.create_parameter(
dtype=dtype,
shape=[8, 1, 3, 3],
lod_level=0,
name="param",
initializer=initializer.BilinearInitializer())
num_ops = 2 if dtype in ["float16", "uint16", "float64"] else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'assign_value')
return block
def test_bilinear_initializer_fp64(self):
self.test_bilinear_initializer(dtype='float64')
def test_bilinear_initializer_fp16(self):
"""Test the bilinear initializer with supplied arguments
"""
block = self.test_bilinear_initializer("float16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_bilinear_initializer_bf16(self):
"""Test the bilinear initializer with supplied arguments
"""
block = self.test_bilinear_initializer("uint16")
self.assertTrue(check_cast_op(block.ops[1]))
def test_type_error(self):
self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')
class TestNumpyArrayInitializer(unittest.TestCase):
def test_numpy_array_initializer(self, dtype="float32"):
"""Test the numpy array initializer with supplied arguments
"""
import numpy
program = framework.Program()
block = program.global_block()
np_array = numpy.random.random((10000)).astype(dtype)
for _ in range(2):
block.create_parameter(
dtype=np_array.dtype,
shape=np_array.shape,
lod_level=0,
name="param",
initializer=initializer.NumpyArrayInitializer(np_array))
num_ops = 2 if dtype in ["float16", "uint16"] else 1
self.assertEqual(len(block.ops), num_ops)
init_op = block.ops[0]
self.assertEqual(init_op.type, 'assign_value')
assert (init_op.attr('fp32_values') == np_array).all()
return block
def test_numpy_array_initializer_fp16(self):
"""Test the numpy array initializer with float16
"""
block = self.test_numpy_array_initializer("float16")
self.assertTrue(block.ops[1])
def test_numpy_array_initializer_bf16(self):
"""Test the numpy array initializer with bfloat16
"""
block = self.test_numpy_array_initializer("uint16")
self.assertTrue(block.ops[1])
class TestSetGlobalInitializer(unittest.TestCase):
def test_set_global_weight_initilizer(self):
"""Test Set Global Param initilizer with UniformInitializer
"""
main_prog = framework.Program()
startup_prog = framework.Program()
fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5))
with fluid.program_guard(main_prog, startup_prog):
x = fluid.data(name="x", shape=[1, 3, 32, 32])
# default initilizer of param in layers.conv2d is NormalInitializer
conv = fluid.layers.conv2d(x, 5, 3)
block = startup_prog.global_block()
self.assertEqual(len(block.ops), 2)
# init weight is the first op, and bias is the second
bias_init_op = block.ops[1]
self.assertEqual(bias_init_op.type, 'fill_constant')
self.assertAlmostEqual(bias_init_op.attr('value'), 0.0, delta=DELTA)
param_init_op = block.ops[0]
self.assertEqual(param_init_op.type, 'uniform_random')
self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
self.assertEqual(param_init_op.attr('seed'), 0)
fluid.set_global_initializer(None)
def test_set_global_bias_initilizer(self):
"""Test Set Global Bias initilizer with NormalInitializer
"""
main_prog = framework.Program()
startup_prog = framework.Program()
fluid.set_global_initializer(
initializer.Uniform(
low=-0.5, high=0.5),
bias_init=initializer.Normal(
loc=0.0, scale=2.0))
with fluid.program_guard(main_prog, startup_prog):
x = fluid.data(name="x", shape=[1, 3, 32, 32])
# default initilizer of bias in layers.conv2d is ConstantInitializer
conv = fluid.layers.conv2d(x, 5, 3)
block = startup_prog.global_block()
self.assertEqual(len(block.ops), 2)
# init weight is the first op, and bias is the second
bias_init_op = block.ops[1]
self.assertEqual(bias_init_op.type, 'gaussian_random')
self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
self.assertEqual(bias_init_op.attr('seed'), 0)
param_init_op = block.ops[0]
self.assertEqual(param_init_op.type, 'uniform_random')
self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
self.assertEqual(param_init_op.attr('seed'), 0)
fluid.set_global_initializer(None)
class TestUniformInitializerDygraph(unittest.TestCase):
def test_uniform_initializer(self, dtype="float32"):
"""
In dygraph mode, we can use initializer directly to initialize a tensor.
"""
paddle.disable_static()
tensor = paddle.zeros([1024, 1024, 16])
tensor.stop_gradient = False
self.assertTrue(np.allclose(np.zeros((1024, 1024, 16)), tensor.numpy()))
uniform_ = paddle.nn.initializer.Uniform()
uniform_(tensor)
self.assertEqual(tensor.stop_gradient,
False) # stop_gradient is not changed
hist, prob = output_hist(tensor.numpy())
self.assertTrue(
np.allclose(
hist, prob, rtol=0, atol=1e-3), "hist: " + str(hist))
paddle.enable_static()
class TestConsistencyOfDynamicAndStaticGraph(unittest.TestCase):
def test_order(self):
paddle.set_device('cpu')
SEED = 123
weight_attr = paddle.framework.ParamAttr(
name="linear_weight",
learning_rate=1.0,
trainable=False,
regularizer=None,
initializer=paddle.nn.initializer.TruncatedNormal(
mean=0.0, std=2.0))
bias_attr = paddle.framework.ParamAttr(
name="linear_bias",
learning_rate=1.0,
trainable=False,
regularizer=None,
initializer=paddle.nn.initializer.TruncatedNormal(
mean=0.0, std=2.0))
def run_dynamic_graph():
paddle.disable_static()
paddle.seed(SEED)
linear = paddle.nn.Linear(
1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
return linear.weight.numpy(), linear.bias.numpy()
paddle.enable_static()
def run_static_graph():
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
paddle.seed(SEED)
linear = paddle.nn.Linear(
1, 1, weight_attr=weight_attr, bias_attr=bias_attr)
res = exe.run(paddle.static.default_startup_program(),
fetch_list=['linear_weight', 'linear_bias'])
return res[0], res[1]
dynamic_res = run_dynamic_graph()
static_res = run_static_graph()
self.assertTrue(np.array_equal(dynamic_res[0], static_res[0]))
self.assertTrue(np.array_equal(dynamic_res[1], static_res[1]))
# 2-D Parameter with shape: [10, 15]
class TestOrthogonalInitializer1(unittest.TestCase):
"""
case 1
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal(gain=3.0))
self.dtype = "float64"
self.in_features = 10
self.out_features = 15
self.num_ops = 9
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(10)))
def test_orthogonal(self):
self.config()
paddle.set_default_dtype(self.dtype)
paddle.disable_static()
paddle.seed(2021)
linear = paddle.nn.Linear(
self.in_features, self.out_features, weight_attr=self.weight_attr)
res_dygraph = linear.weight.numpy()
paddle.enable_static()
paddle.seed(2021)
start_prog = paddle.static.Program()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
linear = paddle.nn.Linear(
self.in_features,
self.out_features,
weight_attr=self.weight_attr)
block = start_prog.global_block()
self.assertEqual(len(block.ops), self.num_ops)
self.assertEqual(block.ops[0].type, 'gaussian_random')
self.assertEqual(block.ops[1].type, 'qr')
self.assertEqual(block.ops[2].type, 'diag_v2')
self.assertEqual(block.ops[3].type, 'sign')
self.assertEqual(block.ops[4].type, 'elementwise_mul')
self.assertEqual(block.ops[-3].type, 'reshape2')
self.assertEqual(block.ops[-2].type, 'scale')
exe = paddle.static.Executor()
res_static = exe.run(start_prog, fetch_list=[linear.weight])[0]
self.check_result(res_dygraph, res_static)
# 2-D Parameter with shape: [15, 10]
class TestOrthogonalInitializer2(TestOrthogonalInitializer1):
"""
case 2
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal(gain=2.0))
self.dtype = "float64"
self.in_features = 15
self.out_features = 10
self.num_ops = 8
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(10)))
# 2-D Parameter with shape: [10, 10]
class TestOrthogonalInitializer3(TestOrthogonalInitializer1):
"""
case 3
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal())
self.dtype = "float32"
self.in_features = 10
self.out_features = 10
self.num_ops = 8
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(10), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(10), atol=1.e-6))
def test_error(self):
self.config()
with self.assertRaises(AssertionError):
paddle.nn.Linear(10, 10, bias_attr=self.weight_attr)
# 4-D Parameter with shape: [6, 4, 3, 3]
class TestOrthogonalInitializer4(unittest.TestCase):
"""
case 4
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal(gain=3.0))
self.dtype = "float64"
self.in_features = 4
self.out_features = 6
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
a = a.reshape(6, -1)
self.assertTrue(np.allclose(np.matmul(a, a.T), 9 * np.eye(6)))
def test_orthogonal(self):
self.config()
paddle.set_default_dtype(self.dtype)
paddle.disable_static()
paddle.seed(2021)
conv2d = paddle.nn.Conv2D(
self.in_features,
self.out_features,
self.kernel_size,
weight_attr=self.weight_attr)
res_dygraph = conv2d.weight.numpy()
paddle.enable_static()
paddle.seed(2021)
start_prog = paddle.static.Program()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
conv2d = paddle.nn.Conv2D(
self.in_features,
self.out_features,
self.kernel_size,
weight_attr=self.weight_attr)
exe = paddle.static.Executor()
res_static = exe.run(paddle.static.default_startup_program(),
fetch_list=[conv2d.weight])[0]
self.check_result(res_dygraph, res_static)
# 4-D Parameter with shape: [50, 4, 3, 3]
class TestOrthogonalInitializer5(TestOrthogonalInitializer4):
"""
case 5
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal(gain=2.0))
self.dtype = "float64"
self.in_features = 4
self.out_features = 50
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
a = a.reshape(50, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), 4 * np.eye(36)))
# 4-D Parameter with shape: [36, 4, 3, 3]
class TestOrthogonalInitializer6(TestOrthogonalInitializer4):
"""
case 6
"""
def config(self):
self.weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Orthogonal())
self.dtype = "float32"
self.in_features = 4
self.out_features = 36
self.kernel_size = (3, 3)
def check_result(self, a, b):
self.assertTrue(np.array_equal(a, b))
a = a.reshape(36, -1)
self.assertTrue(np.allclose(np.matmul(a.T, a), np.eye(36), atol=1.e-6))
self.assertTrue(np.allclose(np.matmul(a, a.T), np.eye(36), atol=1.e-6))
if __name__ == '__main__':
unittest.main()
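# --- Illustrative sketch, not part of the original test file ---
# A minimal dygraph check of the orthogonality property the tests above verify.
# It reuses the `paddle` and `np` imports already present at the top of this
# module; the layer shape and the gain value are arbitrary choices.
def _orthogonal_smoke_check():
    paddle.disable_static()
    attr = paddle.ParamAttr(
        initializer=paddle.nn.initializer.Orthogonal(gain=2.0))
    linear = paddle.nn.Linear(10, 15, weight_attr=attr)
    w = linear.weight.numpy()  # shape [10, 15]; its rows form an orthogonal set
    # With in_features <= out_features, W @ W.T should equal gain**2 * I
    assert np.allclose(np.matmul(w, w.T), 4.0 * np.eye(10), atol=1e-5)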
| 38.346739
| 80
| 0.61127
|
9d9f4df75bf2f5cddcf3bbc817874f1b95adcbc8
| 541
|
py
|
Python
|
test/strings_test.py
|
jonDuke/lambdata-jduke
|
712495aca7870398b24388c9e1388bfedf48dad2
|
[
"MIT"
] | null | null | null |
test/strings_test.py
|
jonDuke/lambdata-jduke
|
712495aca7870398b24388c9e1388bfedf48dad2
|
[
"MIT"
] | 2
|
2021-04-01T08:17:59.000Z
|
2021-04-01T08:20:37.000Z
|
test/strings_test.py
|
jonDuke/lambdata-jduke
|
712495aca7870398b24388c9e1388bfedf48dad2
|
[
"MIT"
] | null | null | null |
# test/strings_test.py
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_isupper(self):
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
if __name__ == '__main__':
unittest.main()
| 24.590909
| 67
| 0.654344
|
70fbdb36b833e3541d2accae10a51866e2b06818
| 11,927
|
py
|
Python
|
mx_platform_python/model/managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | null | null | null |
mx_platform_python/model/managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | 14
|
2021-11-30T21:56:19.000Z
|
2022-02-07T18:47:10.000Z
|
mx_platform_python/model/managed_member_create_request.py
|
mxenabled/mx-platform-python
|
060dae7ddb02fdcf41fa7f7aebfa4b8a0273afac
|
[
"MIT"
] | 1
|
2022-01-12T14:59:39.000Z
|
2022-01-12T14:59:39.000Z
|
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from mx_platform_python.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from mx_platform_python.exceptions import ApiAttributeError
class ManagedMemberCreateRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'institution_code': (str,), # noqa: E501
'id': (str,), # noqa: E501
'metadata': (str,), # noqa: E501
'name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'institution_code': 'institution_code', # noqa: E501
'id': 'id', # noqa: E501
'metadata': 'metadata', # noqa: E501
'name': 'name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, institution_code, *args, **kwargs): # noqa: E501
"""ManagedMemberCreateRequest - a model defined in OpenAPI
Args:
institution_code (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
metadata (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.institution_code = institution_code
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, institution_code, *args, **kwargs): # noqa: E501
"""ManagedMemberCreateRequest - a model defined in OpenAPI
Args:
institution_code (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
metadata (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.institution_code = institution_code
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
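# --- Illustrative sketch, not part of the generated model code ---
# Minimal construction of the request model defined above. Only
# `institution_code` is required; `id`, `metadata` and `name` are optional
# keyword arguments. All values below are placeholders, not real MX data.
def _example_managed_member_create_request():
    return ManagedMemberCreateRequest(
        institution_code="INS-123",           # hypothetical institution code
        name="Example Member",                # optional
        metadata='{"created_by": "sketch"}',  # optional free-form string
    )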
| 43.529197
| 242
| 0.571141
|
d61c5b8492e3a80c086fc065f27ff7b7071e5197
| 350
|
py
|
Python
|
kale/tests/test_settings.py
|
ORIGINALLIFE/ndkale
|
759a7132afdfcabd2de658a7701673a928992827
|
[
"BSD-2-Clause"
] | null | null | null |
kale/tests/test_settings.py
|
ORIGINALLIFE/ndkale
|
759a7132afdfcabd2de658a7701673a928992827
|
[
"BSD-2-Clause"
] | null | null | null |
kale/tests/test_settings.py
|
ORIGINALLIFE/ndkale
|
759a7132afdfcabd2de658a7701673a928992827
|
[
"BSD-2-Clause"
] | null | null | null |
"""Module for kale settings for unit tests."""
from __future__ import absolute_import
import os
QUEUE_CONFIG = os.path.join(os.path.split(os.path.abspath(__file__))[0],
'test_queue_config.yaml')
QUEUE_CLASS = 'kale.test_utils.TestQueueClass'
QUEUE_SELECTOR = 'kale.test_utils.TestQueueSelector'
AWS_REGION = 'us-east-1'
| 31.818182
| 72
| 0.722857
|
97ee0ea0efebb0f559521ce0ebd840b502490bf8
| 60
|
py
|
Python
|
src/__init__.py
|
lelis-research/PyGames-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 1
|
2021-06-03T15:54:16.000Z
|
2021-06-03T15:54:16.000Z
|
src/__init__.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 3
|
2021-07-26T19:58:31.000Z
|
2021-07-27T17:35:51.000Z
|
src/__init__.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | null | null | null |
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'
| 20
| 49
| 0.783333
|
a6bb222e64d076b8c269fc80649f936054a5c617
| 2,600
|
py
|
Python
|
controllers/device/image/images.py
|
gbf-labs/rh-api
|
317a812164ad8943ab638c06f61723cb928bfd12
|
[
"Apache-2.0"
] | null | null | null |
controllers/device/image/images.py
|
gbf-labs/rh-api
|
317a812164ad8943ab638c06f61723cb928bfd12
|
[
"Apache-2.0"
] | 6
|
2020-03-30T23:11:27.000Z
|
2022-03-12T00:21:45.000Z
|
controllers/device/image/images.py
|
gbf-labs/rh-api
|
317a812164ad8943ab638c06f61723cb928bfd12
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=too-many-locals, ungrouped-imports
"""Device Images"""
from flask import request
from library.common import Common
from library.postgresql_queries import PostgreSQL
from library.couch_queries import Queries
from library.aws_s3 import AwsS3
class DeviceImages(Common):
"""Class for DeviceImages"""
# INITIALIZE
def __init__(self):
"""The Constructor for DeviceImages class"""
self.postgres = PostgreSQL()
self.couch_query = Queries()
self.aws3 = AwsS3()
super(DeviceImages, self).__init__()
def get_images(self):
"""
        This API gets all device images for a vessel.
---
tags:
- Devices
produces:
- application/json
parameters:
- name: token
in: header
description: Token
required: true
type: string
- name: userid
in: header
description: User ID
required: true
type: string
- name: vessel_id
in: query
description: Vessel ID
required: true
type: string
responses:
500:
description: Error
200:
description: Device Images
"""
data = {}
# GET DATA
token = request.headers.get('token')
userid = request.headers.get('userid')
vessel_id = request.args.get('vessel_id')
# CHECK TOKEN
token_validation = self.validate_token(token, userid)
if not token_validation:
data['alert'] = "Invalid Token"
data['status'] = 'Failed'
# RETURN ALERT
return self.return_data(data)
device_images = self.get_device_images(vessel_id)
for d_image in device_images:
d_image['image_url'] = self.aws3.get_device_image(d_image['vessel_id'],
d_image['device_id'])
data['device_images'] = device_images
data['status'] = 'ok'
return self.return_data(data)
def get_device_images(self, vessel_id):
"""Get Device Images"""
assert vessel_id, "Vessel ID is required."
sql_str = "SELECT * FROM device_image"
sql_str += " WHERE vessel_id = '{0}'".format(vessel_id)
sql_str += " AND status = 'active'"
device_images = self.postgres.query_fetch_all(sql_str)
return device_images
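    # --- Illustrative sketch, not part of the original controller ---
    # The query above interpolates vessel_id into the SQL string. The method
    # below shows the same lookup with a parameterized DB-API 2.0 cursor; the
    # `connection` argument is hypothetical (this class does not expose one)
    # and only illustrates the alternative.
    def get_device_images_parameterized_sketch(self, vessel_id, connection):
        """Parameterized variant of get_device_images (illustrative only)."""
        assert vessel_id, "Vessel ID is required."
        sql_str = ("SELECT * FROM device_image"
                   " WHERE vessel_id = %s AND status = 'active'")
        with connection.cursor() as cursor:
            cursor.execute(sql_str, (vessel_id,))
            return cursor.fetchall()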
| 29.545455
| 84
| 0.546538
|
2526c2e772d96790f1c1962b49fefff9cab567f6
| 415
|
py
|
Python
|
manage.py
|
michael760odhiambo/News-Highlight
|
2c7585e49d342bd85d8acd9a1f056ba4bffdaace
|
[
"MIT"
] | null | null | null |
manage.py
|
michael760odhiambo/News-Highlight
|
2c7585e49d342bd85d8acd9a1f056ba4bffdaace
|
[
"MIT"
] | null | null | null |
manage.py
|
michael760odhiambo/News-Highlight
|
2c7585e49d342bd85d8acd9a1f056ba4bffdaace
|
[
"MIT"
] | null | null | null |
from app import create_app
from flask_script import Manager,Server
# Creating app instance
app = create_app('development')
manager = Manager(app)
manager.add_command('server',Server)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
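# --- Usage note (illustrative, not part of the original file) ---
# With Flask-Script, the commands registered above are run from the shell:
#   python manage.py server   # start the development server
#   python manage.py test     # discover and run the unit tests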
| 21.842105
| 51
| 0.727711
|
5300fd5cec7ab1ea6cb3a461d584970f8cc58bed
| 7,605
|
py
|
Python
|
modules/Registry/lv1_os_win_reg_os_info.py
|
naaya17/carpe
|
fa2e3cfebe20f8839c985e5b9b78b538800172a1
|
[
"Apache-2.0"
] | null | null | null |
modules/Registry/lv1_os_win_reg_os_info.py
|
naaya17/carpe
|
fa2e3cfebe20f8839c985e5b9b78b538800172a1
|
[
"Apache-2.0"
] | null | null | null |
modules/Registry/lv1_os_win_reg_os_info.py
|
naaya17/carpe
|
fa2e3cfebe20f8839c985e5b9b78b538800172a1
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
import binascii
import modules.Registry.convert_util as cu
class OS_Information:
par_id = ''
case_id = ''
evd_id = ''
operating_system = ''
release_id = ''
version_number = ''
install_time = ''
product_key = ''
owner = ''
display_computer_name = ''
computer_name = ''
dhcp_dns_server = ''
operating_system_version = ''
build_number = ''
product_id = ''
last_service_pack = ''
organization = ''
last_shutdown_time = ''
system_root = ''
path = ''
last_access_time_flag = ''
timezone_utc = ''
display_timezone_name = ''
backup_flag = ''
source_location = []
def OSINFO(reg_software, reg_system):
os_list = []
os_count = 0
os_key_list = []
try:
os_key_list.append(reg_software.find_key(r"Microsoft\Windows NT\CurrentVersion"))
os_key_list.append(reg_system.find_key(r"ControlSet001\Control\ComputerName\ComputerName"))
os_key_list.append(reg_system.find_key(r"ControlSet001\Control\FileSystem"))
os_key_list.append(reg_system.find_key(r"ControlSet001\Control\TimeZoneInformation"))
os_key_list.append(reg_system.find_key(r"ControlSet001\Services\Tcpip\Parameters"))
os_key_list.append(reg_system.find_key(r"ControlSet001\Control\Windows"))
os_information = OS_Information()
os_list.append(os_information)
os_list[os_count].source_location = []
os_list[os_count].source_location.append('SOFTWARE-Microsoft/Windows NT/CurrentVersion')
os_list[os_count].source_location.append('SYSTEM-ControlSet001/Control/ComputerName/ComputerName')
os_list[os_count].source_location.append('SYSTEM-ControlSet001/Control/FileSystem')
os_list[os_count].source_location.append('SYSTEM-ControlSet001/Control/TimeZoneInformation')
os_list[os_count].source_location.append('SYSTEM-ControlSet001/Services/Tcpip/Parameters')
os_list[os_count].source_location.append('SYSTEM-ControlSet001/Control/Windows')
for os_key in os_key_list:
for os_value in os_key.values():
if os_value.name() == 'ProductName':
os_list[os_count].operating_system = os_value.data().replace('\x00', '')
if os_value.data()[:-1] == 'Windows 10 Pro':
os_list[os_count].product_key = 'W269N-WFGWX-YVC9B-4J6C9-T83GX'
elif os_value.data()[:-1] == 'Windows 10 Pro N':
os_list[os_count].product_key = 'MH37W-N47XK-V7XM9-C7227-GCQG9'
elif os_value.data()[:-1] == 'Windows 10 Pro for Workstations':
os_list[os_count].product_key = 'NRG8B-VKK3Q-CXVCJ-9G2XF-6Q84J'
elif os_value.data()[:-1] == 'Windows 10 Pro for Workstations N':
os_list[os_count].product_key = '9FNHH-K3HBT-3W4TD-6383H-6XYWF'
elif os_value.data()[:-1] == 'Windows 10 Pro Education':
os_list[os_count].product_key = '6TP4R-GNPTD-KYYHQ-7B7DP-J447Y'
elif os_value.data()[:-1] == 'Windows 10 Pro Education N':
os_list[os_count].product_key = 'YVWGF-BXNMC-HTQYQ-CPQ99-66QFC'
elif os_value.data()[:-1] == 'Windows 10 Education':
os_list[os_count].product_key = 'NW6C2-QMPVW-D7KKK-3GKT6-VCFB2'
elif os_value.data()[:-1] == 'Windows 10 Education KN':
os_list[os_count].product_key = '2WH4N-8QGBV-H22JP-CT43Q-MDWWJ'
elif os_value.data()[:-1] == 'Windows 10 Enterprise':
os_list[os_count].product_key = 'NPPR9-FWDCX-D2C8J-H872K-2YT43'
elif os_value.data()[:-1] == 'Windows 10 Enterprise KN':
os_list[os_count].product_key = 'DPH2V-TTNVB-4X9Q3-TJR4H-KHJW4'
elif os_value.data()[:-1] == 'Windows 10 Enterprise G':
os_list[os_count].product_key = 'YYVX9-NTFWV-6MDM3-9PT4T-4M68B'
elif os_value.data()[:-1] == 'Windows 10 Enterprise G N':
os_list[os_count].product_key = '44RPN-FTY23-9VTTB-MP9BX-T84FV'
elif os_value.name() == 'ReleaseId':
os_list[os_count].release_id = os_value.data().replace('\x00', '')
elif os_value.name() == 'CSDVersion':
os_list[os_count].last_service_pack = os_value.data()
elif os_value.name() == 'SystemRoot':
os_list[os_count].system_root = os_value.data().replace('\x00', '')
elif os_value.name() == 'PathName':
os_list[os_count].path = os_value.data().replace('\x00', '')
elif os_value.name() == 'EditionID':
os_list[os_count].operating_system_version = os_value.data().replace('\x00', '')
elif os_value.name() == 'RegisteredOrganization':
os_list[os_count].organization = os_value.data().replace('\x00', '')
elif os_value.name() == 'CurrentVersion':
os_list[os_count].version_number = os_value.data().replace('\x00', '')
elif os_value.name() == 'CurrentBuildNumber':
os_list[os_count].build_number = os_value.data().replace('\x00', '')
elif os_value.name() == 'ProductId':
os_list[os_count].product_id = os_value.data().replace('\x00', '')
elif os_value.name() == 'RegisteredOwner':
os_list[os_count].owner = os_value.data().replace('\x00', '')
elif os_value.name() == 'InstallDate':
os_list[os_count].install_time = cu.from_unix_timestamp(os_value.data())
elif os_value.name() == 'ComputerName':
os_list[os_count].computer_name = os_value.data().replace('\x00', '')
elif os_value.name() == 'NtfsDisableLastAccessUpdate':
# 0: True, 1: False
os_list[os_count].last_access_time_flag = os_value.data()
elif os_value.name() == 'TimeZoneKeyName':
os_list[os_count].display_timezone_name = os_value.data().replace('\x00', '')
for j in reg_software.find_key(r"Microsoft\Windows NT\CurrentVersion\Time Zones").subkeys():
if j.name() == os_list[os_count].display_timezone_name:
for k in j.values():
if k.name() == 'Display':
os_list[os_count].timezone_utc = k.data().replace('\x00', '')
elif os_value.name() == 'HostName':
os_list[os_count].display_computer_name = os_value.data().replace('\x00', '')
elif os_value.name() == 'Hostname':
os_list[os_count].display_computer_name = os_value.data().replace('\x00', '')
elif os_value.name() == 'DhcpNameServer':
os_list[os_count].dhcp_dns_server = os_value.data().replace('\x00', '')
elif os_value.name() == 'ShutdownTime':
os_list[os_count].last_shutdown_time = (datetime(1601, 1, 1) + timedelta(microseconds=int(binascii.b2a_hex(os_value.data()[::-1]), 16) / 10)).isoformat()+'Z'
    except Exception:
print('-----OS Information Error')
return os_list
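# --- Illustrative helper, not called by OSINFO above ---
# The 'ShutdownTime' branch converts a Windows FILETIME: a little-endian count
# of 100-nanosecond intervals since 1601-01-01, so dividing by 10 yields
# microseconds. This sketch performs the same conversion with int.from_bytes
# and relies on the datetime/timedelta imports at the top of this module.
def filetime_bytes_to_iso(value_bytes):
    ticks = int.from_bytes(value_bytes, byteorder='little')
    shutdown = datetime(1601, 1, 1) + timedelta(microseconds=ticks / 10)
    return shutdown.isoformat() + 'Z'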
| 58.053435
| 177
| 0.583958
|
1af805e157506f68f564dac2549f6d6692a2575c
| 250
|
py
|
Python
|
giico/drilling_water_well_service/doctype/mud_data/mud_data.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
giico/drilling_water_well_service/doctype/mud_data/mud_data.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
giico/drilling_water_well_service/doctype/mud_data/mud_data.py
|
thispl/giico
|
14c5631639ab56a586a7962be9871d722c20e205
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class MudData(Document):
pass
| 22.727273
| 49
| 0.768
|
36fd5342117d9d0ef0a6129a480b19cc2904d5fe
| 281
|
py
|
Python
|
pendium/plugins/rest.py
|
LuRsT/Pendium
|
f71b3db987853df919c14f0be4238df00852a9a7
|
[
"Apache-2.0"
] | 5
|
2015-05-07T21:26:06.000Z
|
2016-07-27T11:41:49.000Z
|
pendium/plugins/rest.py
|
LuRsT/Pendium
|
f71b3db987853df919c14f0be4238df00852a9a7
|
[
"Apache-2.0"
] | 9
|
2017-12-21T20:22:16.000Z
|
2019-07-24T13:04:35.000Z
|
pendium/plugins/rest.py
|
LuRsT/Pendium
|
f71b3db987853df919c14f0be4238df00852a9a7
|
[
"Apache-2.0"
] | null | null | null |
from docutils.core import publish_string
from flask import Markup
from pendium.plugins import IRenderPlugin
class Rest(IRenderPlugin):
name = "Rest"
def render(self, content):
content = publish_string(content, writer_name="html")
return Markup(content)
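# --- Illustrative sketch, not part of the original plugin ---
# docutils' publish_string converts reStructuredText into a complete HTML
# document; wrapping the result in Markup lets a Jinja2 template render it
# without re-escaping. The input below is an arbitrary example.
def _example_render():
    plugin = Rest()
    return plugin.render("Title\n=====\n\nSome *emphasis* and ``inline code``.")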
| 21.615385
| 61
| 0.733096
|
ee6c5bdfbe47a670ff04b7be922fcf91c434bbb0
| 19,156
|
py
|
Python
|
python/ray/data/grouped_dataset.py
|
Sertingolix/ray
|
4850ddc00f5cb3ac9f10095b69f37bf2ae6c8fdd
|
[
"Apache-2.0"
] | 1
|
2022-01-10T07:39:50.000Z
|
2022-01-10T07:39:50.000Z
|
python/ray/data/grouped_dataset.py
|
Sertingolix/ray
|
4850ddc00f5cb3ac9f10095b69f37bf2ae6c8fdd
|
[
"Apache-2.0"
] | 227
|
2021-10-01T08:00:01.000Z
|
2021-12-28T16:47:26.000Z
|
python/ray/data/grouped_dataset.py
|
bladesaber/ray
|
107001d8b8bea0672d5e987341bd6bfcc4a1420e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, Callable, Generic, Tuple, List, Optional
import numpy as np
import ray
from ray.util.annotations import PublicAPI
from ray.data.dataset import Dataset
from ray.data.impl import sort
from ray.data.aggregate import AggregateFn, Count, Sum, Max, Min, \
Mean, Std, AggregateOnT
from ray.data.block import BlockExecStats
from ray.data.impl.block_list import BlockList
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.progress_bar import ProgressBar
from ray.data.block import Block, BlockAccessor, BlockMetadata, \
T, U, KeyType
GroupKeyBaseT = Union[Callable[[T], KeyType], str]
GroupKeyT = Optional[Union[GroupKeyBaseT, List[GroupKeyBaseT]]]
AggregateOnTs = Union[AggregateOnT, List[AggregateOnT]]
@PublicAPI(stability="beta")
class GroupedDataset(Generic[T]):
"""Represents a grouped dataset created by calling ``Dataset.groupby()``.
The actual groupby is deferred until an aggregation is applied.
"""
def __init__(self, dataset: Dataset[T], key: GroupKeyT):
"""Construct a dataset grouped by key (internal API).
The constructor is not part of the GroupedDataset API.
Use the ``Dataset.groupby()`` method to construct one.
"""
self._dataset = dataset
if isinstance(key, list):
if len(key) > 1:
# TODO(jjyao) Support multi-key groupby.
raise NotImplementedError(
"Multi-key groupby is not supported yet")
else:
self._key = key[0]
else:
self._key = key
def aggregate(self, *aggs: AggregateFn) -> Dataset[U]:
"""Implements an accumulator-based aggregation.
This is a blocking operation.
Examples:
>>> grouped_ds.aggregate(AggregateFn(
... init=lambda k: [],
... accumulate=lambda a, r: a + [r],
... merge=lambda a1, a2: a1 + a2,
... finalize=lambda a: a
... ))
Args:
aggs: Aggregations to do.
Returns:
If the input dataset is simple dataset then the output is a simple
dataset of ``(k, v_1, ..., v_n)`` tuples where ``k`` is the groupby
key and ``v_i`` is the result of the ith given aggregation.
If the input dataset is an Arrow dataset then the output is an
Arrow dataset of ``n + 1`` columns where the first column is the
groupby key and the second through ``n + 1`` columns are the
results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
stats = self._dataset._stats.child_builder("aggregate")
stage_info = {}
if len(aggs) == 0:
raise ValueError("Aggregate requires at least one aggregation")
# Handle empty dataset.
if self._dataset.num_blocks() == 0:
return self._dataset
blocks = self._dataset._blocks.get_blocks()
num_mappers = len(blocks)
num_reducers = num_mappers
if self._key is None:
num_reducers = 1
boundaries = []
else:
boundaries = sort.sample_boundaries(
blocks, [(self._key, "ascending")]
if isinstance(self._key, str) else self._key, num_reducers)
partition_and_combine_block = cached_remote_fn(
_partition_and_combine_block).options(num_returns=num_reducers + 1)
aggregate_combined_blocks = cached_remote_fn(
_aggregate_combined_blocks, num_returns=2)
map_results = np.empty((num_mappers, num_reducers), dtype=object)
map_meta = []
for i, block in enumerate(blocks):
results = partition_and_combine_block.remote(
block, boundaries, self._key, aggs)
map_results[i, :] = results[:-1]
map_meta.append(results[-1])
map_bar = ProgressBar("GroupBy Map", len(map_results))
map_bar.block_until_complete(map_meta)
stage_info["map"] = ray.get(map_meta)
map_bar.close()
blocks = []
metadata = []
for j in range(num_reducers):
block, meta = aggregate_combined_blocks.remote(
num_reducers, self._key, aggs, *map_results[:, j].tolist())
blocks.append(block)
metadata.append(meta)
reduce_bar = ProgressBar("GroupBy Reduce", len(blocks))
reduce_bar.block_until_complete(blocks)
reduce_bar.close()
metadata = ray.get(metadata)
stage_info["reduce"] = metadata
return Dataset(
BlockList(blocks, metadata), self._dataset._epoch,
stats.build_multistage(stage_info))
def _aggregate_on(self, agg_cls: type, on: Optional[AggregateOnTs], *args,
**kwargs):
"""Helper for aggregating on a particular subset of the dataset.
This validates the `on` argument, and converts a list of column names
or lambdas to a multi-aggregation. A null `on` results in a
multi-aggregation on all columns for an Arrow Dataset, and a single
aggregation on the entire row for a simple Dataset.
"""
aggs = self._dataset._build_multicolumn_aggs(
agg_cls, on, *args, skip_cols=self._key, **kwargs)
return self.aggregate(*aggs)
def count(self) -> Dataset[U]:
"""Compute count aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).count()
>>> ray.data.from_items([
... {"A": x % 3, "B": x} for x in range(100)]).groupby(
... "A").count()
Returns:
A simple dataset of ``(k, v)`` pairs or an Arrow dataset of
``[k, v]`` columns where ``k`` is the groupby key and ``v`` is the
number of rows with that key.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self.aggregate(Count())
def sum(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped sum aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).sum()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .sum(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").sum()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .sum(["B", "C"])
Args:
on: The data subset on which to compute the sum.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a sum of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise sum of all
columns.
Returns:
The sum result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, sum)`` tuples where ``k``
is the groupby key and ``sum`` is sum of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, sum_1, ..., sum_n)`` tuples where ``k`` is the groupby key
and ``sum_i`` is sum of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise sum column for each original column
in the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Sum, on)
def min(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped min aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).min()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .min(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").min()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .min(["B", "C"])
Args:
on: The data subset on which to compute the min.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a min of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise min of all
columns.
Returns:
The min result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, min)`` tuples where ``k``
is the groupby key and min is min of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, min_1, ..., min_n)`` tuples where ``k`` is the groupby key
and ``min_i`` is min of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise min column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Min, on)
def max(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped max aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).max()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .max(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").max()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .max(["B", "C"])
Args:
on: The data subset on which to compute the max.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a max of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise max of all
columns.
Returns:
The max result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, max)`` tuples where ``k``
is the groupby key and ``max`` is max of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, max_1, ..., max_n)`` tuples where ``k`` is the groupby key
and ``max_i`` is max of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise max column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Max, on)
def mean(self, on: Optional[AggregateOnTs] = None) -> Dataset[U]:
"""Compute grouped mean aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).mean()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .mean(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").mean()
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .mean(["B", "C"])
Args:
on: The data subset on which to compute the mean.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a mean of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise mean of all
columns.
Returns:
The mean result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, mean)`` tuples where ``k``
is the groupby key and ``mean`` is mean of all rows in that
group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, mean_1, ..., mean_n)`` tuples where ``k`` is the groupby
key and ``mean_i`` is mean of the outputs of the ith callable
called on each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise mean column for each original column
in the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Mean, on)
def std(self, on: Optional[AggregateOnTs] = None,
ddof: int = 1) -> Dataset[U]:
"""Compute grouped standard deviation aggregation.
This is a blocking operation.
Examples:
>>> ray.data.range(100).groupby(lambda x: x % 3).std()
>>> ray.data.from_items([
... (i % 3, i, i**2)
... for i in range(100)]) \
... .groupby(lambda x: x[0] % 3) \
... .std(lambda x: x[2])
>>> ray.data.range_arrow(100).groupby("value").std(ddof=0)
>>> ray.data.from_items([
... {"A": i % 3, "B": i, "C": i**2}
... for i in range(100)]) \
... .groupby("A") \
... .std(["B", "C"])
NOTE: This uses Welford's online method for an accumulator-style
        computation of the standard deviation. This method was chosen for its
        numerical stability and because it can be computed in a single pass.
This may give different (but more accurate) results than NumPy, Pandas,
and sklearn, which use a less numerically stable two-pass algorithm.
See
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
Args:
on: The data subset on which to compute the std.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to take a std of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to do a column-wise std of all
columns.
ddof: Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns:
The standard deviation result.
For a simple dataset, the output is:
- ``on=None``: a simple dataset of ``(k, std)`` tuples where ``k``
is the groupby key and ``std`` is std of all rows in that group.
- ``on=[callable_1, ..., callable_n]``: a simple dataset of
``(k, std_1, ..., std_n)`` tuples where ``k`` is the groupby key
and ``std_i`` is std of the outputs of the ith callable called on
each row in that group.
For an Arrow dataset, the output is:
- ``on=None``: an Arrow dataset containing a groupby key column,
``"k"``, and a column-wise std column for each original column in
the dataset.
- ``on=["col_1", ..., "col_n"]``: an Arrow dataset of ``n + 1``
columns where the first column is the groupby key and the second
through ``n + 1`` columns are the results of the aggregations.
If groupby key is ``None`` then the key part of return is omitted.
"""
return self._aggregate_on(Std, on, ddof=ddof)
def _partition_and_combine_block(
block: Block[T], boundaries: List[KeyType], key: GroupKeyT,
aggs: Tuple[AggregateFn]) -> List[Union[Block, BlockMetadata]]:
"""Partition the block and combine rows with the same key."""
stats = BlockExecStats.builder()
if key is None:
partitions = [block]
else:
partitions = BlockAccessor.for_block(block).sort_and_partition(
boundaries, [(key, "ascending")] if isinstance(key, str) else key,
descending=False)
parts = [BlockAccessor.for_block(p).combine(key, aggs) for p in partitions]
meta = BlockAccessor.for_block(block).get_metadata(
input_files=None, exec_stats=stats.build())
return parts + [meta]
def _aggregate_combined_blocks(
num_reducers: int, key: GroupKeyT, aggs: Tuple[AggregateFn],
*blocks: Tuple[Block, ...]) -> Tuple[Block[U], BlockMetadata]:
"""Aggregate sorted and partially combined blocks."""
return BlockAccessor.for_block(blocks[0]).aggregate_combined_blocks(
list(blocks), key, aggs)
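# --- Illustrative sketch, not part of the Ray source above ---
# A custom accumulator-based aggregation using the AggregateFn interface
# documented in GroupedDataset.aggregate(): collect the rows of every group
# into a list. It reuses the `ray` and `AggregateFn` imports at the top of
# this module; the dataset and key function are arbitrary examples.
def _example_collect_rows_per_group():
    ds = ray.data.range(10)
    return ds.groupby(lambda x: x % 2).aggregate(
        AggregateFn(
            init=lambda k: [],
            accumulate=lambda a, r: a + [r],
            merge=lambda a1, a2: a1 + a2,
            finalize=lambda a: a))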
| 41.643478
| 100
| 0.551107
|
71e556fa4139675d98d27bf9ea3b1e86e9a1dd14
| 271
|
py
|
Python
|
odoo-13.0/addons/l10n_de_skr03/migrations/9.0.2.0/pre-set_tags_and_taxes_updatable.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/l10n_de_skr03/migrations/9.0.2.0/pre-set_tags_and_taxes_updatable.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
odoo-13.0/addons/l10n_de_skr03/migrations/9.0.2.0/pre-set_tags_and_taxes_updatable.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import odoo
def migrate(cr, version):
registry = odoo.registry(cr.dbname)
from odoo.addons.account.models.chart_template import migrate_set_tags_and_taxes_updatable
migrate_set_tags_and_taxes_updatable(cr, registry, 'l10n_de_skr03')
| 30.111111
| 94
| 0.771218
|
d9ae0355547a5275d21a8cc83d3d4546010688ae
| 7,478
|
py
|
Python
|
scripts/priorities.py
|
MorganHamm/gs541-phylogeography
|
ebb0916685894de0ee1ce7a78b32337defbeee8b
|
[
"MIT"
] | null | null | null |
scripts/priorities.py
|
MorganHamm/gs541-phylogeography
|
ebb0916685894de0ee1ce7a78b32337defbeee8b
|
[
"MIT"
] | null | null | null |
scripts/priorities.py
|
MorganHamm/gs541-phylogeography
|
ebb0916685894de0ee1ce7a78b32337defbeee8b
|
[
"MIT"
] | null | null | null |
"""
Generate a priorities file based on genetic proximity to a focal sample.
"""
import argparse
import gzip
from random import shuffle
from collections import defaultdict
import Bio
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from scipy import sparse
def compactify_sequences(sparse_matrix, sequence_names):
sequence_groups = defaultdict(list)
for s, snps in zip(sequence_names, sparse_matrix):
ind = snps.nonzero()
vals = np.array(snps[ind])
if len(ind[1]):
sequence_groups[tuple(zip(ind[1], vals[0]))].append(s)
else:
sequence_groups[tuple()].append(s)
return sequence_groups
INITIALISATION_LENGTH = 1000000
def sequence_to_int_array(s, fill_value=110):
seq = np.frombuffer(str(s).lower().encode('utf-8'), dtype=np.int8).copy()
seq[(seq!=97) & (seq!=99) & (seq!=103) & (seq!=116)] = fill_value
return seq
# Function adapted from https://github.com/gtonkinhill/pairsnp-python
def calculate_snp_matrix(fastafile, consensus=None, zipped=False, fill_value=110):
# This function generate a sparse matrix where differences to the consensus are coded as integers.
row = np.empty(INITIALISATION_LENGTH)
col = np.empty(INITIALISATION_LENGTH, dtype=np.int64)
val = np.empty(INITIALISATION_LENGTH, dtype=np.int8)
r = 0
n_snps = 0
nseqs = 0
seq_names = []
filled_positions = []
current_length = INITIALISATION_LENGTH
if zipped:
fh = gzip.open(fastafile, 'rt')
else:
fh = open(fastafile, 'rt')
with fh as fasta:
for h,s in SimpleFastaParser(fasta):
if consensus is None:
align_length = len(s)
# Take consensus as first sequence
consensus = sequence_to_int_array(s, fill_value=fill_value)
else:
align_length = len(consensus)
nseqs +=1
seq_names.append(h)
if(len(s)!=align_length):
raise ValueError('Fasta file appears to have sequences of different lengths!')
s = sequence_to_int_array(s, fill_value=fill_value)
snps = (consensus!=s) & (s!=fill_value)
right = n_snps + np.sum(snps)
filled_positions.append(np.where(s==fill_value)[0])
if right >= (current_length/2):
current_length = current_length + INITIALISATION_LENGTH
row.resize(current_length)
col.resize(current_length)
val.resize(current_length)
row[n_snps:right] = r
col[n_snps:right] = np.flatnonzero(snps)
val[n_snps:right] = s[snps]
r += 1
n_snps = right
fh.close()
if nseqs==0:
raise ValueError('No sequences found!')
row = row[0:right]
col = col[0:right]
val = val[0:right]
sparse_snps = sparse.csc_matrix((val, (row, col)), shape=(nseqs, align_length))
return {'snps': sparse_snps, 'consensus': consensus, 'names': seq_names, 'filled_positions': filled_positions}
# Function adapted from https://github.com/gtonkinhill/pairsnp-python
def calculate_distance_matrix(sparse_matrix_A, sparse_matrix_B, consensus):
n_seqs_A = sparse_matrix_A.shape[0]
n_seqs_B = sparse_matrix_B.shape[0]
d = (1*(sparse_matrix_A==97)) * (sparse_matrix_B.transpose()==97)
d = d + (1*(sparse_matrix_A==99) * (sparse_matrix_B.transpose()==99))
d = d + (1*(sparse_matrix_A==103) * (sparse_matrix_B.transpose()==103))
d = d + (1*(sparse_matrix_A==116) * (sparse_matrix_B.transpose()==116))
d = d.todense()
n_comp = (1*(sparse_matrix_A==110) * ((sparse_matrix_B==110).transpose())).todense()
d = d + n_comp
temp_total = np.zeros((n_seqs_A, n_seqs_B))
temp_total[:] = (1*(sparse_matrix_A>0)).sum(1)
temp_total += (1*(sparse_matrix_B>0)).sum(1).transpose()
total_differences_shared = (1*(sparse_matrix_A>0)) * (sparse_matrix_B.transpose()>0)
n_total = np.zeros((n_seqs_A, n_seqs_B))
n_sum = (1*(sparse_matrix_A==110)).sum(1)
n_total[:] = n_sum
n_total += (1*(sparse_matrix_B==110)).sum(1).transpose()
diff_n = n_total - 2*n_comp
d = temp_total - total_differences_shared.todense() - d - diff_n
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="generate priorities files based on genetic proximity to focal sample",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--alignment", type=str, required=True, help="FASTA file of alignment")
parser.add_argument("--reference", type = str, required=True, help="reference sequence")
parser.add_argument("--metadata", type = str, required=True, help="metadata")
    parser.add_argument("--focal-alignment", type = str, required=True, help="focal sample of sequences")
parser.add_argument("--output", type=str, required=True, help="FASTA file of output alignment")
args = parser.parse_args()
# load entire alignment and the alignment of focal sequences (upper case -- probably not necessary)
ref = sequence_to_int_array(SeqIO.read(args.reference, 'genbank').seq)
context_seqs_dict = calculate_snp_matrix(args.alignment, consensus=ref)
focal_seqs_dict = calculate_snp_matrix(args.focal_alignment, consensus = ref)
alignment_length = len(ref)
print("Done reading the alignments.")
# calculate number of masked sites in either set
mask_count_focal = np.array([len(x) for x in focal_seqs_dict['filled_positions']])
mask_count_context = {s: len(x) for s,x in zip(context_seqs_dict['names'], context_seqs_dict['filled_positions'])}
# for each context sequence, calculate minimal distance to focal set, weigh with number of N/- to pick best sequence
d = np.array(calculate_distance_matrix(context_seqs_dict['snps'], focal_seqs_dict['snps'], consensus = context_seqs_dict['consensus']))
closest_match = np.argmin(d+mask_count_focal/alignment_length, axis=1)
print("Done finding closest matches.")
minimal_distance_to_focal_set = {}
for context_index, focal_index in enumerate(closest_match):
minimal_distance_to_focal_set[context_seqs_dict['names'][context_index]] = (d[context_index, focal_index], focal_seqs_dict["names"][focal_index])
# for each focal sequence with close matches (using the index), we list all close contexts
close_matches = defaultdict(list)
for seq in minimal_distance_to_focal_set:
close_matches[minimal_distance_to_focal_set[seq][1]].append(seq)
for f in close_matches:
shuffle(close_matches[f])
close_matches[f].sort(key=lambda x: minimal_distance_to_focal_set[x][0] + mask_count_context[x]/alignment_length)
# export priorities
with open(args.output, 'w') as fh:
for i, seqid in enumerate(context_seqs_dict['names']):
            # use distance as negative priority
            # masked sites (N or -) are already penalized above, when choosing and ordering the closest matches
            # penalize if many sequences are close to the same focal one by using the index of the shuffled list of neighbours
            # currently each position in this list reduces priority by 0.1, i.e. ten other sequences == one mutation
position = close_matches[minimal_distance_to_focal_set[seqid][1]].index(seqid)
priority = -minimal_distance_to_focal_set[seqid][0] - 0.1*position
fh.write(f"{seqid}\t{priority:1.2f}\n")
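# --- Worked example (illustrative, not part of the original script) ---
# The priority written above is the negative SNP distance to the closest focal
# sequence minus 0.1 per competing neighbour of that same focal sequence.
# For a context sequence 2 SNPs away that is the third-closest neighbour
# (position index 2) of its focal match:
#   priority = -2 - 0.1 * 2 = -2.2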
| 41.776536
| 153
| 0.678122
|
97e50a9c9c2ba7973295383dbb11375950fdc74c
| 1,240
|
py
|
Python
|
chapter3/Readercoin_/test/functional/p2p-mempool.py
|
MyawBug/Blockchain-By-Example
|
2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d
|
[
"MIT"
] | 51
|
2018-12-14T09:09:20.000Z
|
2022-03-28T03:25:45.000Z
|
chapter3/Readercoin_/test/functional/p2p-mempool.py
|
MyawBug/Blockchain-By-Example
|
2d0495a130d1a9f91b7fb99359cbb8e9f7b9763d
|
[
"MIT"
] | 4
|
2019-08-02T18:23:17.000Z
|
2022-02-12T04:33:25.000Z
|
chapter3/Readercoin_/test/functional/p2p-mempool.py
|
xiaqingdoc/---
|
b15448739983b0787ffc963811294bcf44487303
|
[
"MIT"
] | 42
|
2018-12-14T09:09:24.000Z
|
2022-03-31T01:49:35.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Readercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import ReadercoinTestFramework
from test_framework.util import *
class P2PMempoolTests(ReadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 32.631579
| 75
| 0.708871
|
cffeb7e915a06a513e96e1ed60beabf6b79b6518
| 8,899
|
py
|
Python
|
tensorflow/python/tpu/training_loop.py
|
Sonata-Wang/tensorflow
|
8bbef0cd77879d05ed69bf30e76087847a8ca4a2
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/python/tpu/training_loop.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 30
|
2016-10-04T15:38:08.000Z
|
2020-07-16T12:09:33.000Z
|
tensorflow/python/tpu/training_loop.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Library for constructing a training loop, suitable for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import xla
def while_loop(condition, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop for TPUs.
The set of loop-carried tensors corresponds to `inputs`. Both
`condition` and `body` take the current value of the loop-carried
  tensors. `body` additionally takes a tuple of infeed from
infeed_queue if infeed_queue is not None. `condition` must return a
single boolean value that determines whether iteration
continues. `body` must return an updated list of values for the
loop-carried tensors.
Args:
condition: a Python function that builds the loop condition.
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop, or
None (equivalent to an empty list).
    infeed_queue: if not None, the infeed queue from which to append a tuple
      of arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
TypeError: if body or condition has the wrong signature.
"""
del name
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for
x in inputs]
input_types = [x.dtype for x in inputs]
input_arity = len(inputs)
body_arg_error = xla.check_function_argument_count(
body, input_arity, infeed_queue)
if body_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s, but the loop body needs %s" % (
input_arity, str([i.name for i in inputs]), body_arg_error))
else:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s and %d additional inputs from "
"infeed, but the computation needs %s" % (input_arity, str(
[i.name for i in inputs]), infeed_queue.number_of_tuple_elements,
body_arg_error))
condition_arg_error = xla.check_function_argument_count(
condition, input_arity, None)
if condition_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s" % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
else:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s. Note that infeed is not passed to the loop "
"condition." % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
def condition_wrapper(*inputs):
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
return condition(*inputs)
def body_wrapper(*inputs):
"""Wrapper around `body` that handles infeed queues and control deps."""
inputs = list(inputs)
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
# Runs `body` with the dequeue_ops appended.
if infeed_queue:
number_of_shards = tpu_function.get_tpu_context().number_of_shards
if number_of_shards is None:
raise ValueError("Can't build training loop with infeed when there is "
"no tpu_shard_context. Are you building a loop or "
"graph directly rather than from inside tpu.rewrite, "
"tpu.batch_parallel, tpu.shard, or tpu.replicate?")
infeed_queue.set_number_of_shards(number_of_shards)
dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]
else:
dequeue_ops = []
outputs = body(*(inputs + dequeue_ops))
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU training loop body must return zero or more Tensor values "
"followed by zero or more Operations.")
output_types = [op.dtype for op in output_tensors]
if input_types != output_types:
raise TypeError(
"Mismatch between input types and output types for training loop "
"body: {} vs {}".format(input_types, output_types))
# Add the dequeue operations to output_operations to ensure they are run
# by the loop, even if the programmer's loop body does not use them.
output_operations += dequeue_ops
# Add a dummy output, if needed.
if not output_tensors:
output_tensors = array_ops.constant(0)
if output_operations:
# TODO(phawkins): in principle this is too restrictive since it serializes
# the training loop steps. In practice it does not matter since this loop
# will be compiled by XLA.
output_tensors = control_flow_ops.tuple(output_tensors,
control_inputs=output_operations)
if tensor_tracer.TensorTracer.is_enabled():
num_replicas = tpu_function.get_tpu_context().number_of_shards
if num_replicas is None:
num_replicas = 1
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, None,
num_replicas)
return output_tensors
# If the body has arity 0, add a dummy loop-carried value to which we can add
# control dependencies from any side-effecting operations.
if input_arity == 0:
inputs = [array_ops.constant(0)]
return control_flow_ops.while_loop(
condition_wrapper, body_wrapper, inputs, name="", parallel_iterations=1)
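# Illustrative usage sketch for `while_loop`: a plain counter loop with hypothetical
# names (`example_condition`, `example_body`); a real loop body would run a training
# step instead. Kept commented out so the module behaviour is unchanged.
#
#   def example_condition(i):
#     return i < 10
#
#   def example_body(i):
#     return [i + 1]
#
#   final_count = while_loop(example_condition, example_body, inputs=[0])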
def repeat(n, body, inputs=None, infeed_queue=None, name=None):
"""Builds a training loop that executes a fixed number of iterations.
The set of loop-carried tensors correspond to `inputs`.
`body` must be a function that takes and returns the values of the
loop-carried tensors.
Args:
n: the number of loop iterations
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop or
None (equivalent to an empty list).
    infeed_queue: if not None, the infeed queue from which to append a tuple
      of arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
ValueError: if there is a type error.
"""
def _convert_to_list(xs):
if not isinstance(xs, (list, tuple)):
return [xs]
else:
return list(xs)
def cond(i, *args):
del args
return i < n
def body_wrapper(i, *args):
return [i + 1] + _convert_to_list(body(*args))
inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)
outputs = while_loop(
cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
outputs = _convert_to_list(outputs)
if len(outputs) == 1:
# Returns the Op rather than an empty list.
return outputs[0].op
else:
return outputs[1:]
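# Illustrative usage sketch for `repeat`, with a hypothetical `example_step` in place
# of a real training step; kept commented out so the module behaviour is unchanged.
#
#   def example_step(total):
#     return total + 1.0
#
#   final_total = repeat(100, example_step, inputs=[0.0])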
| 39.90583
| 80
| 0.674907
|
3a0e71ef4b879969e8624f5cd31f98b0ec96dbb2
| 2,738
|
py
|
Python
|
Weinstein2019/create_lidar_annotations.py
|
weecology/NeonTreeEvaluation_analysis
|
a426a1a6a621b67f11dc4e6cc46eb9df9d0fc677
|
[
"MIT"
] | 1
|
2020-05-18T07:14:31.000Z
|
2020-05-18T07:14:31.000Z
|
Weinstein2019/create_lidar_annotations.py
|
weecology/NeonTreeEvaluation_analysis
|
a426a1a6a621b67f11dc4e6cc46eb9df9d0fc677
|
[
"MIT"
] | 4
|
2019-11-12T02:48:19.000Z
|
2020-02-07T18:01:25.000Z
|
Weinstein2019/create_lidar_annotations.py
|
weecology/NeonTreeEvaluation_analysis
|
a426a1a6a621b67f11dc4e6cc46eb9df9d0fc677
|
[
"MIT"
] | null | null | null |
import glob
import os
import Lidar
import laspy
def write_label(point_cloud, path):
#Create laspy object
inFile = laspy.file.File("/Users/Ben/Desktop/test.laz", header=point_cloud.data.header, mode="w")
for dim in point_cloud.data.points:
setattr(inFile, dim, point_cloud.data.points[dim])
#Create second laspy object
outFile1 = laspy.file.File(path, mode = "w",header = inFile.header)
outFile1.define_new_dimension(
name="label",
data_type=5,
description = "Integer Tree Label"
)
# copy fields
for dimension in inFile.point_format:
dat = inFile.reader.get_dimension(dimension.name)
outFile1.writer.set_dimension(dimension.name, dat)
outFile1.label = point_cloud.data.points.user_data
outFile1.close()
#Training tiles
def annotate_tile(laz_path, path_to_rgb, xml_path):
annotations= Lidar.load_xml(xml_path, path_to_rgb, res=0.1)
point_cloud = Lidar.load_lidar(laz_path)
#Create boxes
boxes = Lidar.create_boxes(annotations)
#Drape RGB bounding boxes over the point cloud
point_cloud = Lidar.drape_boxes(boxes, point_cloud)
#Write Laz with label info
write_label(point_cloud, laz_path)
#annotate_tile(laz_path="../SJER/training/NEON_D17_SJER_DP1_258000_4106000_classified_point_cloud_colorized.laz",
#path_to_rgb="../SJER/training/",
#xml_path= "../SJER/annotations/2018_SJER_3_258000_4106000_image.xml")
def annotate_eval_plots(site):
path_to_rgb = "../" + site +"/plots/"
path_to_laz = path_to_rgb
path_to_annotations = "../" + site +"/annotations/"
#For each .laz file in directory.
laz_files = glob.glob(path_to_laz+"*.laz")
for laz in laz_files:
print(laz)
#Load laz
point_cloud = Lidar.load_lidar(laz)
#Find annotations
basename = os.path.basename(laz)
basename = os.path.splitext(basename)[0]
xml_path = os.path.join(path_to_annotations,basename + ".xml")
if (os.path.exists(xml_path)):
#Load annotations and get utm bounds from tif image
annotations= Lidar.load_xml(xml_path, path_to_rgb, res=0.1)
else:
print("{} does not exist, skipping image".format(xml_path))
continue
#Create boxes
boxes = Lidar.create_boxes(annotations)
#Drape RGB bounding boxes over the point cloud
point_cloud = Lidar.drape_boxes(boxes, point_cloud)
#Write Laz
write_label(point_cloud, laz)
sites = ["SJER","NIWO","TEAK","MLBS"]
for site in sites:
annotate_eval_plots(site)
| 31.837209
| 113
| 0.647553
|
386162ee7010ffd52d9babc2ec2d5bbd3c8c757b
| 2,454
|
py
|
Python
|
language/google/cloud/language/sentence.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2021-07-06T23:38:06.000Z
|
2021-07-06T23:38:06.000Z
|
language/google/cloud/language/sentence.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
language/google/cloud/language/sentence.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | 1
|
2022-03-24T01:37:10.000Z
|
2022-03-24T01:37:10.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of Sentence objects."""
from google.cloud.language.sentiment import Sentiment
class Sentence(object):
"""A Google Cloud Natural Language API sentence object.
.. _Sentence message: https://cloud.google.com/natural-language/reference\
/rest/v1/documents/annotateText#Sentence
See `Sentence message`_.
:type content: str
:param content: The text that the sentence is composed of.
:type begin: int
:param begin: The beginning offset of the sentence in the original
document according to the encoding type specified
in the API request.
:type sentiment: :class:`~google.cloud.language.sentiment.Sentiment`
:param sentiment:
(Optional) For calls to
:meth:`~google.cloud.language.document.Document.annotate_text` where
``include_sentiment`` is set to true, this field will contain the
sentiment for the sentence.
"""
def __init__(self, content, begin, sentiment=None):
self.content = content
self.begin = begin
self.sentiment = sentiment
@classmethod
def from_api_repr(cls, payload):
"""Convert a sentence from the JSON API into a :class:`Sentence`.
        :type payload: dict
        :param payload: The value from the backend.
:rtype: :class:`Sentence`
:returns: The sentence parsed from the API representation.
"""
text_span = payload['text']
# The sentence may or may not have a sentiment; only attempt the
# typecast if one is present.
sentiment = None
if payload.get('sentiment') is not None:
sentiment = Sentiment.from_api_repr(payload['sentiment'])
# Return a Sentence object.
return cls(text_span['content'], text_span['beginOffset'],
sentiment=sentiment)
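# Illustrative usage sketch with a hand-written payload in the shape consumed by
# `from_api_repr`; the optional 'sentiment' key is omitted, so `sentence.sentiment`
# stays None. Kept commented out so the module behaviour is unchanged.
#
#   payload = {'text': {'content': 'Hello world.', 'beginOffset': 0}}
#   sentence = Sentence.from_api_repr(payload)
#   assert sentence.content == 'Hello world.' and sentence.begin == 0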
| 35.565217
| 78
| 0.674002
|
c78498056fc6a846a4817c5c1e1c243b8256d8ed
| 1,456
|
py
|
Python
|
lhotse/bin/modes/recipes/ksponspeech.py
|
goodatlas/lhotse
|
c3e2f0012629776a70ec95a1fb8973e73903973a
|
[
"Apache-2.0"
] | null | null | null |
lhotse/bin/modes/recipes/ksponspeech.py
|
goodatlas/lhotse
|
c3e2f0012629776a70ec95a1fb8973e73903973a
|
[
"Apache-2.0"
] | null | null | null |
lhotse/bin/modes/recipes/ksponspeech.py
|
goodatlas/lhotse
|
c3e2f0012629776a70ec95a1fb8973e73903973a
|
[
"Apache-2.0"
] | 1
|
2022-03-29T01:15:32.000Z
|
2022-03-29T01:15:32.000Z
|
import click
from lhotse.bin.modes import download, prepare
from lhotse.recipes.ksponspeech import prepare_ksponspeech
from lhotse.utils import Pathlike
__all__ = ['ksponspeech']
@prepare.command(context_settings=dict(show_default=True))
@click.argument('corpus_dir', type=click.Path(exists=True, dir_okay=True))
@click.argument('output_dir', type=click.Path())
@click.option('-m', '--morpheme-analysis-model_path', type=click.Path(), default="")
@click.option('-j', '--num-jobs', type=int, default=1,
help='How many threads to use (can give good speed-ups with slow disks).')
@click.option('-w', '--word-boundary-symbol', type=str, default="",
help='word boundary symbol if it is defined in n-gram ARPA')
def ksponspeech(
corpus_dir: Pathlike,
output_dir: Pathlike,
morpheme_analysis_model_path: Pathlike,
word_boundary_symbol: str,
num_jobs: int
):
"""KsponSpeech AIHub data preparation."""
prepare_ksponspeech(corpus_dir,
morpheme_analysis_model_path=morpheme_analysis_model_path,
word_boundary_symbol=word_boundary_symbol,
output_dir=output_dir, num_jobs=num_jobs)
# @download.command(context_settings=dict(show_default=True))
# @click.argument('target_dir', type=click.Path())
# def zeroth(
# target_dir: Pathlike,
# ):
# """Zeroth data download"""
# download_zerothspeech(target_dir)
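# Illustrative command-line invocation (the corpus and output paths are made up):
#
#   lhotse prepare ksponspeech /data/KsponSpeech ./manifests -j 4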
| 37.333333
| 88
| 0.690934
|
bbf2c1de8181547c0a03b7f01ec6ca2d3d9c2fa8
| 3,767
|
py
|
Python
|
analises/xavierUtils.py
|
gabinete-compartilhado-acredito/100-dias-congresso
|
ab2138fa2975818a6ea04c0d67dba174e09849b2
|
[
"MIT"
] | 1
|
2021-01-25T19:14:42.000Z
|
2021-01-25T19:14:42.000Z
|
analises/xavierUtils.py
|
gabinete-compartilhado-acredito/100-dias-congresso
|
ab2138fa2975818a6ea04c0d67dba174e09849b2
|
[
"MIT"
] | null | null | null |
analises/xavierUtils.py
|
gabinete-compartilhado-acredito/100-dias-congresso
|
ab2138fa2975818a6ea04c0d67dba174e09849b2
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import datetime as dt
import matplotlib.pyplot as pl
### Auxiliary functions ###
def Bold(text):
"""
Takes a string and returns it bold.
"""
return '\033[1m'+text+'\033[0m'
def unique(series):
"""
Takes a pandas series as input and print all unique values, separated by a blue bar.
"""
u = series.unique()
    try:
        print(Bold(str(len(u))) + ': ' + '\033[1;34m | \033[0m'.join(sorted(u.astype(str))))
    except Exception:
        print(Bold(str(len(u))) + ': ' + '\033[1;34m | \033[0m'.join(sorted(u)))
def columns(df):
"""
Print the number of columns and their names, separated by a blue bar.
"""
unique(df.columns)
def mapUnique(df):
"""
Takes a pandas dataframe and prints the unique values of all columns and their numbers.
If the number of unique values is greater than maxItems, only print out a sample.
"""
for c in df.columns.values:
maxItems = 20
u = df[c].unique()
n = len(u)
        isStr = isinstance(u[0], str)
        print('')
        print(Bold(c + ': ') + str(n) + ' unique values.')
        if n <= maxItems:
            if isStr:
                print(', '.join(np.sort(u)))
            else:
                print(', '.join(np.sort(u).astype(str)))
        else:
            if isStr:
                print(Bold('(sample) ') + ', '.join(np.sort(np.random.choice(u, size=maxItems, replace=False))))
            else:
                print(Bold('(sample) ') + ', '.join(np.sort(np.random.choice(u, size=maxItems, replace=False)).astype(str)))
def checkMissing(df):
"""
Takes a pandas dataframe and prints out the columns that have missing values.
"""
colNames = df.columns.values
    print(Bold('Colunas com valores faltantes:'))
Ntotal = len(df)
Nmiss = np.array([float(len(df.loc[df[c].isnull()])) for c in colNames])
df2 = pd.DataFrame(np.transpose([colNames,[df[c].isnull().any() for c in colNames], Nmiss, np.round(Nmiss/Ntotal*100,2)]),
columns=['coluna','missing','N','%'])
    print(df2.loc[df2['missing'] == True][['coluna', 'N', '%']])
def freq(series, value):
"""
Takes a pandas series and a value and returns the fraction of the series that presents a certain value.
"""
Ntotal = len(series)
Nsel = float(len(series.loc[series==value]))
return Nsel/Ntotal
def saveFigWdate(name):
    """
    Takes a string (a filename with extension) and saves the current plot to it,
    adding the current date to the filename.
    """
    # os.path.splitext is used instead of str.split('.') so that paths containing
    # extra dots (e.g. '../') are handled correctly.
    base, ext = os.path.splitext(name)
    t = dt.datetime.now().strftime('%Y-%m-%d')
    filename = base + '_' + t + ext
    pl.savefig(filename, bbox_inches='tight')
def cov2corr(cov):
"""
Takes a covariance matrix and returns the correlation matrix.
"""
assert(len(cov) == len(np.transpose(cov))), 'Cov. matrix must be a square matrix.'
corr = [ [cov[i][j]/np.sqrt(cov[i][i]*cov[j][j]) for i in range(0,len(cov))] for j in range(0,len(cov))]
return np.array(corr)
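# Worked example with made-up numbers: for cov = [[4.0, 2.0], [2.0, 9.0]] the standard
# deviations are 2 and 3, so cov2corr(cov) returns [[1.0, 2/6], [2/6, 1.0]]; every entry
# is divided by the product of the corresponding standard deviations.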
def one2oneQ(df, col1, col2):
"""
Check if there is a one-to-one correspondence between two columns in a dataframe.
"""
n2in1 = df.groupby(col1)[col2].nunique()
n1in2 = df.groupby(col2)[col1].nunique()
if len(n2in1)==np.sum(n2in1) and len(n1in2)==np.sum(n1in2):
return True
else:
return False
def one2oneViolations(df, colIndex, colMultiples):
"""
Returns the unique values in colMultiples for a fixed value in colIndex (only for when the number of unique values is >1).
"""
return df.groupby(colIndex)[colMultiples].unique().loc[df.groupby(colIndex)[colMultiples].nunique()>1]
| 32.196581
| 129
| 0.607114
|
513b3e597f56c6aeff909b585cb763b31d856c20
| 45,132
|
py
|
Python
|
QA_api.py
|
alsmeirelles/QuickAnnotator
|
ffca5df537ca031543f1a311b0923619611554a5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
QA_api.py
|
alsmeirelles/QuickAnnotator
|
ffca5df537ca031543f1a311b0923619611554a5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
QA_api.py
|
alsmeirelles/QuickAnnotator
|
ffca5df537ca031543f1a311b0923619611554a5
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import base64
import glob
import os, re, sys, ast, shutil
from datetime import datetime
import logging
import multiprocessing
import PIL.Image
import cv2
import numpy as np
from skimage.measure import label
from flask import Blueprint, send_from_directory, jsonify
from flask import current_app, url_for, request, make_response
import sqlalchemy
import json
from QA_config import config, get_database_uri
from QA_db import Image, Project, Roi, db, Job, get_latest_modelid
from QA_pool import pool_get_image, pool_run_script, update_completed_job_status
from QA_utils import get_file_tail,tile_for_patch,get_initial_train,get_img_metadata,run_al
api = Blueprint("api", __name__)
jobs_logger = logging.getLogger('jobs')
# This will get the last few lines from the log file
@api.route("/api/logs/<file_stem>", methods=["GET"])
def get_latest_log(file_stem):
log_lines = 100 # <-- TODO: pull from config
log_path = file_stem + '.log'
return get_file_tail(log_path, log_lines), 200
@api.route("/api/<project_name>/embed/<image_name>", methods=["GET"])
def get_embed(project_name, image_name):
upload_folder = f"./projects/{project_name}/patches/"
return send_from_directory(upload_folder, image_name)
@api.route("/api/<project_name>/generate_train", methods=["GET"])
def generate_train(project_name):
upload_folder = f"./projects/{project_name}/patches/"
cache_folder = f"./projects/{project_name}/cache/"
proj = db.session.query(Project).filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'Generate initial training set for project {project_name}:')
iset = get_initial_train(cache_folder)
save_roi = False
processes = config.getint("pooling","npoolthread", fallback=2)
#Initial training set selected, start iteration 0
proj.iteration = 0
rt = multiprocess_insert_patch(iset,proj,project_name,save_roi,processes)
return jsonify(success=True), 200
#return send_from_directory(upload_folder)
#Multiprocess patch insertion
def multiprocess_insert_patch(iset,proj,project_name,save_roi,processes):
"""iset: image set"""
with multiprocessing.Pool(processes=processes) as pool:
results = [pool.apply_async(insert_patch_into_DB,(proj,project_name,i,save_roi)) for i in iset[0]]
        if iset[1] is not None:
for v in iset[1]:
results.append(pool.apply_async(insert_patch_into_DB,(proj,project_name,v,save_roi)) )
rt = [r.get() for r in results]
return rt
#Insert patches selected by other means other than user manual upload
def insert_patch_into_DB(proj,project_name,img,save_roi):
#Make tile for patch
tilename = None
pdest = ""
filename = os.path.basename(img.getPath())
tile,x,y,ps = tile_for_patch(f"./projects/{project_name}/{filename}")
newImage = None
if tile is None:
current_app.logger.info(f'Project = {str(proj.id)}: No WSI directory available')
dest = f"./projects/{project_name}/{filename}"
# Check if the file name has been used before
if os.path.isfile(dest):
return jsonify(error="file already exists"), 400
shutil.copy(img.getPath(),dest)
# if it's not a png image
filebase, fileext = os.path.splitext(filename)
dim = img.getImgDim()
else:
pdest = f"./projects/{project_name}/patches/{filename}"
tilename = os.path.basename(tile)
# if it's not a png image
filebase, fileext = os.path.splitext(tilename)
dest = f"./projects/{project_name}/{tilename}"
if os.path.isfile(dest):
print(f"Tile with multiple patches ({tilename})")
newImage = db.session.query(Image).filter_by(projId=proj.id, name=tilename).first()
if os.path.isfile(pdest):
return jsonify(error="Tile already exists")
#shutil.copy(img.getPath(),pdest)
# Get image dimension
dim = (config.getint("common","tilesize", fallback=2000),)*2
#Patches are ROIs from the tile
save_roi = True
current_app.logger.info(f'Destination = {dest}')
# Save the new image information to database, if it's a new tile
if newImage is None:
newImage = Image(name=f"{filebase}.png", path=dest,projId=proj.id,patch_size=ps,
width=dim[0], height=dim[1], date=datetime.now())
db.session.add(newImage)
db.session.commit()
if save_roi:
roi_base_name = f'{filename.replace(".png", "_")}{x}_{y}_roi.png'
roi_name = f'projects/{project_name}/roi/{roi_base_name}'
nobjects = db.session.query(Roi).filter_by(imageId=newImage.id).count()
newRoi = Roi(name=roi_base_name, path=roi_name, alpath=img.getPath(), testingROI = 0, imageId=newImage.id,
width=ps, height=ps, x=x, y=y, acq=proj.iteration, nobjects = nobjects,
date=datetime.now())
db.session.add(newRoi)
db.session.commit()
mask_folder = f"projects/{project_name}/mask/"
mask_name = f"{filebase}.png".replace(".png", "_mask.png")
mask = PIL.Image.new('RGB', dim)
mask.save(mask_folder + mask_name, "PNG")
return jsonify(success=True, image=newImage.as_dict()), 201
@api.route("/api/<project_name>/start_al", methods=["GET"])
def start_al(project_name):
proj_folder = f"./projects/{project_name}/"
proj = db.session.query(Project).filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'Starting active learning system {project_name}:')
training_rois = db.session.query(Roi.id, Roi.imageId, Roi.name, Roi.path, Roi.alpath, Roi.testingROI,Roi.height, Roi.width,
Roi.x, Roi.y, Roi.acq, Roi.anclass) \
.filter(Image.projId == proj.id) \
.filter(Roi.imageId == Image.id) \
.group_by(Roi.id).all()
selected = run_al(proj_folder,training_rois,config,proj.iteration)
if selected is None:
return jsonify(error="No pool available"),400
else:
proj.iteration += 1
processes = config.getint("pooling","npoolthread", fallback=2)
current_app.logger.info('Adding selected images to {}: {}'.format(project_name,len(selected)))
rt = multiprocess_insert_patch(selected,proj,project_name,False,processes)
        current_app.logger.info(f'New patches ready for annotation. Start iteration {proj.iteration}.')
return jsonify(success=True), 200
@api.route("/api/<project_name>/train_autoencoder", methods=["GET"])
def train_autoencoder(project_name):
proj = db.session.query(Project).filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'Training autoencoder for project {project_name}:')
# get the config options:
current_app.logger.info(f'Getting config options:')
num_images = config.getint('train_ae', 'numimages', fallback=-1)
batch_size = config.getint('train_ae', 'batchsize', fallback=32)
patch_size = config.getint('train_ae', 'patchsize', fallback=256)
num_workers = config.getint('train_ae', 'numworkers', fallback=0)
num_epochs = config.getint('train_ae', 'numepochs', fallback=1000)
num_epochs_earlystop = config.getint('train_ae', 'num_epochs_earlystop', fallback=-1)
num_min_epochs = config.getint('train_ae', 'num_min_epochs', fallback=300)
current_app.logger.info(f'Images = {num_images}, epochs = {num_epochs}, batch size = {batch_size}')
# get the command:
full_command = [sys.executable,
"train_ae.py",
f"-n{num_epochs}",
f"-p{patch_size}",
f"-s{num_epochs_earlystop}",
f"-l{num_min_epochs}",
f"-m{num_images}",
f"-b{batch_size}",
f"-r{num_workers}",
f"-o./projects/{project_name}/models/0",
f"./projects/{project_name}/patches/*.png"]
current_app.logger.info(full_command)
# run it asynchronously:
command_name = "train_autoencoder"
# Set proj.train_ae_time = null since the model 0 is being retrained, the time should be unavailable
proj.train_ae_time = None
db.session.commit()
return pool_run_script(project_name, command_name, full_command, callback=train_autoencoder_callback)
# This callback updates the train_ae_time value in the database to
# be the amount of time it took for the autoencoder to run:
def train_autoencoder_callback(result):
# update the job status in the database:
update_completed_job_status(result)
# if it was successful, mark the training time in the database:
retval, jobid = result
if retval == 0:
jobs_logger.info('Marking training ae time in database:')
engine = sqlalchemy.create_engine(get_database_uri())
projid = engine.connect().execute(f"select projId from job where id = :jobid", jobid=jobid).first()[0]
engine.connect().execute(
f"update project set train_ae_time = datetime(), iteration = CASE WHEN iteration<0 then 0 else iteration end where id = :projid",
projid=projid)
engine.dispose()
# Fill the training/test files with the available images:
def populate_training_files(project_name, train_file_path, test_file_path):
# open those text files for writing:
testfp = open(test_file_path, "w")
trainfp = open(train_file_path, "w")
# loop through the images in the database:
for img in Project.query.filter_by(name=project_name).first().images: # TODO can improve this
current_app.logger.info(f'Checking rois for img: {img.name}')
for roi in img.rois:
current_app.logger.info(f'Roi path = {roi.path}')
# check if this image roi exists:
if not os.path.isfile(roi.path):
current_app.logger.warn(f'No roi image found at {roi.path}')
continue
# append this roi to the appropriate txt file:
current_app.logger.info(f'Testing ROI = {str(roi.testingROI)}')
if roi.testingROI:
testfp.write(f"{roi.name}\n")
elif roi.testingROI == 0:
trainfp.write(f"{roi.name}\n")
# close the files:
testfp.close()
trainfp.close()
@api.route("/api/<project_name>/retrain_dl", methods=["GET"])
def retrain_dl(project_name):
proj = Project.query.filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'About to train a new transfer model for {project_name}')
frommodelid = request.args.get('frommodelid', default=0, type=int)
if(frommodelid == -1):
frommodelid = get_latest_modelid(project_name)
if frommodelid > proj.iteration or not os.path.exists(f"./projects/{project_name}/models/{frommodelid}/best_model.pth"):
return jsonify(
error=f"Deep learning model {frommodelid} doesn't exist"), 400
if proj.train_ae_time is None and frommodelid == 0:
error_message = f'The base model 0 of project {project_name} was overwritten when Retrain Model 0 started.\n ' \
f'Please wait until the Retrain Model 0 finishes. '
current_app.logger.warn(error_message)
return jsonify(error=error_message), 400
# todo: make sure there's actually a model in that subdirectory since errors still create the dir before the model is ready
new_modelid = get_latest_modelid(project_name) + 1
output_model_path = f"./projects/{project_name}/models/{new_modelid}/"
current_app.logger.info(f'New model path = {output_model_path}')
# store the list of test and training images in text files:
test_file_path = f"projects/{project_name}/test_imgs.txt"
train_file_path = f"projects/{project_name}/train_imgs.txt"
current_app.logger.info('Populating project files:')
populate_training_files(project_name, train_file_path, test_file_path)
# check if enough data exists:
    empty_training = not os.path.exists(train_file_path) or os.stat(
        train_file_path).st_size == 0
    empty_testing = not os.path.exists(test_file_path) or os.stat(
        test_file_path).st_size == 0
if empty_training or empty_testing: # TODO can improve this by simply counting ROIs in the db
error_message = f'Not enough training/test images for project {project_name}. You need at least 1 of each.'
current_app.logger.warn(error_message)
return jsonify(error=error_message), 400
# get config properties:
num_epochs = config.getint('train_tl', 'numepochs', fallback=1000)
num_epochs_earlystop = config.getint('train_tl', 'num_epochs_earlystop', fallback=-1)
num_min_epochs = config.getint('train_tl', 'num_min_epochs', fallback=300)
batch_size = config.getint('train_tl', 'batchsize', fallback=32)
patch_size = config.getint('train_tl', 'patchsize', fallback=256)
num_workers = config.getint('train_tl', 'numworkers', fallback=0)
edge_weight = config.getfloat('train_tl', 'edgeweight', fallback=2)
pclass_weight = config.getfloat('train_tl', 'pclass_weight', fallback=.5)
fillbatch = config.getboolean('train_tl', 'fillbatch', fallback=False)
# query P/N pixel count from database for ppixel_train npixel_train ppixel_test npixel_test
if pclass_weight == -1:
proj_ppixel = db.session.query(db.func.sum(Image.ppixel)).filter_by(
projId=proj.id).scalar()
proj_npixel = db.session.query(db.func.sum(Image.npixel)).filter_by(
projId=proj.id).scalar()
total = proj_npixel + proj_ppixel
pclass_weight = 1 - proj_ppixel / total
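        # e.g. with hypothetical counts of 20k positive and 80k negative pixels this gives
        # pclass_weight = 1 - 0.2 = 0.8, so the rarer positive class gets the larger weight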
# get the command to retrain the model:
full_command = [sys.executable, "train_model.py",
f"-p{patch_size}",
f"-e{edge_weight}",
f"-n{num_epochs}",
f"-s{num_epochs_earlystop}",
f"-l{num_min_epochs}",
f"-b{batch_size}",
f"-o{output_model_path}",
f"-w{pclass_weight}",
f"-r{num_workers}",
f"-m./projects/{project_name}/models/{frommodelid}/best_model.pth",
f"./projects/{project_name}"]
if(fillbatch):
full_command.append("--fillbatch")
current_app.logger.info(f'Training command = {full_command}')
# run the script asynchronously:
command_name = "retrain_dl"
return pool_run_script(project_name, command_name, full_command, callback=retrain_dl_callback)
def retrain_dl_callback(result):
# update the job status in the database:
update_completed_job_status(result)
jobid = result[1]
engine = sqlalchemy.create_engine(get_database_uri())
dbretval = engine.connect().execute(f"select procout from jobid_{jobid} where procout like 'RETVAL:%'").first()
if dbretval is None:
# no retval, indicating superpixel didn't get to the end, leave everything as is
engine.dispose()
return
retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
projname = retvaldict["project_name"]
iteration = retvaldict["iteration"]
engine.connect().execute(f"update project set iteration = :iteration where name = :projname",
projname=projname, iteration=iteration)
engine.dispose()
@api.route("/api/<project_name>/make_patches", methods=["GET"])
def make_patches(project_name):
# pull this project from the database:
current_app.logger.info(f'Getting project info from database for project {project_name}.')
project = db.session.query(Project).filter_by(name=project_name).first()
if project is None:
current_app.logger.warn(f'Unable to find {project_name} in database. Returning HTML response code 400.')
return jsonify(error=f"Project {project_name} does not exist"), 400
target_files = []
current_app.logger.info('Looping through images.')
for img in project.images:
current_app.logger.info(f'Checking database if patches have been computed for image "{img.name}".')
needs_calculating = False
if img.make_patches_time:
current_app.logger.info('Database claims that the patches have been computed. Checking filesystem.')
image_name_without_extension = os.path.splitext(img.name)[0] # <-- remove extension
current_app.logger.info(f'Image {image_name_without_extension}')
patches_pattern = f'./projects/{project_name}/patches/{image_name_without_extension}*.png'
current_app.logger.info(f'Patches pattern = {patches_pattern}')
number_of_patches = len(glob.glob(patches_pattern))
current_app.logger.info(f'Number of patches = {number_of_patches}')
if number_of_patches == 0:
current_app.logger.warn(
'The database is incorrectly reporting that patches exist. We are recomputing them since no patches exist on the filesystem for this image.')
needs_calculating = True
else:
needs_calculating = True
if needs_calculating:
current_app.logger.info(
f'Patches need to be computed for image at {img.path}. Adding this image to the list.')
target_files.append(img.path)
# img.patches_computed = True # note, this only goes through when commit is called
current_app.logger.info('Marked patches_computed to be True in the database.')
if not target_files:
error_message = 'No pending target image files for making patches.'
current_app.logger.warn(error_message)
return jsonify(error=error_message), 400
current_app.logger.info('Storing image filenames for patches in text file:')
with open(f"./projects/{project_name}/patches/new_imgs.txt", "w") as textfile:
for fname in target_files:
textfile.write(f"{fname}\n")
patchsize = config.getint('make_patches', 'patchsize', fallback=256)
# get the command:
full_command = [sys.executable,
"make_patches_for_embed.py", f"-p{patchsize}",
f"-o./projects/{project_name}/patches/",
f"./projects/{project_name}/patches/new_imgs.txt"]
whiteBG = request.args.get("whiteBG", default="keep", type=str)
if whiteBG == "remove":
full_command.append("-b")
current_app.logger.info(full_command)
# close the db session and note that patches_computed is true:
db.session.commit()
# run the command asynchronously
command_name = "make_patches"
return pool_run_script(project_name, command_name, full_command, callback=make_patches_callback)
def make_patches_callback(result):
# update the job status in the database:
update_completed_job_status(result)
retval, jobid = result
engine = sqlalchemy.create_engine(get_database_uri())
dbretval = engine.connect().execute(f"select procout from jobid_{jobid} where procout like 'RETVAL:%'").first()
if dbretval is None:
# no retval, indicating make_patches didn't get to the end, leave everything as is
engine.dispose()
return
retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
for img in retvaldict["image_list"]:
engine.connect().execute(f"update image set make_patches_time = datetime() where path= :img", img=img)
# if it was successful, mark the training time in the database:
if retval == 0:
jobs_logger.info('Marking make_patches time in database:')
projid = engine.connect().execute(f"select projId from job where id = :jobid", jobid=jobid).first()[0]
engine.connect().execute(f"update project set make_patches_time = datetime() where id = :projid", projid=projid)
engine.dispose()
@api.route("/api/<project_name>/embed", methods=["GET"])
def make_embed(project_name):
proj = db.session.query(Project).filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
model0ExistOrNot = os.path.exists(f"./projects/{project_name}/models/0/best_model.pth")
current_app.logger.info(f'Model 0 (autoencoder) exists = {model0ExistOrNot}')
if not model0ExistOrNot:
return jsonify(
error="Embedding is not available unless at least a base model is trained. Please make patches and train AE"), 400
if proj.train_ae_time is None and proj.iteration == 0:
error_message = f'The base model 0 of project {project_name} was overwritten when Retrain Model 0 started.\n ' \
f'Please wait until the Retrain Model 0 finishes. '
current_app.logger.warn(error_message)
return jsonify(error=error_message), 400
current_app.logger.info('Checking if the embeddings are the most recent.')
# get config options:
batchsize = config.getint('make_embed', 'batchsize', fallback=32)
patchsize = config.getint('make_embed', 'patchsize', fallback=256)
numimgs = request.args.get('numimgs', default=-1, type=int)
modelid = request.args.get('modelid', default=get_latest_modelid(project_name), type=int)
outdir = f"./projects/{project_name}/models/{modelid}"
latest_modelID = get_latest_modelid(project_name)
if modelid < 0 or modelid > latest_modelID:
return jsonify(
error=f"Your selected Embed Model ID is {modelid}. The last model ID is {latest_modelID}. A valid Model ID ranges from 0 to {latest_modelID}."), 400
# get the command:
full_command = [sys.executable, "make_embed.py", project_name, f"-o{outdir}", f"-p{patchsize}", f"-b{batchsize}",
f"-m{numimgs}"]
current_app.logger.info(f'Full command = {str(full_command)}')
# update the embedding iteration:
# current_app.logger.info('Updating the embedding iteration to the model iteration:')
# proj.embed_iteration = proj.iteration
db.session.commit()
# run the command asynchronously:
command_name = "make_embed"
return pool_run_script(project_name, command_name, full_command, callback=make_embed_callback)
def make_embed_callback(result):
# update the job status in the database:
update_completed_job_status(result)
jobid = result[1]
engine = sqlalchemy.create_engine(get_database_uri())
dbretval = engine.connect().execute(f"select procout from jobid_{jobid} where procout like 'RETVAL:%'").first()
if dbretval is None:
# no retval, indicating superpixel didn't get to the end, leave everything as is
engine.dispose()
return
retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
projname = retvaldict["project_name"]
modelid = retvaldict["modelid"]
engine.connect().execute(f"update project set embed_iteration = :modelid where name = :projname", projname=projname,
modelid=modelid)
engine.dispose()
@api.route("/api/<project_name>/model", methods=["GET"])
def get_model(project_name):
modelid = request.args.get('model', get_latest_modelid(project_name), type=int)
model_path = f"./projects/{project_name}/models/{modelid}/"
return send_from_directory(model_path, "best_model.pth", as_attachment=True)
@api.route('/api/<project_name>/dataset/<traintype>', methods=["GET"])
def get_traintest_images(project_name, traintype):
# List all training and testing patches for the current project
sample_images = []
with open(f"projects/{project_name}/{traintype}_imgs.txt", "r") as file:
for img_name in file:
sample_images.append(img_name.strip())
current_app.logger.info(sample_images)
return jsonify(sample_images=sample_images)
@api.route('/api/<project_name>/dataset/<traintype>/<roiname>', methods=["DELETE"])
def remove_image_from_traintest(project_name, traintype, roiname):
    roi = db.session.query(Roi).filter_by(name=os.path.basename(roiname.strip())).first()
roi.testingROI = -1
db.session.commit()
return jsonify(success=True, roi=roi.as_dict())
@api.route('/api/<project_name>/dataset/<traintype>/<roiname>/<roi_class>', methods=["PUT"])
def add_roi_to_traintest(project_name, traintype, roiname, roi_class):
current_app.logger.info(
f'Adding new annotation image. Project = {project_name} Training type = {traintype} Name = {roiname}')
roi = db.session.query(Roi).filter_by(name=os.path.basename(roiname.strip())).first()
if roi is None:
return jsonify(error=f"{roiname} not found in project {project_name}"), 400
current_app.logger.info('Roi found = ' + str(roi.id))
if traintype == "train":
roi.testingROI = 0
if traintype == "test":
roi.testingROI = 1
roi.anclass = 1 if roi_class == "positive" else 0
current_app.logger.info('Committing new image to database:')
db.session.commit()
return jsonify(success=True, roi=roi.as_dict()), 200
@api.route("/api/<project_name>/image/<image_name>", methods=["GET"])
def get_image(project_name, image_name):
current_app.logger.info(f"Outputting file {image_name}")
return send_from_directory(f"./projects/{project_name}", image_name)
@api.route("/api/<project_name>/image/<image_name>/thumbnail", methods=["GET"])
def get_image_thumb(project_name, image_name):
width = request.form.get('width', 250)
img = cv2.imread(f"./projects/{project_name}/{image_name}")
height = int(img.shape[0] * width / img.shape[1])
dim = (width, height)
img = cv2.resize(img, dim)
success, img_encoded = cv2.imencode('.png', img)
response = make_response(img_encoded.tobytes())
response.headers['Content-Type'] = 'image/png'
response.headers['Content-Disposition'] = f'inline; filename = "{image_name.replace(".png", "_thumb.png")}"'
return response
@api.route('/api/<project_name>/image/<image_name>',
methods=["DELETE"]) # below should be done in a post-processing call
def delete_image(project_name, image_name):
proj = Project.query.filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
# Remove the image from database
selected_image = db.session.query(Image).filter_by(projId=proj.id, name=image_name).first()
# Delete all the ROI linked to the image
select_Rois = db.session.query(Roi).filter_by(imageId=selected_image.id)
select_Rois.delete()
db.session.delete(selected_image)
db.session.commit()
# Remove the image file from server
os.remove(selected_image.path)
# Remove the corresponding mask and result files
# TODO: the below can be refactored to recursively look for *all* files which match the pattern and delete them
# need to be careful with recursive search. if one has 1.png and 100.png, and wants to delete all files associated
# with 1.png, wildcards may pick up unrelated 100.png images
mask_name = selected_image.name.replace(".png", "_mask.png")
mask_path = f"./projects/{project_name}/mask/{mask_name}"
if os.path.exists(mask_path):
os.remove(mask_path)
# --- delete prediction results for every model
result_name = selected_image.name.replace(".png", "_pred.png")
result_path = f"./projects/{project_name}/pred/**/{result_name}"
result_fileLists = glob.glob(result_path)
for filePath in result_fileLists:
try:
os.remove(filePath)
except:
print("Error while deleting file : ", filePath)
# --- delete patches
patches = selected_image.name.replace(".png", "")
patches_path = f"./projects/{project_name}/patches"
patches_fileList = glob.glob(f'{patches_path}/{patches}_*_*.png')
# Iterate over the list of filepaths & remove each file.
for filePath in patches_fileList:
try:
os.remove(filePath)
except:
print("Error while deleting file : ", filePath)
# --- delete superpixels
superpixels_name = selected_image.name.replace(".png", "_superpixels.png")
superpixels_path = f"./projects/{project_name}/superpixels/{superpixels_name}"
if os.path.exists(superpixels_path):
os.remove(superpixels_path)
superpixels_boundary_name = selected_image.name.replace(".png", "_superpixels_boundary.png")
superpixels_boundary_path = f"./projects/{project_name}/superpixels_boundary/{superpixels_boundary_name}"
if os.path.exists(superpixels_boundary_path):
os.remove(superpixels_boundary_path)
# Todo: Remove the image patches from embedding
# Get the image list for the project
return jsonify(success=True), 204
@api.route("/api/<project_name>/image", methods=["POST"])
def upload_image(project_name):
current_app.logger.info(f'Uploading image for project {project_name} :')
# ---- check project exists first!
proj = Project.query.filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'Project = {str(proj.id)}')
file = request.files.get('file')
filename = file.filename
save_roi = False
pdest = f"./projects/{project_name}/patches/{filename}"
file.save(pdest)
img = get_img_metadata(pdest)
ret = insert_patch_into_DB(proj,project_name,img,save_roi)
return ret
@api.route("/api/<project_name>/roi/<roi_name>/mask", methods=["GET"])
def get_roimask(project_name, roi_name):
mask_folder = f"projects/{project_name}/mask/"
match = re.search(r"(.*)_(\d+)_(\d+)_roi.png", roi_name)
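    # e.g. a hypothetical roi_name "slide1_1024_2048_roi.png" yields mask_name
    # "slide1_mask.png" with x=1024 and y=2048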
mask_name = f"{match.group(1)}_mask.png"
x = int(match.group(2))
y = int(match.group(3))
roi = cv2.imread(f"./projects/{project_name}/roi/{roi_name}")
if roi is None:
jsonify(error=f"ROI file {roi_name} does not exist"), 400
h = roi.shape[0]
w = roi.shape[1]
mask = cv2.imread(mask_folder + mask_name)
mask = mask[y:y + h, x:x + w, :]
success, mask_encoded = cv2.imencode('.png', mask)
response = make_response(mask_encoded.tobytes())
response.headers['Content-Type'] = 'image/png'
response.headers['Content-Disposition'] = f'inline; filename = "{roi_name.replace(".png", "_mask.png")}"'
return response
@api.route("/api/<project_name>/image/<image_name>/roimask", methods=["POST"])
def post_roimask(project_name, image_name):
current_app.logger.info(f'Uploading roi mask for project {project_name} and image {image_name}:')
proj = Project.query.filter_by(name=project_name).first()
if proj is None:
return jsonify(error=f"project {project_name} doesn't exist"), 400
current_app.logger.info(f'Project id = {str(proj.id)}')
force = request.form.get('force', False, type=bool)
selected_image = db.session.query(Image).filter_by(projId=proj.id,
name=image_name).first()
if selected_image is None:
return jsonify(error=f"{selected_image} inside of project {project_name} doesn't exist"), 400
roimask_url = request.form.get('roimask', None)
if not roimask_url:
return jsonify(error="no roimask provided"), 400
roimask_data = re.search(r'data:image/png;base64,(.*)', roimask_url).group(1)
roimask_decoded = base64.b64decode(roimask_data)
roimask = cv2.imdecode(np.frombuffer(roimask_decoded, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
roimask = cv2.cvtColor(roimask, cv2.COLOR_BGR2RGB)
if not np.all(np.isin(roimask, [0, 255])):
return jsonify(error="Non [0,255] incorrect values are saved in the roimask mask, please check"), 400
if roimask.shape[2] > 3:
return jsonify(error="Roi Mask has 4 dimensions? Possible Alpha Channel Issue?"), 400
h = roimask.shape[0]
w = roimask.shape[1]
x = int(request.form.get('pointx', -1))
y = int(request.form.get('pointy', -1))
if -1 == x or -1 == y:
return jsonify(error="no x , y location provided"), 402
img = cv2.imread(f"./projects/{project_name}/{image_name}")
if y + h > img.shape[0] or x + w > img.shape[1] or y < 0 or x < 0:
return jsonify(f"ROI not within image, roi xy ({x} ,{y}) vs image size ({img.shape[0]}, {img.shape[1]})"), 400
mask_name = f"projects/{project_name}/mask/{image_name.replace('.png', '_mask.png')}"
if not os.path.isfile(mask_name):
mask = np.zeros(img.shape, dtype=np.uint8)
else:
mask = cv2.cvtColor(cv2.imread(mask_name), cv2.COLOR_BGR2RGB)
roimaskold = mask[y:y + h, x:x + w, :]
if not force and np.any(roimaskold != 0):
current_app.logger.error('ROI exists at this position.')
return jsonify(error="ROI at this position already exists, enable force to overide"), 402
mask[y:y + h, x:x + w, :] = roimask
cv2.imwrite(mask_name, cv2.cvtColor(mask, cv2.COLOR_RGB2BGR))
roi_base_name = f'{image_name.replace(".png", "_")}{x}_{y}_roi.png'
roi_name = f'projects/{project_name}/roi/{roi_base_name}'
roi = img[y:y + h, x:x + w, :]
cv2.imwrite(roi_name, roi)
# --- update positive / negative stats
selected_image.ppixel = np.count_nonzero(mask[:, :, 1] == 255)
selected_image.npixel = np.count_nonzero(mask[:, :, 0] == 255)
# -- determine number of new objects from this roi, will need for statistics later
nobjects_roi = get_number_of_objects(roimask)
selected_image.nobjects = get_number_of_objects(mask)
# ----
parent_image = Image.query.filter_by(name=image_name, projId=proj.id).first()
rois = Roi.query.filter_by(imageId=parent_image.id)
newRoi = None
for r in rois:
if abs(r.x-x) <= 2 and abs(r.y - y) <= 2:
newRoi = r
else:
current_app.logger.info("Found ROI ({}) with different coordinates: ({},{}) against ({},{})".format(r.id,r.x,r.y,x,y))
if newRoi:
current_app.logger.info("ROI already stored. Original patch: {}".format(newRoi.alpath))
newRoi.nobjects = nobjects_roi
newRoi.path = roi_name
db.session.commit()
else:
current_app.logger.info('Storing roi to database:')
newRoi = Roi(name=roi_base_name, path=roi_name, imageId=parent_image.id,
width=w, height=h, x=x, y=y, nobjects = nobjects_roi,
date=datetime.now())
db.session.add(newRoi)
db.session.commit()
return jsonify(success=True, roi=newRoi.as_dict()), 201
@api.route("/api/<project_name>/roi/<roi_name>", methods=["GET"])
def get_roi(project_name, roi_name):
response = send_from_directory(f"./projects/{project_name}/roi/", roi_name)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
@api.route("/api/<project_name>/image/<image_name>/mask", methods=["GET"])
def get_mask(project_name, image_name):
response = send_from_directory(f"./projects/{project_name}/mask",
image_name.replace(".png", "_mask.png"))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
@api.route("/api/<project_name>/image/<image_name>/prediction", methods=["GET"])
def get_prediction(project_name, image_name):
current_app.logger.info(f'Getting prediction for project {project_name} and image {image_name}')
project = Project.query.filter_by(name=project_name).first()
curr_image = Image.query.filter_by(projId=project.id, name=image_name).first()
if curr_image is None:
jsonify(error=f"Image {image_name} does not exist"), 400
modelid = request.args.get('model', get_latest_modelid(project_name), type=int)
current_app.logger.info(f'Model id = {str(modelid)}')
if modelid <= 0:
current_app.logger.warn(f"No DL model trained for {project_name} -- {image_name} -- {modelid}")
return jsonify(error="No AI model trained, so no AI results available yet."), 400
upload_folder = f"./projects/{project_name}/pred/{modelid}"
fname = image_name.replace(".png", "_pred.png")
full_fname = f"{upload_folder}/{fname}"
current_app.logger.info('Full filename for prediction = ' + full_fname)
print('Generating new prediction image:')
batchsize = config.getint('get_prediction', 'batchsize', fallback=32)
patchsize = config.getint('get_prediction', 'patchsize', fallback=256)
# run the command:
full_command = [sys.executable, "make_output_unet_cmd.py", f"-s{batchsize}", f"-p{patchsize}",
f"-m./projects/{project_name}/models/{modelid}/best_model.pth",
f"-o./projects/{project_name}/pred/{modelid}",
f"./projects/{project_name}/{image_name}", "--force"]
command_name = "generate_prediction"
return pool_get_image(project_name, command_name, full_command, full_fname, imageid=curr_image.id)
@api.route("/api/<project_name>/image/<image_name>/superpixels", methods=["GET"])
def get_superpixels(project_name, image_name):
current_app.logger.info(f'Getting superpixel for project {project_name} and image {image_name}')
latest_modelid = get_latest_modelid(project_name)
force = request.args.get('force', False, type=bool)
modelidreq = request.args.get('superpixel_run_id', latest_modelid, type=int)
current_app.logger.info(f'Model id = {str(modelidreq)}')
if modelidreq > latest_modelid:
return jsonify(error=f"Requested ModelID {modelidreq} greater than available models {latest_modelid}"), 400
project = Project.query.filter_by(name=project_name).first()
curr_image = Image.query.filter_by(projId=project.id, name=image_name).first()
superpixel_modelid = curr_image.superpixel_modelid
current_app.logger.info(f'The current superpixel_modelid of {image_name} = {str(superpixel_modelid)}')
upload_folder = f"./projects/{project_name}/superpixels"
spixel_fname = image_name.replace(".png", "_superpixels.png")
full_fname = f"{upload_folder}/{spixel_fname}"
current_app.logger.info('Full filename for superpixel = ' + full_fname)
batchsize = config.getint('superpixel', 'batchsize', fallback=32)
patchsize = config.getint('superpixel', 'patchsize', fallback=256)
approxcellsize = config.getint('superpixel', 'approxcellsize', fallback=20)
compactness = config.getfloat('superpixel', 'compactness', fallback=.01)
command_to_use = config.get("superpixel", 'command_to_use', fallback="make_superpixel.py")
if modelidreq < 0:
# We are using simple method, since we have no dl model
current_app.logger.warn(
f"No DL model trained for {project_name} -- {image_name} -- {modelidreq}, will use simple method")
command_to_use = "make_superpixel.py"
full_command = [sys.executable, command_to_use,
f"-p{patchsize}",
f"-x{batchsize}",
f"-c{compactness}",
f"-a{approxcellsize}",
f"-m./projects/{project_name}/models/{modelidreq}/best_model.pth",
f"-s./projects/{project_name}/superpixels/",
f"-o./projects/{project_name}/superpixels_boundary/",
f"./projects/{project_name}/{image_name}", "--force"]
current_app.logger.info(
f'We are running {command_to_use} to generate superpixels for IMAGE {image_name} in PROJECT {project_name} ')
current_app.logger.info(f'Superpixel command = {full_command}')
command_name = "generate_superpixel"
if modelidreq > superpixel_modelid or force:
try:
os.remove(full_fname)
except:
pass
return pool_get_image(project_name, command_name, full_command, full_fname, imageid=curr_image.id,
callback=get_superpixels_callback)
def get_superpixels_callback(result):
# update the job status in the database:
update_completed_job_status(result)
retval, jobid = result
engine = sqlalchemy.create_engine(get_database_uri())
dbretval = engine.connect().execute(f"select procout from jobid_{jobid} where procout like 'RETVAL:%'").first()
if dbretval is None:
# no retval, indicating superpixel didn't get to the end, leave everything as is
engine.dispose()
return
retvaldict = json.loads(dbretval[0].replace("RETVAL: ", ""))
if "model" in retvaldict: # for DL approach
modelid = retvaldict["model"].split("/")[4]
else:
modelid = -1
for img in retvaldict["output_file"]:
engine.connect().execute(
f"update image set superpixel_time = datetime(), superpixel_modelid = :modelid where path= :img", img=img,
modelid=modelid)
engine.dispose()
@api.route("/api/<project_name>/image/<image_name>/superpixels_boundary", methods=["GET"])
def get_superpixels_boundary(project_name, image_name):
upload_folder = f"./projects/{project_name}/superpixels_boundary"
spixel_fname = image_name.replace(".png", "_superpixels_boundary.png")
full_fname = f"{upload_folder}/{spixel_fname}"
oseg_fname = f'./projects/{project_name}/superpixels/{image_name.replace(".png", "_superpixels.png")}'
if not os.path.isfile(oseg_fname):
return jsonify(error="need to generate superpixels image first"), 400
folder, filename = os.path.split(full_fname)
response = send_from_directory(folder, filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
@api.route("/api/<project_name>/image/<image_name>/<direction>", methods=["GET"])
def prevnext_image(project_name, image_name, direction):
project = Project.query.filter_by(name=project_name).first()
curr_image = Image.query.filter_by(projId=project.id, name=image_name).first()
    # TODO: navigation cannot go to the previous image from the first one or to the next image from the last one; consider making it wrap around.
if (direction == "previous"):
image = Image.query.filter((Image.id < curr_image.id) & (Image.projId == project.id)) \
.order_by(Image.id.desc()).first()
else:
image = Image.query.filter((Image.id > curr_image.id) & (Image.projId == project.id)) \
.order_by(Image.id.asc()).first()
current_app.logger.info(f"{project_name} -- {image_name} --- {direction}")
if image is None:
errorMessage = "There is no " + direction + " image"
return jsonify(error=errorMessage), 400
else:
return jsonify(url=url_for('html.annotation', project_name=project_name, image_name=image.name)), 200
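# Minimal sketch of the wrap-around behaviour mentioned in the TODO above (an assumption, not
# wired into the route yet): if there is no previous/next image, fall back to the last/first
# image of the same project.
def _wraparound_image(project_id, curr_image_id, direction):
    if direction == "previous":
        image = Image.query.filter((Image.id < curr_image_id) & (Image.projId == project_id)) \
            .order_by(Image.id.desc()).first()
        if image is None:  # already at the first image -> wrap to the last one
            image = Image.query.filter(Image.projId == project_id).order_by(Image.id.desc()).first()
    else:
        image = Image.query.filter((Image.id > curr_image_id) & (Image.projId == project_id)) \
            .order_by(Image.id.asc()).first()
        if image is None:  # already at the last image -> wrap to the first one
            image = Image.query.filter(Image.projId == project_id).order_by(Image.id.asc()).first()
    return image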
# ---- config work
@api.route('/api/config', methods=["GET"])
def getconfig():  # return every config section and its items so the front end can display the current settings
allsections = dict()
for section in config.sections():
sectionitems = []
for items in config[section].items():
sectionitems.append(items)
allsections[section] = sectionitems
return jsonify(allsections)
@api.route("/api/<project_name>/embedcsv", methods=["GET"])
def get_embed_csv(project_name):
project = Project.query.filter_by(name=project_name).first()
latest_modelid = get_latest_modelid(project_name)
selected_modelid = request.args.get('modelid', default=latest_modelid, type=int)
fname = f"./projects/{project_name}/models/{selected_modelid}/embedding.csv"
if selected_modelid > latest_modelid or selected_modelid < 0:
error_message = f"Your selected View Embed Model ID is {selected_modelid}. A valid Model ID ranges from 0 to {latest_modelid}."
current_app.logger.error(error_message)
return jsonify(
error=error_message), 400
if not os.path.exists(fname):
error_message = f'No embedding data available to render for Model {selected_modelid}.'
current_app.logger.error(error_message)
return jsonify(
error=error_message), 400
folder, filename = os.path.split(fname)
response = send_from_directory(folder, filename)
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
def get_number_of_objects(img):
_, nobjects = label(img[:, :, 1], return_num=True)
return nobjects
| 42.901141
| 161
| 0.675906
|
d06833db2bcaa438139b385cba35e0cedf6a1911
| 2,140
|
py
|
Python
|
api/controllers.py
|
iasmini/planet
|
710fc11d992145d41c32bdff3edd917db6e21837
|
[
"MIT"
] | null | null | null |
api/controllers.py
|
iasmini/planet
|
710fc11d992145d41c32bdff3edd917db6e21837
|
[
"MIT"
] | null | null | null |
api/controllers.py
|
iasmini/planet
|
710fc11d992145d41c32bdff3edd917db6e21837
|
[
"MIT"
] | null | null | null |
from sqlalchemy.exc import OperationalError
from sqlalchemy.sql import text
from flask import request, Blueprint, current_app
from flask_restful import Resource
from app.planet.models import Planet
bp = Blueprint('api', __name__)
class ApiResource(Resource):
@bp.route('/api/planets/')
def get(self):
""" Returns a list of planets """
# filters
climate = request.args.get('climate', None)
name = request.args.get('name', None)
sort = request.args.get('sort', None)
page = request.args.get('page', None, type=int)
if page and page < 0:
            message = 'Page {page} does not exist. Minimum page is 1.'.format(page=page)
return {"status_code": 400, "response": message}
filters = ''
if climate:
filters = "climate='" + climate + "'"
if name:
if filters:
filters += " AND "
filters += "name LIKE '%" + name + "%'"
if filters:
rows = Planet.query.filter(text(filters))
if sort:
rows = rows.order_by(text(sort))
else:
if sort:
rows = Planet.query.order_by(text(sort))
else:
rows = Planet.query
try:
rows.count()
except OperationalError:
return {"status_code": 400, "message": "Ainda não existem planetas cadastrados."}
planets = list()
        # only return the results for the page given in the URL
if page:
rows = rows.paginate(page, current_app.config['ITEMS_PER_PAGE'], False)
for row in rows.items:
planets.append(row.to_dict())
else:
for row in rows:
planets.append(row.to_dict())
response = dict()
response['status_code'] = 200
response['response'] = dict()
if page:
response['response']['page'] = page
response['response']['items_per_page'] = current_app.config['ITEMS_PER_PAGE']
response['response']['results'] = planets
return response
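# Example requests for the endpoint above (illustrative only; the final URL prefix depends on how
# the blueprint is registered):
#   GET /api/planets/                        -> every planet
#   GET /api/planets/?climate=arid           -> exact-match filter on climate
#   GET /api/planets/?name=too               -> partial-match filter on name (LIKE '%too%')
#   GET /api/planets/?sort=name&page=1       -> sorted results, first page (ITEMS_PER_PAGE per page)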
| 28.533333
| 93
| 0.557009
|
34580aeba3d4dd995a0970b15174ffe440cd2eff
| 55
|
py
|
Python
|
core/model/__init__.py
|
rezmond/images_sort
|
a50630d5ee8be276452cfca0cec30d59cb8612b7
|
[
"MIT"
] | null | null | null |
core/model/__init__.py
|
rezmond/images_sort
|
a50630d5ee8be276452cfca0cec30d59cb8612b7
|
[
"MIT"
] | null | null | null |
core/model/__init__.py
|
rezmond/images_sort
|
a50630d5ee8be276452cfca0cec30d59cb8612b7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .model import MoverModel
| 13.75
| 29
| 0.636364
|
ace7f6c57189a154de750e3dd6cfdbd7e204e294
| 611
|
py
|
Python
|
openpifpaf_hand/__init__.py
|
DuncanZauss/openpifpaf_hand
|
bea3529d46e859060681a4ba180a8e8cee6a3f7b
|
[
"MIT"
] | null | null | null |
openpifpaf_hand/__init__.py
|
DuncanZauss/openpifpaf_hand
|
bea3529d46e859060681a4ba180a8e8cee6a3f7b
|
[
"MIT"
] | null | null | null |
openpifpaf_hand/__init__.py
|
DuncanZauss/openpifpaf_hand
|
bea3529d46e859060681a4ba180a8e8cee6a3f7b
|
[
"MIT"
] | null | null | null |
import openpifpaf
from .freihand import Freihand
from .rhd import RHD
from .cifonly import CifOnly
def register():
openpifpaf.DATAMODULES['freihand'] = Freihand
openpifpaf.DATAMODULES['rhd'] = RHD
openpifpaf.DECODERS.add(CifOnly)
openpifpaf.CHECKPOINT_URLS['shufflenetv2k16-hand'] = 'https://github.com/DuncanZauss/' \
'openpifpaf_assets/releases/download/v0.1.0/rhd_freihand_sk16.pkl.epoch600'
openpifpaf.CHECKPOINT_URLS['shufflenetv2k16-wb-hand'] = 'https://github.com/DuncanZauss/' \
'openpifpaf_assets/releases/download/v0.1.0/freihand_wholebody_sk16.pkl.epoch600'
| 38.1875
| 95
| 0.754501
|
d418c69c7d07f39b7c96a1e2ac08cbb0e7cf0e29
| 29,658
|
py
|
Python
|
source/lib/blueprints/byom/pipeline_definitions/templates_parameters.py
|
Snehitha12345/mlops-workload-orchestrator
|
d19d34f3ce2ed2b65abaaf0c7750b8be7cb369b3
|
[
"Apache-2.0"
] | 10
|
2022-01-28T23:48:11.000Z
|
2022-03-31T14:38:06.000Z
|
source/lib/blueprints/byom/pipeline_definitions/templates_parameters.py
|
Snehitha12345/mlops-workload-orchestrator
|
d19d34f3ce2ed2b65abaaf0c7750b8be7cb369b3
|
[
"Apache-2.0"
] | 1
|
2022-01-24T21:21:55.000Z
|
2022-01-24T21:21:55.000Z
|
source/lib/blueprints/byom/pipeline_definitions/templates_parameters.py
|
Snehitha12345/mlops-workload-orchestrator
|
d19d34f3ce2ed2b65abaaf0c7750b8be7cb369b3
|
[
"Apache-2.0"
] | 6
|
2022-01-31T04:55:19.000Z
|
2022-03-10T08:37:29.000Z
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
from aws_cdk import core
class ParameteresFactory:
@staticmethod
def create_notification_email_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"NotificationEmail",
type="String",
description="email for pipeline outcome notifications",
allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
constraint_description="Please enter an email address with correct format (example@example.com)",
min_length=5,
max_length=320,
)
@staticmethod
def create_git_address_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"CodeCommitRepoAddress",
type="String",
description="AWS CodeCommit repository clone URL to connect to the framework.",
allowed_pattern=(
"^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]"
"+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|^$)"
),
min_length=0,
max_length=320,
constraint_description=(
"CodeCommit address must follow the pattern: ssh or "
"https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
),
)
@staticmethod
def create_existing_bucket_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ExistingS3Bucket",
type="String",
description="Name of existing S3 bucket to be used for ML assets. S3 Bucket must be in the same region as the deployed stack, and has versioning enabled. If not provided, a new S3 bucket will be created.",
allowed_pattern="((?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)|^$)",
min_length=0,
max_length=63,
)
@staticmethod
def create_existing_ecr_repo_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ExistingECRRepo",
type="String",
description="Name of existing Amazon ECR repository for custom algorithms. If not provided, a new ECR repo will be created.",
allowed_pattern="((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*|^$)",
min_length=0,
max_length=63,
)
@staticmethod
def create_account_id_parameter(scope: core.Construct, id: str, account_type: str) -> core.CfnParameter:
return core.CfnParameter(
scope,
id,
type="String",
description=f"AWS {account_type} account number where the CF template will be deployed",
allowed_pattern="^\d{12}$",
)
@staticmethod
def create_org_id_parameter(scope: core.Construct, id: str, account_type: str) -> core.CfnParameter:
return core.CfnParameter(
scope,
id,
type="String",
description=f"AWS {account_type} organizational unit id where the CF template will be deployed",
allowed_pattern="^ou-[0-9a-z]{4,32}-[a-z0-9]{8,32}$",
)
@staticmethod
def create_blueprint_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BlueprintBucket",
type="String",
description="Bucket name for blueprints of different types of ML Pipelines.",
min_length=3,
)
@staticmethod
def create_data_capture_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"DataCaptureBucket",
type="String",
description="Bucket name where the data captured from SageMaker endpoint will be stored.",
min_length=3,
)
@staticmethod
def create_baseline_output_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineOutputBucket",
type="String",
description="Bucket name where the output of the baseline job will be stored.",
min_length=3,
)
@staticmethod
def create_batch_input_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BatchInputBucket",
type="String",
description="Bucket name where the data input of the bact transform is stored.",
min_length=3,
)
@staticmethod
def create_assets_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"AssetsBucket",
type="String",
description="Bucket name where the model and baselines data are stored.",
min_length=3,
)
@staticmethod
def create_ground_truth_bucket_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"GroundTruthBucket",
type="String",
description="Bucket name where the ground truth data will be stored.",
min_length=3,
)
@staticmethod
def create_custom_algorithms_ecr_repo_arn_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"CustomAlgorithmsECRRepoArn",
type="String",
description="The arn of the Amazon ECR repository where custom algorithm image is stored (optional)",
allowed_pattern="(^arn:aws:ecr:(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\\d:\\d{12}:repository/.+|^$)",
constraint_description="Please enter valid ECR repo ARN",
min_length=0,
max_length=2048,
)
@staticmethod
def create_kms_key_arn_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"KmsKeyArn",
type="String",
description="The KMS ARN to encrypt the output of the batch transform job and instance volume (optional).",
allowed_pattern="(^arn:aws:kms:(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d:\d{12}:key/.+|^$)",
constraint_description="Please enter kmsKey ARN",
min_length=0,
max_length=2048,
)
@staticmethod
def create_algorithm_image_uri_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ImageUri",
type="String",
description="The algorithm image uri (build-in or custom)",
)
@staticmethod
def create_model_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope, "ModelName", type="String", description="An arbitrary name for the model.", min_length=1
)
@staticmethod
def create_stack_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope, "StackName", type="String", description="The name to assign to the deployed CF stack.", min_length=1
)
@staticmethod
def create_endpoint_name_parameter(scope: core.Construct, optional=False) -> core.CfnParameter:
return core.CfnParameter(
scope,
"EndpointName",
type="String",
description="The name of the AWS SageMaker's endpoint",
min_length=0 if optional else 1,
)
@staticmethod
def create_model_artifact_location_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ModelArtifactLocation",
type="String",
description="Path to model artifact inside assets bucket.",
)
@staticmethod
def create_inference_instance_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"InferenceInstance",
type="String",
description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
min_length=7,
)
@staticmethod
def create_batch_inference_data_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BatchInferenceData",
type="String",
description="S3 bucket path (including bucket name) to batch inference data file.",
)
@staticmethod
def create_batch_job_output_location_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BatchOutputLocation",
type="String",
description="S3 path (including bucket name) to store the results of the batch job.",
)
@staticmethod
def create_data_capture_location_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"DataCaptureLocation",
type="String",
description="S3 path (including bucket name) to store captured data from the Sagemaker endpoint.",
min_length=3,
)
@staticmethod
def create_baseline_job_output_location_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineJobOutputLocation",
type="String",
description="S3 path (including bucket name) to store the Data Baseline Job's output.",
min_length=3,
)
@staticmethod
def create_monitoring_output_location_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"MonitoringOutputLocation",
type="String",
description="S3 path (including bucket name) to store the output of the Monitoring Schedule.",
min_length=3,
)
@staticmethod
def create_schedule_expression_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ScheduleExpression",
type="String",
description="cron expression to run the monitoring schedule. E.g., cron(0 * ? * * *), cron(0 0 ? * * *), etc.",
allowed_pattern="^cron(\\S+\\s){5}\\S+$",
)
@staticmethod
def create_baseline_data_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineData",
type="String",
description="Location of the Baseline data in Assets S3 Bucket.",
)
@staticmethod
def create_instance_type_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"InstanceType",
type="String",
description="EC2 instance type that model monitoring jobs will be running on. E.g., ml.m5.large",
allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
min_length=7,
)
@staticmethod
def create_instance_volume_size_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"InstanceVolumeSize",
type="Number",
description="Instance volume size used in model monitoring jobs. E.g., 20",
)
@staticmethod
def create_baseline_max_runtime_seconds_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineMaxRuntimeSeconds",
type="String",
default="",
description="Optional Maximum runtime in seconds the baseline job is allowed to run. E.g., 3600",
)
@staticmethod
def create_monitor_max_runtime_seconds_parameter(scope: core.Construct, monitoring_type: str) -> core.CfnParameter:
max_default = "1800" if monitoring_type in ["ModelQuality", "ModelBias"] else "3600"
return core.CfnParameter(
scope,
"MonitorMaxRuntimeSeconds",
type="Number",
default=max_default,
description=(
f" Required Maximum runtime in seconds the job is allowed to run the {monitoring_type} baseline job. "
+ "For data quality and model explainability, this can be up to 3600 seconds for an hourly schedule. "
+ "For model bias and model quality hourly schedules, this can be up to 1800 seconds."
),
min_value=1,
max_value=86400,
)
@staticmethod
def create_baseline_job_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineJobName",
type="String",
description="Unique name of the data baseline job",
min_length=3,
max_length=63,
)
@staticmethod
def create_monitoring_schedule_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"MonitoringScheduleName",
type="String",
description="Unique name of the monitoring schedule job",
min_length=3,
max_length=63,
)
@staticmethod
def create_template_zip_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"TemplateZipFileName",
type="String",
allowed_pattern="^.*\.zip$",
description="The zip file's name containing the CloudFormation template and its parameters files",
)
@staticmethod
def create_template_file_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"TemplateFileName",
type="String",
allowed_pattern="^.*\.yaml$",
description="CloudFormation template's file name",
)
@staticmethod
def create_stage_params_file_name_parameter(scope: core.Construct, id: str, stage_type: str) -> core.CfnParameter:
return core.CfnParameter(
scope,
id,
type="String",
allowed_pattern="^.*\.json$",
description=f"parameters json file's name for the {stage_type} stage",
)
@staticmethod
def create_custom_container_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"CustomImage",
default="",
type="String",
description=(
"Should point to a zip file containing dockerfile and assets for building a custom model. "
"If empty it will be using containers from SageMaker Registry"
),
)
@staticmethod
def create_ecr_repo_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ECRRepoName",
type="String",
description="Name of the Amazon ECR repository. This repo will be used to store custom algorithms images.",
allowed_pattern="(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*",
min_length=1,
)
@staticmethod
def create_image_tag_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope, "ImageTag", type="String", description="Docker image tag for the custom algorithm", min_length=1
)
@staticmethod
def create_delegated_admin_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"DelegatedAdminAccount",
type="String",
allowed_values=["Yes", "No"],
default="Yes",
description="Is a delegated administrator account used to deploy accross account",
)
@staticmethod
def create_detailed_error_message_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"AllowDetailedErrorMessage",
type="String",
allowed_values=["Yes", "No"],
default="Yes",
description="Allow including a detailed message of any server-side errors in the API call's response",
)
@staticmethod
def create_use_model_registry_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"UseModelRegistry",
type="String",
allowed_values=["Yes", "No"],
default="No",
description="Will Amazon SageMaker's Model Registry be used to provision models?",
)
@staticmethod
def create_model_registry_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"CreateModelRegistry",
type="String",
allowed_values=["Yes", "No"],
default="No",
description="Do you want the solution to create the SageMaker Model Package Group Name (i.e., Model Registry)",
)
@staticmethod
def create_model_package_group_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ModelPackageGroupName",
type="String",
description="SageMaker model package group name",
min_length=0,
)
@staticmethod
def create_model_package_name_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ModelPackageName",
allowed_pattern="(^arn:aws[a-z\-]*:sagemaker:[a-z0-9\-]*:[0-9]{12}:model-package/.*|^$)",
type="String",
description="The model name (version arn) in SageMaker's model package name group",
)
@staticmethod
def create_instance_count_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"MonitoringJobInstanceCount",
type="Number",
default="1",
description="Instance count used by model monitoring job. For example, 1",
)
@staticmethod
def create_ground_truth_s3_uri_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"MonitorGroundTruthInput",
type="String",
description="Amazon S3 prefix that contains the ground truth data",
min_length=3,
)
@staticmethod
def create_problem_type_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ProblemType",
type="String",
allowed_values=["Regression", "BinaryClassification", "MulticlassClassification"],
description="Problem type. Possible values: Regression | BinaryClassification | MulticlassClassification",
)
@staticmethod
def create_inference_attribute_parameter(scope: core.Construct, job_type: str) -> core.CfnParameter:
return core.CfnParameter(
scope,
f"{job_type}InferenceAttribute",
type="String",
description="Index or JSONpath to locate predicted label(s)",
)
@staticmethod
def create_probability_attribute_parameter(scope: core.Construct, job_type: str) -> core.CfnParameter:
return core.CfnParameter(
scope,
f"{job_type}ProbabilityAttribute",
type="String",
description="Index or JSONpath to locate probabilities.",
)
@staticmethod
def create_ground_truth_attribute_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"BaselineGroundTruthAttribute",
type="String",
description="Index or JSONpath to locate ground truth label.",
)
@staticmethod
def create_probability_threshold_attribute_parameter(scope: core.Construct) -> core.CfnParameter:
return core.CfnParameter(
scope,
"ProbabilityThresholdAttribute",
type="String",
description="Threshold to convert probabilities to binaries",
)
@staticmethod
def create_model_predicted_label_config_parameter(scope):
return core.CfnParameter(
scope,
"ModelPredictedLabelConfig",
type="String",
description=(
"Dictionary provided as a json of the"
" sagemaker.clarify.ModelPredictedLabelConfig attributes ({'label':...,}). "
"Optional for a regression problem."
),
)
@staticmethod
def create_bias_config_parameter(scope):
return core.CfnParameter(
scope,
"BiasConfig",
type="String",
description=(
"Dictionary provided as a json using "
"of the sagemaker.clarify.BiasConfig attributes ({'label_values_or_threshold':...,})."
),
min_length=3,
)
@staticmethod
def create_shap_config_parameter(scope):
return core.CfnParameter(
scope,
"SHAPConfig",
type="String",
description=(
"Dictionary provided as a json "
"of the sagemaker.clarify.SHAPConfig attributes "
"({'baseline':...,})."
),
min_length=3,
)
@staticmethod
def create_model_scores_parameter(scope):
return core.CfnParameter(
scope,
"ExplainabilityModelScores",
type="String",
description=(
"A Python int/str provided as a string (e.g., using json.dumps(5)) "
"Index or JSONPath location in the model output for the predicted "
"scores to be explained. This is not required if the model output is a single score."
),
)
@staticmethod
def create_features_attribute_parameter(scope):
return core.CfnParameter(
scope,
"FeaturesAttribute",
type="String",
description="Index or JSONpath to locate features",
)
class ConditionsFactory:
@staticmethod
def create_custom_algorithms_ecr_repo_arn_provided_condition(
scope: core.Construct, custom_algorithms_ecr_repo_arn: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"CustomECRRepoProvided",
expression=core.Fn.condition_not(
core.Fn.condition_equals(custom_algorithms_ecr_repo_arn.value_as_string, "")
),
)
@staticmethod
def create_kms_key_arn_provided_condition(
scope: core.Construct, kms_key_arn: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"KmsKeyProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(kms_key_arn.value_as_string, "")),
)
@staticmethod
def create_git_address_provided_condition(
scope: core.Construct, git_address: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"GitAddressProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(git_address.value_as_string, "")),
)
@staticmethod
def create_existing_bucket_provided_condition(
scope: core.Construct, existing_bucket: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"S3BucketProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(existing_bucket.value_as_string, "")),
)
@staticmethod
def create_existing_ecr_provided_condition(
scope: core.Construct, existing_ecr_repo: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"ECRProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(existing_ecr_repo.value_as_string, "")),
)
@staticmethod
def create_new_bucket_condition(scope: core.Construct, existing_bucket: core.CfnParameter) -> core.CfnCondition:
return core.CfnCondition(
scope,
"CreateS3Bucket",
expression=core.Fn.condition_equals(existing_bucket.value_as_string, ""),
)
@staticmethod
def create_new_ecr_repo_condition(scope: core.Construct, existing_ecr_repo: core.CfnParameter) -> core.CfnCondition:
return core.CfnCondition(
scope,
"CreateECRRepo",
expression=core.Fn.condition_equals(existing_ecr_repo.value_as_string, ""),
)
@staticmethod
def create_delegated_admin_condition(
scope: core.Construct, delegated_admin_parameter: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"UseDelegatedAdmin",
expression=core.Fn.condition_equals(delegated_admin_parameter.value_as_string, "Yes"),
)
@staticmethod
def create_model_registry_condition(
scope: core.Construct, create_model_registry: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"CreateModelRegistryCondition",
expression=core.Fn.condition_equals(create_model_registry.value_as_string, "Yes"),
)
@staticmethod
def create_model_registry_provided_condition(
scope: core.Construct, model_package_name: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"ModelRegistryProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(model_package_name.value_as_string, "")),
)
@staticmethod
def create_endpoint_name_provided_condition(
scope: core.Construct, endpoint_name: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"EndpointNameProvided",
expression=core.Fn.condition_not(core.Fn.condition_equals(endpoint_name.value_as_string, "")),
)
@staticmethod
def create_problem_type_binary_classification_attribute_provided_condition(
scope: core.Construct, problem_type: core.CfnParameter, attribute: core.CfnParameter, attribute_name: str
) -> core.CfnCondition:
return core.CfnCondition(
scope,
f"ProblemTypeBinaryClassification{attribute_name}Provided",
expression=core.Fn.condition_and(
core.Fn.condition_equals(problem_type.value_as_string, "BinaryClassification"),
core.Fn.condition_not(core.Fn.condition_equals(attribute.value_as_string, "")),
),
)
@staticmethod
def create_problem_type_binary_classification_condition(
scope: core.Construct, problem_type: core.CfnParameter
) -> core.CfnCondition:
return core.CfnCondition(
scope,
"ProblemTypeBinaryClassification",
expression=core.Fn.condition_equals(problem_type.value_as_string, "BinaryClassification"),
)
@staticmethod
def create_attribute_provided_condition(scope, logical_id, attribute):
return core.CfnCondition(
scope,
logical_id,
expression=core.Fn.condition_not(core.Fn.condition_equals(attribute, "")),
)
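# Minimal usage sketch (illustrative only; "ExampleByomStack" is an assumed name, not part of the
# solution): parameters are created against a stack scope and CloudFormation conditions are then
# derived from them.
class ExampleByomStack(core.Stack):
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # CloudFormation parameters exposed by this stack
        kms_key_arn = ParameteresFactory.create_kms_key_arn_parameter(self)
        existing_bucket = ParameteresFactory.create_existing_bucket_parameter(self)
        # conditions that guard optional resources (e.g. only create a bucket if none was provided)
        self.kms_key_provided = ConditionsFactory.create_kms_key_arn_provided_condition(self, kms_key_arn)
        self.create_new_bucket = ConditionsFactory.create_new_bucket_condition(self, existing_bucket)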
| 39.126649
| 217
| 0.59677
|
ffba28891e664b68f644fdef865a68f35d23ce4f
| 848
|
py
|
Python
|
Day-3/aocd/2.py
|
karthikmurakonda/advents-of-2021
|
2dcfc994eeccb34e26a04d38afcf1d68374a84cd
|
[
"MIT"
] | null | null | null |
Day-3/aocd/2.py
|
karthikmurakonda/advents-of-2021
|
2dcfc994eeccb34e26a04d38afcf1d68374a84cd
|
[
"MIT"
] | null | null | null |
Day-3/aocd/2.py
|
karthikmurakonda/advents-of-2021
|
2dcfc994eeccb34e26a04d38afcf1d68374a84cd
|
[
"MIT"
] | null | null | null |
def recfuncO2(lines,i):
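    # oxygen generator rating: keep the lines whose bit i matches the most common bit (ties keep '1')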
zeroes = []
ones = []
if len(lines) == 1:
return lines[0]
for line in lines:
if line[i] == '0':
zeroes.append(line)
else:
ones.append(line)
if len(zeroes) > len(ones):
return recfuncO2(zeroes, i+1)
else:
return recfuncO2(ones, i+1)
def recfuncCo2(lines,i):
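    # CO2 scrubber rating: keep the lines whose bit i matches the least common bit (ties keep '0')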
zeroes = []
ones = []
if len(lines) == 1:
return lines[0]
for line in lines:
if line[i] == '0':
zeroes.append(line)
else:
ones.append(line)
if len(zeroes) > len(ones):
return recfuncCo2(ones, i+1)
else:
return recfuncCo2(zeroes, i+1)
with open('input.txt', 'r') as f:
lines = []
for line in f:
        lines.append(line.strip())  # drop the trailing newline so only the bit string is kept
print(recfuncO2(lines,0))
print(recfuncCo2(lines,0))
| 23.555556
| 38
| 0.518868
|
1f6b83f9ba61cfaabbd3e1ab8ab276c79a6c17c6
| 4,176
|
py
|
Python
|
pirates/effects/SmokePillar.py
|
ksmit799/POTCO-PS
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 8
|
2017-01-24T04:33:29.000Z
|
2020-11-01T08:36:24.000Z
|
pirates/effects/SmokePillar.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 1
|
2017-03-02T18:05:17.000Z
|
2017-03-14T06:47:10.000Z
|
pirates/effects/SmokePillar.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 11
|
2017-03-02T18:46:07.000Z
|
2020-11-01T08:36:26.000Z
|
# File: S (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class SmokePillar(PooledEffect, EffectController):
    cardScale = 64.0
    particleDummy = None  # lazily created parent node for this effect's particles
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleSmoke')
self.speed = 20.0
self.radius = 7.0
self.spriteScale = 1.0
if not SmokePillar.particleDummy:
SmokePillar.particleDummy = render.attachNewNode(ModelNode('SmokePillarParticleDummy'))
SmokePillar.particleDummy.setDepthWrite(0)
SmokePillar.particleDummy.setLightOff()
self.f = ParticleEffect.ParticleEffect('SmokePillar')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('ZSpinParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereVolumeEmitter')
self.f.addParticles(self.p0)
f0 = ForceGroup.ForceGroup('gravity')
force0 = LinearVectorForce(Vec3(0.0, 0.0, -40.0), 1.0, 1)
force0.setActive(1)
f0.addForce(force0)
self.f.addForceGroup(f0)
def createTrack(self):
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.29999999999999999)
self.p0.setLitterSize(10)
self.p0.setLitterSpread(4)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.setFloorZ(-10.0)
self.p0.factory.setLifespanBase(1.0)
self.p0.factory.setLifespanSpread(0.5)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.94999999999999996)
self.p0.factory.setTerminalVelocityBase(2000.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.factory.setInitialAngle(0.0)
self.p0.factory.setInitialAngleSpread(30.0)
self.p0.factory.enableAngularVelocity(1)
self.p0.factory.setAngularVelocity(5.0)
self.p0.factory.setAngularVelocitySpread(1.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.59999999999999998)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setInitialXScale(0.12 * self.spriteScale * self.cardScale)
self.p0.renderer.setFinalXScale(0.34999999999999998 * self.spriteScale * self.cardScale)
self.p0.renderer.setInitialYScale(0.12 * self.spriteScale * self.cardScale)
self.p0.renderer.setFinalYScale(0.34999999999999998 * self.spriteScale * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPNOBLEND)
self.p0.renderer.setAlphaDisable(0)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(5.0)
self.p0.emitter.setAmplitudeSpread(2.0)
self.p0.emitter.setOffsetForce(Vec3(5.0, 5.0, 60.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 1.0))
self.p0.emitter.setRadius(self.radius)
self.track = Sequence(Func(self.p0.setBirthRate, 0.02), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy), Func(self.f.reparentTo, self), Wait(0.29999999999999999), Func(self.p0.setBirthRate, 100), Wait(7.0), Func(self.cleanUpEffect))
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| 43.5
| 267
| 0.695163
|
8bc62bf2c490498ea1dff6a417dc9c3797b16563
| 148
|
py
|
Python
|
app/grandchallenge/policies/views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 101
|
2018-04-11T14:48:04.000Z
|
2022-03-28T00:29:48.000Z
|
app/grandchallenge/policies/views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 1,733
|
2018-03-21T11:56:16.000Z
|
2022-03-31T14:58:30.000Z
|
app/grandchallenge/policies/views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 42
|
2018-06-08T05:49:07.000Z
|
2022-03-29T08:43:01.000Z
|
from django.views.generic import DetailView
from grandchallenge.policies.models import Policy
class PolicyDetail(DetailView):
model = Policy
| 18.5
| 49
| 0.810811
|
620231064fec8bef87a94b6fb0bf8045722c72bc
| 2,420
|
py
|
Python
|
apps/empleabilidad/models.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
apps/empleabilidad/models.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
apps/empleabilidad/models.py
|
osw4l/villas-de-san-pablo
|
89f00dfbbfbfee5111bd9852ddfbdb8727d10ed2
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse_lazy
from . import constants
# Create your models here.
class Vacante(models.Model):
cargo = models.CharField(max_length=255)
salario = models.PositiveIntegerField()
fecha = models.DateField()
class Meta:
verbose_name = 'Vacante'
verbose_name_plural = 'Vacantes'
def __str__(self):
return '{} ${}, {}'.format(self.cargo, self.salario, self.fecha)
def get_update_url(self):
return reverse_lazy('empleabilidad:editar_vacante',
kwargs={
'pk': self.pk
})
class VacantePersona(models.Model):
persona = models.ForeignKey('personas.Persona', related_name='item_c')
vacante = models.ForeignKey(Vacante)
fecha_contratacion = models.DateField(
blank=True,
null=True
)
tiempo_contrato = models.CharField(
max_length=50,
choices=constants.TIEMPO_CONTRATO
)
salario = models.PositiveIntegerField(
blank=True,
null=True
)
observaciones = models.CharField(
max_length=255,
blank=True,
null=True
)
class FormacionTrabajo(models.Model):
fecha_creacion = models.DateField()
nombre_programa = models.CharField(
max_length=255
)
def __str__(self):
return '{} creado en {}'.format(
self.nombre_programa,
self.fecha_creacion
)
def get_update_url(self):
return reverse_lazy('empleabilidad:editar_formacion_trabajo',
kwargs={
'pk': self.pk
})
def personas(self):
return FormacionTrabajoPersona.objects.filter(programa=self)
class FormacionTrabajoPersona(models.Model):
persona = models.ForeignKey('personas.Persona', related_name='item_d')
programa = models.ForeignKey(FormacionTrabajo)
fecha_inscripcion = models.DateField()
tipo_formacion = models.CharField(
max_length=30,
choices=constants.TIPO_FORMACION
)
estado = models.CharField(
max_length=50,
choices=constants.ESTADO_FORMACION
)
fecha_proceso = models.DateField(
blank=True,
null=True
)
observacion = models.CharField(
max_length=255,
blank=True,
null=True
)
| 26.304348
| 74
| 0.611157
|
7418ef242e1d4aaeeb30493420c6e069e91d3c69
| 7,615
|
py
|
Python
|
registration/api.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
registration/api.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
registration/api.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
# This file provides the way of getting a phone number and an SMS confirmation code in "registration" process.
# The only two methods that should be exposed from this file are "get_phone_number" and "get_confirmation_code".
# You can find them at the end of this file. Choose one of ready-to-use implementations or create your own.
import json
from typing import Optional
from insomniac import network, HTTP_OK
from insomniac.sleeper import sleeper
from insomniac.utils import *
CONFIRMATION_CODE_MAX_ATTEMPTS_COUNT = (
5 # times to retry requesting of confirmation code
)
SMSPVA_COUNTRY_CODE = "RU" # or "US" or "ID" or any other else
SMSPVA_API_KEY = "your-api-key" # more info on the official smspva site http://smspva.com/new_theme_api.html
SMS_ACTIVATE_COUNTRY_CODE = 0 # 0 is Russia, other codes are listed in the docs https://sms-activate.ru/en/api2#number
SMS_ACTIVATE_API_KEY = "your-api-key" # more info on the official sms-activate.ru site https://sms-activate.ru/en/
class PhoneNumberData:
response_id = None
country_code = None
phone_number = None
def __init__(self, response_id, country_code, phone_number):
self.response_id = response_id
self.country_code = country_code
self.phone_number = phone_number
# -------- Simple implementation --------
def _get_phone_number_simple() -> Optional[PhoneNumberData]:
data = PhoneNumberData(0, None, None)
while data.country_code is None or data.phone_number is None:
user_input = input('Enter mobile phone (format "+7 1234567890"): ')
try:
data.country_code, data.phone_number = user_input.split(" ")
except ValueError:
continue
if data.country_code[0] != "+":
data.country_code = None
return data
def _get_confirmation_code_simple(_) -> Optional[str]:
return input("Enter confirmation code: ")
# -------- smspva.com API --------
def _get_phone_number_smspva() -> Optional[PhoneNumberData]:
url = (
f"http://smspva.com/priemnik.php?metod=get_number&service=opt16"
f"&country={SMSPVA_COUNTRY_CODE}"
f"&apikey={SMSPVA_API_KEY}"
)
code, body, fail_reason = network.get(url, "Mozilla/5.0")
if code == HTTP_OK and body is not None:
json_data = json.loads(body)
else:
print(
COLOR_FAIL
+ f"Cannot get phone number via smspva.com API: {code} ({fail_reason})"
+ COLOR_ENDC
)
return None
response_id = json_data["id"]
country_code = json_data["CountryCode"]
phone_number = json_data["number"]
phone_number_data = PhoneNumberData(response_id, country_code, phone_number)
return phone_number_data
def _get_confirmation_code_smspva(response_id) -> Optional[str]:
url = (
f"http://smspva.com/priemnik.php?metod=get_sms&service=opt16"
f"&country={SMSPVA_COUNTRY_CODE}"
f"&id={response_id}"
f"&apikey={SMSPVA_API_KEY}"
)
attempts_count = 0
while True:
sleeper.random_sleep(multiplier=8.0)
code, body, fail_reason = network.get(url, "Mozilla/5.0")
attempts_count += 1
if code == HTTP_OK and body is not None:
json_data = json.loads(body)
else:
print(
COLOR_FAIL
+ f"Cannot get confirmation code via smspva.com API: {code} ({fail_reason})"
+ COLOR_ENDC
)
return None
confirmation_code = json_data["sms"]
if confirmation_code is None:
if attempts_count >= CONFIRMATION_CODE_MAX_ATTEMPTS_COUNT:
print(
"Well, looks like Instagram isn't going to send SMS to this phone number"
)
return None
print("Let's wait a bit more: confirmation code isn't received yet")
else:
break
return confirmation_code
# -------- sms-activate.ru API --------
def _get_phone_number_sms_activate() -> Optional[PhoneNumberData]:
try:
import phonenumbers
except ImportError:
print(
COLOR_FAIL
+ f"Using sms-activate.ru API requires phonenumbers library."
+ COLOR_ENDC
)
print(
COLOR_FAIL
+ COLOR_BOLD
+ f"python3 -m pip install phonenumbers"
+ COLOR_ENDC
)
return None
url = (
f"https://sms-activate.ru/stubs/handler_api.php"
f"?api_key={SMS_ACTIVATE_API_KEY}"
f"&country={SMS_ACTIVATE_COUNTRY_CODE}"
f"&action=getNumber"
f"&service=ig"
)
code, body, fail_reason = network.get(url, "Mozilla/5.0")
if code != HTTP_OK or body is None:
print(
COLOR_FAIL
+ f"Cannot get phone number via sms-activate.ru API: {code} ({fail_reason})"
+ COLOR_ENDC
)
return None
response_regex = re.compile(r"ACCESS_NUMBER:(\d+):(\d+)")
match = response_regex.match(body.decode("utf-8"))
if match:
response_id = match.group(1)
full_phone_number = match.group(2)
else:
print(
COLOR_FAIL
+ f"Cannot parse sms-activate.ru response: {str(body)})"
+ COLOR_ENDC
)
return None
phone_number_object = phonenumbers.parse(f"+{full_phone_number}", None)
country_code = str(phone_number_object.country_code)
phone_number = str(phone_number_object.national_number)
phone_number_data = PhoneNumberData(response_id, country_code, phone_number)
return phone_number_data
def _get_confirmation_code_sms_activate(response_id) -> Optional[str]:
url = (
f"https://sms-activate.ru/stubs/handler_api.php"
f"?api_key={SMS_ACTIVATE_API_KEY}"
f"&id={response_id}"
f"&country={SMSPVA_COUNTRY_CODE}"
f"&action=getStatus"
)
attempts_count = 0
while True:
sleeper.random_sleep(multiplier=8.0)
code, body, fail_reason = network.get(url, "Mozilla/5.0")
attempts_count += 1
if code != HTTP_OK or body is None:
print(
COLOR_FAIL
+ f"Cannot get confirmation code via sms-activate.ru API: {code} ({fail_reason})"
+ COLOR_ENDC
)
return None
response_regex = re.compile(r"STATUS_OK:(\d+)")
match = response_regex.match(body.decode("utf-8"))
if match:
confirmation_code = match.group(1)
return confirmation_code
else:
if attempts_count >= CONFIRMATION_CODE_MAX_ATTEMPTS_COUNT:
print(
"Well, looks like Instagram isn't going to send SMS to this phone number"
)
return None
print("Let's wait a bit more: confirmation code isn't received yet")
# Choose either "simple" implementation (asks you to enter phone number and confirmation code in the terminal manually)
# or implementation via smspva.com API (automatically gets confirmation code from a remote SIM card).
#
# You can also write your own implementation! It just has to follow these rules: 1) get_phone_number() returns
# PhoneNumberData object and 2) get_confirmation_code(response_id) takes response_id argument from PhoneNumberData and
# returns confirmation code (string).
get_phone_number = _get_phone_number_simple # or _get_phone_number_smspva or _get_phone_number_sms_activate
get_confirmation_code = _get_confirmation_code_simple # or _get_confirmation_code_smspva or _get_confirmation_code_sms_activate
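# -------- Example of a custom implementation (illustrative sketch only) --------
# Per the contract above: get_phone_number() must return a PhoneNumberData object and
# get_confirmation_code(response_id) must return the confirmation code as a string.
# The environment variable names below are assumptions, not part of Insomniac.
def _get_phone_number_from_env() -> Optional[PhoneNumberData]:
    import os
    country_code = os.environ.get("REGISTRATION_COUNTRY_CODE")  # e.g. "+7"
    phone_number = os.environ.get("REGISTRATION_PHONE_NUMBER")  # e.g. "1234567890"
    if not country_code or not phone_number:
        return None
    return PhoneNumberData(0, country_code, phone_number)
def _get_confirmation_code_from_env(_) -> Optional[str]:
    import os
    return os.environ.get("REGISTRATION_CONFIRMATION_CODE")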
| 36.090047
| 128
| 0.645174
|
115216faa348cfe3e805cf6d5a6501cdddd4f685
| 6,863
|
py
|
Python
|
tests/unit/bokeh/application/handlers/test_code_runner.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | 1
|
2021-06-03T13:13:21.000Z
|
2021-06-03T13:13:21.000Z
|
tests/unit/bokeh/application/handlers/test_code_runner.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | 1
|
2021-04-21T19:44:07.000Z
|
2021-04-21T19:44:07.000Z
|
tests/unit/bokeh/application/handlers/test_code_runner.py
|
goncaloperes/bokeh
|
b857d2d17d7c19779bb0a7be2601d8238fb1d5e9
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import sys
from os.path import abspath, dirname
from types import ModuleType
# Module under test
import bokeh.application.handlers.code_runner as bahc # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class TestCodeRunner:
# Public methods ----------------------------------------------------------
def test_init(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
assert cr.failed is False
assert cr.error is None
assert cr.error_detail is None
assert cr.ran is False
assert cr.source == "# test"
assert cr.path == "path"
def test_syntax_error_init(self) -> None:
cr = bahc.CodeRunner("This is a syntax error", "path", [])
assert cr.failed is True
assert "Invalid syntax in" in cr.error
assert cr.error_detail is not None
def test_package_error_init(self) -> None:
with pytest.raises(ValueError):
bahc.CodeRunner("This is a syntax error", "/foo/__init__.py", [], "package")
def test_new_module_success(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
m = cr.new_module()
assert isinstance(m, ModuleType)
assert m.__dict__['__name__'].startswith('bokeh_app_')
assert m.__dict__['__file__'] == abspath("path")
assert m.__dict__['__package__'] is None
def test_new_module_initpy(self) -> None:
cr = bahc.CodeRunner("# test", "/foo/__init__.py", [])
m = cr.new_module()
assert isinstance(m, ModuleType)
assert m.__dict__['__name__'].startswith('bokeh_app_')
assert m.__dict__['__file__'].endswith("__init__.py")
assert m.__dict__['__package__'] == m.__dict__['__name__']
def test_new_module_package(self) -> None:
cr = bahc.CodeRunner("# test", "/foo/__init__.py", [])
package = cr.new_module()
cr = bahc.CodeRunner("# test", "path", [], package=package)
m = cr.new_module()
assert isinstance(m, ModuleType)
assert m.__dict__['__name__'].startswith('bokeh_app_')
assert m.__dict__['__file__'] == abspath("path")
assert m.__dict__['__package__'] == package.__dict__["__name__"]
def test_new_module_resets_run_errors(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
cr._failed = True
m = cr.new_module()
assert isinstance(m, ModuleType)
assert m.__dict__['__name__'].startswith('bokeh_app_')
assert m.__dict__['__file__'] == abspath("path")
def test_new_module_returns_None_for_permanent_errors(self) -> None:
cr = bahc.CodeRunner("This is a syntax error", "path", [])
assert cr.failed is True
m = cr.new_module()
assert m is None
def test_reset_run_errors(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
cr._failed = True
cr._error = "error"
cr._error_detail = "detail"
cr.reset_run_errors()
assert cr.failed is False
assert cr.error is None
assert cr.error_detail is None
def test_reset_run_errors_leaves_permanent_errors(self) -> None:
cr = bahc.CodeRunner("This is a syntax error", "path", [])
cr._failed = True
cr.reset_run_errors()
assert cr.failed is True
assert cr.error is not None
assert cr.error_detail is not None
def test_run_sets_ran(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
m = cr.new_module()
assert not cr.ran
cr.run(m, lambda: None)
assert cr.ran
def test_run_runs_post_check(self) -> None:
cr = bahc.CodeRunner("# test", "path", [])
m = cr.new_module()
assert not cr.ran
result = {}
def post_check():
result['ran'] = True
cr.run(m, post_check)
assert cr.ran
assert result == dict(ran=True)
def test_run_fixups_argv(self) -> None:
cr = bahc.CodeRunner("import sys; argv = list(sys.argv)", "path", ["foo", 10])
assert not cr.ran
m = cr.new_module()
cr.run(m, lambda: None)
assert m.__dict__['argv'] == ["path", "foo", 10]
def test_run_fixups_path(self) -> None:
cr = bahc.CodeRunner("import sys; path = list(sys.path)", "/dir/to/path", ["foo", 10])
assert not cr.ran
m = cr.new_module()
cr.run(m, lambda: None)
assert m.__dict__['path'][0] == dirname("/dir/to/path")
assert m.__dict__['path'][1:] == sys.path
def test_run_restores_cwd(self) -> None:
old_cwd = os.getcwd()
cr = bahc.CodeRunner("import os; os.chdir('/')", "path", ["foo", 10])
assert not cr.ran
m = cr.new_module()
cr.run(m, lambda: None)
assert os.getcwd() == old_cwd
def test_run_restores_argv(self) -> None:
old_argv = list(sys.argv)
cr = bahc.CodeRunner("# test", "path", ["foo", 10])
assert not cr.ran
m = cr.new_module()
cr.run(m, lambda: None)
assert sys.argv == old_argv
def test_run_restores_path(self) -> None:
old_path = list(sys.path)
cr = bahc.CodeRunner("# test", "path", ["foo", 10])
assert not cr.ran
m = cr.new_module()
cr.run(m, lambda: None)
assert sys.path == old_path
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 37.298913
| 94
| 0.485502
|
c428193a9f89653283e7316d53985e33b0ba8b61
| 183
|
py
|
Python
|
Django_Intershala/feedback/urls.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | 7
|
2021-03-08T17:09:39.000Z
|
2021-12-30T09:44:44.000Z
|
Django_Intershala/feedback/urls.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | null | null | null |
Django_Intershala/feedback/urls.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | 2
|
2021-03-03T11:35:05.000Z
|
2021-03-22T17:00:16.000Z
|
from django.urls import path
from .views import *
urlpatterns = [
path('post_feedback/', FeedBackViewsets.as_view()),
path('all_feedback/', AllFeedBackViewSets.as_view()),
]
| 22.875
| 57
| 0.721311
|
65c759b16543ecfb33b597692b79aa4500f0ab73
| 2,317
|
py
|
Python
|
app/core/config.py
|
chenyg0911/hxa-gloin
|
afd572ad653baadb93ff9cabc168e19125f402af
|
[
"MIT"
] | null | null | null |
app/core/config.py
|
chenyg0911/hxa-gloin
|
afd572ad653baadb93ff9cabc168e19125f402af
|
[
"MIT"
] | null | null | null |
app/core/config.py
|
chenyg0911/hxa-gloin
|
afd572ad653baadb93ff9cabc168e19125f402af
|
[
"MIT"
] | 1
|
2021-11-22T02:21:04.000Z
|
2021-11-22T02:21:04.000Z
|
from starlette.config import Config
import pytz
timezone = pytz.timezone('Asia/Shanghai')
config = Config("config")
project_name: str = config('project_name', cast=str, default='fastapi')
host: str = config('host', cast=str, default='0.0.0.0')
port: int = config('port', cast=int, default=8000)
debug: bool = config('debug', cast=bool, default=True)
version: str = config('version', cast=str, default='0.0.1')
allowed_hosts: str = config('allowed_hosts', cast=str, default='')
secret_key: str = config('secret_key', cast=str, default='welcome1')
api_key: str = config('api_key', cast=str, default='welcome1')
prefix_url: str = config('prefix_url', cast=str, default='/api')
# jwt
algorithm: str = config('algorithm', cast=str, default='HS256')
access_token_expire_minutes: int = config('access_token_expire_minutes', cast=int, default=60 * 24)
jwt_token_prefix: str = config('jwt_token_prefix', cast=str, default='Bearer')
# mongodb
database_url: str = config('database_url', cast=str,
default='mongodb://chromo:d2VsY29tZTEK@mongo:27017/chromoManager')
max_connections_count: int = config('max_connections_count', cast=int, default=10)
min_connections_count: int = config('min_connections_count', cast=int, default=10)
database_name: str = config('database_name', cast=str, default='chromoManager')
user_collection_name: str = config('user_collection_name', cast=str, default='user')
case_collection_name: str = config('case_collection_name', cast=str, default='case')
analysis_collection_name: str = config('analysis_collection_name', cast=str, default='analysis')
count_collection_name: str = config('count_collection_name', cast=str, default='count')
group_collection_name: str = config('group_collection_name', cast=str, default='group')
division_collection_name: str = config('division_collection_name', cast=str, default='division')
# redis
redis_host: str = config('redis_host', cast=str, default='127.0.0.1')
redis_port: int = config('redis_port', cast=int, default=6379)
redis_password: str = config('redis_password', cast=str, default=None)
# export path
export_path: str = config('export_path', cast=str, default=r'D:\chromo-manager-export')
# scan path
src_path: str = config('src_path', cast=str, default='/media/msd')
# file suffix
src_ext: str = config('src_ext', cast=str, default='MMI')
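# Example of the plain-text "config" file read by Config("config") above (KEY=value per line;
# the values shown here are placeholders, not real settings or credentials):
#   project_name=chromoManager
#   host=0.0.0.0
#   port=8000
#   debug=False
#   database_url=mongodb://user:password@mongo:27017/chromoManager
#   redis_host=127.0.0.1
#   export_path=/data/chromo-manager-export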
| 50.369565
| 99
| 0.744929
|
eb2f845f844e0d8354152498895be6eb1c6db1cc
| 69,513
|
py
|
Python
|
research/object_detection/core/losses_test.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 82,518
|
2016-02-05T12:07:23.000Z
|
2022-03-31T23:09:47.000Z
|
research/object_detection/core/losses_test.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 9,021
|
2016-03-08T01:02:05.000Z
|
2022-03-31T08:06:35.000Z
|
research/object_detection/core/losses_test.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 54,341
|
2016-02-06T17:19:55.000Z
|
2022-03-31T10:27:44.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.research.vale.object_detection.losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import box_list
from object_detection.core import losses
from object_detection.core import matcher
from object_detection.utils import test_case
class WeightedL2LocalizationLossTest(test_case.TestCase):
def testReturnsCorrectWeightedLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights))
return loss
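    # each weighted anchor contributes 0.5 * sum over 4 code dims of (1 - 0)^2 = 2, and there are 3 * 5 weighted anchors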
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectAnchorwiseLoss(self):
batch_size = 3
num_anchors = 16
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
expected_loss = np.ones((batch_size, num_anchors)) * 2
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectNanLoss(self):
batch_size = 3
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.concat([
tf.zeros([batch_size, num_anchors, code_size / 2]),
tf.ones([batch_size, num_anchors, code_size / 2]) * np.nan
], axis=2)
weights = tf.ones([batch_size, num_anchors])
loss_op = losses.WeightedL2LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
ignore_nan_targets=True)
loss = tf.reduce_sum(loss)
return loss
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
def testReturnsCorrectWeightedLossWithLossesMask(self):
batch_size = 4
num_anchors = 10
code_size = 4
def graph_fn():
prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32)
losses_mask = tf.constant([True, False, True, True], tf.bool)
loss_op = losses.WeightedL2LocalizationLoss()
loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor,
weights=weights, losses_mask=losses_mask))
return loss
expected_loss = (3 * 5 * 4) / 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, expected_loss)
class WeightedSmoothL1LocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
batch_size = 2
num_anchors = 3
code_size = 4
def graph_fn():
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0]], tf.float32)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 7.695
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
batch_size = 3
num_anchors = 3
code_size = 4
def graph_fn():
prediction_tensor = tf.constant([[[2.5, 0, .4, 0],
[0, 0, 0, 0],
[0, 2.5, 0, .4]],
[[3.5, 0, 0, 0],
[0, .4, 0, .9],
[0, 0, 1.5, 0]],
[[3.5, 7., 0, 0],
[0, .4, 0, .9],
[2.2, 2.2, 1.5, 0]]], tf.float32)
target_tensor = tf.zeros([batch_size, num_anchors, code_size])
weights = tf.constant([[2, 1, 1],
[0, 3, 0],
[4, 3, 0]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSmoothL1LocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 7.695
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedIOULocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 2.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithNoLabels(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
losses_mask = tf.constant([False], tf.bool)
loss_op = losses.WeightedIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 0.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedGIOULocalizationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, 0, 0]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[5, 5, 10, 10]]])
weights = [[1.0, .5, 2.0]]
loss_op = losses.WeightedGIOULocalizationLoss()
loss = loss_op(prediction_tensor,
target_tensor,
weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 3.5
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithNoLabels(self):
def graph_fn():
prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[0, 0, .5, .25]]])
target_tensor = tf.constant([[[1.5, 0, 2.4, 1],
[0, 0, 1, 1],
[50, 50, 500.5, 100.25]]])
weights = [[1.0, .5, 2.0]]
losses_mask = tf.constant([False], tf.bool)
loss_op = losses.WeightedGIOULocalizationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
exp_loss = 0.0
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class WeightedSigmoidClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
exp_loss = -2 * math.log(.5)
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
return loss
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithClassIndices(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100, 100],
[100, -100, -100, -100],
[100, 0, -100, 100],
[-100, -100, 100, -100]],
[[-100, 0, 100, 100],
[-100, 100, -100, 100],
[100, 100, 100, 100],
[0, 0, -1, 100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0, 0],
[1, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 1]],
[[0, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 0]]], tf.float32)
# Ignores the last class.
class_indices = tf.constant([0, 1, 2], tf.int32)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
class_indices=class_indices)
loss = tf.reduce_sum(loss, axis=2)
return loss
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSigmoidClassificationLoss()
loss_per_anchor = loss_op(prediction_tensor, target_tensor,
weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss_per_anchor)
return loss
exp_loss = -2 * math.log(.5)
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def _logit(probability):
return math.log(probability / (1. - probability))
class SigmoidFocalClassificationLossTest(test_case.TestCase):
def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.97)],
[_logit(0.91)],
[_logit(0.73)],
[_logit(0.27)],
[_logit(0.09)],
[_logit(0.03)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])
def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=None)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights))
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss / focal_loss)))
self.assertAlmostEqual(order_of_ratio, 1.)
def testIgnoreNegativeExampleLossViaAlphaMultiplier(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=1.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(focal_loss[0][3:], [0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][:3] /
focal_loss[0][:3])))
self.assertAllClose(order_of_ratio, [1., 1., 1.])
def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
def graph_fn():
prediction_tensor = tf.constant([[[_logit(0.55)],
[_logit(0.52)],
[_logit(0.50)],
[_logit(0.48)],
[_logit(0.45)]]], tf.float32)
target_tensor = tf.constant([[[1],
[1],
[1],
[0],
[0]]], tf.float32)
weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
alpha=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights), axis=2)
sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
target_tensor,
weights=weights), axis=2)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
order_of_ratio = np.power(10,
np.floor(np.log10(sigmoid_loss[0][3:] /
focal_loss[0][3:])))
self.assertAllClose(order_of_ratio, [1., 1.])
def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5,
gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(sigmoid_loss, focal_loss * 2)
def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None,
gamma=0.0)
sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
focal_loss = focal_loss_op(prediction_tensor, target_tensor,
weights=weights)
sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
weights=weights)
return sigmoid_loss, focal_loss
sigmoid_loss, focal_loss = self.execute(graph_fn, [])
self.assertAllClose(sigmoid_loss, focal_loss)
def testExpectedLossWithAlphaOneAndZeroGamma(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor
1.0 * # alpha
8), # positives from 8 anchors
focal_loss)
def testExpectedLossWithAlpha75AndZeroGamma(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
def testExpectedLossWithLossesMask(self):
def graph_fn():
# All zeros correspond to 0.5 probability.
prediction_tensor = tf.constant([[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75,
gamma=0.0)
focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
weights=weights,
losses_mask=losses_mask))
return focal_loss
focal_loss = self.execute(graph_fn, [])
self.assertAllClose(
(-math.log(.5) * # x-entropy per class per anchor.
((0.75 * # alpha for positives.
8) + # positives from 8 anchors.
(0.25 * # alpha for negatives.
8 * 2))), # negatives from 8 anchors for two classes.
focal_loss)
class WeightedSoftmaxClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self):
"""At very high logit_scale, all predictions will be ~0.33."""
def graph_fn():
# TODO(yonib): Also test logit_scale with anchorwise=False.
logit_scale = 10e16
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
loss_op = losses.WeightedSoftmaxClassificationLoss(
logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
uniform_distribution_loss = - math.log(.33333333333)
exp_loss = np.matrix([[uniform_distribution_loss] * 4,
[uniform_distribution_loss] * 4])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossWithLossesMask(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[0.5, 0.5, 0.5],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]], tf.float32)
losses_mask = tf.constant([True, True, False], tf.bool)
loss_op = losses.WeightedSoftmaxClassificationLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights,
losses_mask=losses_mask)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
class WeightedSoftmaxClassificationAgainstLogitsLossTest(test_case.TestCase):
def testReturnsCorrectLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 1]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = - 1.5 * math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self):
def graph_fn():
logit_scale = 100.
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
target_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[0, 0, -100],
[-100, -100, 100]],
[[-100, 0, 0],
[-100, 100, -100],
[-100, 100, -100],
[100, -100, -100]]], tf.float32)
weights = tf.constant([[1, 1, .5, 1],
[1, 1, 1, 0]], tf.float32)
weights_shape = tf.shape(weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), tf.constant([3])],
axis=0)
weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple)
loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
logit_scale=logit_scale)
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
return loss
# find softmax of the two prediction types above
softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)]
softmax_pred1 /= sum(softmax_pred1)
softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)]
softmax_pred2 /= sum(softmax_pred2)
# compute the expected cross entropy for perfect matches
exp_entropy1 = sum(
[-x*np.log(x) for x in softmax_pred1])
exp_entropy2 = sum(
[-x*np.log(x) for x in softmax_pred2])
# weighted expected losses
exp_loss = np.matrix(
[[exp_entropy1, exp_entropy1, exp_entropy2*.5, exp_entropy1],
[exp_entropy2, exp_entropy1, exp_entropy1, 0.]])
loss_output = self.execute(graph_fn, [])
self.assertAllClose(loss_output, exp_loss)
class BootstrappedSigmoidClassificationLossTest(test_case.TestCase):
def testReturnsCorrectLossSoftBootstrapping(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='soft')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = -math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectLossHardBootstrapping(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, 0],
[100, -100, -100],
[100, -100, -100],
[-100, -100, 100]],
[[-100, -100, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = -math.log(.5)
self.assertAllClose(loss_output, exp_loss)
def testReturnsCorrectAnchorWiseLoss(self):
def graph_fn():
prediction_tensor = tf.constant([[[-100, 100, -100],
[100, -100, -100],
[100, 0, -100],
[-100, -100, 100]],
[[-100, 0, 100],
[-100, 100, -100],
[100, 100, 100],
[0, 0, -1]]], tf.float32)
target_tensor = tf.constant([[[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 1]],
[[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 0]]], tf.float32)
weights = tf.constant([[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[0, 0, 0]]], tf.float32)
alpha = tf.constant(.5, tf.float32)
loss_op = losses.BootstrappedSigmoidClassificationLoss(
alpha, bootstrap_type='hard')
loss = loss_op(prediction_tensor, target_tensor, weights=weights)
loss = tf.reduce_sum(loss, axis=2)
return loss
loss_output = self.execute(graph_fn, [])
exp_loss = np.matrix([[0, 0, -math.log(.5), 0],
[-math.log(.5), 0, 0, 0]])
self.assertAllClose(loss_output, exp_loss)
class HardExampleMinerTest(test_case.TestCase):
def testHardMiningWithSingleLossType(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
# Uses only location loss to select hard examples
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='loc',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
exp_loc_loss = 100 + 3
exp_cls_loss = 0 + 0
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningWithBothLossType(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=1,
iou_threshold=0.0,
loss_type='both',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
exp_loc_loss = 80 + 0
exp_cls_loss = 50 + 9
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testHardMiningNMS(self):
def graph_fn():
location_losses = tf.constant([[100, 90, 80, 0],
[0, 1, 2, 3]], tf.float32)
cls_losses = tf.constant([[0, 10, 50, 110],
[9, 6, 3, 0]], tf.float32)
box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9],
[0.9, 0.9, 0.99, 0.99],
[0.1, 0.1, 0.9, 0.9],
[0.1, 0.1, 0.9, 0.9]], tf.float32)
decoded_boxlist_list = []
decoded_boxlist_list.append(box_list.BoxList(box_corners))
decoded_boxlist_list.append(box_list.BoxList(box_corners))
loss_op = losses.HardExampleMiner(num_hard_examples=2,
iou_threshold=0.5,
loss_type='cls',
cls_loss_weight=1,
loc_loss_weight=1)
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute(graph_fn, [])
exp_loc_loss = 0 + 90 + 0 + 1
exp_cls_loss = 110 + 10 + 9 + 6
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
def testEnforceNegativesPerPositiveRatio(self):
location_losses = np.array([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], np.float32)
cls_losses = np.array([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], np.float32)
box_corners = np.array([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], np.float32)
match_results = np.array([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3],
np.int32)
max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10]
exp_loc_loss_list = [80 + 2,
80 + 1 + 2,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [100 + 70,
100 + 90 + 70,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
# pylint: disable=cell-var-from-loop
for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip(
max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list):
def graph_fn():
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=max_negatives_per_positive)
match_list = [matcher.Match(tf.constant(match_results))]
decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))]
(loc_loss, cls_loss) = loss_op(tf.constant(location_losses),
tf.constant(cls_losses),
decoded_boxlist_list, match_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
# pylint: enable=cell-var-from-loop
def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self):
location_losses = np.array([[100, 90, 80, 0, 1, 2,
3, 10, 20, 100, 20, 3]], np.float32)
cls_losses = np.array([[0, 0, 100, 0, 90, 70,
0, 60, 0, 17, 13, 0]], np.float32)
box_corners = np.array([[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.5, 0.1],
[0.0, 0.0, 0.6, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 0.8, 0.1],
[0.0, 0.0, 0.2, 0.1],
[0.0, 0.0, 1.0, 0.1],
[0.0, 0.0, 1.1, 0.1],
[0.0, 0.0, 0.2, 0.1]], np.float32)
match_results = np.array([-1] * 12, np.int32)
min_negatives_per_image_list = [0, 1, 2, 4, 5, 6]
exp_loc_loss_list = [0,
80,
80 + 1,
80 + 1 + 2 + 10,
80 + 1 + 2 + 10 + 100,
80 + 1 + 2 + 10 + 100 + 20]
exp_cls_loss_list = [0,
100,
100 + 90,
100 + 90 + 70 + 60,
100 + 90 + 70 + 60 + 17,
100 + 90 + 70 + 60 + 17 + 13]
# pylint: disable=cell-var-from-loop
for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip(
min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list):
def graph_fn():
loss_op = losses.HardExampleMiner(
num_hard_examples=None, iou_threshold=0.9999, loss_type='cls',
cls_loss_weight=1, loc_loss_weight=1,
max_negatives_per_positive=3,
min_negatives_per_image=min_negatives_per_image)
match_list = [matcher.Match(tf.constant(match_results))]
decoded_boxlist_list = [box_list.BoxList(tf.constant(box_corners))]
(loc_loss, cls_loss) = loss_op(location_losses, cls_losses,
decoded_boxlist_list, match_list)
return loc_loss, cls_loss
loc_loss_output, cls_loss_output = self.execute_cpu(graph_fn, [])
self.assertAllClose(loc_loss_output, exp_loc_loss)
self.assertAllClose(cls_loss_output, exp_cls_loss)
# pylint: enable=cell-var-from-loop
LOG_2 = np.log(2)
LOG_3 = np.log(3)
class PenaltyReducedLogisticFocalLossTest(test_case.TestCase):
"""Testing loss function from Equation (1) in [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def setUp(self):
super(PenaltyReducedLogisticFocalLossTest, self).setUp()
self._prediction = np.array([
# First batch
[[1 / 2, 1 / 4, 3 / 4],
[3 / 4, 1 / 3, 1 / 3]],
# Second Batch
[[0.0, 1.0, 1 / 2],
[3 / 4, 2 / 3, 1 / 3]]], np.float32)
self._prediction = np.log(self._prediction/(1 - self._prediction))
self._target = np.array([
# First batch
[[1.0, 0.91, 1.0],
[0.36, 0.84, 1.0]],
# Second Batch
[[0.01, 1.0, 0.75],
[0.96, 1.0, 1.0]]], np.float32)
def test_returns_correct_loss(self):
def graph_fn(prediction, target):
weights = tf.constant([
[[1.0], [1.0]],
[[1.0], [1.0]],
])
loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5)
computed_value = loss._compute_loss(prediction, target,
weights)
return computed_value
computed_value = self.execute(graph_fn, [self._prediction, self._target])
expected_value = np.array([
# First batch
[[1 / 4 * LOG_2,
0.3 * 0.0625 * (2 * LOG_2 - LOG_3),
1 / 16 * (2 * LOG_2 - LOG_3)],
[0.8 * 9 / 16 * 2 * LOG_2,
0.4 * 1 / 9 * (LOG_3 - LOG_2),
4 / 9 * LOG_3]],
# Second Batch
[[0.0,
0.0,
1 / 2 * 1 / 4 * LOG_2],
[0.2 * 9 / 16 * 2 * LOG_2,
1 / 9 * (LOG_3 - LOG_2),
4 / 9 * LOG_3]]])
self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3)
def test_returns_correct_loss_weighted(self):
def graph_fn(prediction, target):
weights = tf.constant([
[[1.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
[[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]],
])
loss = losses.PenaltyReducedLogisticFocalLoss(alpha=2.0, beta=0.5)
computed_value = loss._compute_loss(prediction, target,
weights)
return computed_value
computed_value = self.execute(graph_fn, [self._prediction, self._target])
expected_value = np.array([
# First batch
[[1 / 4 * LOG_2,
0.0,
1 / 16 * (2 * LOG_2 - LOG_3)],
[0.0,
0.0,
4 / 9 * LOG_3]],
# Second Batch
[[0.0,
0.0,
1 / 2 * 1 / 4 * LOG_2],
[0.0,
0.0,
0.0]]])
self.assertAllClose(computed_value, expected_value, rtol=1e-3, atol=1e-3)
class L1LocalizationLossTest(test_case.TestCase):
def test_returns_correct_loss(self):
def graph_fn():
loss = losses.L1LocalizationLoss()
pred = [[0.1, 0.2], [0.7, 0.5]]
target = [[0.9, 1.0], [0.1, 0.4]]
weights = [[1.0, 0.0], [1.0, 1.0]]
return loss._compute_loss(pred, target, weights)
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, [[0.8, 0.0], [0.6, 0.1]], rtol=1e-6)
class WeightedDiceClassificationLoss(test_case.TestCase):
def test_compute_weights_1(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
return loss._compute_loss(pred, target, weights)
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 0] = 2 * 0.9 / 2.5
dice_coeff[0, 2] = 2 * 0.5 / 2.5
dice_coeff[0, 3] = 2 * 0.1 / 2.1
dice_coeff[1, 3] = 2 * 0.2 / 2.2
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
def test_compute_weights_set(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
weights[:, :, 0] = 0.0
return loss._compute_loss(pred, target, weights)
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 2] = 2 * 0.5 / 2.5
dice_coeff[0, 3] = 2 * 0.1 / 2.1
dice_coeff[1, 3] = 2 * 0.2 / 2.2
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
def test_class_indices(self):
def graph_fn():
loss = losses.WeightedDiceClassificationLoss(squared_normalization=False)
pred = np.zeros((2, 3, 4), dtype=np.float32)
target = np.zeros((2, 3, 4), dtype=np.float32)
pred[0, 1, 0] = _logit(0.9)
pred[0, 2, 0] = _logit(0.1)
pred[0, 2, 2] = _logit(0.5)
pred[0, 1, 3] = _logit(0.1)
pred[1, 2, 3] = _logit(0.2)
pred[1, 1, 1] = _logit(0.3)
pred[1, 0, 2] = _logit(0.1)
target[0, 1, 0] = 1.0
target[0, 2, 2] = 1.0
target[0, 1, 3] = 1.0
target[1, 2, 3] = 1.0
target[1, 1, 1] = 0.0
target[1, 0, 2] = 0.0
weights = np.ones_like(target)
return loss._compute_loss(pred, target, weights, class_indices=[0])
dice_coeff = np.zeros((2, 4))
dice_coeff[0, 0] = 2 * 0.9 / 2.5
computed_value = self.execute(graph_fn, [])
self.assertAllClose(computed_value, 1 - dice_coeff, rtol=1e-6)
if __name__ == '__main__':
tf.test.main()
| 44.616816
| 80
| 0.411606
|
fbf3ed2e3ce223cd64db89189a866251d81dd38a
| 9,453
|
py
|
Python
|
src/tools/gyp/pylib/gyp/MSVSUtil.py
|
zzilla/gbreakpad
|
02fd5a078bda4eb2fd7ee881c8d301bea2bf87fe
|
[
"BSD-3-Clause"
] | 33
|
2015-11-20T23:33:50.000Z
|
2022-02-25T21:28:41.000Z
|
deps/gyp/pylib/gyp/MSVSUtil.py
|
free1978/mapbox-gl-native
|
2a50fccd24e762d0de5a53bac358e5ddfea8d213
|
[
"BSD-2-Clause"
] | 5
|
2016-09-28T11:37:41.000Z
|
2022-02-05T11:08:44.000Z
|
deps/gyp/pylib/gyp/MSVSUtil.py
|
free1978/mapbox-gl-native
|
2a50fccd24e762d0de5a53bac358e5ddfea8d213
|
[
"BSD-2-Clause"
] | 11
|
2020-07-04T03:03:18.000Z
|
2022-03-17T10:19:19.000Z
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
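# A sketch of the transformation (hypothetical shard count and sources): a
# target 'foo#target' with 'msvs_shard': 2 and sources ['a.cc', 'b.cc', 'c.cc',
# 'd.cc'] is replaced by 'foo_0#target' (sources 'a.cc', 'c.cc') and
# 'foo_1#target' (sources 'b.cc', 'd.cc'), and every dependency on the original
# target is rewritten to depend on both shards.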
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
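# For example (a hypothetical target): with no explicit ProgramDatabaseFile and
# no 'msvs_large_pdb_path' variable, an 'executable' target with product_name
# 'foo' resolves to '<(PRODUCT_DIR)/foo.exe.pdb'.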
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
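# Net effect for a shimmed target 'foo' (names follow the suffixes used above):
# a 'foo_large_pdb_copy' target of type 'none' copies large-pdb-shim.cc into its
# own SHARED_INTERMEDIATE_DIR subdirectory, a 'foo_large_pdb_shim'
# static_library compiles that copy with DebugInformationFormat=3 and the final
# PDB path, and the original target gains a dependency on the shim plus explicit
# GenerateDebugInformation/ProgramDatabaseFile linker settings, so the PDB is
# created up front with the larger page size.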
| 35.141264
| 80
| 0.698614
|
7a4b5f251565c7ea9d8fb1510a8a35adf9ad8604
| 2,233
|
py
|
Python
|
udp_handshake.py
|
Gavitron/pipulator
|
ca6e134451bc1d1f073aa965df4fa4877bf43311
|
[
"BSD-3-Clause"
] | 4
|
2015-11-28T17:45:56.000Z
|
2020-05-09T10:48:40.000Z
|
udp_handshake.py
|
Gavitron/pipulator
|
ca6e134451bc1d1f073aa965df4fa4877bf43311
|
[
"BSD-3-Clause"
] | null | null | null |
udp_handshake.py
|
Gavitron/pipulator
|
ca6e134451bc1d1f073aa965df4fa4877bf43311
|
[
"BSD-3-Clause"
] | null | null | null |
# udp listener
#
# Run this in the background, and you will appear to the pipboy app as an instance of Fallout4
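#
# The exchange this script implements is one JSON datagram each way on UDP port
# 28000 (a sketch reconstructed from the code below; the request may carry more
# fields, only 'cmd' is inspected):
#   app -> pc : {"cmd": "autodiscover", ...}
#   pc  -> app: {"IsBusy": false, "MachineType": "PC"}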
import socket
import struct
import sys
import time
import json
# minimum debounce interval, in milliseconds
min_delta = 100
# return the current time in milliseconds
def now():
    return time.time() * 1000
# return milliseconds elapsed since 'then'
def dif(then):
    return now() - then
# return True if 'last_seen' is older than the minimum debounce interval
def stale(last_seen):
    return (dif(last_seen) > min_delta)
# set some default globals
multicast_group = '224.3.29.71'
listen_address = ('', 28000)
ttl = struct.pack('b', 127) # Set the time-to-live for UDP messages. should be 1.
# here we go
# Create the socket
hand_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
hand_sock.bind(listen_address)
hand_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
hand_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Receive/respond loop
isRunning=True
last_seen = {}
print >>sys.stderr, '\nHANDSHAKE READY...'
while isRunning:
raw_data, address = hand_sock.recvfrom(1024)
nodeID = ':'.join(map(str,address))
    print >>sys.stderr, 'HANDSHAKE received %d bytes from: %s' % (len(raw_data), nodeID)
if not last_seen.get(nodeID):
print >>sys.stderr, 'HANDSHAKE new tuple: %s' % nodeID
last_seen[nodeID] = 0
if stale(last_seen[nodeID]):
print >>sys.stderr, 'HANDSHAKE old timestamp: %d diff: %d stale: %s' % (last_seen[nodeID],dif(last_seen[nodeID]),stale(last_seen[nodeID]))
udp_msg = json.loads(raw_data)
if udp_msg['cmd'] == 'autodiscover':
print >>sys.stderr, 'HANDSHAKE acknowledging discovery request from %s' % nodeID
reply = {}
reply['IsBusy'] = False
reply['MachineType'] = "PC"
hand_sock.sendto(json.dumps(reply), address)
else:
print >>sys.stderr, 'HANDSHAKE unrecognized request from %s\nHANDSHAKE content: %s' % (nodeID, udp_msg)
last_seen[nodeID] = now()
else:
print >>sys.stderr, 'HANDSHAKE ignoring duplicate request from %s' % nodeID
| 29.773333
| 145
| 0.727273
|
96f0dab6169d9969725df9c060061c10bfc2e64b
| 2,156
|
py
|
Python
|
zthumbor/loaders/zloader.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
zthumbor/loaders/zloader.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-06-06T00:51:42.000Z
|
2022-02-10T21:38:40.000Z
|
zthumbor/loaders/zloader.py
|
yakkl/yakkl
|
89ecf4ee8998554a0634667067e16f428e4c480c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# See https://yakkl.readthedocs.io/en/latest/subsystems/thumbnailing.html
from __future__ import absolute_import
from six.moves import urllib
from tornado.concurrent import return_future
from thumbor.loaders import LoaderResult, file_loader, https_loader
from tc_aws.loaders import s3_loader
from thumbor.context import Context
from .helpers import (
separate_url_and_source_type,
THUMBOR_S3_TYPE, THUMBOR_LOCAL_FILE_TYPE, THUMBOR_EXTERNAL_TYPE
)
from typing import Any, Callable
import base64
import logging
def get_not_found_result():
# type: () -> LoaderResult
result = LoaderResult()
result.error = LoaderResult.ERROR_NOT_FOUND
result.successful = False
return result
@return_future
def load(context, url, callback):
# type: (Context, str, Callable[..., Any]) -> None
source_type, encoded_url = separate_url_and_source_type(url)
actual_url = base64.urlsafe_b64decode(urllib.parse.unquote(encoded_url))
if source_type not in (THUMBOR_S3_TYPE, THUMBOR_LOCAL_FILE_TYPE,
THUMBOR_EXTERNAL_TYPE):
callback(get_not_found_result())
logging.warning('INVALID SOURCE TYPE: ' + source_type)
return
if source_type == THUMBOR_S3_TYPE:
if actual_url.startswith('/user_uploads/'): # type: ignore # python 2 type differs from python 3 type
actual_url = actual_url[len('/user_uploads/'):]
else:
raise AssertionError("Unexpected s3 file.")
s3_loader.load(context, actual_url, callback)
elif source_type == THUMBOR_LOCAL_FILE_TYPE:
if actual_url.startswith('/user_uploads/'): # type: ignore # python 2 type differs from python 3 type
actual_url = actual_url[len('/user_uploads/'):]
local_file_path_prefix = 'files/'
else:
raise AssertionError("Unexpected local file.")
patched_local_url = local_file_path_prefix + actual_url # type: ignore # python 2 type differs from python 3 type
file_loader.load(context, patched_local_url, callback)
elif source_type == THUMBOR_EXTERNAL_TYPE:
https_loader.load(context, actual_url, callback)
| 39.2
| 122
| 0.719852
|
f1b7794396071f0e63502282f9d184a2955716c9
| 2,416
|
py
|
Python
|
002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/proesch/nurbsCurve/nurbsCircle.py
|
lhl/vrdev
|
fc1a9af2b51d159c99c8779349ef3392a70ed9ed
|
[
"Apache-2.0"
] | 12
|
2015-12-02T02:36:36.000Z
|
2020-09-20T17:14:24.000Z
|
002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/proesch/nurbsCurve/nurbsCircle.py
|
lhl/vrdev
|
fc1a9af2b51d159c99c8779349ef3392a70ed9ed
|
[
"Apache-2.0"
] | null | null | null |
002-pyopengl/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/proesch/nurbsCurve/nurbsCircle.py
|
lhl/vrdev
|
fc1a9af2b51d159c99c8779349ef3392a70ed9ed
|
[
"Apache-2.0"
] | 8
|
2016-11-02T11:17:04.000Z
|
2021-10-21T07:42:19.000Z
|
#!/usr/bin/python2.4
# Plot a circle using NURBS
#
# Copyright (C) 2007 "Peter Roesch" <Peter.Roesch@fh-augsburg.de>
#
# This code is licensed under the PyOpenGL License.
# Details are given in the file license.txt included in this distribution.
import sys
import math
from time import sleep
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
	print ''' Error: PyOpenGL is not installed !!'''
	sys.exit( )
animationAngle = 0.0
frameRate = 25
animationTime = 0
def animationStep( ):
"""Update animated parameters"""
global animationAngle
global frameRate
animationAngle += 0.3
while animationAngle > 360:
animationAngle -= 360
sleep( 1 / float( frameRate ) )
glutPostRedisplay( )
degree=3
s2=math.sqrt(2)/2.0
# Initialise circle control points.
circlePoints = [\
[0.0, 1.0, 0.0, 1.0],\
[s2, s2, 0.0, s2],\
[1.0, 0.0, 0.0, 1.0],\
[s2, -s2, 0.0, s2],\
[0.0, -1.0, 0.0, 1.0],\
[-s2, -s2, 0.0, s2],\
[-1.0, 0.0, 0.0, 1.0],\
[-s2, s2, 0.0, s2],\
]
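# Note: each control point above is stored in premultiplied homogeneous form
# (x*w, y*w, z*w, w): the on-axis points are (0,+/-1,0) and (+/-1,0,0) with
# weight 1, while the diagonal entries encode the corners (+/-1,+/-1,0) with
# weight sqrt(2)/2, the standard weight for representing a 90-degree circular
# arc with a rational quadratic segment.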
# make sure circle is closed properly
circlePoints = circlePoints + [circlePoints[0], circlePoints[1]]
# initialise circle knots
circleKnots = [ 0.0 ] + \
[ float(i/2) for i in range( len( circlePoints ) + degree -1 )]
def display( ):
glClear( GL_COLOR_BUFFER_BIT )
glMatrixMode( GL_PROJECTION )
glLoadIdentity( )
xSize, ySize = glutGet( GLUT_WINDOW_WIDTH ), glutGet( GLUT_WINDOW_HEIGHT )
gluPerspective(60, float(xSize) / float(ySize), 0.1, 50)
glMatrixMode( GL_MODELVIEW )
glLoadIdentity( )
glTranslatef( 0, 0, -2 )
glRotatef( animationAngle, 0, 0, 1 )
global circlePoints, circleKnots
glColor3f(0, 1, 0)
glBegin(GL_LINE_STRIP)
for coord in circlePoints:
glVertex3f(coord[0], coord[1], coord[2]);
glEnd()
global nurb
glColor3f(1, 1, 1)
gluBeginCurve( nurb )
gluNurbsCurve ( nurb, circleKnots, circlePoints, GL_MAP1_VERTEX_4 )
gluEndCurve( nurb )
glutSwapBuffers( )
nurb=None
samplingTolerance=1.0
def init( ):
"""Glut init function."""
glClearColor ( 0, 0, 0, 0 )
global nurb
nurb = gluNewNurbsRenderer()
global samplingTolerance
glLineWidth(2.0)
gluNurbsProperty(nurb, GLU_SAMPLING_TOLERANCE, samplingTolerance)
glutInit( sys.argv )
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB )
glutInitWindowSize( 250, 250 )
glutInitWindowPosition( 100, 100 )
glutCreateWindow( sys.argv[0] )
init( )
glutDisplayFunc( display )
glutIdleFunc( animationStep )
glutMainLoop( )
| 24.16
| 75
| 0.701987
|
2b7b80ca5e3810a1867fef0c2245644acda0a538
| 37
|
py
|
Python
|
venv/lib/python3.6/encodings/cp874.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-04-17T13:35:50.000Z
|
2021-12-21T00:11:36.000Z
|
venv/lib/python3.6/encodings/cp874.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2021-03-31T19:51:24.000Z
|
2021-06-10T23:05:09.000Z
|
venv/lib/python3.6/encodings/cp874.py
|
JamesMusyoka/Blog
|
fdcb51cf4541bbb3b9b3e7a1c3735a0b1f45f0b5
|
[
"Unlicense"
] | 2
|
2019-10-01T08:47:35.000Z
|
2020-07-11T06:32:16.000Z
|
/usr/lib/python3.6/encodings/cp874.py
| 37
| 37
| 0.810811
|
ce2e156e20ad5f91ebd14f6139de4bbaf7a7606a
| 11,289
|
py
|
Python
|
grr/gui/api_call_handler_utils_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 2
|
2019-06-02T13:11:16.000Z
|
2019-06-25T13:30:46.000Z
|
grr/gui/api_call_handler_utils_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/gui/api_call_handler_utils_test.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Contains tests for api_call_handler_utils."""
import hashlib
import os
import tarfile
import zipfile
import yaml
from grr.gui import api_call_handler_utils
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.aff4_objects import collects
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import paths as rdf_paths
class CollectionArchiveGeneratorTest(test_lib.GRRBaseTest):
"""Test for CollectionArchiveGenerator."""
def setUp(self):
super(CollectionArchiveGeneratorTest, self).setUp()
self.client_id = rdf_client.ClientURN("aff4:/C.0000000000000000")
path1 = self.client_id.Add("fs/os/foo/bar/hello1.txt")
with aff4.FACTORY.Create(
path1, aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write("hello1")
fd.Set(fd.Schema.HASH,
rdf_crypto.Hash(sha256=hashlib.sha256("hello1").digest()))
path2 = self.client_id.Add(u"fs/os/foo/bar/中国新闻网新闻中.txt")
with aff4.FACTORY.Create(
path2, aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write("hello2")
fd.Set(fd.Schema.HASH,
rdf_crypto.Hash(sha256=hashlib.sha256("hello2").digest()))
self.stat_entries = []
self.paths = [path1, path2]
for path in self.paths:
self.stat_entries.append(
rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="foo/bar/" + str(path).split("/")[-1],
pathtype=rdf_paths.PathSpec.PathType.OS)))
self.fd = None
def _GenerateArchive(
self,
collection,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.ZIP,
predicate=None):
self.fd_path = os.path.join(self.temp_dir, "archive")
archive_generator = api_call_handler_utils.CollectionArchiveGenerator(
archive_format=archive_format,
predicate=predicate,
prefix="test_prefix",
description="Test description",
client_id=self.client_id)
with open(self.fd_path, "wb") as out_fd:
for chunk in archive_generator.Generate(collection, token=self.token):
out_fd.write(chunk)
self.fd = open(self.fd_path, "rb")
return self.fd, self.fd_path
def tearDown(self):
if self.fd:
self.fd.close()
super(CollectionArchiveGeneratorTest, self).tearDown()
def testSkipsFilesWithoutHashWhenZipArchiving(self):
for path in self.paths:
with aff4.FACTORY.Open(path, mode="rw", token=self.token) as fd:
fd.DeleteAttribute(fd.Schema.HASH)
_, fd_path = self._GenerateArchive(
self.stat_entries,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.ZIP)
with zipfile.ZipFile(fd_path) as zip_fd:
names = zip_fd.namelist()
# Check that nothing was written except for the MANIFEST file.
self.assertEqual(len(names), 1)
self.assertEqual(names[0], "test_prefix/MANIFEST")
def testSkipsFilesWithoutHashWhenTarArchiving(self):
for path in self.paths:
with aff4.FACTORY.Open(path, mode="rw", token=self.token) as fd:
fd.DeleteAttribute(fd.Schema.HASH)
_, fd_path = self._GenerateArchive(
self.stat_entries,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.TAR_GZ)
with tarfile.open(fd_path) as tar_fd:
infos = list(tar_fd)
# Check that nothing was written except for the MANIFEST file.
self.assertEqual(len(infos), 1)
self.assertEqual(infos[0].name, "test_prefix/MANIFEST")
def testCreatesZipContainingDeduplicatedCollectionFilesAndManifest(self):
_, fd_path = self._GenerateArchive(
self.stat_entries,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.ZIP)
zip_fd = zipfile.ZipFile(fd_path)
names = sorted(zip_fd.namelist())
link1_name = "test_prefix/C.0000000000000000/fs/os/foo/bar/hello1.txt"
link2_name = ("test_prefix/C.0000000000000000/fs/os/foo/bar/"
"中国新闻网新闻中.txt")
link1_dest = ("test_prefix/hashes/91e9240f415223982edc345532630710"
"e94a7f52cd5f48f5ee1afc555078f0ab")
link2_dest = ("test_prefix/hashes/87298cc2f31fba73181ea2a9e6ef10dc"
"e21ed95e98bdac9c4e1504ea16f486e4")
manifest_name = "test_prefix/MANIFEST"
self.assertEqual(
names,
sorted([link1_name, link2_name, link1_dest, link2_dest, manifest_name]))
link_info = zip_fd.getinfo(link1_name)
self.assertEqual(link_info.external_attr, (0644 | 0120000) << 16)
self.assertEqual(link_info.create_system, 3)
link_contents = zip_fd.read(link1_name)
self.assertEqual(link_contents, "../../../../../../" + link1_dest)
dest_contents = zip_fd.read(link1_dest)
self.assertEqual(dest_contents, "hello1")
link_info = zip_fd.getinfo(link2_name)
self.assertEqual(link_info.external_attr, (0644 | 0120000) << 16)
self.assertEqual(link_info.create_system, 3)
link_contents = zip_fd.read(link2_name)
self.assertEqual(link_contents, "../../../../../../" + link2_dest)
dest_contents = zip_fd.read(link2_dest)
self.assertEqual(dest_contents, "hello2")
manifest = yaml.safe_load(zip_fd.read(manifest_name))
self.assertEqual(manifest, {
"description": "Test description",
"processed_files": 2,
"archived_files": 2,
"ignored_files": 0,
"failed_files": 0
})
def testCreatesTarContainingDeduplicatedCollectionFilesAndReadme(self):
_, fd_path = self._GenerateArchive(
self.stat_entries,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.TAR_GZ)
with tarfile.open(fd_path) as tar_fd:
link1_name = "test_prefix/C.0000000000000000/fs/os/foo/bar/hello1.txt"
link2_name = ("test_prefix/C.0000000000000000/fs/os/foo/bar/"
"中国新闻网新闻中.txt")
link1_dest = ("test_prefix/hashes/91e9240f415223982edc345532630710"
"e94a7f52cd5f48f5ee1afc555078f0ab")
link2_dest = ("test_prefix/hashes/87298cc2f31fba73181ea2a9e6ef10dc"
"e21ed95e98bdac9c4e1504ea16f486e4")
link_info = tar_fd.getmember(link1_name)
self.assertEqual(link_info.linkname, "../../../../../../" + link1_dest)
self.assertEqual(tar_fd.extractfile(link1_dest).read(), "hello1")
link_info = tar_fd.getmember(link2_name)
self.assertEqual(link_info.linkname, "../../../../../../" + link2_dest)
self.assertEqual(tar_fd.extractfile(link2_dest).read(), "hello2")
manifest_fd = tar_fd.extractfile("test_prefix/MANIFEST")
self.assertEqual(
yaml.safe_load(manifest_fd.read()), {
"description": "Test description",
"processed_files": 2,
"archived_files": 2,
"ignored_files": 0,
"failed_files": 0
})
def testCorrectlyAccountsForFailedFiles(self):
path2 = u"aff4:/C.0000000000000000/fs/os/foo/bar/中国新闻网新闻中.txt"
with aff4.FACTORY.Create(path2, aff4.AFF4Image, token=self.token) as fd:
fd.Write("hello2")
# Delete a single chunk
aff4.FACTORY.Delete(
"aff4:/C.0000000000000000/fs/os/foo/bar/中国新闻网新闻中.txt"
"/0000000000",
token=self.token)
_, fd_path = self._GenerateArchive(
self.stat_entries,
archive_format=api_call_handler_utils.CollectionArchiveGenerator.ZIP)
zip_fd = zipfile.ZipFile(fd_path)
names = sorted(zip_fd.namelist())
link1_name = "test_prefix/C.0000000000000000/fs/os/foo/bar/hello1.txt"
link2_name = ("test_prefix/C.0000000000000000/fs/os/foo/bar/"
"中国新闻网新闻中.txt")
link1_dest = ("test_prefix/hashes/91e9240f415223982edc345532630710"
"e94a7f52cd5f48f5ee1afc555078f0ab")
manifest_name = "test_prefix/MANIFEST"
# Link 2 should be present, but the contents should be missing.
self.assertEqual(
names, sorted([link1_name, link1_dest, link2_name, manifest_name]))
link_info = zip_fd.getinfo(link1_name)
self.assertEqual(link_info.external_attr, (0644 | 0120000) << 16)
self.assertEqual(link_info.create_system, 3)
link_contents = zip_fd.read(link1_name)
self.assertEqual(link_contents, "../../../../../../" + link1_dest)
dest_contents = zip_fd.read(link1_dest)
self.assertEqual(dest_contents, "hello1")
manifest = yaml.safe_load(zip_fd.read(manifest_name))
self.assertEqual(manifest, {
"description":
"Test description",
"processed_files":
2,
"archived_files":
1,
"ignored_files":
0,
"failed_files":
1,
"failed_files_list":
[u"aff4:/C.0000000000000000/fs/os/foo/bar/中国新闻网新闻中.txt"]
})
def testIgnoresFilesNotMatchingPredicate(self):
_, fd_path = self._GenerateArchive(
self.stat_entries,
predicate=lambda fd: fd.urn.Basename().startswith("hello"),
archive_format=api_call_handler_utils.CollectionArchiveGenerator.ZIP)
zip_fd = zipfile.ZipFile(fd_path)
names = sorted(zip_fd.namelist())
# The archive is expected to contain 1 file contents blob, 1 link to this
# blob, and a manifest.
self.assertEqual(len(names), 3)
manifest = yaml.safe_load(zip_fd.read("test_prefix/MANIFEST"))
self.assertEqual(manifest, {
"description":
"Test description",
"processed_files":
2,
"archived_files":
1,
"ignored_files":
1,
"failed_files":
0,
"ignored_files_list":
[u"aff4:/C.0000000000000000/fs/os/foo/bar/中国新闻网新闻中.txt"]
})
class FilterCollectionTest(test_lib.GRRBaseTest):
"""Test for FilterCollection."""
def setUp(self):
super(FilterCollectionTest, self).setUp()
with aff4.FACTORY.Create(
"aff4:/tmp/foo/bar", collects.RDFValueCollection,
token=self.token) as fd:
for i in range(10):
fd.Add(rdf_paths.PathSpec(path="/var/os/tmp-%d" % i, pathtype="OS"))
self.fd = aff4.FACTORY.Open("aff4:/tmp/foo/bar", token=self.token)
def testFiltersByOffsetAndCount(self):
data = api_call_handler_utils.FilterCollection(self.fd, 2, 5, None)
self.assertEqual(len(data), 5)
self.assertEqual(data[0].path, "/var/os/tmp-2")
self.assertEqual(data[-1].path, "/var/os/tmp-6")
def testIngoresTooBigCount(self):
data = api_call_handler_utils.FilterCollection(self.fd, 0, 50, None)
self.assertEqual(len(data), 10)
self.assertEqual(data[0].path, "/var/os/tmp-0")
self.assertEqual(data[-1].path, "/var/os/tmp-9")
def testRaisesOnNegativeOffset(self):
with self.assertRaises(ValueError):
api_call_handler_utils.FilterCollection(self.fd, -10, 0, None)
def testRaisesOnNegativeCount(self):
with self.assertRaises(ValueError):
api_call_handler_utils.FilterCollection(self.fd, 0, -10, None)
def testFiltersByFilterString(self):
data = api_call_handler_utils.FilterCollection(self.fd, 0, 0, "tmp-8")
self.assertEqual(len(data), 1)
self.assertEqual(data[0].path, "/var/os/tmp-8")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 34.522936
| 80
| 0.679422
|
3444df11adbec00693842a0bafb1c5259d9769a3
| 12,459
|
py
|
Python
|
tofu/_plot.py
|
Louwrensth/tofu
|
df2841d24eaf223ae07d862ffaa33fdb2fc079d3
|
[
"MIT"
] | 1
|
2020-12-18T16:25:18.000Z
|
2020-12-18T16:25:18.000Z
|
tofu/_plot.py
|
Louwrensth/tofu
|
df2841d24eaf223ae07d862ffaa33fdb2fc079d3
|
[
"MIT"
] | 1
|
2020-12-18T16:35:08.000Z
|
2020-12-18T16:35:08.000Z
|
tofu/_plot.py
|
lasofivec/tofu
|
dbb9f433290e3058dfd04d67fbca157761b0a105
|
[
"MIT"
] | null | null | null |
""" Module providing a basic routine for plotting a shot overview """
# Common
import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# tofu
try:
from tofu.version import __version__
import tofu.utils as utils
except Exception:
from tofu.version import __version__
from .. import utils as utils
__all__ = ['plot_shotoverview']
_fs = (12,6)
__github = 'https://github.com/ToFuProject/tofu/issues'
_wintit = 'tofu-%s report issues / requests at %s'%(__version__, __github)
_dmargin = dict(left=0.04, right=0.99,
bottom=0.07, top=0.93,
wspace=0.25, hspace=0.12)
_fontsize = 8
_labelpad = 0
_dcol = {'Ip':'k', 'B':'b', 'Bt':'b',
'PLH1':(1.,0.,0.),'PLH2':(1.,0.5,0.),
'PIC1':'',
'Prad':(1.,0.,1.),
'q1rhot':(0.8,0.8,0.8),
'Ax':(0.,1.,0.)}
_lct = [plt.cm.tab20.colors[ii] for ii in [0,2,4,1,3,5]]
_ntMax = 3
def plot_shotoverview(db, ntMax=_ntMax, indt=0, config=None, inct=[1,5],
dcol=None, lct=_lct, fmt_t='06.3f',
fs=None, dmargin=None, tit=None, wintit=None,
fontsize=_fontsize, labelpad=_labelpad,
sharet=True, sharey=True, shareRZ=True,
connect=True, draw=True):
kh = _plot_shotoverview(db, ntMax=ntMax, indt=0, config=config, inct=inct,
dcol=dcol, lct=lct, fmt_t=fmt_t,
fs=fs, dmargin=dmargin, tit=tit, wintit=wintit,
fontsize=fontsize, labelpad=labelpad,
sharet=sharet, sharey=sharey, shareRZ=shareRZ,
connect=connect, draw=draw)
return kh
######################################################
# plot new
######################################################
def _plot_shotoverview_init(ns=1, sharet=True, sharey=True, shareRZ=True,
fontsize=_fontsize, fs=None,
wintit=None, dmargin=None):
    # Format inputs
if fs is None:
fs = _fs
elif type(fs) is str and fs.lower()=='a4':
fs = (11.7,8.3)
if wintit is None:
wintit = _wintit
if dmargin is None:
dmargin = _dmargin
# Make figure and axes
fig = plt.figure(figsize=fs)
if wintit is not None:
fig.canvas.set_window_title(wintit)
axarr = GridSpec(ns, 3, **dmargin)
laxt = [None for ii in range(0,ns)]
laxc = [None for ii in range(0,ns)]
for ii in range(0,ns):
if ii == 0:
laxt[ii] = fig.add_subplot(axarr[ii,:2])
laxc[ii] = fig.add_subplot(axarr[ii,2])
sht = laxt[0] if sharet else None
shy = laxt[0] if sharey else None
shRZ = laxc[0] if shareRZ else None
else:
laxt[ii] = fig.add_subplot(axarr[ii,:2], sharex=sht, sharey=shy)
laxc[ii] = fig.add_subplot(axarr[ii,2], sharex=shRZ, sharey=shRZ)
if not shareRZ:
                laxc[ii].set_aspect('equal', adjustable='datalim')
laxc[-1].set_xlabel(r'$R$ ($m$)')
laxt[-1].set_xlabel(r'$t$ ($s$)', fontsize=fontsize)
# datalim or box must be chosen for shared axis depending on matplotlib
# version => let matplotlib decide until support for matplotlib 2.X.X is
# stopped
laxc[0].set_aspect('equal')#, adjustable='box')
xtxt = laxc[0].get_position().bounds[0]
dx = laxc[0].get_position().bounds[2]
Ytxt, DY = np.sum(laxc[0].get_position().bounds[1::2]), 0.1
axtxtt = fig.add_axes([xtxt, Ytxt, dx, DY], fc='None')
# xtxt, Ytxt, dx, DY = 0.01, 0.98, 0.15, 0.02
# axtxtg = fig.add_axes([xtxt, Ytxt, dx, DY], fc='None')
# Dict
dax = {'t':laxt,
'cross':laxc,
'txtt':[axtxtt]}
#'txtg':[axtxtg] # not useful, one group only
# Formatting
for kk in dax.keys():
for ii in range(0,len(dax[kk])):
dax[kk][ii].tick_params(labelsize=fontsize)
if 'txt' in kk:
dax[kk][ii].patch.set_alpha(0.)
for ss in ['left','right','bottom','top']:
dax[kk][ii].spines[ss].set_visible(False)
dax[kk][ii].set_xticks([]), dax[kk][ii].set_yticks([])
dax[kk][ii].set_xlim(0,1), dax[kk][ii].set_ylim(0,1)
return dax
def _plot_shotoverview(db, ntMax=_ntMax, indt=0, config=None, inct=[1,5],
dcol=None, lct=_lct, fmt_t='06.3f',
fs=None, dmargin=None, tit=None, wintit=None,
fontsize=_fontsize, labelpad=_labelpad,
sharet=True, sharey=True, shareRZ=True,
connect=True, draw=True):
#########
# Prepare
#########
fldict = dict(fontsize=fontsize, labelpad=labelpad)
# Preformat
if dcol is None:
dcol = _dcol
ls = sorted(list(db.keys()))
ns = len(ls)
lcol = ['k','b','r','m']
# Find common time limits
tlim = np.vstack([np.vstack([(np.nanmin(vv['t']), np.nanmax(vv['t']))
if 't' in vv.keys() else (-np.inf,np.inf)
for vv in db[ss].values()])
for ss in ls])
tlim = (np.min(tlim),np.max(tlim))
# Find common (R,Z) lims if config=None
lEq = ['Ax','X','Sep','q1']
if config is None:
Anycross = False
Rmin, Rmax = np.full((ns,),np.inf), np.full((ns,),-np.inf)
Zmin, Zmax = np.full((ns,),np.inf), np.full((ns,),-np.inf)
for ii in range(0,ns):
for kk in set(db[ls[ii]].keys()).intersection(lEq):
if db[ls[ii]][kk]['data2D'].ndim == 2:
Rmin[ii] = min(Rmin[ii],np.nanmin(db[ls[ii]][kk]['data2D'][:,0]))
Rmax[ii] = max(Rmax[ii],np.nanmax(db[ls[ii]][kk]['data2D'][:,0]))
Zmin[ii] = min(Zmin[ii],np.nanmin(db[ls[ii]][kk]['data2D'][:,1]))
Zmax[ii] = max(Zmax[ii],np.nanmax(db[ls[ii]][kk]['data2D'][:,1]))
else:
Rmin[ii] = min(Rmin[ii],np.nanmin(db[ls[ii]][kk]['data2D'][:,0,:]))
Rmax[ii] = max(Rmax[ii],np.nanmax(db[ls[ii]][kk]['data2D'][:,0,:]))
Zmin[ii] = min(Zmin[ii],np.nanmin(db[ls[ii]][kk]['data2D'][:,1,:]))
Zmax[ii] = max(Zmax[ii],np.nanmax(db[ls[ii]][kk]['data2D'][:,1,:]))
Anycross = True
Rlim = (np.nanmin(Rmin),np.nanmax(Rmax))
Zlim = (np.nanmin(Zmin),np.nanmax(Zmax))
if Anycross is False:
Rlim = (1,3)
            Zlim = (-1, 1)
# time vectors and refs
lt = [None for ss in ls]
lidt = [0 for ss in ls]
for ii in range(0,ns):
for kk in set(db[ls[ii]].keys()).intersection(lEq):
lt[ii] = db[ls[ii]][kk]['t']
lidt[ii] = id(db[ls[ii]][kk]['t'])
break
else:
for kk in set(db[ls[ii]].keys()).difference(lEq):
lt[ii] = db[ls[ii]][kk]['t']
lidt[ii] = id(db[ls[ii]][kk]['t'])
break
else:
msg = "No reference time vector found for shot %s"%str(ls[ii])
warnings.warn(msg)
# dlextra id
for ii in range(0,ns):
for kk in set(db[ls[ii]].keys()).intersection(lEq):
db[ls[ii]][kk]['id'] = id(db[ls[ii]][kk]['data2D'])
##############
# Plot static
##############
dax = _plot_shotoverview_init(ns=ns, sharet=sharet, sharey=sharey,
shareRZ=shareRZ, fontsize=fontsize,
fs=fs, wintit=wintit, dmargin=dmargin)
fig = dax['t'][0].figure
if tit is None:
tit = r"overview of shots " + ', '.join(map('{0:05.0f}'.format,ls))
fig.suptitle(tit)
# Plot config and time traces
for ii in range(0,ns):
dd = db[ls[ii]]
# config
if config is not None:
dax['cross'][ii] = config.plot(proj='cross', lax=dax['cross'][ii],
element='P', dLeg=None, draw=False)
# time traces
for kk in set(dd.keys()).difference(lEq):
if 'c' in dd[kk].keys():
c = dd[kk]['c']
else:
c = dcol[kk]
lab = dd[kk]['label'] + ' (%s)'%dd[kk]['units']
dax['t'][ii].plot(dd[kk]['t'], dd[kk]['data'],
ls='-', lw=1., c=c, label=lab)
kk = 'Ax'
if kk in dd.keys():
if 'c' in dd[kk].keys():
c = dd[kk]['c']
else:
c = dcol[kk]
x = db[ls[ii]][kk]['data2D'][:,0]
y = db[ls[ii]][kk]['data2D'][:,1]
dax['t'][ii].plot(lt[ii], x,
lw=1., ls='-', label=r'$R_{Ax}$ (m)')
dax['t'][ii].plot(lt[ii], y,
lw=1., ls='-', label=r'$Z_{Ax}$ (m)')
dax['t'][0].axhline(0., ls='--', lw=1., c='k')
dax['t'][0].legend(bbox_to_anchor=(0.,1.01,1.,0.1), loc=3,
ncol=5, mode='expand', borderaxespad=0.,
prop={'size':fontsize})
dax['t'][0].set_xlim(tlim)
if config is None:
try: # DB
dax['cross'][0].set_xlim(Rlim)
dax['cross'][0].set_ylim(Zlim)
except Exception as err: # DB
print(Rlim, Zlim)
print(Rmin, Rmax)
print(Zmin, Zmax)
raise err
for ii in range(0,ns):
dax['t'][ii].set_ylabel('{0:05.0f} data'.format(ls[ii]), fontsize=fontsize)
dax['cross'][-1].set_ylabel(r'$Z$ ($m$)', fontsize=fontsize)
##################
# Interactivity dict
##################
dgroup = {'time': {'nMax':ntMax, 'key':'f1',
'defid':lidt[0], 'defax':dax['t'][0]}}
# Group info (make dynamic in later versions ?)
# msg = ' '.join(['%s: %s'%(v['key'],k) for k, v in dgroup.items()])
# l0 = dax['txtg'][0].text(0., 0., msg,
# color='k', fontweight='bold',
# fontsize=6., ha='left', va='center')
# dref
dref = dict([(lidt[ii], {'group':'time', 'val':lt[ii], 'inc':inct})
for ii in range(0,ns)])
# ddata
ddat = {}
for ii in range(0,ns):
for kk in set(db[ls[ii]].keys()).intersection(lEq):
ddat[db[ls[ii]][kk]['id']] = {'val':db[ls[ii]][kk]['data2D'],
'refids':[lidt[ii]]}
# dax
lax_fix = dax['cross'] + dax['txtt'] # + dax['txtg']
dax2 = dict([(dax['t'][ii], {'ref':{lidt[ii]:'x'}}) for ii in range(0,ns)])
dobj = {}
##################
# Populating dobj
# One-axes time txt
for jj in range(0,ntMax):
l0 = dax['txtt'][0].text((0.5+jj)/ntMax, 0., r'',
color='k', fontweight='bold',
fontsize=fontsize,
ha='left', va='bottom')
dobj[l0] = {'dupdate':{'txt':{'id':lidt[0], 'lrid':[lidt[0]],
'bstr':'{0:%s}'%fmt_t}},
'drefid':{lidt[0]:jj}}
# Time-dependent
nan2 = np.array([np.nan])
for ii in range(0,ns):
# time vlines
for jj in range(0,ntMax):
l0 = dax['t'][ii].axvline(np.nan,
c=lct[jj], ls='-', lw=1.)
dobj[l0] = {'dupdate':{'xdata':{'id':lidt[ii], 'lrid':[lidt[ii]]}},
'drefid':{lidt[ii]:jj}}
# Eq
for kk in set(db[ls[ii]].keys()).intersection(lEq):
id_ = db[ls[ii]][kk]['id']
for jj in range(0,ntMax):
l0, = dax['cross'][ii].plot(nan2, nan2,
ls='-', c=lct[jj], lw=1.)
dobj[l0] = {'dupdate':{'data':{'id':id_, 'lrid':[lidt[ii]]}},
'drefid':{lidt[ii]:jj}}
##################
# Instanciate KeyHandler
can = fig.canvas
can.draw()
kh = utils.KeyHandler_mpl(can=can,
dgroup=dgroup, dref=dref, ddata=ddat,
dobj=dobj, dax=dax2, lax_fix=lax_fix,
groupinit='time', follow=True)
if connect:
kh.disconnect_old()
kh.connect()
if draw:
fig.canvas.draw()
return kh
| 35.495726
| 87
| 0.468497
|
901ccdc53d7f3ba0b324d8b04476ecd40287ef62
| 2,604
|
py
|
Python
|
rdr_service/alembic/versions/eff24ee0e4da_add_fphh_to_ps.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
rdr_service/alembic/versions/eff24ee0e4da_add_fphh_to_ps.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/alembic/versions/eff24ee0e4da_add_fphh_to_ps.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
"""add_FPHH_to_ps
Revision ID: eff24ee0e4da
Revises: 4fa9a3846491, 88ea1bb98358
Create Date: 2021-10-20 12:55:54.689433
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'eff24ee0e4da'
down_revision = ('4fa9a3846491', '88ea1bb98358')
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('participant_summary', sa.Column('questionnaire_on_personal_and_family_health_history', rdr_service.model.utils.Enum(QuestionnaireStatus), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_personal_and_family_health_history_authored', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('participant_summary', sa.Column('questionnaire_on_personal_and_family_health_history_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('participant_summary', 'questionnaire_on_personal_and_family_health_history_time')
op.drop_column('participant_summary', 'questionnaire_on_personal_and_family_health_history_authored')
op.drop_column('participant_summary', 'questionnaire_on_personal_and_family_health_history')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 40.061538
| 172
| 0.796851
|
ebde26d442251ecf0523cd3625d1e9cc88cdab2f
| 10,500
|
py
|
Python
|
ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
ansible/roles/test/files/ptftests/py3/ip_in_ip_tunnel_test.py
|
congh-nvidia/sonic-mgmt
|
05094321ed58270ac06d1a0ef575a4ab9ea3ddd6
|
[
"Apache-2.0"
] | null | null | null |
'''
Description: This file contains the IPinIP test for dualtor testbed
Usage: Examples of how to start this script
/usr/bin/ptf --test-dir ptftests ip_in_ip_tunnel_test.IpinIPTunnelTest --platform-dir ptftests --qlen=2000 --platform remote -t hash_key_list=['src-port', 'dst-port', 'src-mac', 'dst-mac', 'src-ip'];server_ip='192.168.0.2';active_tor_ip='10.1.0.33';standby_tor_mac='d4:af:f7:4d:af:18';standby_tor_ip='10.1.0.32';ptf_portchannel_indices={u'PortChannel0001': [29], u'PortChannel0003': [33], u'PortChannel0002': [31], u'PortChannel0004': [35]} --relax --debug info --log-file /tmp/ip_in_ip_tunnel_test.2021-02-10-07:14:46.log --socket-recv-size 16384
'''
#---------------------------------------------------------------------
# Global imports
#---------------------------------------------------------------------
import logging
import random
from ipaddress import ip_address, IPv4Address
import ptf
from scapy.all import IP, IPv6, Ether
import ptf.packet as scapy
from ptf.base_tests import BaseTest
from ptf.mask import Mask
from ptf.testutils import *
# packet count for verifying traffic is forwarded via IPinIP tunnel
PACKET_NUM = 10000
# packet count for verifying traffic is not forwarded from standby tor to server directly
PACKET_NUM_FOR_NEGATIVE_CHECK = 100
DIFF = 0.25 # The valid range for balance check
SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255']
SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF']
TIMEOUT = 1
class IpinIPTunnelTest(BaseTest):
'''
@summary: Overview of functionality
    This script sends traffic to the standby ToR and captures traffic
    on all portchannel interfaces to check balance.
'''
def __init__(self):
'''
@summary: constructor
'''
BaseTest.__init__(self)
self.test_params = test_params_get()
self.logger = logging.getLogger("IPinIPTunnel")
def setUp(self):
self.server_ip = self.test_params['server_ip']
self.server_port = int(self.test_params['server_port'])
self.vlan_mac = self.test_params['vlan_mac']
self.standby_tor_mac = self.test_params['standby_tor_mac']
self.active_tor_ip = self.test_params['active_tor_ip']
self.standby_tor_ip = self.test_params['standby_tor_ip']
self.ptf_portchannel_indices = self.test_params['ptf_portchannel_indices']
self.indice_to_portchannel = {}
for port_channel, indices in self.ptf_portchannel_indices.items():
for indice in indices:
self.indice_to_portchannel[indice] = port_channel
self.hash_key_list = self.test_params['hash_key_list']
self.dataplane = ptf.dataplane_instance
self.is_ipv4 = isinstance(ip_address(self.server_ip), IPv4Address)
def runTest(self):
"""
Entrypoint of test script.
"""
self.send_and_verify_packets()
def random_ip(self, begin, end):
"""
Generate a random IP from given ip range
"""
length = int(ip_address(end)) - int(ip_address(begin))
return str(ip_address(begin) + random.randint(0, length))
def generate_packet_to_server(self, hash_key):
"""
        Generate a packet destined for the server. The field selected by hash_key is filled with a random value.
"""
base_src_mac = self.dataplane.get_mac(0, 0)
ip_dst = self.server_ip
sport = random.randint(1, 65535) if hash_key == 'src-port' else 1234
dport = random.randint(1, 65535) if hash_key == 'dst-port' else 80
src_mac = (base_src_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_src_mac
dst_mac = self.standby_tor_mac
vlan_id = random.randint(1, 4094) if hash_key == 'vlan-id' else 0
if self.is_ipv4:
ip_src = self.random_ip(SRC_IP_RANGE[0], SRC_IP_RANGE[1]) if hash_key == 'src-ip' else SRC_IP_RANGE[0]
pkt = simple_tcp_packet(
pktlen=128 if vlan_id == 0 else 132,
eth_dst=dst_mac,
eth_src=src_mac,
dl_vlan_enable=False if vlan_id == 0 else True,
vlan_vid=vlan_id,
vlan_pcp=0,
ip_src=ip_src,
ip_dst=ip_dst,
tcp_sport=sport,
tcp_dport=dport,
ip_ttl=64
)
return pkt
else:
ip_src = self.random_ip(*SRC_IPV6_RANGE) if hash_key == 'src-ip' else SRC_IPV6_RANGE[0]
pkt = simple_tcpv6_packet(
pktlen=128 if vlan_id == 0 else 132,
eth_dst=dst_mac,
eth_src=src_mac,
dl_vlan_enable=False if vlan_id == 0 else True,
vlan_vid=vlan_id,
vlan_pcp=0,
ipv6_src=ip_src,
ipv6_dst=ip_dst,
tcp_sport=sport,
tcp_dport=dport,
ipv6_hlim=64
)
return pkt
def generate_expected_packet(self, inner_packet):
"""
Generate ip_in_ip packet for verifying.
"""
if self.is_ipv4:
inner_packet.ttl = inner_packet.ttl - 1
exp_tunnel_pkt = simple_ipv4ip_packet(
eth_dst="aa:aa:aa:aa:aa:aa",
eth_src=self.standby_tor_mac,
ip_src=self.standby_tor_ip,
ip_dst=self.active_tor_ip,
inner_frame=inner_packet[scapy.IP]
)
inner_packet.ttl = 64
else:
inner_packet.hlim = inner_packet.hlim - 1
exp_tunnel_pkt = simple_ipv4ip_packet(
eth_dst="aa:aa:aa:aa:aa:aa",
eth_src=self.standby_tor_mac,
ip_src=self.standby_tor_ip,
ip_dst=self.active_tor_ip,
inner_frame=inner_packet[scapy.IPv6]
)
inner_packet.hlim = 64
exp_tunnel_pkt[scapy.TCP] = inner_packet[scapy.TCP]
exp_tunnel_pkt = Mask(exp_tunnel_pkt)
exp_tunnel_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
exp_tunnel_pkt.set_do_not_care_scapy(scapy.Ether, "src")
exp_tunnel_pkt.set_do_not_care_scapy(scapy.IP, "id") # since src and dst changed, ID would change too
exp_tunnel_pkt.set_do_not_care_scapy(scapy.IP, "ttl") # ttl in outer packet is set to 255
exp_tunnel_pkt.set_do_not_care_scapy(scapy.IP, "chksum") # checksum would differ as the IP header is not the same
return exp_tunnel_pkt
def generate_unexpected_packet(self, inner_pkt):
"""
Generate a packet that shouldn't be observed.
All packet should be forward via tunnel, so no packet should be observed on server port
"""
pkt = inner_pkt.copy()
pkt[Ether].src = self.vlan_mac
# TTL of packets from active tor to server is decreased by 1
if self.is_ipv4:
pkt[IP].ttl -= 1
else:
pkt[IPv6].hlim -= 1
unexpected_packet = Mask(pkt)
# Ignore dst mac
unexpected_packet.set_do_not_care_scapy(scapy.Ether, 'dst')
if self.is_ipv4:
# Ignore check sum
unexpected_packet.set_do_not_care_scapy(scapy.IP, "chksum")
#Ignore extra bytes
unexpected_packet.set_ignore_extra_bytes()
return unexpected_packet
def check_balance(self, pkt_distribution, hash_key):
portchannel_num = len(self.ptf_portchannel_indices)
expect_packet_num = PACKET_NUM / portchannel_num
pkt_num_lo = expect_packet_num * (1.0 - DIFF)
pkt_num_hi = expect_packet_num * (1.0 + DIFF)
self.logger.info("hash key = {}".format(hash_key))
self.logger.info("%-10s \t %10s \t %10s \t" % ("port(s)", "exp_cnt", "act_cnt"))
balance = True
for portchannel, count in pkt_distribution.items():
self.logger.info("%-10s \t %10s \t %10s \t" % (portchannel, str(expect_packet_num), str(count)))
if count < pkt_num_lo or count > pkt_num_hi:
balance = False
if not balance:
print("Check balance failed for {}".format(hash_key))
assert(balance)
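    # Worked example of the tolerance above (illustrative figures only): with
    # PACKET_NUM = 10000 spread over 4 portchannels, expect_packet_num is 2500,
    # so DIFF = 0.25 accepts per-portchannel counts in the range [1875, 3125].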
def send_and_verify_packets(self):
"""
Send packet from ptf (T1) to standby ToR, and verify
"""
        dst_ports = list(self.indice_to_portchannel.keys())
# Select the first ptf indice as src port
src_port = dst_ports[0]
# Step 1. verify no packet is received from standby_tor to server
for i in range(0, PACKET_NUM_FOR_NEGATIVE_CHECK):
inner_pkt = self.generate_packet_to_server('src-ip')
unexpected_packet = self.generate_unexpected_packet(inner_pkt)
self.dataplane.flush()
send_packet(self, src_port, inner_pkt)
verify_no_packet(test=self,
port_id=self.server_port,
pkt=unexpected_packet,
timeout=TIMEOUT)
# Step 2. verify packet is received from IPinIP tunnel and check balance
for hash_key in self.hash_key_list:
self.logger.info("Verifying traffic balance for hash key {}".format(hash_key))
pkt_distribution = {}
for i in range(0, PACKET_NUM):
inner_pkt = self.generate_packet_to_server(hash_key)
tunnel_pkt = self.generate_expected_packet(inner_pkt)
l3packet = inner_pkt.getlayer(IP) or inner_pkt.getlayer(IPv6)
self.logger.info("Sending packet dst_mac = {} src_mac = {} dst_ip = {} src_ip = {} from port {}" \
.format(inner_pkt[Ether].dst, inner_pkt[Ether].src, l3packet.dst, l3packet.src, src_port))
self.dataplane.flush()
send_packet(self, src_port, inner_pkt)
# Verify packet is received from IPinIP tunnel
idx, count = verify_packet_any_port(test=self,
pkt=tunnel_pkt,
ports=dst_ports,
device_number=0,
timeout=TIMEOUT)
pkt_distribution[self.indice_to_portchannel[dst_ports[idx]]] = pkt_distribution.get(self.indice_to_portchannel[dst_ports[idx]], 0) + 1
self.check_balance(pkt_distribution, hash_key)
| 44.871795
| 563
| 0.603333
|
3175e16cfde23cf8d114dfc9b298b9e0eda20d89
| 3,361
|
py
|
Python
|
tests/test_models.py
|
EDS-APHP/pylivy
|
0714e4e74e27c1a13b74228700bb3e800e9646fe
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
EDS-APHP/pylivy
|
0714e4e74e27c1a13b74228700bb3e800e9646fe
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
EDS-APHP/pylivy
|
0714e4e74e27c1a13b74228700bb3e800e9646fe
|
[
"MIT"
] | 1
|
2020-02-05T09:31:01.000Z
|
2020-02-05T09:31:01.000Z
|
import pytest
from livy.models import (
Version,
Session,
SessionKind,
SessionState,
Statement,
StatementState,
Output,
OutputStatus,
SparkRuntimeError,
)
@pytest.mark.parametrize(
"earlier, later",
[
("0.1.0", "0.2.0"),
("0.1.1", "0.2.0"),
("1.9.0", "2.0.0"),
("0.1.0", "0.1.1-withsuffix"),
("0.1.0-suffix", "0.1.1"),
],
)
def test_version_less_than(earlier, later):
assert Version(earlier) < Version(later)
@pytest.mark.parametrize(
"first, second",
[
("0.1.0", "0.1.0"),
("0.1.0", "0.1.0-withsuffix"),
("0.1.0-suffix", "0.1.0"),
],
)
def test_version_equals(first, second):
assert Version(first) == Version(second)
def test_session_from_json():
session_json = {
"id": 5,
"proxyUser": "user",
"kind": "pyspark",
"state": "idle",
}
expected = Session(5, "user", SessionKind.PYSPARK, SessionState.IDLE)
assert Session.from_json(session_json) == expected
def test_statement_from_json_no_output():
session_id = 5
statement_json = {"id": 10, "state": "running", "output": None}
expected = Statement(
session_id, statement_id=10, state=StatementState.RUNNING, output=None
)
assert Statement.from_json(session_id, statement_json) == expected
def test_statement_from_json_with_output(mocker):
mocker.patch.object(Output, "from_json")
session_id = 5
statement_json = {"id": 10, "state": "running", "output": "dummy output"}
expected = Statement(
session_id,
statement_id=10,
state=StatementState.RUNNING,
output=Output.from_json.return_value,
)
assert Statement.from_json(session_id, statement_json) == expected
Output.from_json.assert_called_once_with("dummy output")
def test_output_textdata_from_json():
output_json = {"status": "ok", "data": {"text/plain": "some output"}}
expected = Output(
OutputStatus.OK,
text="some output",
json=None,
ename=None,
evalue=None,
traceback=None,
)
assert Output.from_json(output_json) == expected
def test_output_jsondata_from_json():
output_json = {
"status": "ok",
"data": {"application/json": {"some": "data"}},
}
expected = Output(
OutputStatus.OK,
text=None,
json={"some": "data"},
ename=None,
evalue=None,
traceback=None,
)
assert Output.from_json(output_json) == expected
def test_output_error_from_json():
output_json = {
"status": "error",
"ename": "SomeException",
"evalue": "some error value",
"traceback": ["traceback line 1", "traceback line 2"],
}
expected = Output(
OutputStatus.ERROR,
text=None,
json=None,
ename="SomeException",
evalue="some error value",
traceback=["traceback line 1", "traceback line 2"],
)
assert Output.from_json(output_json) == expected
def test_output_raise_for_status():
ok_output = Output(OutputStatus.OK, None, None, None, None, None)
ok_output.raise_for_status()
error_output = Output(OutputStatus.ERROR, None, None, None, None, None)
with pytest.raises(SparkRuntimeError):
error_output.raise_for_status()
| 22.557047
| 78
| 0.60726
|
a9f4deea3b9f9fcd25abd402392c2541e0b77a51
| 9,857
|
py
|
Python
|
excelexporters/metertrend.py
|
823914102/myems-api
|
4684aa71266a4f7ec93f7a9ebc08cbccd2e9bca3
|
[
"MIT"
] | null | null | null |
excelexporters/metertrend.py
|
823914102/myems-api
|
4684aa71266a4f7ec93f7a9ebc08cbccd2e9bca3
|
[
"MIT"
] | null | null | null |
excelexporters/metertrend.py
|
823914102/myems-api
|
4684aa71266a4f7ec93f7a9ebc08cbccd2e9bca3
|
[
"MIT"
] | null | null | null |
import base64
import uuid
import os
from openpyxl.chart import (
PieChart,
BarChart,
Reference,
)
from openpyxl.styles import PatternFill, Border, Side, Alignment, Font
from openpyxl.drawing.image import Image
from openpyxl import Workbook
from openpyxl.chart.label import DataLabelList
####################################################################################################################
# PROCEDURES
# Step 1: Validate the report data
# Step 2: Generate excel file
# Step 3: Encode the excel file bytes to Base64
####################################################################################################################
def export(result,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
####################################################################################################################
# Step 1: Validate the report data
####################################################################################################################
if result is None:
return None
####################################################################################################################
# Step 2: Generate excel file from the report data
####################################################################################################################
filename = generate_excel(result,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
####################################################################################################################
# Step 3: Encode the excel file to Base64
####################################################################################################################
try:
with open(filename, 'rb') as binary_file:
binary_file_data = binary_file.read()
    except IOError:
        # Nothing to encode if the generated file could not be read.
        return None
# Base64 encode the bytes
base64_encoded_data = base64.b64encode(binary_file_data)
# get the Base64 encoded data using human-readable characters.
base64_message = base64_encoded_data.decode('utf-8')
# delete the file from server
try:
os.remove(filename)
    except OSError:
pass
return base64_message
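# Hedged usage sketch (not part of the original exporter): a caller receiving
# the Base64 string returned above could write it back out as a regular .xlsx
# file roughly like this.
def _save_base64_workbook_example(base64_message, path='example_output.xlsx'):
    with open(path, 'wb') as fh:
        fh.write(base64.b64decode(base64_message))
    return path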
def generate_excel(report,
name,
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type):
wb = Workbook()
ws = wb.active
# Row height
ws.row_dimensions[1].height = 118
for i in range(2, 6 + 1):
ws.row_dimensions[i].height = 30
ws.row_dimensions[7].height = 60
ws.row_dimensions[3].height = 50
# Col width
ws.column_dimensions['A'].width = 1.5
ws.column_dimensions['B'].width = 20.0
for i in range(ord('C'), ord('C')+16):
ws.column_dimensions[chr(i)].width = 15.0
# Font
name_font = Font(name='Constantia', size=15, bold=True)
title_font = Font(name='宋体', size=15, bold=True)
data_font = Font(name='Franklin Gothic Book', size=11)
table_fill = PatternFill(fill_type='solid', fgColor='1F497D')
f_border = Border(left=Side(border_style='medium', color='00000000'),
right=Side(border_style='medium', color='00000000'),
bottom=Side(border_style='medium', color='00000000'),
top=Side(border_style='medium', color='00000000')
)
b_border = Border(
bottom=Side(border_style='medium', color='00000000'),
)
b_c_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
c_c_alignment = Alignment(vertical='center',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
b_r_alignment = Alignment(vertical='bottom',
horizontal='right',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
c_r_alignment = Alignment(vertical='bottom',
horizontal='center',
text_rotation=0,
wrap_text=True,
shrink_to_fit=False,
indent=0)
# Img
img = Image("excelexporters/myems.png")
ws.add_image(img, 'B1')
# Title
ws['B3'].font = name_font
ws['B3'].alignment = b_r_alignment
ws['B3'] = 'Name:'
ws['C3'].border = b_border
ws['C3'].alignment = b_c_alignment
ws['C3'].font = name_font
ws['C3'] = name
ws['D3'].font = name_font
ws['D3'].alignment = b_r_alignment
ws['D3'] = 'Period:'
ws['E3'].border = b_border
ws['E3'].alignment = b_c_alignment
ws['E3'].font = name_font
ws['E3'] = period_type
ws['F3'].font = name_font
ws['F3'].alignment = b_r_alignment
ws['F3'] = 'Date:'
ws['G3'].border = b_border
ws['G3'].alignment = b_c_alignment
ws['G3'].font = name_font
ws['G3'] = reporting_start_datetime_local + "__" + reporting_end_datetime_local
ws.merge_cells("G3:H3")
if "reporting_period" not in report.keys() or \
"names" not in report['reporting_period'].keys() or len(report['reporting_period']['names']) == 0:
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
    ################################################
    # First: trend section
    # row 6: title
    # row 7: table header
    # rows 8+: table data
    ################################################
has_data_flag = True
reporting_period_data = report['reporting_period']
if "names" not in reporting_period_data.keys() or \
reporting_period_data['names'] is None or \
len(reporting_period_data['names']) == 0:
has_data_flag = False
if "timestamps" not in reporting_period_data.keys() or \
reporting_period_data['timestamps'] is None or \
len(reporting_period_data['timestamps']) == 0:
has_data_flag = False
if "values" not in reporting_period_data.keys() or \
reporting_period_data['values'] is None or \
len(reporting_period_data['values']) == 0:
has_data_flag = False
ca = reporting_period_data['names']
ca_len = len(ca)
temp_max_row = 0
times = reporting_period_data['timestamps']
if has_data_flag:
ws['B6'].font = title_font
ws['B6'] = name + ' 趋势'
ws['B7'].fill = table_fill
ws['B7'].border = f_border
ws['B7'].alignment = c_c_alignment
ws['B7'] = '时间'
time = times[0]
has_data = False
max_row = 0
if len(time) > 0:
has_data = True
max_row = 8 + len(time)
print("max_row", max_row)
temp_max_row = max_row
if has_data:
for i in range(0, len(time)):
col = 'B'
row = str(8 + i)
# col = chr(ord('B') + i)
ws[col + row].font = title_font
ws[col + row].alignment = c_c_alignment
ws[col + row] = time[i]
ws[col + row].border = f_border
for i in range(0, ca_len):
# 38 title
col = chr(ord('C') + i)
ws[col + '7'].fill = table_fill
ws[col + '7'].font = title_font
ws[col + '7'].alignment = c_c_alignment
ws[col + '7'] = reporting_period_data['names'][i]
ws[col + '7'].border = f_border
# 39 data
time = times[i]
time_len = len(time)
for j in range(0, time_len):
row = str(8 + j)
# col = chr(ord('B') + i)
ws[col + row].font = title_font
ws[col + row].alignment = c_c_alignment
ws[col + row] = round(reporting_period_data['values'][i][j], 0)
ws[col + row].border = f_border
# bar
# 39~: bar
bar = BarChart()
labels = Reference(ws, min_col=2, min_row=8, max_row=max_row + 1)
bar_data = Reference(ws, min_col=3 + i, min_row=7, max_row=max_row + 1) # openpyxl bug
bar.add_data(bar_data, titles_from_data=True)
bar.set_categories(labels)
bar.height = 5.25 # cm 1.05*5 1.05cm = 30 pt
bar.width = 36
# pie.title = "Pies sold by category"
bar.dLbls = DataLabelList()
# bar.dLbls.showCatName = True # label show
bar.dLbls.showVal = True # val show
bar.dLbls.showPercent = True # percent show
# s1 = CharacterProperties(sz=1800) # font size *100
chart_col = chr(ord('B'))
chart_cell = chart_col + str(max_row + 2 + 10*i)
print("chart_cell", chart_cell)
ws.add_chart(bar, chart_cell)
else:
pass
for i in range(8, temp_max_row + 1 + 1):
ws.row_dimensions[i].height = 20
filename = str(uuid.uuid4()) + '.xlsx'
wb.save(filename)
return filename
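# Hedged example (illustrative only): the report structure consumed above is
# assumed to look roughly like the dict below, with one timestamp list and one
# value list per entry in 'names'; running it also assumes the bundled
# excelexporters/myems.png image is available on the working path.
if __name__ == '__main__':
    _sample_report = {
        'reporting_period': {
            'names': ['Meter A'],
            'timestamps': [['2021-01-01', '2021-01-02', '2021-01-03']],
            'values': [[10.0, 12.5, 9.8]],
        }
    }
    print(export(_sample_report, 'Meter A', '2021-01-01', '2021-01-03', 'daily')[:40])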
| 37.337121
| 120
| 0.474891
|
7b0936ae1c4daf8405dfbf470a4810ef72de2247
| 10,710
|
py
|
Python
|
bpython/test/test_manual_readline.py
|
niloct/bpython
|
c94c8f833bbccc4124f2c836d1e248166a4facfe
|
[
"PSF-2.0"
] | null | null | null |
bpython/test/test_manual_readline.py
|
niloct/bpython
|
c94c8f833bbccc4124f2c836d1e248166a4facfe
|
[
"PSF-2.0"
] | null | null | null |
bpython/test/test_manual_readline.py
|
niloct/bpython
|
c94c8f833bbccc4124f2c836d1e248166a4facfe
|
[
"PSF-2.0"
] | null | null | null |
from bpython.curtsiesfrontend.manual_readline import (
left_arrow,
right_arrow,
beginning_of_line,
forward_word,
back_word,
end_of_line,
delete,
last_word_pos,
backspace,
delete_from_cursor_back,
delete_from_cursor_forward,
delete_rest_of_word,
delete_word_to_cursor,
transpose_character_before_cursor,
UnconfiguredEdits,
delete_word_from_cursor_back,
)
from bpython.test import unittest
class TestManualReadline(unittest.TestCase):
def setUp(self):
self._line = "this is my test string"
def tearDown(self):
pass
def test_left_arrow_at_zero(self):
pos = 0
expected = (pos, self._line)
result = left_arrow(pos, self._line)
self.assertEqual(expected, result)
def test_left_arrow_at_non_zero(self):
for i in range(1, len(self._line)):
expected = (i - 1, self._line)
result = left_arrow(i, self._line)
self.assertEqual(expected, result)
def test_right_arrow_at_end(self):
pos = len(self._line)
expected = (pos, self._line)
result = right_arrow(pos, self._line)
self.assertEqual(expected, result)
def test_right_arrow_at_non_end(self):
for i in range(len(self._line) - 1):
expected = (i + 1, self._line)
result = right_arrow(i, self._line)
self.assertEqual(expected, result)
def test_beginning_of_line(self):
expected = (0, self._line)
for i in range(len(self._line)):
result = beginning_of_line(i, self._line)
self.assertEqual(expected, result)
def test_end_of_line(self):
expected = (len(self._line), self._line)
for i in range(len(self._line)):
result = end_of_line(i, self._line)
self.assertEqual(expected, result)
def test_forward_word(self):
line = "going from here to_here"
start_pos = 11
next_word_pos = 15
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
start_pos = 15
next_word_pos = 23
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
def test_forward_word_tabs(self):
line = "going from here to_here"
start_pos = 11
next_word_pos = 15
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
start_pos = 15
next_word_pos = 28
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
def test_forward_word_end(self):
line = "going from here to_here"
start_pos = 16
next_word_pos = 23
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
start_pos = 22
next_word_pos = 23
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
start_pos = 23
next_word_pos = 23
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
def test_forward_word_empty(self):
line = ""
start_pos = 0
next_word_pos = 0
expected = (next_word_pos, line)
result = forward_word(start_pos, line)
self.assertEqual(expected, result)
def test_back_word(self):
line = "going to here from_here"
start_pos = 14
prev_word_pos = 9
self.assertEqual(line[start_pos], "f")
self.assertEqual(line[prev_word_pos], "h")
expected = (prev_word_pos, line)
result = back_word(start_pos, line)
self.assertEqual(expected, result)
def test_last_word_pos(self):
line = "a word"
expected = 2
result = last_word_pos(line)
self.assertEqual(expected, result)
def test_last_word_pos_single_word(self):
line = "word"
expected = 0
result = last_word_pos(line)
self.assertEqual(expected, result)
def test_delete(self):
line = "deletion line"
pos = 3
expected = (3, "deltion line")
result = delete(pos, line)
self.assertEqual(expected, result)
def test_delete_from_cursor_back(self):
line = "everything before this will be deleted"
expected = (0, "this will be deleted")
result = delete_from_cursor_back(line.find("this"), line)
self.assertEqual(expected, result)
def test_delete_from_cursor_forward(self):
line = "everything after this will be deleted"
pos = line.find("this")
expected = (pos, "everything after ")
result = delete_from_cursor_forward(line.find("this"), line)[:-1]
self.assertEqual(expected, result)
self.assertEqual(delete_from_cursor_forward(0, ""), (0, "", ""))
def test_delete_rest_of_word(self):
self.try_stages_kill(
[
"z|s;df asdf d s;a;a",
"z|;df asdf d s;a;a",
"z| asdf d s;a;a",
"z| d s;a;a",
"z| s;a;a",
"z|;a;a",
"z|;a",
"z|",
"z|",
],
delete_rest_of_word,
)
def test_delete_word_to_cursor(self):
self.try_stages_kill(
[
" a;d sdf ;a;s;d; fjksald|a",
" a;d sdf ;a;s;d; |a",
" a;d sdf |a",
" a;d |a",
" |a",
"|a",
"|a",
],
delete_word_to_cursor,
)
def test_yank_prev_killed_text(self):
pass
def test_yank_prev_prev_killed_text(self):
pass
def try_stages(self, strings, func):
if not all("|" in s for s in strings):
raise ValueError("Need to use '|' to specify cursor")
stages = [(s.index("|"), s.replace("|", "")) for s in strings]
for (initial_pos, initial), (final_pos, final) in zip(
stages[:-1], stages[1:]
):
self.assertEqual(func(initial_pos, initial), (final_pos, final))
def try_stages_kill(self, strings, func):
if not all("|" in s for s in strings):
raise ValueError("Need to use '|' to specify cursor")
stages = [(s.index("|"), s.replace("|", "")) for s in strings]
for (initial_pos, initial), (final_pos, final) in zip(
stages[:-1], stages[1:]
):
self.assertEqual(
func(initial_pos, initial)[:-1], (final_pos, final)
)
def test_transpose_character_before_cursor(self):
self.try_stages(
[
"as|df asdf",
"ads|f asdf",
"adfs| asdf",
"adf s|asdf",
"adf as|sdf",
],
transpose_character_before_cursor,
)
def test_transpose_empty_line(self):
self.assertEqual(transpose_character_before_cursor(0, ""), (0, ""))
def test_transpose_first_character(self):
self.assertEqual(transpose_character_before_cursor(0, "a"), (0, "a"))
self.assertEqual(transpose_character_before_cursor(0, "as"), (0, "as"))
def test_transpose_end_of_line(self):
self.assertEqual(transpose_character_before_cursor(1, "a"), (1, "a"))
self.assertEqual(transpose_character_before_cursor(2, "as"), (2, "sa"))
def test_transpose_word_before_cursor(self):
pass
def test_backspace(self):
self.assertEqual(backspace(2, "as"), (1, "a"))
self.assertEqual(backspace(3, "as "), (2, "as"))
def test_delete_word_from_cursor_back(self):
self.try_stages_kill(
[
"asd;fljk asd;lfjas;dlkfj asdlk jasdf;ljk|",
"asd;fljk asd;lfjas;dlkfj asdlk jasdf;|",
"asd;fljk asd;lfjas;dlkfj asdlk |",
"asd;fljk asd;lfjas;dlkfj |",
"asd;fljk asd;lfjas;|",
"asd;fljk asd;|",
"asd;fljk |",
"asd;|",
"|",
"|",
],
delete_word_from_cursor_back,
)
self.try_stages_kill(
[" (( asdf |", " (( |", "|"], delete_word_from_cursor_back
)
class TestEdits(unittest.TestCase):
def setUp(self):
self.edits = UnconfiguredEdits()
def test_seq(self):
def f(cursor_offset, line):
return ("hi", 2)
self.edits.add("a", f)
self.assertIn("a", self.edits)
self.assertEqual(self.edits["a"], f)
self.assertEqual(
self.edits.call("a", cursor_offset=3, line="hello"), ("hi", 2)
)
with self.assertRaises(KeyError):
self.edits["b"]
with self.assertRaises(KeyError):
self.edits.call("b")
def test_functions_with_bad_signatures(self):
def f(something):
return (1, 2)
with self.assertRaises(TypeError):
self.edits.add("a", f)
def g(cursor_offset, line, something, something_else):
return (1, 2)
with self.assertRaises(TypeError):
self.edits.add("a", g)
def test_functions_with_bad_return_values(self):
def f(cursor_offset, line):
return ("hi",)
with self.assertRaises(ValueError):
self.edits.add("a", f)
def g(cursor_offset, line):
return ("hi", 1, 2, 3)
with self.assertRaises(ValueError):
self.edits.add("b", g)
def test_config(self):
def f(cursor_offset, line):
return ("hi", 2)
def g(cursor_offset, line):
return ("hey", 3)
self.edits.add_config_attr("att", f)
self.assertNotIn("att", self.edits)
class config:
att = "c"
key_dispatch = {"c": "c"}
configured_edits = self.edits.mapping_with_config(config, key_dispatch)
self.assertTrue(configured_edits.__contains__, "c")
self.assertNotIn("c", self.edits)
with self.assertRaises(NotImplementedError):
configured_edits.add_config_attr("att2", g)
with self.assertRaises(NotImplementedError):
configured_edits.add("d", g)
self.assertEqual(
configured_edits.call("c", cursor_offset=5, line="asfd"), ("hi", 2)
)
if __name__ == "__main__":
unittest.main()
| 31.22449
| 79
| 0.569188
|
f96b727d1d66289c6fa5e33d7922c2b5679883f8
| 858
|
py
|
Python
|
pcg_gazebo/parsers/sdf/length.py
|
TForce1/pcg_gazebo
|
9ff88016b7b6903236484958ca7c6ed9f8ffb346
|
[
"ECL-2.0",
"Apache-2.0"
] | 40
|
2020-02-04T18:16:49.000Z
|
2022-02-22T11:36:34.000Z
|
pcg_gazebo/parsers/sdf/length.py
|
awesomebytes/pcg_gazebo
|
4f335dd460ef7c771f1df78b46a92fad4a62cedc
|
[
"ECL-2.0",
"Apache-2.0"
] | 75
|
2020-01-23T13:40:50.000Z
|
2022-02-09T07:26:01.000Z
|
pcg_gazebo/parsers/sdf/length.py
|
GimpelZhang/gazebo_world_generator
|
eb7215499d0ddc972d804c988fadab1969579b1b
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2020-09-10T06:35:41.000Z
|
2022-02-20T19:08:17.000Z
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLScalar
class Length(XMLScalar):
_NAME = 'length'
_TYPE = 'sdf'
def __init__(self):
super(Length, self).__init__(min_value=0)
| 34.32
| 74
| 0.744755
|
32239ede9768337363b6e385360720160d2c5f13
| 7,895
|
py
|
Python
|
app/api/spotify.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | 20
|
2021-12-21T17:46:33.000Z
|
2022-01-22T15:23:24.000Z
|
app/api/spotify.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | null | null | null |
app/api/spotify.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | null | null | null |
import base64
import json
import sys
import time
import typing as t
import requests
from loguru import logger
from .route import Route
from ..config import Config
from ..utils import generate_oauth_url
PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
class Spotify:
RETRY_ATTEMPTS = 5
USER_AGENT = f"Spotify Twitter Banner ({Config.GITHUB_REPO_URL}) - Python/{PYTHON_VERSION} Requests/{requests.__version__}"
def __init__(self, client_id: str, client_secret: str) -> None:
self.client_id = client_id
self.client_secret = client_secret
self.bearer_info = None
self.refresh_token = self.load_refresh_token()
# Get bearer info.
def get_bearer_info(self) -> t.Dict[str, t.Any]:
if not self.refresh_token:
raise Exception("No refresh token provided.")
token = self.generate_base64_token()
headers = {"Authorization": f"Basic {token}"}
data = {
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
# Get the bearer info.
response = requests.post("https://accounts.spotify.com/api/token", headers=headers, data=data)
# Check if the request was successful.
if response.status_code != 200:
raise Exception("Failed to get bearer info.")
# Return the bearer info.
info = response.json()
if "error" in info:
raise Exception(f"Failed to get bearer info: {info['error']}")
return info
# Function to get the refresh token from code.
def get_refresh_token(self, code: str) -> t.Dict[str, t.Any]:
token = self.generate_base64_token()
headers = {
"Authorization": f"Basic {token}"
}
data = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": Config.SPOTIFY_REDIRECT_URI,
}
response = requests.post("https://accounts.spotify.com/api/token", headers=headers, data=data)
return response.json()
# Function to handle loading refresh token.
def load_refresh_token(self) -> str:
# Load refresh token from environment variable.
if Config.SPOTIFY_REFRESH_TOKEN:
return Config.SPOTIFY_REFRESH_TOKEN
# If not in environmental vars, load from JSON.
with open(Config.SPOTIFY_REFRESH_TOKEN_PATH, "r") as file:
token = json.load(file)
# Check if refresh token exists, If not do the workflow.
if "refresh_token" not in token:
logger.info("No refresh token found. Please follow the steps to get the refresh token.")
# Generate OAuth URL.
url = generate_oauth_url(self.client_id, Config.SPOTIFY_REDIRECT_URI, Config.SCOPES)
print(f"Please visit the following URL to authorize the application: {url}")
# Wait for user to input code.
code = input("Enter the value of code from URL query parameter: ")
if not code:
raise Exception("No code provided.")
# Get refresh token.
token = self.get_refresh_token(code)
# Calculate expired time
expires_in = int(token["expires_in"])
expires_at = time.time() + expires_in
token["expires_at"] = expires_at
# Save refresh token.
with open(Config.SPOTIFY_REFRESH_TOKEN_PATH, "w") as file:
json.dump(token, file)
# Return refresh token.
return token["refresh_token"]
def fetch(
self,
route: Route,
*,
headers: t.Optional[t.Dict[str, t.Any]] = None,
data: t.Optional[t.Any] = None
) -> t.Optional[t.Dict[str, t.Any]]:
if not headers:
headers = {}
# Check if Authorization exists.
if "Authorization" not in headers:
# Check if bearer info exists.
if self.bearer_info is None:
self.bearer_info = self.get_bearer_info()
# Set the Authorization header.
headers["Authorization"] = f"Bearer {self.bearer_info['access_token']}"
headers = {
"User-Agent": self.USER_AGENT,
"Content-Type": "application/json",
**headers
}
# Perform request with retries.
for _ in range(self.RETRY_ATTEMPTS):
response = requests.request(route.method, route.url, headers=headers, json=data)
logger.debug(f"[{route.method}] ({response.status_code}) {route.url}")
# Check if the request was successful.
if response.status_code == 200:
return response.json()
# Check if the request was a 429.
if response.status_code == 429:
# Get Retry-After header, and wait for it to clear.
retry_after = int(response.headers["Retry-After"])
# Wait for the Retry-After header to clear.
logger.info(f"Ratelimited. Waiting for {retry_after} seconds.")
time.sleep(retry_after)
continue
# Check if the request was a 401.
if response.status_code == 401:
logger.info("Bearer info expired. Refreshing.")
self.bearer_info = self.get_bearer_info()
continue
# Ignore anything 5xx
if response.status_code >= 500:
continue
# Check if the request was a 404.
if response.status_code == 404:
logger.warning(f"Failed to fetch: {route.url}. Route not found.")
return None
# Check if the request was a 403.
if response.status_code == 403:
logger.warning(f"Failed to fetch: {route.url}. Forbidden route.")
return None
# Utility methods.
def generate_base64_token(self) -> str:
return base64.b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode("utf-8")
@staticmethod
def _form_url(url: str, data: t.Dict[str, t.Any]) -> str:
url += "?" + "&".join([f"{dict_key}={dict_value}" for dict_key, dict_value in data.items()])
return url
# Main endpoints.
def currently_playing(self) -> t.Optional[t.Dict[str, t.Any]]:
"""Get the currently playing song."""
route = Route("GET", "/me/player/currently-playing")
return self.fetch(route)
def is_playing(self) -> bool:
"""Check if the user is currently listening to music."""
currently_playing = self.currently_playing()
if currently_playing:
return currently_playing["is_playing"]
return False
def recently_played(
self,
limit: int = 20,
before: t.Optional[str] = None,
after: t.Optional[str] = None
) -> t.Dict[str, t.Any]:
"""Get recently played tracks."""
data: t.Dict[str, t.Any] = {"limit": limit}
if before:
data["before"] = before
if after:
data["after"] = after
route = Route(
"GET",
self._form_url("/me/player/recently-played", data)
)
return t.cast(dict, self.fetch(route))
def top_tracks(
self,
limit: int = 20,
offset: int = 0,
time_range: t.Optional[t.Literal["short_term", "medium_term", "long_term"]] = None
) -> t.Optional[t.Dict[str, t.Any]]:
"""Get top tracks of the user."""
data: t.Dict[str, t.Any] = {
"limit": limit,
"offset": offset
}
if time_range:
data["time_range"] = time_range
route = Route(
"GET",
self._form_url("/me/top/tracks", data)
)
return self.fetch(route)
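# A minimal usage sketch, not part of the original module: it assumes Spotify
# API credentials and a refresh token are already configured (the client id and
# secret strings below are placeholders), and the response fields accessed
# follow the Spotify Web API payload shape.
if __name__ == "__main__":
    spotify = Spotify(client_id="<client-id>", client_secret="<client-secret>")
    if spotify.is_playing():
        now_playing = spotify.currently_playing()
        print(now_playing["item"]["name"])
    else:
        recent = spotify.recently_played(limit=5)
        for entry in recent["items"]:
            print(entry["track"]["name"])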
| 31.834677
| 127
| 0.581127
|
92c58e696e9225edce9fbbe8a03f6d80720e9823
| 297
|
py
|
Python
|
opwen_email_server/constants/sendgrid.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
opwen_email_server/constants/sendgrid.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
opwen_email_server/constants/sendgrid.py
|
tezzytezzy/opwen-cloudserver
|
c3ebfe93d778cd789ab3df25c4580eedc0ae9b4a
|
[
"Apache-2.0"
] | null | null | null |
from typing_extensions import Final # noqa: F401
MAILBOX_URL = 'https://api.sendgrid.com/v3/user/webhooks/parse/settings' # type: Final # noqa: E501 # yapf: disable
INBOX_URL = 'https://mailserver.lokole.ca/api/email/sendgrid/{}' # type: Final
MX_RECORD = 'mx.sendgrid.net' # type: Final
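# A minimal usage sketch, not part of the original module: the client id below
# is a placeholder, inserted into the INBOX_URL template defined above.
EXAMPLE_CLIENT_ID = 'some-client-id'
EXAMPLE_INBOX_URL = INBOX_URL.format(EXAMPLE_CLIENT_ID)
assert EXAMPLE_INBOX_URL == 'https://mailserver.lokole.ca/api/email/sendgrid/some-client-id'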
| 37.125
| 118
| 0.717172
|
7230d4a4fbbe572197cb4f2e24330be5d786cbdc
| 1,511
|
py
|
Python
|
utils/data_labeler.py
|
Taher-Dohadwala/better-job-finder
|
607fec96c57a49feb8179db0bcae1f5969bcb0cd
|
[
"MIT"
] | null | null | null |
utils/data_labeler.py
|
Taher-Dohadwala/better-job-finder
|
607fec96c57a49feb8179db0bcae1f5969bcb0cd
|
[
"MIT"
] | null | null | null |
utils/data_labeler.py
|
Taher-Dohadwala/better-job-finder
|
607fec96c57a49feb8179db0bcae1f5969bcb0cd
|
[
"MIT"
] | null | null | null |
"""
This script is used to add labels to data for initial training
"""
"""
Data format for binary classifier
{
"text": "string",
"label": [
"neg",
"pos"
]
}
"""
import pandas as pd
import numpy as np
from curtsies.fmtfuncs import red, bold, green, on_blue, yellow, blue
PATH = "../data/raw/combined_data_withlabel.csv"
# helper function to reset labels
def quick_reset():
df = pd.read_csv("../data/raw/combined_data.csv")
df["Label"] = np.nan
df.to_csv("../data/raw/combined_data_withlabel.csv")
# load raw data in
df = pd.read_csv(PATH)
# finds place where previous labeling session ended
def find_continue_point(df):
t = df.index[df["Label"] == 999]
# First time labeling check
if not t.any():
start = 0
else:
start = int(t.tolist()[0])
return start
start = find_continue_point(df)
print(f"Starting at {start}")
jobs_to_label = df["Job Description"]
current = start
try:
# display job and ask for label
for idx,job in enumerate(jobs_to_label[start:]):
current = start + idx
print(yellow(f"Example number: {current}"))
print(job)
print(red("-"*100))
label = int(input("Label: "))
df.iloc[current,df.columns.get_loc("Label")] = label
# ctrl-c will end labeling session and save progress
except KeyboardInterrupt:
print(blue(f"ENDING AT: {current}"))
print(green("SAVING LABELING RESULTS"))
df.iloc[current,df.columns.get_loc("Label")] = 999
df.to_csv(PATH)
| 23.609375
| 69
| 0.649239
|
388c58802ad81ee77d68d276b7d797e33175eeea
| 68
|
py
|
Python
|
macaroni/types.py
|
rudineirk/py-macaroni
|
7b350478754fe9484d28535ea55e641b87a12b81
|
[
"MIT"
] | null | null | null |
macaroni/types.py
|
rudineirk/py-macaroni
|
7b350478754fe9484d28535ea55e641b87a12b81
|
[
"MIT"
] | null | null | null |
macaroni/types.py
|
rudineirk/py-macaroni
|
7b350478754fe9484d28535ea55e641b87a12b81
|
[
"MIT"
] | null | null | null |
class Error:
def __init__(self, data):
self.data = data
| 17
| 29
| 0.602941
|
09f7965bac524cf212d217592198280d0187e7e4
| 2,399
|
py
|
Python
|
docsrc/conf.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 2
|
2021-05-11T15:47:52.000Z
|
2021-06-24T21:55:04.000Z
|
docsrc/conf.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 2
|
2021-05-19T07:24:41.000Z
|
2021-06-24T21:54:19.000Z
|
docsrc/conf.py
|
synamedia-jenni/Jenni
|
44a25453d3f7dc08ca22f75b4d817dfa5c141904
|
[
"Apache-2.0"
] | 1
|
2021-05-14T10:37:53.000Z
|
2021-05-14T10:37:53.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.dirname(os.path.abspath(".")))
# -- Project information -----------------------------------------------------
project = "Jenni"
copyright = "2021, Wouter Batelaan (Synamedia)"
author = "Wouter Batelaan (Synamedia)"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
# 'sphinx.ext.coverage',
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "jenni/tests"]
autodoc_default_options = {
# 'member-order': 'bysource',
"special-members": "__init__",
"undoc-members": True,
"exclude-members": "__weakref__",
}
smartquotes = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# nitpicky = True
| 32.418919
| 79
| 0.659441
|
41f6bb205cb87ea9fe6745fd8e031895ea83cec3
| 9,252
|
py
|
Python
|
pandaharvester/harvesterfifo/sqlite_fifo.py
|
nikmagini/panda-harvester
|
707955e7717bc6a15e722e857668b496d9563b85
|
[
"Apache-2.0"
] | 2
|
2017-04-13T10:39:53.000Z
|
2018-10-13T22:00:47.000Z
|
pandaharvester/harvesterfifo/sqlite_fifo.py
|
nikmagini/panda-harvester
|
707955e7717bc6a15e722e857668b496d9563b85
|
[
"Apache-2.0"
] | null | null | null |
pandaharvester/harvesterfifo/sqlite_fifo.py
|
nikmagini/panda-harvester
|
707955e7717bc6a15e722e857668b496d9563b85
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import re
import sqlite3
try:
from threading import get_ident
except ImportError:
from thread import get_ident
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvesterconfig import harvester_config
try:
memoryviewOrBuffer = buffer
except NameError:
memoryviewOrBuffer = memoryview
class SqliteFifo(PluginBase):
# template of SQL commands
_create_sql = (
'CREATE TABLE IF NOT EXISTS queue_table '
'('
' id INTEGER PRIMARY KEY,'
' item BLOB,'
' score REAL,'
' temporary INTEGER DEFAULT 0 '
')'
)
_create_index_sql = (
'CREATE INDEX IF NOT EXISTS score_index ON queue_table '
'(score)'
)
_count_sql = 'SELECT COUNT(id) FROM queue_table WHERE temporary = 0'
_iterate_sql = 'SELECT id, item, score FROM queue_table'
_write_lock_sql = 'BEGIN IMMEDIATE'
_exclusive_lock_sql = 'BEGIN EXCLUSIVE'
_push_sql = 'INSERT INTO queue_table (item,score) VALUES (?,?)'
_push_by_id_sql = 'INSERT INTO queue_table (id,item,score) VALUES (?,?,?)'
_lpop_get_sql_template = (
'SELECT {columns} FROM queue_table '
'WHERE temporary = 0 '
'ORDER BY score LIMIT 1'
)
_rpop_get_sql_template = (
'SELECT {columns} FROM queue_table '
'WHERE temporary = 0 '
'ORDER BY score DESC LIMIT 1'
)
_get_by_id_sql_template = (
'SELECT {columns} FROM queue_table '
'WHERE id = ? '
'AND temporary = {temp}'
)
_pop_del_sql = 'DELETE FROM queue_table WHERE id = ?'
_move_to_temp_sql = 'UPDATE queue_table SET temporary = 1 WHERE id = ?'
_del_sql_template = 'DELETE FROM queue_table WHERE id in ({0})'
_clear_delete_table_sql = 'DELETE FROM queue_table'
_clear_drop_table_sql = 'DROP TABLE IF EXISTS queue_table'
_clear_zero_id_sql = 'DELETE FROM sqlite_sequence WHERE name = "queue_table"'
_peek_sql = (
'SELECT id, item, score FROM queue_table '
'WHERE temporary = 0 '
'ORDER BY score LIMIT 1'
)
_restore_sql = 'UPDATE queue_table SET temporary = 0 WHERE temporary != 0'
_restore_sql_template = (
'UPDATE queue_table SET temporary = 0 '
'WHERE temporary != 0 AND id in ({0})'
)
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
if hasattr(self, 'database_filename'):
_db_filename = self.database_filename
else:
_db_filename = harvester_config.fifo.database_filename
_db_filename = re.sub(r'\$\(TITLE\)', self.titleName, _db_filename)
_db_filename = re.sub(r'\$\(AGENT\)', self.titleName, _db_filename)
self.db_path = os.path.abspath(_db_filename)
self._connection_cache = {}
with self._get_conn() as conn:
conn.execute(self._exclusive_lock_sql)
conn.execute(self._create_sql)
conn.execute(self._create_index_sql)
conn.commit()
def __len__(self):
with self._get_conn() as conn:
size = next(conn.execute(self._count_sql))[0]
return size
def __iter__(self):
with self._get_conn() as conn:
for id, obj_buf, score in conn.execute(self._iterate_sql):
yield bytes(obj_buf)
def _get_conn(self):
id = get_ident()
if id not in self._connection_cache:
self._connection_cache[id] = sqlite3.Connection(self.db_path, timeout=60)
return self._connection_cache[id]
def _pop(self, get_sql, timeout=None, protective=False):
keep_polling = True
wait = 0.1
max_wait = 2
tries = 0
last_attempt_timestamp = time.time()
with self._get_conn() as conn:
id = None
while keep_polling:
conn.execute(self._write_lock_sql)
cursor = conn.execute(get_sql)
try:
id, obj_buf, score = next(cursor)
keep_polling = False
except StopIteration:
# unlock the database
conn.commit()
now_timestamp = time.time()
if timeout is None or (now_timestamp - last_attempt_timestamp) >= timeout:
keep_polling = False
continue
tries += 1
time.sleep(wait)
wait = min(max_wait, tries/10.0 + wait)
if id is not None:
if protective:
conn.execute(self._move_to_temp_sql, (id,))
else:
conn.execute(self._pop_del_sql, (id,))
conn.commit()
return (id, bytes(obj_buf), score)
return None
def _peek(self, peek_sql_template, skip_item=False, id=None, temporary=False):
columns = 'id, item, score'
temp = 0
if skip_item:
columns = 'id, score'
if temporary:
temp = 1
peek_sql = peek_sql_template.format(columns=columns, temp=temp)
with self._get_conn() as conn:
if id is not None:
cursor = conn.execute(peek_sql, (id,))
else:
cursor = conn.execute(peek_sql)
try:
if skip_item:
id, score = next(cursor)
return id, None, score
else:
id, obj_buf, score = next(cursor)
return id, bytes(obj_buf), score
except StopIteration:
return None
# number of objects in queue
def size(self):
return len(self)
# enqueue with priority score
def put(self, obj, score):
retVal = False
obj_buf = memoryviewOrBuffer(obj)
with self._get_conn() as conn:
conn.execute(self._write_lock_sql)
cursor = conn.execute(self._push_sql, (obj_buf, score))
n_row = cursor.rowcount
if n_row == 1:
retVal = True
return retVal
# enqueue by id
def putbyid(self, id, obj, score):
retVal = False
obj_buf = memoryviewOrBuffer(obj)
with self._get_conn() as conn:
cursor = conn.execute(self._push_by_id_sql, (id, obj_buf, score))
n_row = cursor.rowcount
if n_row == 1:
retVal = True
return retVal
# dequeue the first object
def get(self, timeout=None, protective=False):
sql_str = self._lpop_get_sql_template.format(columns='id, item, score')
return self._pop(get_sql=sql_str, timeout=timeout, protective=protective)
# dequeue the last object
def getlast(self, timeout=None, protective=False):
sql_str = self._rpop_get_sql_template.format(columns='id, item, score')
return self._pop(get_sql=sql_str, timeout=timeout, protective=protective)
# get tuple of (id, item, score) of the first object without dequeuing it
def peek(self, skip_item=False):
return self._peek(self._lpop_get_sql_template, skip_item=skip_item)
# get tuple of (id, item, score) of the last object without dequeuing it
def peeklast(self, skip_item=False):
return self._peek(self._rpop_get_sql_template, skip_item=skip_item)
# get tuple of (id, item, score) of object by id without dequeuing it
def peekbyid(self, id, temporary=False, skip_item=False):
return self._peek(self._get_by_id_sql_template, skip_item=skip_item, id=id, temporary=temporary)
# drop all objects in queue and index and reset primary key auto_increment
def clear(self):
with self._get_conn() as conn:
conn.execute(self._exclusive_lock_sql)
conn.execute(self._clear_drop_table_sql)
try:
conn.execute(self._clear_zero_id_sql)
except sqlite3.OperationalError:
pass
conn.commit()
self.__init__()
# delete objects by list of id
def delete(self, ids):
if isinstance(ids, (list, tuple)):
placeholders_str = ','.join('?' * len(ids))
with self._get_conn() as conn:
conn.execute(self._exclusive_lock_sql)
cursor = conn.execute(self._del_sql_template.format(placeholders_str), ids)
n_row = cursor.rowcount
conn.commit()
return n_row
else:
raise TypeError('ids should be list or tuple')
# Move objects in temporary space to the queue
def restore(self, ids):
with self._get_conn() as conn:
conn.execute(self._exclusive_lock_sql)
if ids is None:
conn.execute(self._restore_sql)
elif isinstance(ids, (list, tuple)):
placeholders_str = ','.join('?' * len(ids))
conn.execute(self._restore_sql_template.format(placeholders_str), ids)
else:
raise TypeError('ids should be list or tuple or None')
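# A minimal usage sketch, not part of the original module. Instantiation
# normally happens through the harvester plugin machinery; passing
# database_filename and titleName as keyword arguments here is an assumption
# about what PluginBase forwards onto the instance.
if __name__ == '__main__':
    fifo = SqliteFifo(database_filename='/tmp/test_fifo.sqlite3', titleName='test')
    fifo.put(b'first object', score=1.0)
    fifo.put(b'second object', score=0.5)
    print(len(fifo))            # 2
    print(fifo.peek())          # lowest score first: (id, b'second object', 0.5)
    print(fifo.get(timeout=1))  # dequeues the lowest-scored object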
| 37.156627
| 104
| 0.585927
|
fee12e55ced97bf46acdfb28dfda0dfb11a83ca4
| 680
|
py
|
Python
|
custom_model_runner/datarobot_drum/drum/exceptions.py
|
amperie/user-models
|
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
|
[
"Apache-2.0"
] | null | null | null |
custom_model_runner/datarobot_drum/drum/exceptions.py
|
amperie/user-models
|
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
|
[
"Apache-2.0"
] | 9
|
2021-11-10T20:16:41.000Z
|
2022-03-12T00:59:05.000Z
|
custom_model_runner/datarobot_drum/drum/exceptions.py
|
amperie/user-models
|
5236c50d0f20a7bac81acc5d1936a3502de2f5f3
|
[
"Apache-2.0"
] | 1
|
2021-06-17T22:05:33.000Z
|
2021-06-17T22:05:33.000Z
|
class DrumException(Exception):
"""Base drum exception"""
pass
class DrumCommonException(DrumException):
"""Raised in case of common errors in drum"""
pass
class DrumPerfTestTimeout(DrumException):
"""Raised when the perf-test case takes too long"""
pass
class DrumPerfTestOOM(DrumException):
""" Raised when the container running drum during perf test is OOM """
pass
class DrumPredException(DrumException):
""" Raised when prediction consistency check fails"""
pass
class DrumSchemaValidationException(DrumException):
""" Raised when the supplied schema in model_metadata does not match actual input or output data."""
| 20.606061
| 104
| 0.725
|
47cc0d9b9ce18ced2eefc84ee73e64bb2b0a855d
| 8,220
|
py
|
Python
|
picassos_palette/picassos_palette.py
|
Young-Picasso/AlgorithmicTrading
|
7a59b5980b4287394014f6630dab7aa3490e0a01
|
[
"MIT"
] | null | null | null |
picassos_palette/picassos_palette.py
|
Young-Picasso/AlgorithmicTrading
|
7a59b5980b4287394014f6630dab7aa3490e0a01
|
[
"MIT"
] | null | null | null |
picassos_palette/picassos_palette.py
|
Young-Picasso/AlgorithmicTrading
|
7a59b5980b4287394014f6630dab7aa3490e0a01
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Wilhem Kornhauser
"""
class BaseBacktest:
"""Class for basic event based backtesting of trading strategies.
Attributes
==========
symbol: str
financial instrument to work with
start: str
start date for data selection
end: str
end date for data selection
amount: int, float
initial investment amount for strategy
ftc: float
fixed transaction cost per trade
ptc: float
proportional transaction cost per trade
Methods
=======
get_data:
retrieves and prepares data
plot_data:
plots the adjusted close values for the financial instrument
print_balance:
prints out current account cash balance
print_net_wealth:
prints out current account wealth (cash balance + current asset value)
place_buy_order:
places market buy order
place_sell_order:
places market sell order
close_out:
calculates the account net wealth at end of backtesting period. Does not close out open positions in calculation
"""
def __init__(self, symbol, start, end, amount, ftc=0.0, ptc=0.0, verbose=True):
if type(symbol) is not str:
raise ValueError('symbol must be str')
if type(start) is not str:
raise ValueError('start must be str')
if type(end) is not str:
raise ValueError('end must be str')
if not isinstance(amount, (int, float)):
raise ValueError('amount must be int or float')
if type(ftc) is not float:
raise ValueError('ftc must be float')
if type(ptc) is not float:
raise ValueError('ptc must be float')
self.symbol = symbol # Set financial instrument
self.start = start # Set start date
self.end = end # Set end date
self.initial_amount = amount # Store initial amount in a pseudo-private attribute
self.amount = amount # Set starting cash balance value
self.ftc = ftc # Define fixed transaction costs per trade (ie 1 USD per transaction)
self.ptc = ptc # Defines proportional transaction costs per trade (ie 1% of transaction per transaction)
self.units = 0 # Units of the instrument (ie number of shares) in the portfolio initially
self.position = 0 # Sets initial position to market neutral
self.trades = 0 # Sets initial number of trades to 0
self.verbose = verbose # Set to True for full output (True by default)
self.get_data() # Call get_data() method
def __repr__(self):
return f'BaseBacktest(symbol={self.symbol}, start={self.start}, end={self.end}, amount={self.amount}, ftc={self.ftc}, ptc={self.ptc})'
def get_data(self):
"""
Retrieves and prepares data
"""
raw = pd.read_csv('http://hilpisch.com/pyalgo_eikon_eod_data.csv', index_col=0, parse_dates=True).dropna()
raw = pd.DataFrame(raw[self.symbol])
raw = raw.loc[self.start:self.end]
raw.rename(columns={self.symbol: 'price'}, inplace=True)
raw['return'] = np.log(raw / raw.shift(1))
self.data = raw.dropna()
def plot_data(self, cols=None):
"""
Plots the closing prices for financial instrument
"""
if cols is None:
cols = ['Price']
self.data['price'].plot(figsize=(10, 6), title=self.symbol)
def get_date_price(self, bar):
"""
Returns the date and price for a bar
"""
date = str(self.data.index[bar])[:10]
price = self.data.price.iloc[bar]
return date, price
def print_balance(self, bar):
"""
Prints out current account cash balance
"""
date, price = self.get_date_price(bar)
print(f'{date} | current balance {self.amount:.2f}')
def print_net_wealth(self, bar):
"""
Prints out current account total wealth (cash balance + positions)
"""
date, price = self.get_date_price(bar)
net_wealth = self.units * price + self.amount
print(f'{date} | current net wealth {net_wealth:.2f}')
def place_buy_order(self, bar, units=None, amount=None):
"""
Simulates placing a market buy order
"""
date, price = self.get_date_price(bar)
if units is None:
units = int(amount / price) # If units is not given, amount is treated as the cash to spend and converted to a whole number of units.
self.amount -= (units * price) * (1 + self.ptc) + self.ftc # Note there is no liquidity checking performed.
self.units += units
self.trades += 1
if self.verbose:
print(f'{date} | buying {units} units at {price:.2f}')
self.print_balance(bar)
self.print_net_wealth(bar)
def place_sell_order(self, bar, units=None, amount=None):
"""
Simulates placing a market sell order
"""
date, price = self.get_date_price(bar)
if units is None:
units = int(amount / price) # As with place_buy_order, amount is treated as the cash value to sell when units is not given.
self.amount += (units * price) * (1 - self.ptc) - self.ftc
self.units -= units
self.trades += 1
if self.verbose:
print(f'{date} | selling {units} units at {price:.2f}')
self.print_balance(bar)
self.print_net_wealth(bar)
def close_out(self, bar):
"""
Calculates accounts net wealth at end of backtest.
Does this by summing value of held assets and held cash.
Does not account for transaction fees required to close open positions.
"""
date, price = self.get_date_price(bar)
self.amount += self.units * price
self.units = 0
if self.verbose:
print(f'{date} | inventory {self.units} units at {price:.2f}')
print('=' * 55)
print(f'Final balance [$] {self.amount:.2f}')
performance = ((self.amount - self.initial_amount) / self.initial_amount * 100)
print(f'Net Performance [%] {performance:.2f}')
print(f'Trades executed [#] {self.trades:.2f}')
print('=' * 55)
def run_mean_reversion_strategy(self, SMA, threshold):
"""
Runs a backtest on a mean reversion-based strategy
Parameters
==========
SMA: int
simple moving average in days
threshold: float
absolute value for deviation-based signal relative to SMA
"""
msg = f'\n\nRunning mean reversion strategy | '
msg += f'SMA={SMA} & threshold={threshold}'
msg += f'\nfixed costs {self.ftc} | proportional costs {self.ptc}'
print(msg)
print('=' * 55)
# Clear data from previous runs
self.position = 0
self.trades = 0
self.amount = self.initial_amount
self.data['SMA'] = self.data['price'].rolling(SMA).mean()
for bar in range(SMA, len(self.data)):
if self.position == 0: # Checks if market position is neutral
if (self.data['price'].iloc[bar] < self.data['SMA'].iloc[bar] - threshold): # If market position is neutral, and this gives a buy indicator, buy.
self.place_buy_order(bar, amount=self.amount)
self.position = 1 # Sets market position to long
elif self.position == 1: # Checks if market position is long
if self.data['price'].iloc[bar] >= self.data['SMA'].iloc[bar]: # If market position is long, and this gives a sell signal, sell.
self.place_sell_order(bar, units=self.units)
self.position = 0 # Set market position to neutral
self.close_out(bar)
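# A minimal usage sketch, not part of the original module. It assumes the
# symbol 'AAPL.O' is present in the remote CSV loaded by get_data() and that
# network access is available.
if __name__ == '__main__':
    backtest = BaseBacktest(symbol='AAPL.O', start='2015-01-01', end='2019-12-31',
                            amount=10000, ftc=1.0, ptc=0.001)
    backtest.run_mean_reversion_strategy(SMA=25, threshold=5.0)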
| 41.1
| 163
| 0.576277
|
93c626cbd7802e1784ff464373615be2aaa36386
| 2,328
|
py
|
Python
|
tests/test_crack_main.py
|
nialov/ALSA
|
65204bb43bba03daf600de9ed4825c4602ea73af
|
[
"MIT"
] | null | null | null |
tests/test_crack_main.py
|
nialov/ALSA
|
65204bb43bba03daf600de9ed4825c4602ea73af
|
[
"MIT"
] | null | null | null |
tests/test_crack_main.py
|
nialov/ALSA
|
65204bb43bba03daf600de9ed4825c4602ea73af
|
[
"MIT"
] | null | null | null |
"""
Tests for crack_main.py.
"""
import os
from pathlib import Path
import geopandas as gpd
import pytest
from shapely.geometry import LineString, MultiLineString
import tests
from alsa import crack_cls, crack_main
@pytest.mark.skipif(
os.environ.get("CI") is not None, reason="Tensorflow crashes on Github Actions."
)
def test_crack_main(tmp_path):
"""
Test crack_main.
"""
traces_path = tmp_path / "test_crack_main_traces.shp"
if not tests.KL5_TEST_WEIGHTS.exists():
pytest.xfail("Skipping test_crack_main as weights are missing.")
combined_nwork, orig_dims, geo_data, result_gdf = crack_main.crack_main(
work_dir=tmp_path,
img_path=tests.KL5_TEST_IMAGE,
area_file_path=list(tests.KL5_TEST_AREA_DIR.glob("*.shp"))[0],
unet_weights_path=tests.KL5_TEST_WEIGHTS,
predicted_output_path=traces_path,
)
assert isinstance(combined_nwork, crack_cls.CrackNetWork)
assert isinstance(geo_data, gpd.GeoDataFrame)
assert traces_path.exists()
assert isinstance(result_gdf, gpd.GeoDataFrame)
assert len(orig_dims) == 2
gdf = gpd.read_file(traces_path)
assert gdf.shape[0] > 0
assert all(
isinstance(geom, (LineString, MultiLineString)) for geom in gdf.geometry.values
)
@pytest.mark.parametrize(
"override_ridge_config_path", tests.test_resolve_ridge_config_overrides_params()
)
def test_resolve_ridge_config_overrides(override_ridge_config_path, tmp_path):
"""
Test resolve_ridge_config_overrides.
"""
result = crack_main.resolve_ridge_config_overrides(
override_ridge_config_path=override_ridge_config_path,
work_dir=tmp_path,
)
assert isinstance(result, dict)
if override_ridge_config_path is None:
assert len(result) == 0
else:
assert len(result) > 0
def test_resolve_ridge_config_overrides_default(tmp_path: Path):
"""
Test resolve_ridge_config_overrides.
"""
override_ridge_config_path = tmp_path / tests.SAMPLE_RIDGE_CONFIG_PATH.name
override_ridge_config_path.write_text(tests.SAMPLE_RIDGE_CONFIG_PATH.read_text())
result = crack_main.resolve_ridge_config_overrides(
override_ridge_config_path=None,
work_dir=tmp_path,
)
assert isinstance(result, dict)
assert len(result) > 0
| 29.468354
| 87
| 0.734107
|
3d9520231e0a2e37edf2d0b0b14f2604e5fc68cd
| 1,874
|
py
|
Python
|
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateAppPackageRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateAppPackageRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateAppPackageRequest.py
|
leafcoder/aliyun-openapi-python-sdk
|
26b441ab37a5cda804de475fd5284bab699443f1
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class UpdateAppPackageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'UpdateAppPackage')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Debug(self):
return self.get_query_params().get('Debug')
def set_Debug(self,Debug):
self.add_query_param('Debug',Debug)
def get_Platform(self):
return self.get_query_params().get('Platform')
def set_Platform(self,Platform):
self.add_query_param('Platform',Platform)
def get_PackageUrl(self):
return self.get_query_params().get('PackageUrl')
def set_PackageUrl(self,PackageUrl):
self.add_query_param('PackageUrl',PackageUrl)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
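# A minimal usage sketch, not part of the original generated module. It assumes
# the usual aliyunsdkcore AcsClient workflow; the credentials, region and
# package values below are placeholders.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = UpdateAppPackageRequest()
    request.set_Id(12345)
    request.set_Platform('Android')
    request.set_PackageUrl('https://example.com/app.apk')
    response = client.do_action_with_exception(request)
    print(response)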
| 33.464286
| 75
| 0.756137
|
3fc8c98b5517b0e58839c92a594c29338887e804
| 7,460
|
py
|
Python
|
Python_Files/hydrolibs/data_download.py
|
montimaj/HydroSAR
|
c76022a914bd095ecae41d56a2a1dff5c1c1a970
|
[
"MIT"
] | 1
|
2022-03-01T08:46:46.000Z
|
2022-03-01T08:46:46.000Z
|
Python_Files/hydrolibs/data_download.py
|
xyt556/HydroSAR
|
2142c300e4cf48065626832fdeb9c4aa472627dc
|
[
"MIT"
] | null | null | null |
Python_Files/hydrolibs/data_download.py
|
xyt556/HydroSAR
|
2142c300e4cf48065626832fdeb9c4aa472627dc
|
[
"MIT"
] | null | null | null |
# Author: Sayantan Majumdar
# Email: smxnv@mst.edu
import ee
import requests
import zipfile
import os
import xmltodict
import geopandas as gpd
from glob import glob
from Python_Files.hydrolibs.sysops import copy_file
def download_gee_data(year_list, start_month, end_month, aoi_shp_file, outdir):
"""
Download MOD16 and PRISM data. MOD16 has to be divided by 10 (line 38) as its original scale is 0.1 mm/8 days.
:param year_list: List of years in %Y format
:param start_month: Start month in %m format
:param end_month: End month in %m format
:param aoi_shp_file: Area of interest shapefile (must be in WGS84)
:param outdir: Download directory
:return: None
"""
ee.Initialize()
mod16_collection = ee.ImageCollection("MODIS/006/MOD16A2")
prism_collection = ee.ImageCollection("OREGONSTATE/PRISM/AN81m")
aoi_shp = gpd.read_file(aoi_shp_file)
minx, miny, maxx, maxy = aoi_shp.geometry.total_bounds
gee_aoi = ee.Geometry.Rectangle([minx, miny, maxx, maxy])
for year in year_list:
start_date = ee.Date.fromYMD(year, start_month, 1)
if end_month == 12:
end_date = ee.Date.fromYMD(year + 1, 1, 1)
else:
end_date = ee.Date.fromYMD(year, end_month + 1, 1)
if end_month <= start_month:
start_date = ee.Date.fromYMD(year - 1, start_month, 1)
mod16_total = mod16_collection.select('ET').filterDate(start_date, end_date).sum().divide(10).toDouble()
prism_total = prism_collection.select('ppt').filterDate(start_date, end_date).sum().toDouble()
mod16_url = mod16_total.getDownloadUrl({
'scale': 1000,
'crs': 'EPSG:4326',
'region': gee_aoi
})
prism_url = prism_total.getDownloadUrl({
'scale': 1000,
'crs': 'EPSG:4326',
'region': gee_aoi
})
gee_vars = ['ET_', 'P_']
gee_links = [mod16_url, prism_url]
for gee_var, gee_url in zip(gee_vars, gee_links):
local_file_name = outdir + gee_var + str(year) + '.zip'
print('Downloading', local_file_name, '...')
r = requests.get(gee_url, allow_redirects=True)
open(local_file_name, 'wb').write(r.content)
def download_ssebop_data(sse_link, year_list, start_month, end_month, outdir):
"""
Download SSEBop Data
:param sse_link: Main SSEBop link without file name
:param year_list: List of years
:param start_month: Start month in %m format
:param end_month: End month in %m format
:param outdir: Download directory
:return: None
"""
month_flag = False
month_list = []
actual_start_year = year_list[0]
if end_month <= start_month:
year_list = [actual_start_year - 1] + list(year_list)
month_flag = True
else:
month_list = range(start_month, end_month + 1)
for year in year_list:
print('Downloading SSEBop for', year, '...')
if month_flag:
month_list = list(range(start_month, 13))
if actual_start_year <= year < year_list[-1]:
month_list = list(range(1, end_month + 1)) + month_list
elif year == year_list[-1]:
month_list = list(range(1, end_month + 1))
for month in month_list:
month_str = str(month)
if 1 <= month <= 9:
month_str = '0' + month_str
url = sse_link + 'm' + str(year) + month_str + '.zip'
local_file_name = outdir + 'SSEBop_' + str(year) + month_str + '.zip'
r = requests.get(url, allow_redirects=True)
open(local_file_name, 'wb').write(r.content)
def download_cropland_data(aoi_shp_file, output_dir, year_list=(), cdl_year=None):
"""
Download USDA-NASS cropland data for the specified year range. For years before 2008, CDL 2008 will be replicated.
:param aoi_shp_file: Area of interest shapefile
:param output_dir: Output directory
:param year_list: List of years in %Y format
:param cdl_year: Set CDL year for using a single year for the entire model. If set to None, all available CDL
data for the years in year_list will be downloaded (Note: for years before 2008, CDL 2008 will be replicated if
cdl_year is None).
:return: None
"""
nass_proj_wkt = 'PROJCS["NAD_1983_Albers",' \
'GEOGCS["NAD83",' \
'DATUM["North_American_Datum_1983",' \
'SPHEROID["GRS 1980",6378137,298.257222101,' \
'AUTHORITY["EPSG","7019"]],' \
'TOWGS84[0,0,0,0,0,0,0],' \
'AUTHORITY["EPSG","6269"]],' \
'PRIMEM["Greenwich",0,' \
'AUTHORITY["EPSG","8901"]],' \
'UNIT["degree",0.0174532925199433,' \
'AUTHORITY["EPSG","9108"]],' \
'AUTHORITY["EPSG","4269"]],' \
'PROJECTION["Albers_Conic_Equal_Area"],' \
'PARAMETER["standard_parallel_1",29.5],' \
'PARAMETER["standard_parallel_2",45.5],' \
'PARAMETER["latitude_of_center",23],' \
'PARAMETER["longitude_of_center",-96],' \
'PARAMETER["false_easting",0],' \
'PARAMETER["false_northing",0],' \
'UNIT["meters",1]]'
aoi_shp = gpd.read_file(aoi_shp_file)
aoi_shp = aoi_shp.to_crs(nass_proj_wkt)
minx, miny, maxx, maxy = aoi_shp.geometry.total_bounds
outfile = None
years = []
if cdl_year:
years = [y for y in year_list]
year_list = [cdl_year]
for year in year_list[::-1]:
if year >= 2008:
print('Downloading CDL data for', str(year), '...')
nass_xml_url = 'https://nassgeodata.gmu.edu/axis2/services/CDLService/GetCDLFile?year=' + str(year) + \
'&bbox=' + str(minx) + ',' + str(miny) + ',' + str(maxx) + ',' + str(maxy)
r = requests.get(nass_xml_url, allow_redirects=True)
nass_data_url = xmltodict.parse(r.content)['ns1:GetCDLFileResponse']['returnURL']
r = requests.get(nass_data_url, allow_redirects=True)
outfile = output_dir + 'CDL_{}.tif'.format(year)
with open(outfile, 'wb') as cdl_out:
cdl_out.write(r.content)
else:
new_file = output_dir + 'CDL_{}.tif'.format(year)
copy_file(outfile, new_file, ext='')
for year in years:
if year != cdl_year:
new_file = output_dir + 'CDL_{}.tif'.format(year)
copy_file(outfile, new_file, ext='')
def extract_data(zip_dir, out_dir, rename_extracted_files=False):
"""
Extract data from zip file
:param zip_dir: Input zip directory
:param out_dir: Output directory to write extracted files
:param rename_extracted_files: Set True to rename extracted files according the original zip file name
:return: None
"""
print('Extracting zip files...')
for zip_file in glob(zip_dir + '*.zip'):
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
if rename_extracted_files:
zip_info = zip_ref.infolist()[0]
zip_info.filename = zip_file[zip_file.rfind(os.sep) + 1: zip_file.rfind('.')] + '.tif'
zip_ref.extract(zip_info, path=out_dir)
else:
zip_ref.extractall(path=out_dir)
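# A minimal usage sketch, not part of the original module. The shapefile path,
# output directories and year range are placeholders, and Earth Engine
# authentication is assumed to be configured for download_gee_data.
if __name__ == '__main__':
    years = range(2015, 2020)
    download_gee_data(years, start_month=4, end_month=9,
                      aoi_shp_file='Inputs/aoi_wgs84.shp', outdir='Downloads/GEE/')
    extract_data('Downloads/GEE/', 'Rasters/', rename_extracted_files=True)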
| 42.628571
| 118
| 0.599732
|
4e18464b9e33d10d5634b6b8ba7aa550e0d9aefc
| 6,921
|
py
|
Python
|
reformer_fastai/configs.py
|
tyoc213-contrib/reformer_fastai
|
a14de3d44e6983e34d462a3b4ec355436b701f9b
|
[
"MIT"
] | null | null | null |
reformer_fastai/configs.py
|
tyoc213-contrib/reformer_fastai
|
a14de3d44e6983e34d462a3b4ec355436b701f9b
|
[
"MIT"
] | null | null | null |
reformer_fastai/configs.py
|
tyoc213-contrib/reformer_fastai
|
a14de3d44e6983e34d462a3b4ec355436b701f9b
|
[
"MIT"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/21_experiment-configs.ipynb (unless otherwise specified).
__all__ = ['update_sig', 'ConfigBase', 'SyntheticConfig', 'TransformerLMConfigEnwik8', 'ReversibleLMConfigEnwik8',
'NHashesConfig', 'NLayersConfig']
# Cell
from fastcore.all import *
from fastai.basics import *
from .core import *
from .transformer import *
from .reformer import *
import json
from inspect import signature, Parameter
# Cell
def _dummy(): return
# Cell
def update_sig(d):
"Update signature of `f` from dict `d`"
d = {k:Parameter(k, Parameter.KEYWORD_ONLY, default=v) for k,v in d.items()}
def _f(f):
sig = signature(f)
sigd = dict(sig.parameters)
sigd.pop('kwargs')
sigd.update(d)
f.__signature__ = sig.replace(parameters=sigd.values())
return f
return _f
# Cell
class ConfigBase:
"Base class for Configs"
_d:dict = None
_model = _dummy
def __init__(self, *, verbose=False, warn=True, **kwargs):
self.validate()
for k,v in kwargs.items():
if k in self._d:
self._d[k]=v
if verbose: print(f'Setting `{k}` = {v}')
elif warn: print(f'Parameter `{k}` is not accepted by {self._model.__name__}. Skipped')
def validate(self):
assert exists(self._d), "_d missing. You might want to provide defaults for config"
assert self._model is not _dummy, "_model missing. Provide a model class"
def validate_arg(self, k):
assert k in self._d.keys(), f"{self._model.__name__} does not accept `{k}` argument"
def __getattr__(self, k):
try:
res = self._d[k]
except KeyError:
raise AttributeError(f"{type(self).__name__} does not have attribute `{k}`")
return res
def __setattr__(self, k, v):
self.validate_arg(k)
self._d[k] = v
def __getitem__(self, k):
return self._d[k]
def __setitem__(self, k, v):
self.validate_arg(k)
self._d[k] = v
def __repr__(self):
s = f"{self._model.__name__} config \n" + '-'*20
s += ''.join(f'\n{k:16}{v}' for k,v in self._d.items())
return s
def dict(self): return self._d
def save(self, fn, add_tstmp=False):
os.makedirs('exp_configs', exist_ok=True)
if add_tstmp:
tstmp = time.strftime("_%d_%m_%Y_%H:%M", time.gmtime())
fn += tstmp
with open(f'exp_configs/{fn}.json', 'w') as f:
json.dump(self.dict(), f)
@classmethod
def from_file(cls, fn):
with open(f'exp_configs/{fn}.json') as f:
d = json.load(f)
return cls(**d)
# Cell
class SyntheticConfig(ConfigBase):
"""
Config for Synthetic Experiment.
See https://arampacha.github.io/reformer_fastai/experiment.synthetic-task.html for details
"""
_model = LSHLM
_d = {
'vocab_sz':128,
'd_model':256,
'n_layers':1,
'n_heads':4,
'd_ff':256,
'attn_dropout':0.0,
'ff_dropout':0.0,
'emb_dropout':0.0,
'tie_weights':True,
'causal':True,
'pos_enc':'absolute',
'max_seq_len':1024,
'axial_shape':None,
'axial_emb_dims':None,
'pad_idx':None,
'prenorm':False,
'attn_bias':False,
'bucket_size':64,
'use_lsh':True,
'n_hashes':4,
'seed':123,
}
@update_sig(_d)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Cell
class TransformerLMConfigEnwik8(ConfigBase):
"""
Config for enwik8 Experiment.
See https://arampacha.github.io/reformer_fastai/experiment.enwik8-baseline.html for details
"""
_model = TransformerLM
_d = {
'vocab_sz':256,
'd_model':1024,
'n_layers':3,
'n_heads':8,
'd_ff':4096,
'attn_dropout':0.1,
'ff_dropout':0.1,
'emb_dropout':0.1,
'tie_weights':True,
'causal':True,
'pos_enc':'axial',
'max_seq_len':2048,
'axial_shape':(64,32),
'axial_emb_dims':None,
'pad_idx':None,
'prenorm':False,
'attn_bias':False,
'shared_qk':False,
}
@update_sig(_d)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Cell
class ReversibleLMConfigEnwik8(ConfigBase):
"""
Config for enwik8 Experiment.
See https://arampacha.github.io/reformer_fastai/experiment.enwik8-reversible.html for details
"""
_model = ReversibleLM
_d = {
'vocab_sz':256,
'd_model':1024,
'n_layers':3,
'n_heads':8,
'd_ff':4096,
'attn_dropout':0.1,
'ff_dropout':0.1,
'emb_dropout':0.1,
'tie_weights':True,
'causal':True,
'pos_enc':'axial',
'max_seq_len':2048,
'axial_shape':(64,32),
'axial_emb_dims':None,
'pad_idx':None,
'prenorm':False,
'attn_bias':False,
'rev_thres':0,
}
@update_sig(_d)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Cell
class NHashesConfig(ConfigBase):
"""
Config for evaluating performance as function of `n_hashes`.
See https://arampacha.github.io/reformer_fastai/experiment.enwik8-n_hashes.html for details
"""
_model = LSHLM
_d = {
'vocab_sz':256,
'd_model':1024,
'n_layers':3,
'n_heads':8,
'd_ff':4096,
'attn_dropout':0.1,
'ff_dropout':0.1,
'emb_dropout':0.1,
'tie_weights':True,
'causal':True,
'pos_enc':'axial',
'max_seq_len':4096,
'axial_shape':None,
'axial_emb_dims':None,
'pad_idx':None,
'prenorm':False,
'attn_bias':False,
'bucket_size':64,
'use_lsh':True,
'n_hashes':2,
'seed':842,
}
@update_sig(_d)
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Cell
class NLayersConfig(ConfigBase):
"""
Config for evaluating performance as function of `n_layers`.
See https://arampacha.github.io/reformer_fastai/experiment.enwik8-n_layers.html for details
"""
_model = ReformerLM
_d = {
'vocab_sz':256,
'd_model':1024,
'n_layers':3,
'n_heads':8,
'd_ff':4096,
'ff_chunks':64,
'attn_dropout':0.1,
'ff_dropout':0.1,
'emb_dropout':0.1,
'tie_weights':True,
'causal':True,
'pos_enc':'axial',
'max_seq_len':2**15,
'axial_shape':None,
'axial_emb_dims':None,
'pad_idx':None,
'prenorm':False,
'attn_bias':False,
'bucket_size':64,
'use_lsh':True,
'n_hashes':8,
'rev_thres':0,
'seed':842,
}
@update_sig(_d)
def __init__(self, **kwargs):
super().__init__(**kwargs)
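# A minimal usage sketch, not part of the original module: configs accept
# keyword overrides, support dict-style access, and can be round-tripped
# through JSON files under exp_configs/.
if __name__ == '__main__':
    config = SyntheticConfig(n_hashes=8, use_lsh=True, verbose=True)
    print(config)                  # tabular summary of all parameters
    print(config['n_hashes'])      # 8
    config.save('synthetic_lsh8')  # writes exp_configs/synthetic_lsh8.json
    restored = SyntheticConfig.from_file('synthetic_lsh8')
    assert restored.dict() == config.dict()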
| 26.825581
| 114
| 0.566681
|
ac5ea71ba02662a1dfe7807b8b7abb271cd24f36
| 869
|
py
|
Python
|
src/user/models.py
|
mohdfayed/blog-django-ar
|
d966fe45dfabf9e757fe04ba406a98b2d699dbe9
|
[
"bzip2-1.0.6"
] | null | null | null |
src/user/models.py
|
mohdfayed/blog-django-ar
|
d966fe45dfabf9e757fe04ba406a98b2d699dbe9
|
[
"bzip2-1.0.6"
] | 9
|
2021-03-18T23:57:19.000Z
|
2022-03-12T00:16:13.000Z
|
src/user/models.py
|
mohdfayed/blog-django-ar
|
d966fe45dfabf9e757fe04ba406a98b2d699dbe9
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from PIL import Image # PIL is Pillow library
class Profile(models.Model):
image = models.ImageField(default='avatar.png', upload_to='profile_pics')
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return '{} profile.'.format(self.user.username)
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
img = Image.open(self.image.path)
if img.width > 300 or img.height > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.image.path)
def create_profile(sender, **kwarg):
if kwarg['created']:
Profile.objects.create(user=kwarg['instance'])
post_save.connect(create_profile, sender=User)
| 28.966667
| 77
| 0.675489
|
6a48c7735eac0a7bf3a8343830301d89450940fb
| 1,120
|
py
|
Python
|
str/isValid.py
|
hikair/leetcode_py
|
f917fd619fa25960e5d046aa512ac0d582f2084a
|
[
"Apache-2.0"
] | null | null | null |
str/isValid.py
|
hikair/leetcode_py
|
f917fd619fa25960e5d046aa512ac0d582f2084a
|
[
"Apache-2.0"
] | null | null | null |
str/isValid.py
|
hikair/leetcode_py
|
f917fd619fa25960e5d046aa512ac0d582f2084a
|
[
"Apache-2.0"
] | null | null | null |
# Valid Parentheses
# https://leetcode-cn.com/problems/valid-parentheses/
class Solution:
# Method 1
def isValid2(self, s):
stack = Stack()
for i in s:
if stack.isEmpty() == False and ord(i) - ord(stack.peek()) in [1, 2]:
stack.pop()
else:
stack.push(i)
return stack.isEmpty()
# Method 2
def isValid(self, s):
stack = []
for i in s:
if stack and stack[-1] + i in ['()', '[]', '{}']:
stack.pop()
else:
stack.append(i)
return len(stack) == 0
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[-1]
s = Solution()
print(s.isValid('"(){}}{"')) # False
print(s.isValid('((')) # False
print(s.isValid('()')) # True
print(s.isValid('()[]{}')) # True
print(s.isValid('(]')) # False
print(s.isValid('([)]')) # False
print(s.isValid('{[]}')) # True
| 22.857143
| 81
| 0.491071
|
9a8576f070d9500958e0bee863d55041cb274867
| 3,649
|
py
|
Python
|
samples/client/petstore/python/petstore_api/models/array_of_number_only.py
|
VinayaSathyanarayana/swagger-codegen
|
dd1ed1231884f08804fd5726ab5beabac59d7abd
|
[
"Apache-2.0"
] | 1
|
2017-10-17T09:53:15.000Z
|
2017-10-17T09:53:15.000Z
|
samples/client/petstore/python/petstore_api/models/array_of_number_only.py
|
VinayaSathyanarayana/swagger-codegen
|
dd1ed1231884f08804fd5726ab5beabac59d7abd
|
[
"Apache-2.0"
] | null | null | null |
samples/client/petstore/python/petstore_api/models/array_of_number_only.py
|
VinayaSathyanarayana/swagger-codegen
|
dd1ed1231884f08804fd5726ab5beabac59d7abd
|
[
"Apache-2.0"
] | 3
|
2018-09-03T12:58:01.000Z
|
2021-02-19T06:00:30.000Z
|
# coding: utf-8
"""
Swagger Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
OpenAPI spec version: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ArrayOfNumberOnly(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, array_number=None):
"""
ArrayOfNumberOnly - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'array_number': 'list[float]'
}
self.attribute_map = {
'array_number': 'ArrayNumber'
}
self._array_number = array_number
@property
def array_number(self):
"""
Gets the array_number of this ArrayOfNumberOnly.
:return: The array_number of this ArrayOfNumberOnly.
:rtype: list[float]
"""
return self._array_number
@array_number.setter
def array_number(self, array_number):
"""
Sets the array_number of this ArrayOfNumberOnly.
:param array_number: The array_number of this ArrayOfNumberOnly.
:type: list[float]
"""
self._array_number = array_number
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
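# A minimal usage sketch, not part of the original generated module.
if __name__ == '__main__':
    numbers = ArrayOfNumberOnly(array_number=[1.5, 2.0, 3.25])
    print(numbers.to_dict())  # {'array_number': [1.5, 2.0, 3.25]}
    print(numbers == ArrayOfNumberOnly(array_number=[1.5, 2.0, 3.25]))  # True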
| 28.732283
| 160
| 0.583722
|
717dd6fca421bf2f99cd398c847f6532933c834c
| 1,048
|
py
|
Python
|
src/PyAutoMaker/input.py
|
boa9448/PyAutoMaker
|
146ca57aeec999e2663c124ca5c189aabf066b7f
|
[
"MIT"
] | 6
|
2022-03-15T08:51:32.000Z
|
2022-03-30T05:07:34.000Z
|
src/PyAutoMaker/input.py
|
boa9448/PyAutoMaker
|
146ca57aeec999e2663c124ca5c189aabf066b7f
|
[
"MIT"
] | null | null | null |
src/PyAutoMaker/input.py
|
boa9448/PyAutoMaker
|
146ca57aeec999e2663c124ca5c189aabf066b7f
|
[
"MIT"
] | null | null | null |
from typing import Union
from .input_base import *
from .arduino import ArduinoUtil
from .class_dd import DDUtil
class InputUtil:
def __init__(self, backend : Union[ArduinoUtil, DDUtil], args : tuple) -> None:
self.backend = backend(*args)
def key(self, key_code : int, key_status : int) -> None:
return self.backend.key(key_code, key_status)
def key_press(self, key_code : int) -> None:
return self.backend.key_press(key_code)
def key_release(self, key_code : int) -> None:
return self.backend.key_release(key_code)
def move(self, x : int , y : int, relative : bool) -> None:
return self.backend.move(x, y, relative)
def btn(self, button_code : int , button_status : int) -> None:
return self.backend.btn(button_code, button_status)
def btn_press(self, button_code : int) -> None:
self.backend.btn_press(button_code)
def btn_release(self, button_code : int) -> None:
self.backend.btn_release(button_code)
if __name__ == "__main__":
pass
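# A minimal usage sketch, not part of the original module, kept as comments
# because the backend constructor arguments are assumptions ("dd.dll" is a
# placeholder for whatever DDUtil actually expects):
#
# input_util = InputUtil(DDUtil, args=("dd.dll",))
# input_util.key_press(65)                   # press the 'A' key
# input_util.key_release(65)
# input_util.move(100, 200, relative=False)  # absolute cursor move
# input_util.btn_press(1)
# input_util.btn_release(1)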
| 30.823529
| 83
| 0.673664
|