hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
75a987c5360234a08542c4c096ae81174a85b5c5 | 29,833 | py | Python | plotly/graph_objs/layout/_updatemenu.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/graph_objs/layout/_updatemenu.py | paulamool/plotly.py | 6121ac1f324e247e4e4b2964d65d7393377777c0 | [
"MIT"
] | 1 | 2020-12-15T16:56:11.000Z | 2020-12-15T16:56:11.000Z | plotly/graph_objs/layout/_updatemenu.py | skeptycal/plotly.py | 2e5bf6e2f7c213295c405ece3e859f4d3f8030d1 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | from plotly.basedatatypes import BaseLayoutHierarchyType
import copy
class Updatemenu(BaseLayoutHierarchyType):
# active
# ------
@property
def active(self):
"""
Determines which button (by index starting from 0) is
considered active.
The 'active' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
Returns
-------
int
"""
return self['active']
@active.setter
def active(self, val):
self['active'] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the update menu buttons.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bgcolor']
@bgcolor.setter
def bgcolor(self, val):
self['bgcolor'] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the update menu.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['bordercolor']
@bordercolor.setter
def bordercolor(self, val):
self['bordercolor'] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the update menu.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self['borderwidth']
@borderwidth.setter
def borderwidth(self, val):
self['borderwidth'] = val
# buttons
# -------
@property
def buttons(self):
"""
The 'buttons' property is a tuple of instances of
Button that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.updatemenu.Button
- A list or tuple of dicts of string/value properties that
will be passed to the Button constructor
Supported dict properties:
args
Sets the arguments values to be passed to the
Plotly method set in `method` on click.
execute
When true, the API method is executed. When
false, all other behaviors are the same and
command execution is skipped. This may be
useful when hooking into, for example, the
`plotly_buttonclicked` method and executing the
API command manually without losing the benefit
of the updatemenu automatically binding to the
state of the plot through the specification of
`method` and `args`.
label
Sets the text label to appear on the button.
method
Sets the Plotly method to be called on click.
If the `skip` method is used, the API
updatemenu will function as normal but will
perform no API calls and will not bind
automatically to state updates. This may be
used to create a component interface and attach
to updatemenu events manually via JavaScript.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
visible
Determines whether or not this button is
visible.
Returns
-------
tuple[plotly.graph_objs.layout.updatemenu.Button]
"""
return self['buttons']
@buttons.setter
def buttons(self, val):
self['buttons'] = val
# direction
# ---------
@property
def direction(self):
"""
Determines the direction in which the buttons are laid out,
whether in a dropdown menu or a row/column of buttons. For
`left` and `up`, the buttons will still appear in left-to-right
or top-to-bottom order respectively.
The 'direction' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'up', 'down']
Returns
-------
Any
"""
return self['direction']
@direction.setter
def direction(self, val):
self['direction'] = val
# font
# ----
@property
def font(self):
"""
Sets the font of the update menu button text.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.layout.updatemenu.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.updatemenu.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['name']
@name.setter
def name(self, val):
self['name'] = val
# pad
# ---
@property
def pad(self):
"""
Sets the padding around the buttons or dropdown menu.
The 'pad' property is an instance of Pad
that may be specified as:
- An instance of plotly.graph_objs.layout.updatemenu.Pad
- A dict of string/value properties that will be passed
to the Pad constructor
Supported dict properties:
b
The amount of padding (in px) along the bottom
of the component.
l
The amount of padding (in px) on the left side
of the component.
r
The amount of padding (in px) on the right side
of the component.
t
The amount of padding (in px) along the top of
the component.
Returns
-------
plotly.graph_objs.layout.updatemenu.Pad
"""
return self['pad']
@pad.setter
def pad(self, val):
self['pad'] = val
# showactive
# ----------
@property
def showactive(self):
"""
Highlights active dropdown item or active button if true.
The 'showactive' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showactive']
@showactive.setter
def showactive(self, val):
self['showactive'] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['templateitemname']
@templateitemname.setter
def templateitemname(self, val):
self['templateitemname'] = val
# type
# ----
@property
def type(self):
"""
Determines whether the buttons are accessible via a dropdown
menu or whether the buttons are stacked horizontally or
vertically
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['dropdown', 'buttons']
Returns
-------
Any
"""
return self['type']
@type.setter
def type(self, val):
self['type'] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not the update menu is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['visible']
@visible.setter
def visible(self, val):
self['visible'] = val
# x
# -
@property
def x(self):
"""
Sets the x position (in normalized coordinates) of the update
menu.
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self['x']
@x.setter
def x(self, val):
self['x'] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets the update menu's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the range selector.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'left', 'center', 'right']
Returns
-------
Any
"""
return self['xanchor']
@xanchor.setter
def xanchor(self, val):
self['xanchor'] = val
# y
# -
@property
def y(self):
"""
Sets the y position (in normalized coordinates) of the update
menu.
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self['y']
@y.setter
def y(self, val):
self['y'] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets the update menu's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the range selector.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'top', 'middle', 'bottom']
Returns
-------
Any
"""
return self['yanchor']
@yanchor.setter
def yanchor(self, val):
self['yanchor'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'layout'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
active
Determines which button (by index starting from 0) is
considered active.
bgcolor
Sets the background color of the update menu buttons.
bordercolor
Sets the color of the border enclosing the update menu.
borderwidth
Sets the width (in px) of the border enclosing the
update menu.
buttons
plotly.graph_objs.layout.updatemenu.Button instance or
dict with compatible properties
direction
Determines the direction in which the buttons are laid
out, whether in a dropdown menu or a row/column of
buttons. For `left` and `up`, the buttons will still
appear in left-to-right or top-to-bottom order
respectively.
font
Sets the font of the update menu button text.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pad
Sets the padding around the buttons or dropdown menu.
showactive
Highlights active dropdown item or active button if
true.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Determines whether the buttons are accessible via a
dropdown menu or whether the buttons are stacked
horizontally or vertically
visible
Determines whether or not the update menu is visible.
x
Sets the x position (in normalized coordinates) of the
update menu.
xanchor
Sets the update menu's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the range selector.
y
Sets the y position (in normalized coordinates) of the
update menu.
yanchor
Sets the update menu's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the range selector.
"""
def __init__(
self,
arg=None,
active=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
buttons=None,
direction=None,
font=None,
name=None,
pad=None,
showactive=None,
templateitemname=None,
type=None,
visible=None,
x=None,
xanchor=None,
y=None,
yanchor=None,
**kwargs
):
"""
Construct a new Updatemenu object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.layout.Updatemenu
active
Determines which button (by index starting from 0) is
considered active.
bgcolor
Sets the background color of the update menu buttons.
bordercolor
Sets the color of the border enclosing the update menu.
borderwidth
Sets the width (in px) of the border enclosing the
update menu.
buttons
plotly.graph_objs.layout.updatemenu.Button instance or
dict with compatible properties
direction
Determines the direction in which the buttons are laid
out, whether in a dropdown menu or a row/column of
buttons. For `left` and `up`, the buttons will still
appear in left-to-right or top-to-bottom order
respectively.
font
Sets the font of the update menu button text.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pad
Sets the padding around the buttons or dropdown menu.
showactive
Highlights active dropdown item or active button if
true.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Determines whether the buttons are accessible via a
dropdown menu or whether the buttons are stacked
horizontally or vertically
visible
Determines whether or not the update menu is visible.
x
Sets the x position (in normalized coordinates) of the
update menu.
xanchor
Sets the update menu's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the range selector.
y
Sets the y position (in normalized coordinates) of the
update menu.
yanchor
Sets the update menu's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the range selector.
Returns
-------
Updatemenu
"""
super(Updatemenu, self).__init__('updatemenus')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Updatemenu
constructor must be a dict or
an instance of plotly.graph_objs.layout.Updatemenu"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop('skip_invalid', False)
# Import validators
# -----------------
from plotly.validators.layout import (updatemenu as v_updatemenu)
# Initialize validators
# ---------------------
self._validators['active'] = v_updatemenu.ActiveValidator()
self._validators['bgcolor'] = v_updatemenu.BgcolorValidator()
self._validators['bordercolor'] = v_updatemenu.BordercolorValidator()
self._validators['borderwidth'] = v_updatemenu.BorderwidthValidator()
self._validators['buttons'] = v_updatemenu.ButtonsValidator()
self._validators['direction'] = v_updatemenu.DirectionValidator()
self._validators['font'] = v_updatemenu.FontValidator()
self._validators['name'] = v_updatemenu.NameValidator()
self._validators['pad'] = v_updatemenu.PadValidator()
self._validators['showactive'] = v_updatemenu.ShowactiveValidator()
self._validators['templateitemname'
] = v_updatemenu.TemplateitemnameValidator()
self._validators['type'] = v_updatemenu.TypeValidator()
self._validators['visible'] = v_updatemenu.VisibleValidator()
self._validators['x'] = v_updatemenu.XValidator()
self._validators['xanchor'] = v_updatemenu.XanchorValidator()
self._validators['y'] = v_updatemenu.YValidator()
self._validators['yanchor'] = v_updatemenu.YanchorValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('active', None)
self['active'] = active if active is not None else _v
_v = arg.pop('bgcolor', None)
self['bgcolor'] = bgcolor if bgcolor is not None else _v
_v = arg.pop('bordercolor', None)
self['bordercolor'] = bordercolor if bordercolor is not None else _v
_v = arg.pop('borderwidth', None)
self['borderwidth'] = borderwidth if borderwidth is not None else _v
_v = arg.pop('buttons', None)
self['buttons'] = buttons if buttons is not None else _v
_v = arg.pop('direction', None)
self['direction'] = direction if direction is not None else _v
_v = arg.pop('font', None)
self['font'] = font if font is not None else _v
_v = arg.pop('name', None)
self['name'] = name if name is not None else _v
_v = arg.pop('pad', None)
self['pad'] = pad if pad is not None else _v
_v = arg.pop('showactive', None)
self['showactive'] = showactive if showactive is not None else _v
_v = arg.pop('templateitemname', None)
self['templateitemname'
] = templateitemname if templateitemname is not None else _v
_v = arg.pop('type', None)
self['type'] = type if type is not None else _v
_v = arg.pop('visible', None)
self['visible'] = visible if visible is not None else _v
_v = arg.pop('x', None)
self['x'] = x if x is not None else _v
_v = arg.pop('xanchor', None)
self['xanchor'] = xanchor if xanchor is not None else _v
_v = arg.pop('y', None)
self['y'] = y if y is not None else _v
_v = arg.pop('yanchor', None)
self['yanchor'] = yanchor if yanchor is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 36.337393 | 86 | 0.56233 |
0432546c4baefb8c14598dd3f7f52d79b8212584 | 740 | py | Python | displayMenu.py | s18mbbustorff/AI_Hanabi_Assignment | 651699bdd77f10e72b49cdb2c62faeca585bdfa3 | [
"CNRI-Python"
] | null | null | null | displayMenu.py | s18mbbustorff/AI_Hanabi_Assignment | 651699bdd77f10e72b49cdb2c62faeca585bdfa3 | [
"CNRI-Python"
] | null | null | null | displayMenu.py | s18mbbustorff/AI_Hanabi_Assignment | 651699bdd77f10e72b49cdb2c62faeca585bdfa3 | [
"CNRI-Python"
] | null | null | null | def displayMenu(options):
'''
DISPLAYMENU Displays a menu of options, ask the user to choose an item
and returns the number of the menu item chosen.
* Input arguments: Menu options (array of strings)
* Output arguments: Chosen option (integer)
* Author: Mikkel N. Schmidt, mnsc@dtu.dk, 2015
'''
import numpy as np
from inputNumber import inputNumber
# Display menu options
for i in range(len(options)):
print("{:d}. {:s}".format(i+1, options[i]))
# Get a valid menu choice
choice = 0
while not(np.any(choice == np.arange(len(options))+1)):
choice = inputNumber("Please choose a menu item: ")
return choice
| 30.833333 | 76 | 0.601351 |
011e61b0464c986e5c77dd8886e8c6ac770566a5 | 222 | py | Python | restauration_policies.py | ThePernalonga/python-simple-anycast-wdm-simulator | 874a5aaa31ab14949144a88d47c0aa0aeb7547d1 | [
"MIT"
] | null | null | null | restauration_policies.py | ThePernalonga/python-simple-anycast-wdm-simulator | 874a5aaa31ab14949144a88d47c0aa0aeb7547d1 | [
"MIT"
] | null | null | null | restauration_policies.py | ThePernalonga/python-simple-anycast-wdm-simulator | 874a5aaa31ab14949144a88d47c0aa0aeb7547d1 | [
"MIT"
] | null | null | null | import abc
import numpy as np
import logging
class RoutingPolicy(abc.ABC):
def __init__(self):
self.env = None
self.name = None
@abc.abstractmethod
def restore(self, service):
pass
| 13.875 | 31 | 0.635135 |
3476ae1a06cd6022552dd64d76647ccd7498a122 | 580 | py | Python | dictionaries/bonus_scoring_system.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | dictionaries/bonus_scoring_system.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | dictionaries/bonus_scoring_system.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | 1 | 2022-01-14T17:12:44.000Z | 2022-01-14T17:12:44.000Z | import math
students_count = int(input())
lectures_count = int(input())
additional_bonus = int(input())
max_bonus = 0
max_bonus_attendances = 0
if lectures_count > 0 and students_count > 0:
for student in range(students_count):
attendances = int(input())
total_bonus = attendances / lectures_count * (5 + additional_bonus)
if total_bonus >= max_bonus:
max_bonus = total_bonus
max_bonus_attendances = attendances
print(f'Max Bonus: {math.ceil(max_bonus)}.')
print(f'The student has attended {max_bonus_attendances} lectures.') | 32.222222 | 75 | 0.705172 |
dea5d0e08be5d43fa23667d9b0d439f30f199b99 | 18,989 | py | Python | syn/base/b/base.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 1 | 2021-07-15T08:55:12.000Z | 2021-07-15T08:55:12.000Z | syn/base/b/base.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 7 | 2021-01-07T23:51:57.000Z | 2021-12-13T19:50:57.000Z | syn/base/b/base.py | mbodenhamer/syn | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | [
"MIT"
] | 2 | 2016-07-11T08:46:31.000Z | 2017-12-13T13:30:51.000Z | import os
import six
from copy import copy
from jinja2 import Template
from operator import itemgetter
from collections import Mapping
from .meta import Attr, Attrs, Meta, create_hook, preserve_attr_data, \
pre_create_hook
from syn.base_utils import AttrDict, ReflexiveDict, message, get_mod, \
get_typename, SeqDict, callables, istr, rgetattr, get_fullname
from syn.types import Type, pairs, estr, DiffersAtAttribute, hashable, \
SER_KEYS, serialize
#-------------------------------------------------------------------------------
# Templates
DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATES = os.path.join(DIR, 'templates')
with open(os.path.join(TEMPLATES, 'class.j2'), 'r') as f:
CLASS_TEMPLATE = Template(f.read())
CLASS_TEMPLATE.environment.trim_blocks = True
CLASS_TEMPLATE.environment.lstrip_blocks = True
#-------------------------------------------------------------------------------
# Hook Decorators
class _InitHook(object):
'''Dummy class to ensure that callable is really an init hook.'''
pass
class _CoerceHook(object):
'''Dummy class to ensure that callable is really a coerce hook.'''
pass
class _SetstateHook(object):
'''Dummy class to ensure that callable is really a setstate hook.'''
pass
def init_hook(f):
f.is_init_hook = _InitHook
return f
def coerce_hook(f):
f.is_coerce_hook = _CoerceHook
return f
def setstate_hook(f):
f.is_setstate_hook = _SetstateHook
return f
#-------------------------------------------------------------------------------
# Base
@six.add_metaclass(Meta)
class Base(object):
_attrs = Attrs()
_aliases = SeqDict()
_groups = ReflexiveDict('_all',
'_internal',
'copy_exclude',
'copy_copy',
'eq_exclude',
'hash_exclude',
'generate_exclude',
'getstate_exclude',
'repr_exclude',
'str_exclude',
'update_trigger')
_opts = AttrDict(args = (),
autodoc = True,
coerce_args = False,
id_equality = False,
init_validate = False,
make_hashable = False,
make_type_object = True,
optional_none = False,
repr_template = '',
register_subclasses = False)
_seq_opts = SeqDict(coerce_hooks = (),
init_hooks = (),
init_order = (),
create_hooks = (),
setstate_hooks = (),
metaclass_lookup = ('coerce_hooks',
'init_hooks',
'create_hooks',
'setstate_hooks'))
def __init__(self, *args, **kwargs):
_args = self._opts.args
for key in self._attrs.defaults:
if key in _args:
if len(args) > _args.index(key):
continue # This value has been supplied as a non-kw arg
if key not in kwargs:
kwargs[key] = self._attrs.defaults[key]
if _args:
if len(args) > len(_args):
raise TypeError('__init__ takes up to {} positional arguments '
'({} given)'.format(len(_args), len(args)))
for k, arg in enumerate(args):
key = _args[k]
if key in kwargs:
raise TypeError('__init__ got multiple values for argument '
'{}'.format(key))
kwargs[_args[k]] = arg
if self._opts.coerce_args:
for key, value in list(kwargs.items()):
typ = self._attrs.types[key]
if not typ.query(value):
kwargs[key] = typ.coerce(value)
if self._opts.optional_none:
for attr in self._attrs.optional:
if attr not in kwargs:
kwargs[attr] = None
if self._attrs.call:
for attr, call in self._attrs.call.items():
value = kwargs.get(attr, None)
if value is None:
kwargs[attr] = call()
else:
kwargs[attr] = call(value)
for attr, val in kwargs.items():
setattr(self, attr, val)
if self._seq_opts.init_order:
for attr in self._seq_opts.init_order:
if not hasattr(self, attr):
setattr(self, attr, self._attrs.init[attr](self))
if self._attrs.init:
for attr in (set(self._attrs.init) -
set(self._seq_opts.init_order)):
if not hasattr(self, attr):
setattr(self, attr, self._attrs.init[attr](self))
if self._data.init_hooks:
for hook in self._data.init_hooks:
hook(self)
if self._opts.init_validate:
self.validate()
def __eq__(self, other):
if self._opts.id_equality:
return self is other
if type(self) is not type(other):
return False
dct1 = self.to_dict(exclude=['eq_exclude'])
dct2 = other.to_dict(exclude=['eq_exclude'])
return dct1 == dct2
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __getstate__(self):
return self.to_dict(exclude=['getstate_exclude'])
def __repr__(self):
if self._opts.repr_template:
return self._repr_template()
out = '<' + get_mod(self) + '.' + get_typename(self) + ' '
out += str(self.to_dict(exclude=['repr_exclude']))
out += '>'
return out
def __setstate__(self, state):
for attr, val in state.items():
setattr(self, attr, val)
if self._data.setstate_hooks:
for hook in self._data.setstate_hooks:
hook(self)
def __str__(self):
return self.istr()
@classmethod
def coerce(cls, value, **kwargs):
if isinstance(value, Mapping):
dct = cls._dict_from_mapping(value)
else:
return cls(value)
if cls._data.coerce_hooks:
for hook in cls._data.coerce_hooks:
hook(dct)
if cls._opts.coerce_args:
return cls(**dct)
types = cls._attrs.types
attrs = {attr: types[attr].coerce(val, **kwargs)
for attr, val in dct.items()}
return cls(**attrs)
def __copy__(self, **kwargs):
kwargs['exclude'] = kwargs.get('exclude', []) + ['copy_exclude']
dct = self.to_dict(**kwargs)
for attr in self._groups.copy_copy:
if attr in dct:
dct[attr] = copy(dct[attr])
dct['_from_copy_'] = True
return type(self)(**dct)
def copy(self, **kwargs):
return self.__copy__(**kwargs)
@classmethod
@create_hook
def _create_init_hooks(cls):
hooks = cls._find_hooks('is_init_hook', _InitHook)
cls._data.init_hooks = list(cls._data.init_hooks) + hooks
@classmethod
@create_hook
def _create_coerce_hooks(cls):
hooks = cls._find_hooks('is_coerce_hook', _CoerceHook)
cls._data.coerce_hooks = list(cls._data.coerce_hooks) + hooks
@classmethod
@create_hook
def _create_setstate_hooks(cls):
hooks = cls._find_hooks('is_setstate_hook', _SetstateHook)
cls._data.setstate_hooks = list(cls._data.setstate_hooks) + hooks
@classmethod
def _dict_from_mapping(cls, value):
return dict(value)
@classmethod
def _dict_from_object(cls, obj):
return {attr: getattr(obj, attr) for attr in cls._attrs.types
if hasattr(obj, attr)}
@classmethod
def _dict_from_sequence(cls, seq):
return {cls._opts.args[k]: val for k, val in enumerate(seq)}
@classmethod
def _enumeration_value(cls, x, **kwargs):
kwargs = {}
for attr, typ in cls._attrs.types.items():
if attr in cls._groups.generate_exclude:
continue
kwargs[attr] = typ.enumeration_value(x, **kwargs)
return cls(**kwargs)
@classmethod
def _find_hooks(cls, hook_attr, hook_type):
funcs = callables(cls)
return [f for f in funcs.values()
if getattr(f, hook_attr, None) is hook_type]
@classmethod
def from_mapping(cls, value):
return cls(**cls._dict_from_mapping(value))
@classmethod
def from_object(cls, obj):
return cls(**cls._dict_from_object(obj))
@classmethod
def from_sequence(cls, seq):
if len(seq) > len(cls._opts.args):
raise ValueError("More elements in sequence than in object "
"positional args")
return cls(**cls._dict_from_sequence(seq))
@classmethod
def _generate(cls, **kwargs_):
exclude = set(kwargs_.get('exclude', []))
exclude.update(cls._groups.generate_exclude)
attrs = kwargs_.get('attrs', {})
kwargs = {}
for attr, typ in cls._attrs.types.items():
if attr in exclude:
continue
if attr in attrs:
kwargs[attr] = attrs[attr]
else:
kwargs[attr] = typ.generate(**kwargs_)
return cls(**kwargs)
@classmethod
def _generate_documentation_signature(cls, attrs):
sig = get_typename(cls) + '('
strs = []
for attr in attrs:
obj = cls._attrs[attr]
s = attr
if obj.default:
s += '=' + str(obj.default)
if obj.optional:
s = '[' + s + ']'
strs.append(s)
strs.append('**kwargs')
sig += ', '.join(strs)
sig += ')'
return sig
@classmethod
def _generate_documentation_attrspec(cls, attrs):
specs = []
for attr in attrs:
obj = cls._attrs[attr]
spec = attr
if obj.optional:
spec += ' [**Optional**]'
if obj.default is not None:
spec += ' (*default* = {})'.format(obj.default)
spec += ': {}'.format(obj.type.rst())
if obj.doc:
spec += '\n '
spec += obj.doc
specs.append(spec)
return '\n'.join(specs)
@classmethod
def _generate_documentation_optspec(cls):
specs = []
for opt, val in sorted(cls._opts.items(), key=itemgetter(0)):
specs.append('* {}: {}'.format(opt, val))
for opt, val in sorted(cls._seq_opts.items(), key=itemgetter(0)):
specs.append('* {}: {}'.format(opt, val))
return '\n'.join(specs)
@classmethod
def _generate_documentation_aliasspec(cls):
specs = []
for attr, als in cls._aliases.items():
specs.append('* {}: {}'.format(attr, ', '.join(als)))
return '\n'.join(specs)
@classmethod
def _generate_documentation_groupspec(cls):
specs = []
for attr, group in cls._groups.items():
if group:
specs.append('* {}: {}'.format(attr, ', '.join(sorted(group))))
return '\n'.join(specs)
@classmethod
@create_hook
def _generate_documentation(cls):
if not cls._get_opt('autodoc', default=True):
return
if rgetattr(cls, '__init__.__func__', False) is False:
return
args = cls._get_opt('args', default=())
kw_attrs = cls._data.kw_attrs
data = {}
data['signature'] = cls._generate_documentation_signature(args)
data['doc'] = cls.__doc__ if cls.__doc__ else ''
if cls.__init__.__func__.__doc__:
data['doc'] += '\n\n' + cls.__init__.__func__.__doc__
data['attrspec'] = cls._generate_documentation_attrspec(args)
data['kwattrspec'] = cls._generate_documentation_attrspec(kw_attrs)
data['optspec'] = cls._generate_documentation_optspec()
data['aliasspec'] = cls._generate_documentation_aliasspec()
data['groupspec'] = cls._generate_documentation_groupspec()
doc = CLASS_TEMPLATE.render(data)
cls.__doc__ = doc
@classmethod
@create_hook
def _make_type_object(cls):
if cls._class_data.clsname == 'Base' or not cls._opts.make_type_object:
return
class BaseChildGeneratedType(BaseType):
type = cls
@classmethod
@create_hook
def _set_hash(cls):
if cls._get_opt('make_hashable', False):
def hashf(self):
return hash(self._hashable())
setattr(cls, '__hash__', hashf)
def _estr(self, **kwargs):
kwargs = {attr: estr(val) for attr, val in pairs(self, **kwargs)}
argstr = ','.join('{}={}'.format(attr, val) for attr, val in kwargs.items())
return '{}({})'.format(get_typename(self), argstr)
def _find_ne(self, other, func, **kwargs):
for attr, value in pairs(self, exclude=['eq_exclude']):
if not func(value, getattr(other, attr)):
return DiffersAtAttribute(self, other, attr)
def _hashable(self, **kwargs):
items = [hashable(val, **kwargs) for val in self.to_tuple(exclude=['hash_exclude'])]
items.insert(0, get_fullname(self))
return tuple(items)
def _istr_attrs(self, base, pretty, indent):
strs = []
attrs = self.to_dict(exclude=['str_exclude'])
for attr, val in sorted(attrs.items(),
key=lambda x: \
self._data.attr_display_order.index(x[0])):
start = '{} = '.format(attr)
val_indent = indent + len(start)
tmp = start + istr(val, pretty, val_indent)
strs.append(tmp)
return base.join(strs)
def istr(self, pretty=False, indent=0, toplevel=False):
'''Returns a string that, if evaluated, produces an equivalent object.'''
ret = '{}('.format(get_typename(self))
if pretty and toplevel and indent:
ret = (' ' * indent) + ret
base = ','
if pretty:
indent += len(ret)
base += '\n' + ' ' * indent
else:
base += ' '
ret += self._istr_attrs(base, pretty, indent) + ')'
return ret
def pretty(self, indent=0):
'''Returns a pretty-printed version if istr().'''
return self.istr(pretty=True, indent=indent, toplevel=True)
def _repr_template(self):
dct = self.to_dict()
dct['__name__'] = get_typename(self)
dct['__mod__'] = get_mod(self)
template = self._opts.repr_template
return template.format(**dct)
def _serialize(self, dct, **kwargs):
kwargs = dict(kwargs)
exclude = list(kwargs.get('exclude', []))
if 'getstate_exclude' not in exclude:
exclude += ['getstate_exclude']
kwargs['exclude'] = exclude
dct[SER_KEYS.kwargs] = {attr: serialize(value, **kwargs)
for attr, value in pairs(self, **kwargs)}
return dct
def to_dict(self, **kwargs):
'''Convert the object into a dict of its declared attributes.
May exclude certain attribute groups by listing them in exclude=[].
May include certain attribute groups (to the exclusion of all others) by listing them in include=[].
'''
return dict(pairs(self, **kwargs))
def to_tuple(self, **kwargs):
'''Convert the object into a tuple of its declared attribute values.
'''
values = [val for attr, val in pairs(self, **kwargs)]
return tuple(values)
def validate(self):
'''Raise an exception if the object is missing required attributes, or if the attributes are of an invalid type.
'''
optional = self._attrs.optional
optional_none = self._opts.optional_none
for attr, typ in self._attrs.types.items():
if not hasattr(self, attr):
if attr in optional:
continue
raise AttributeError('Required attribute {} not defined'.
format(attr))
val = getattr(self, attr)
if optional_none:
if attr in optional and val is None:
continue
res, e = typ.query_exception(val)
if not res:
raise TypeError('Validation error for attribute {}: {}'.
format(attr, message(e)))
#-------------------------------------------------------------------------------
# Type Registration
class BaseType(Type):
type = Base
def attrs(self, **kwargs):
exclude = kwargs.get('exclude', [])
include = kwargs.get('include', [])
if include and exclude:
raise TypeError('Cannot specify both include and exclude')
if exclude:
exclude = self.obj._groups.union(*exclude)
else:
exclude = set()
if include:
exclude = self.obj._groups.complement(*include)
return sorted(attr for attr in self.obj._attrs.types
if attr not in exclude and hasattr(self.obj, attr))
#-------------------------------------------------------------------------------
# Alternate Attribute Specification Mixin
class Harvester(object):
@pre_create_hook
def _harvest_attrs(clsdata):
dct = {}
clsdct = clsdata['dct']
attrs = clsdct.get('_attrs', {})
required = clsdct.get('required', {})
optional = clsdct.get('optional', {})
default = clsdct.get('default', {})
for attr in required:
typ = required[attr]
if attr in default:
dct[attr] = Attr(typ, optional=False, default=default[attr])
else:
dct[attr] = Attr(typ, optional=False)
for attr in optional:
typ = optional[attr]
if attr in default:
dct[attr] = Attr(typ, default=default[attr], optional=True)
else:
dct[attr] = Attr(typ, optional=True)
preserve_attr_data(attrs, dct)
attrs.update(dct)
clsdct['_attrs'] = attrs
#-------------------------------------------------------------------------------
# __all__
__all__ = ('Base', 'BaseType', 'init_hook', 'coerce_hook', 'setstate_hook',
'Harvester')
#-------------------------------------------------------------------------------
| 32.7962 | 120 | 0.531834 |
76a685b18c17d08d081f6853edd8523afe98e2c3 | 2,295 | py | Python | tests/load/test_load_hpo.py | Clinical-Genomics/scout | 1ec4daa76093c2ffa4655612b63d325970253f58 | [
"BSD-3-Clause"
] | 111 | 2015-01-15T11:53:20.000Z | 2022-03-26T19:55:24.000Z | tests/load/test_load_hpo.py | Clinical-Genomics/scout | 1ec4daa76093c2ffa4655612b63d325970253f58 | [
"BSD-3-Clause"
] | 2,995 | 2015-01-15T16:14:20.000Z | 2022-03-31T13:36:32.000Z | tests/load/test_load_hpo.py | Clinical-Genomics/scout | 1ec4daa76093c2ffa4655612b63d325970253f58 | [
"BSD-3-Clause"
] | 55 | 2015-05-31T19:09:49.000Z | 2021-11-01T10:50:31.000Z | from scout.load.hpo import load_disease_terms, load_hpo, load_hpo_terms
from scout.utils.handle import get_file_handle
def test_load_disease_terms(gene_database, genemap_file, hpo_disease_handle):
adapter = gene_database
alias_genes = adapter.genes_by_alias()
# GIVEN a populated database with genes and no disease terms
assert len([term for term in adapter.disease_terms()]) == 0
genemap_handle = get_file_handle(genemap_file)
# WHEN loading the disease terms
load_disease_terms(
adapter=adapter,
genemap_lines=genemap_handle,
genes=alias_genes,
hpo_disease_lines=hpo_disease_handle,
)
# THEN make sure that the disease terms are in the database
disease_objs = adapter.disease_terms()
assert len([disease for disease in disease_objs]) > 0
def test_load_hpo_terms(gene_database, hpo_terms_handle, hpo_disease_handle):
adapter = gene_database
alias_genes = adapter.genes_by_alias()
# GIVEN a populated database with genes but no hpo terms
assert len([term for term in adapter.hpo_terms()]) == 0
assert len([gene for gene in adapter.all_genes()]) > 0
# WHEN loading the hpo terms
load_hpo_terms(
adapter=adapter,
hpo_lines=hpo_terms_handle,
hpo_gene_lines=hpo_disease_handle,
alias_genes=alias_genes,
)
# THEN make sure that the disease terms are in the database
hpo_terms_objs = adapter.hpo_terms()
assert len([term for term in hpo_terms_objs]) > 0
def test_load_hpo(
gene_database,
hpo_terms_handle,
hpo_disease_handle,
genemap_file,
):
adapter = gene_database
# GIVEN a populated database with genes but no hpo terms
assert len([term for term in adapter.hpo_terms()]) == 0
genemap_handle = get_file_handle(genemap_file)
# WHEN loading the disease and hpo terms
load_hpo(
adapter=gene_database,
hpo_lines=hpo_terms_handle,
hpo_gene_lines=hpo_disease_handle,
disease_lines=genemap_handle,
)
# THEN make sure that the disease terms are in the database
hpo_terms_objs = adapter.hpo_terms()
disease_objs = adapter.disease_terms()
assert len([term for term in hpo_terms_objs]) > 0
assert len([disease for disease in disease_objs]) > 0
| 31.013514 | 77 | 0.71939 |
ae9ec21575d8f94ca48199a08adae33513d98910 | 22,916 | py | Python | tests_async/system/requests/test_download.py | renovate-bot/google-resumable-media-python | 1f01b88d0ce05ca561359de1ad89b47c6c60c9b7 | [
"Apache-2.0"
] | null | null | null | tests_async/system/requests/test_download.py | renovate-bot/google-resumable-media-python | 1f01b88d0ce05ca561359de1ad89b47c6c60c9b7 | [
"Apache-2.0"
] | null | null | null | tests_async/system/requests/test_download.py | renovate-bot/google-resumable-media-python | 1f01b88d0ce05ca561359de1ad89b47c6c60c9b7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import hashlib
import http.client
import io
import os
import asyncio
from google.auth._default_async import default_async
import google.auth.transport._aiohttp_requests as tr_requests
import multidict
import pytest
import google._async_resumable_media.requests as resumable_requests
from google.resumable_media import _helpers
import google._async_resumable_media.requests.download as download_mod
from google.resumable_media import common
from tests.system import utils
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(CURR_DIR, "..", "..", "data")
PLAIN_TEXT = "text/plain"
IMAGE_JPEG = "image/jpeg"
ENCRYPTED_ERR = b"The target object is encrypted by a customer-supplied encryption key."
NO_BODY_ERR = "The content for this response was already consumed"
NOT_FOUND_ERR = (
b"No such object: " + utils.BUCKET_NAME.encode("utf-8") + b"/does-not-exist.txt"
)
SIMPLE_DOWNLOADS = (resumable_requests.Download, resumable_requests.RawDownload)
@pytest.fixture(scope="session")
def event_loop(request):
"""Create an instance of the default event loop for each test session."""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
class CorruptingAuthorizedSession(tr_requests.AuthorizedSession):
"""A Requests Session class with credentials, which corrupts responses.
This class is used for testing checksum validation.
Args:
credentials (google.auth.credentials.Credentials): The credentials to
add to the request.
refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
that credentials should be refreshed and the request should be
retried.
max_refresh_attempts (int): The maximum number of times to attempt to
refresh the credentials and retry the request.
kwargs: Additional arguments passed to the :class:`requests.Session`
constructor.
"""
EMPTY_MD5 = base64.b64encode(hashlib.md5(b"").digest()).decode("utf-8")
crc32c = _helpers._get_crc32c_object()
crc32c.update(b"")
EMPTY_CRC32C = base64.b64encode(crc32c.digest()).decode("utf-8")
async def request(self, method, url, data=None, headers=None, **kwargs):
"""Implementation of Requests' request."""
response = await tr_requests.AuthorizedSession.request(
self, method, url, data=data, headers=headers, **kwargs
)
temp = multidict.CIMultiDict(response.headers)
temp[_helpers._HASH_HEADER] = "crc32c={},md5={}".format(
self.EMPTY_CRC32C, self.EMPTY_MD5
)
response._headers = temp
return response
def get_path(filename):
return os.path.realpath(os.path.join(DATA_DIR, filename))
ALL_FILES = (
{
"path": get_path("image1.jpg"),
"content_type": IMAGE_JPEG,
"md5": "1bsd83IYNug8hd+V1ING3Q==",
"crc32c": "YQGPxA==",
"slices": (
slice(1024, 16386, None), # obj[1024:16386]
slice(None, 8192, None), # obj[:8192]
slice(-256, None, None), # obj[-256:]
slice(262144, None, None), # obj[262144:]
),
},
{
"path": get_path("image2.jpg"),
"content_type": IMAGE_JPEG,
"md5": "gdLXJltiYAMP9WZZFEQI1Q==",
"crc32c": "sxxEFQ==",
"slices": (
slice(1024, 16386, None), # obj[1024:16386]
slice(None, 8192, None), # obj[:8192]
slice(-256, None, None), # obj[-256:]
slice(262144, None, None), # obj[262144:]
),
},
{
"path": get_path("file.txt"),
"content_type": PLAIN_TEXT,
"md5": "XHSHAr/SpIeZtZbjgQ4nGw==",
"crc32c": "MeMHoQ==",
"slices": (),
},
{
"path": get_path("gzipped.txt.gz"),
"uncompressed": get_path("gzipped.txt"),
"content_type": PLAIN_TEXT,
"md5": "KHRs/+ZSrc/FuuR4qz/PZQ==",
"crc32c": "/LIRNg==",
"slices": (),
"metadata": {"contentEncoding": "gzip"},
},
)
def get_contents_for_upload(info):
with open(info["path"], "rb") as file_obj:
return file_obj.read()
def get_contents(info):
full_path = info.get("uncompressed", info["path"])
with open(full_path, "rb") as file_obj:
return file_obj.read()
def get_raw_contents(info):
full_path = info["path"]
with open(full_path, "rb") as file_obj:
return file_obj.read()
def get_blob_name(info):
full_path = info.get("uncompressed", info["path"])
return os.path.basename(full_path)
async def delete_blob(transport, blob_name):
metadata_url = utils.METADATA_URL_TEMPLATE.format(blob_name=blob_name)
response = await transport.request("DELETE", metadata_url)
assert response.status == http.client.NO_CONTENT
@pytest.fixture(scope="module")
async def secret_file(authorized_transport, bucket):
blob_name = "super-seekrit.txt"
data = b"Please do not tell anyone my encrypted seekrit."
upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
headers = utils.get_encryption_headers()
upload = resumable_requests.SimpleUpload(upload_url, headers=headers)
response = await upload.transmit(authorized_transport, data, PLAIN_TEXT)
assert response.status == http.client.OK
yield blob_name, data, headers
await delete_blob(authorized_transport, blob_name)
# Transport that returns corrupt data, so we can exercise checksum handling.
@pytest.fixture(scope="module")
async def corrupting_transport():
credentials, _ = default_async(scopes=(utils.GCS_RW_SCOPE,))
yield CorruptingAuthorizedSession(credentials)
@pytest.fixture(scope="module")
async def simple_file(authorized_transport, bucket):
blob_name = "basic-file.txt"
upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
upload = resumable_requests.SimpleUpload(upload_url)
data = b"Simple contents"
response = await upload.transmit(authorized_transport, data, PLAIN_TEXT)
assert response.status == http.client.OK
yield blob_name, data
await delete_blob(authorized_transport, blob_name)
@pytest.fixture(scope="module")
async def add_files(authorized_transport, bucket):
blob_names = []
for info in ALL_FILES:
to_upload = get_contents_for_upload(info)
blob_name = get_blob_name(info)
blob_names.append(blob_name)
if "metadata" in info:
upload = resumable_requests.MultipartUpload(utils.MULTIPART_UPLOAD)
metadata = copy.deepcopy(info["metadata"])
metadata["name"] = blob_name
response = await upload.transmit(
authorized_transport, to_upload, metadata, info["content_type"]
)
else:
upload_url = utils.SIMPLE_UPLOAD_TEMPLATE.format(blob_name=blob_name)
upload = resumable_requests.SimpleUpload(upload_url)
response = await upload.transmit(
authorized_transport, to_upload, info["content_type"]
)
assert response.status == http.client.OK
yield
# Clean-up the blobs we created.
for blob_name in blob_names:
await delete_blob(authorized_transport, blob_name)
async def check_tombstoned(download, transport):
assert download.finished
if isinstance(download, SIMPLE_DOWNLOADS):
with pytest.raises(ValueError) as exc_info:
await download.consume(transport)
assert exc_info.match("A download can only be used once.")
else:
with pytest.raises(ValueError) as exc_info:
await download.consume_next_chunk(transport)
assert exc_info.match("Download has finished.")
async def check_error_response(exc_info, status_code, message):
error = exc_info.value
response = error.response
assert response.status == status_code
content = await response.content.read()
assert content.startswith(message)
assert len(error.args) == 5
assert error.args[1] == status_code
assert error.args[3] == http.client.OK
assert error.args[4] == http.client.PARTIAL_CONTENT
class TestDownload(object):
@staticmethod
def _get_target_class():
return resumable_requests.Download
def _make_one(self, media_url, **kw):
return self._get_target_class()(media_url, **kw)
@staticmethod
def _get_contents(info):
return get_contents(info)
@staticmethod
async def _read_response_content(response):
content = await response.content()
return content
@pytest.mark.asyncio
@pytest.mark.parametrize("checksum", ["md5", "crc32c", None])
async def test_download_full(self, add_files, authorized_transport, checksum):
for info in ALL_FILES:
actual_contents = self._get_contents(info)
blob_name = get_blob_name(info)
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
download = self._make_one(media_url, checksum=checksum)
# Consume the resource.
response = await download.consume(authorized_transport)
response = tr_requests._CombinedResponse(response)
assert response.status == http.client.OK
content = await self._read_response_content(response)
assert content == actual_contents
await check_tombstoned(download, authorized_transport)
@pytest.mark.asyncio
async def test_extra_headers(self, authorized_transport, secret_file):
blob_name, data, headers = secret_file
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
download = self._make_one(media_url, headers=headers)
# Consume the resource.
response = await download.consume(authorized_transport)
assert response.status == http.client.OK
content = await response.content.read()
assert content == data
await check_tombstoned(download, authorized_transport)
# Attempt to consume the resource **without** the headers.
download_wo = self._make_one(media_url)
with pytest.raises(common.InvalidResponse) as exc_info:
await download_wo.consume(authorized_transport)
await check_error_response(exc_info, http.client.BAD_REQUEST, ENCRYPTED_ERR)
await check_tombstoned(download_wo, authorized_transport)
@pytest.mark.asyncio
async def test_non_existent_file(self, authorized_transport, bucket):
blob_name = "does-not-exist.txt"
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
download = self._make_one(media_url)
# Try to consume the resource and fail.
with pytest.raises(common.InvalidResponse) as exc_info:
await download.consume(authorized_transport)
await check_error_response(exc_info, http.client.NOT_FOUND, NOT_FOUND_ERR)
await check_tombstoned(download, authorized_transport)
@pytest.mark.asyncio
async def test_bad_range(self, simple_file, authorized_transport):
blob_name, data = simple_file
# Make sure we have an invalid range.
start = 32
end = 63
assert len(data) < start < end
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
download = self._make_one(media_url, start=start, end=end)
# Try to consume the resource and fail.
with pytest.raises(common.InvalidResponse) as exc_info:
await download.consume(authorized_transport)
await check_error_response(
exc_info,
http.client.REQUESTED_RANGE_NOT_SATISFIABLE,
b"Request range not satisfiable",
)
await check_tombstoned(download, authorized_transport)
def _download_slice(self, media_url, slice_):
assert slice_.step is None
end = None
if slice_.stop is not None:
end = slice_.stop - 1
return self._make_one(media_url, start=slice_.start, end=end)
@pytest.mark.asyncio
async def test_download_partial(self, add_files, authorized_transport):
for info in ALL_FILES:
actual_contents = self._get_contents(info)
blob_name = get_blob_name(info)
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
for slice_ in info["slices"]:
download = self._download_slice(media_url, slice_)
response = await download.consume(authorized_transport)
assert response.status == http.client.PARTIAL_CONTENT
content = await response.content.read()
assert content == actual_contents[slice_]
with pytest.raises(ValueError):
await download.consume(authorized_transport)
class TestRawDownload(TestDownload):
@staticmethod
def _get_target_class():
return resumable_requests.RawDownload
@staticmethod
def _get_contents(info):
return get_raw_contents(info)
@staticmethod
async def _read_response_content(response):
content = await tr_requests._CombinedResponse(response._response).raw_content()
return content
@pytest.mark.parametrize("checksum", ["md5", "crc32c"])
@pytest.mark.asyncio
async def test_corrupt_download(self, add_files, corrupting_transport, checksum):
for info in ALL_FILES:
blob_name = get_blob_name(info)
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
stream = io.BytesIO()
download = self._make_one(media_url, stream=stream, checksum=checksum)
# Consume the resource.
with pytest.raises(common.DataCorruption) as exc_info:
await download.consume(corrupting_transport)
assert download.finished
if checksum == "md5":
EMPTY_HASH = CorruptingAuthorizedSession.EMPTY_MD5
else:
EMPTY_HASH = CorruptingAuthorizedSession.EMPTY_CRC32C
msg = download_mod._CHECKSUM_MISMATCH.format(
download.media_url,
EMPTY_HASH,
info[checksum],
checksum_type=checksum.upper(),
)
assert exc_info.value.args == (msg,)
@pytest.mark.parametrize("checksum", ["md5", "crc32c"])
@pytest.mark.asyncio
async def test_corrupt_download_no_check(
self, add_files, corrupting_transport, checksum
):
for info in ALL_FILES:
blob_name = get_blob_name(info)
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
stream = io.BytesIO()
download = self._make_one(media_url, stream=stream, checksum=None)
# Consume the resource.
await download.consume(corrupting_transport)
assert download.finished
def get_chunk_size(min_chunks, total_bytes):
# Make sure the number of chunks **DOES NOT** evenly divide.
num_chunks = min_chunks
while total_bytes % num_chunks == 0:
num_chunks += 1
chunk_size = total_bytes // num_chunks
# Since we know an integer division has remainder, increment by 1.
chunk_size += 1
assert total_bytes < num_chunks * chunk_size
return num_chunks, chunk_size
async def consume_chunks(download, authorized_transport, total_bytes, actual_contents):
start_byte = download.start
end_byte = download.end
if end_byte is None:
end_byte = total_bytes - 1
num_responses = 0
while not download.finished:
response = await download.consume_next_chunk(authorized_transport)
num_responses += 1
next_byte = min(start_byte + download.chunk_size, end_byte + 1)
assert download.bytes_downloaded == next_byte - download.start
assert download.total_bytes == total_bytes
assert response.status == http.client.PARTIAL_CONTENT
# NOTE: Due to the consumption of the stream in the respone, the
# response object for async requests will be EOF at this point. In
# sync versions we could compare the content with the range of
# actual contents. Since streams aren't reversible, we can't do that
# here.
assert response.content.at_eof()
start_byte = next_byte
return num_responses, response
class TestChunkedDownload(object):
@staticmethod
def _get_target_class():
return resumable_requests.ChunkedDownload
def _make_one(self, media_url, chunk_size, stream, **kw):
return self._get_target_class()(media_url, chunk_size, stream, **kw)
@staticmethod
def _get_contents(info):
return get_contents(info)
@pytest.mark.asyncio
async def test_chunked_download_partial(self, add_files, authorized_transport):
for info in ALL_FILES:
actual_contents = self._get_contents(info)
blob_name = get_blob_name(info)
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
for slice_ in info["slices"]:
# Manually replace a missing start with 0.
start = 0 if slice_.start is None else slice_.start
# Chunked downloads don't support a negative index.
if start < 0:
continue
# First determine how much content is in the slice and
# use it to determine a chunking strategy.
total_bytes = len(actual_contents)
if slice_.stop is None:
end_byte = total_bytes - 1
end = None
else:
# Python slices DO NOT include the last index, though a byte
# range **is** inclusive of both endpoints.
end_byte = slice_.stop - 1
end = end_byte
num_chunks, chunk_size = get_chunk_size(7, end_byte - start + 1)
# Create the actual download object.
stream = io.BytesIO()
download = self._make_one(
media_url, chunk_size, stream, start=start, end=end
)
# Consume the resource in chunks.
num_responses, last_response = await consume_chunks(
download, authorized_transport, total_bytes, actual_contents
)
# Make sure the combined chunks are the whole slice.
assert stream.getvalue() == actual_contents[slice_]
# Check that we have the right number of responses.
assert num_responses == num_chunks
# Make sure the last chunk isn't the same size.
content = await last_response.content.read()
assert len(content) < chunk_size
await check_tombstoned(download, authorized_transport)
@pytest.mark.asyncio
async def test_chunked_with_extra_headers(self, authorized_transport, secret_file):
blob_name, data, headers = secret_file
num_chunks = 4
chunk_size = 12
assert (num_chunks - 1) * chunk_size < len(data) < num_chunks * chunk_size
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
stream = io.BytesIO()
download = self._make_one(media_url, chunk_size, stream, headers=headers)
# Consume the resource in chunks.
num_responses, last_response = await consume_chunks(
download, authorized_transport, len(data), data
)
# Make sure the combined chunks are the whole object.
assert stream.getvalue() == data
# Check that we have the right number of responses.
assert num_responses == num_chunks
# Make sure the last chunk isn't the same size.
content = await last_response.read()
assert len(content) < chunk_size
await check_tombstoned(download, authorized_transport)
# Attempt to consume the resource **without** the headers.
stream_wo = io.BytesIO()
download_wo = resumable_requests.ChunkedDownload(
media_url, chunk_size, stream_wo
)
with pytest.raises(common.InvalidResponse) as exc_info:
await download_wo.consume_next_chunk(authorized_transport)
assert stream_wo.tell() == 0
await check_error_response(exc_info, http.client.BAD_REQUEST, ENCRYPTED_ERR)
assert download_wo.invalid
class TestRawChunkedDownload(TestChunkedDownload):
@staticmethod
def _get_target_class():
return resumable_requests.RawChunkedDownload
@staticmethod
def _get_contents(info):
return get_raw_contents(info)
@pytest.mark.asyncio
async def test_chunked_download_full(self, add_files, authorized_transport):
for info in ALL_FILES:
actual_contents = self._get_contents(info)
blob_name = get_blob_name(info)
total_bytes = len(actual_contents)
num_chunks, chunk_size = get_chunk_size(7, total_bytes)
# Create the actual download object.
media_url = utils.DOWNLOAD_URL_TEMPLATE.format(blob_name=blob_name)
stream = io.BytesIO()
download = self._make_one(media_url, chunk_size, stream)
# Consume the resource in chunks.
num_responses, last_response = await consume_chunks(
download, authorized_transport, total_bytes, actual_contents
)
# Make sure the combined chunks are the whole object.
assert stream.getvalue() == actual_contents
# Check that we have the right number of responses.
assert num_responses == num_chunks
# Make sure the last chunk isn't the same size.
assert total_bytes % chunk_size != 0
content = await last_response.content.read()
assert len(content) < chunk_size
await check_tombstoned(download, authorized_transport)
| 37.6289 | 88 | 0.664514 |
629c9023a6506d24487046dcab2a098ca7fc54fa | 23,986 | py | Python | plugins/ledger/ledger.py | MrSlosh/Electrum-NYC | 9b83efafc5313246804ead462ff6266ee5d50b60 | [
"MIT"
] | 1 | 2018-02-27T23:40:07.000Z | 2018-02-27T23:40:07.000Z | plugins/ledger/ledger.py | MrSlosh/Electrum-NYC | 9b83efafc5313246804ead462ff6266ee5d50b60 | [
"MIT"
] | null | null | null | plugins/ledger/ledger.py | MrSlosh/Electrum-NYC | 9b83efafc5313246804ead462ff6266ee5d50b60 | [
"MIT"
] | null | null | null | from struct import pack, unpack
import hashlib
import sys
import traceback
from electrum_nyc import bitcoin
from electrum_nyc.bitcoin import TYPE_ADDRESS, int_to_hex, var_int
from electrum_nyc.i18n import _
from electrum_nyc.plugins import BasePlugin
from electrum_nyc.keystore import Hardware_KeyStore
from electrum_nyc.transaction import Transaction
from ..hw_wallet import HW_PluginBase
from electrum_nyc.util import print_error, is_verbose, bfh, bh2u, versiontuple
# btchip-python (and the hid backend) are optional: when they are missing the
# plugin stays loadable but reports itself unavailable via BTCHIP = False.
try:
    import hid
    from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
    from btchip.btchip import btchip
    from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
    from btchip.bitcoinTransaction import bitcoinTransaction
    from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
    from btchip.btchipException import BTChipException
    BTCHIP = True
    BTCHIP_DEBUG = is_verbose
except ImportError:
    BTCHIP = False
# User-facing messages shown when the dongle firmware is too old.
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
    ' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_SEGWIT = _('Firmware version (or "Litecoin" app) too old for Segwit support. Please update at') + \
    ' https://www.ledgerwallet.com'
# Minimum firmware versions for specific features (compared with versiontuple).
MULTI_OUTPUT_SUPPORT = '1.1.4'
SEGWIT_SUPPORT = '1.1.9'
SEGWIT_SUPPORT_SPECIAL = '1.0.4'
class Ledger_Client():
    """Thin wrapper around a btchip dongle connection.

    Runs one-time "preflight" checks (firmware version, operation mode,
    PIN verification) before the device is used for key derivation.
    """

    def __init__(self, hidDevice):
        self.dongleObject = btchip(hidDevice)
        self.preflightDone = False

    def is_pairable(self):
        return True

    def close(self):
        self.dongleObject.dongle.close()

    def timeout(self, cutoff):
        # Ledger devices are not auto-closed on timeout.
        pass

    def is_initialized(self):
        return True

    def label(self):
        return ""

    def i4b(self, x):
        """Pack *x* as a 4-byte big-endian unsigned int."""
        return pack('>I', x)

    def test_pin_unlocked(func):
        """Function decorator to test the Ledger for being unlocked, and if not,
        raise a human-readable exception.
        """
        def catch_exception(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except BTChipException as e:
                # 0x6982: security status not satisfied (device locked).
                if e.sw == 0x6982:
                    raise Exception(_('Your Ledger is locked. Please unlock it.'))
                else:
                    raise
        return catch_exception

    @test_pin_unlocked
    def get_xpub(self, bip32_path, xtype):
        """Derive and serialize the extended public key at *bip32_path*.

        Raises when the requested script type needs newer firmware.
        """
        self.checkDevice()
        # bip32_path is of the form 44'/0'/1'
        # S-L-O-W - we don't handle the fingerprint directly, so compute
        # it manually from the previous node
        # This only happens once so it's bearable
        if xtype in ['p2wpkh', 'p2wsh'] and not self.supports_native_segwit():
            raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
        if xtype in ['p2wpkh-p2sh', 'p2wsh-p2sh'] and not self.supports_segwit():
            raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
        splitPath = bip32_path.split('/')
        if splitPath[0] == 'm':
            splitPath = splitPath[1:]
            bip32_path = bip32_path[2:]
        fingerprint = 0
        if len(splitPath) > 1:
            # Parent fingerprint = first 4 bytes of hash160 of the parent pubkey.
            prevPath = "/".join(splitPath[0:len(splitPath) - 1])
            nodeData = self.dongleObject.getWalletPublicKey(prevPath)
            publicKey = compress_public_key(nodeData['publicKey'])
            h = hashlib.new('ripemd160')
            h.update(hashlib.sha256(publicKey).digest())
            fingerprint = unpack(">I", h.digest()[0:4])[0]
        nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
        publicKey = compress_public_key(nodeData['publicKey'])
        depth = len(splitPath)
        # A trailing apostrophe marks a hardened child (bit 31 set).
        lastChild = splitPath[len(splitPath) - 1].split('\'')
        childnum = int(lastChild[0]) if len(lastChild) == 1 else 0x80000000 | int(lastChild[0])
        xpub = bitcoin.serialize_xpub(xtype, nodeData['chainCode'], publicKey, depth, self.i4b(fingerprint), self.i4b(childnum))
        return xpub

    def has_detached_pin_support(self, client):
        """Return True when the dongle supports detached PIN verification."""
        try:
            client.getVerifyPinRemainingAttempts()
            return True
        except BTChipException as e:
            if e.sw == 0x6d00:
                return False
            raise e

    def is_pin_validated(self, client):
        """Return True when the PIN has already been entered on the device."""
        try:
            # Invalid SET OPERATION MODE to verify the PIN status
            client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
        except BTChipException as e:
            if (e.sw == 0x6982):
                return False
            if (e.sw == 0x6A80):
                return True
            raise e

    def supports_multi_output(self):
        return self.multiOutputSupported

    def supports_segwit(self):
        return self.segwitSupported

    def supports_native_segwit(self):
        return self.nativeSegwitSupported

    def perform_hw1_preflight(self):
        """Check firmware capabilities and, if needed, verify the PIN."""
        try:
            firmwareInfo = self.dongleObject.getFirmwareVersion()
            firmware = firmwareInfo['version']
            self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
            self.nativeSegwitSupported = versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT)
            self.segwitSupported = self.nativeSegwitSupported or (firmwareInfo['specialVersion'] == 0x20 and versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT_SPECIAL))
            if not checkFirmware(firmwareInfo):
                self.dongleObject.dongle.close()
                raise Exception(MSG_NEEDS_FW_UPDATE_GENERIC)
            try:
                self.dongleObject.getOperationMode()
            except BTChipException as e:
                if (e.sw == 0x6985):
                    # Device not set up yet; run the setup flow.
                    self.dongleObject.dongle.close()
                    self.handler.get_setup( )
                    # Acquire the new client on the next run
                else:
                    raise e
            if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
                remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
                if remaining_attempts != 1:
                    msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
                else:
                    msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
                confirmed, p, pin = self.password_dialog(msg)
                if not confirmed:
                    raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
                pin = pin.encode()
                self.dongleObject.verifyPin(pin)
                # FIX: ADDRTYPE_P2PKH / ADDRTYPE_P2SH were referenced as bare
                # names but are never imported or defined in this module
                # (NameError at runtime). Use the constants from the imported
                # `bitcoin` module instead.
                self.dongleObject.setAlternateCoinVersions(bitcoin.ADDRTYPE_P2PKH,
                                                           bitcoin.ADDRTYPE_P2SH)
        except BTChipException as e:
            if (e.sw == 0x6faa):
                raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
            if ((e.sw & 0xFFF0) == 0x63c0):
                raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
            if e.sw == 0x6f00 and e.message == 'Invalid channel':
                # based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
                raise Exception("Invalid channel.\n"
                                "Please make sure that 'Browser support' is disabled on your device.")
            raise e

    def checkDevice(self):
        """Run the preflight sequence once per connection."""
        if not self.preflightDone:
            try:
                self.perform_hw1_preflight()
            except BTChipException as e:
                if (e.sw == 0x6d00):
                    raise BaseException("Device not in Bitcoin mode")
                raise e
            self.preflightDone = True

    def password_dialog(self, msg=None):
        """Prompt for the PIN; returns (confirmed, password, pin)."""
        response = self.handler.get_word(msg)
        if response is None:
            return False, None, None
        return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
    """Keystore backed by a Ledger hardware wallet.

    All device interaction goes through the plugin's client; user
    interaction (PIN prompts, confirmations) goes through self.handler.
    """
    hw_type = 'ledger'
    device = 'Ledger'

    def __init__(self, d):
        Hardware_KeyStore.__init__(self, d)
        # Errors and other user interaction is done through the wallet's
        # handler. The handler is per-window and preserved across
        # device reconnects
        self.force_watching_only = False
        self.signing = False
        # Per-keystore device config; persisted via dump().
        self.cfg = d.get('cfg', {'mode':0,'pair':''})

    def dump(self):
        """Serialize the keystore, including the device config."""
        obj = Hardware_KeyStore.dump(self)
        obj['cfg'] = self.cfg
        return obj

    def get_derivation(self):
        return self.derivation

    def get_client(self):
        # Raw btchip object (low-level dongle commands).
        return self.plugin.get_client(self).dongleObject

    def get_client_electrum(self):
        # Ledger_Client wrapper (capability queries such as supports_segwit).
        return self.plugin.get_client(self)

    def give_error(self, message, clear_client = False):
        """Report an error to the user (or log it while signing) and raise."""
        print_error(message)
        if not self.signing:
            self.handler.show_error(message)
        else:
            self.signing = False
        if clear_client:
            self.client = None
        raise Exception(message)

    def address_id_stripped(self, address):
        """Return the BIP32 path of *address* without the leading "m/"."""
        # Strip the leading "m/"
        change, index = self.get_address_index(address)
        derivation = self.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        return address_path[2:]

    def decrypt_message(self, pubkey, message, password):
        raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the key at *sequence*; returns a compact sig.

        Returns '' when the user cancels on the device.
        """
        self.signing = True
        message = message.encode('utf8')
        message_hash = hashlib.sha256(message).hexdigest().upper()
        # prompt for the PIN before displaying the dialog if necessary
        client = self.get_client()
        address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
        self.handler.show_message("Signing message ...\r\nMessage hash: "+message_hash)
        try:
            info = self.get_client().signMessagePrepare(address_path, message)
            pin = ""
            if info['confirmationNeeded']:
                pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
                if not pin:
                    raise UserWarning(_('Cancelled by user'))
                pin = str(pin).encode()
            signature = self.get_client().signMessageSign(pin)
        except BTChipException as e:
            # 0x6a80: device rejected the message payload.
            if e.sw == 0x6a80:
                self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
            else:
                self.give_error(e, True)
        except UserWarning:
            self.handler.show_error(_('Cancelled by user'))
            return ''
        except Exception as e:
            self.give_error(e, True)
        finally:
            self.handler.finished()
        self.signing = False
        # Parse the ASN.1 signature
        rLength = signature[3]
        r = signature[4 : 4 + rLength]
        sLength = signature[4 + rLength + 1]
        s = signature[4 + rLength + 2:]
        # 33-byte components carry a leading zero pad byte; drop it.
        if rLength == 33:
            r = r[1:]
        if sLength == 33:
            s = s[1:]
        # And convert it
        return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s

    def sign_transaction(self, tx, password):
        """Sign every input of *tx* on the device and fill tx signatures.

        The device-command sequence differs for segwit, P2SH and legacy
        inputs; the ordering of the btchip calls below is significant.
        """
        if tx.is_complete():
            return
        client = self.get_client()
        self.signing = True
        inputs = []
        inputsPaths = []
        pubKeys = []
        chipInputs = []
        redeemScripts = []
        signatures = []
        preparedTrustedInputs = []
        changePath = ""
        changeAmount = None
        output = None
        outputAmount = None
        p2shTransaction = False
        segwitTransaction = False
        pin = ""
        self.get_client() # prompt for the PIN before displaying the dialog if necessary
        # Fetch inputs of the transaction to sign
        derivations = self.get_tx_derivations(tx)
        for txin in tx.inputs():
            if txin['type'] == 'coinbase':
                self.give_error("Coinbase not supported") # should never happen
            if txin['type'] in ['p2sh']:
                p2shTransaction = True
            if txin['type'] in ['p2wpkh-p2sh', 'p2wsh-p2sh']:
                if not self.get_client_electrum().supports_segwit():
                    self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
                segwitTransaction = True
            if txin['type'] in ['p2wpkh', 'p2wsh']:
                if not self.get_client_electrum().supports_native_segwit():
                    self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
                segwitTransaction = True
            # Find which of the input's pubkeys belongs to this keystore.
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            for i, x_pubkey in enumerate(x_pubkeys):
                if x_pubkey in derivations:
                    signingPos = i
                    s = derivations.get(x_pubkey)
                    hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
                    break
            else:
                self.give_error("No matching x_key for sign_transaction") # should never happen
            redeemScript = Transaction.get_preimage_script(txin)
            inputs.append([txin['prev_tx'].raw, txin['prevout_n'], redeemScript, txin['prevout_hash'], signingPos, txin.get('sequence', 0xffffffff - 1) ])
            inputsPaths.append(hwAddress)
            pubKeys.append(pubkeys)
        # Sanity check
        if p2shTransaction:
            for txin in tx.inputs():
                if txin['type'] != 'p2sh':
                    self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
        # Serialize the outputs in the hex format the dongle expects.
        txOutput = var_int(len(tx.outputs()))
        for txout in tx.outputs():
            output_type, addr, amount = txout
            txOutput += int_to_hex(amount, 8)
            script = tx.pay_script(output_type, addr)
            txOutput += var_int(len(script)//2)
            txOutput += script
        txOutput = bfh(txOutput)
        # Recognize outputs - only one output and one change is authorized
        if not p2shTransaction:
            if not self.get_client_electrum().supports_multi_output():
                if len(tx.outputs()) > 2:
                    self.give_error("Transaction with more than 2 outputs not supported")
            for _type, address, amount in tx.outputs():
                assert _type == TYPE_ADDRESS
                info = tx.output_info.get(address)
                if (info is not None) and (len(tx.outputs()) != 1):
                    index, xpubs, m = info
                    changePath = self.get_derivation()[2:] + "/%d/%d"%index
                    changeAmount = amount
                else:
                    output = address
                    outputAmount = amount
        self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
        try:
            # Get trusted inputs from the original transactions
            for utxo in inputs:
                sequence = int_to_hex(utxo[5], 4)
                if segwitTransaction:
                    # Segwit: value = reversed txid + output index + amount.
                    txtmp = bitcoinTransaction(bfh(utxo[0]))
                    tmp = bfh(utxo[3])[::-1]
                    tmp += bfh(int_to_hex(utxo[1], 4))
                    tmp += txtmp.outputs[utxo[1]].amount
                    chipInputs.append({'value' : tmp, 'witness' : True, 'sequence' : sequence})
                    redeemScripts.append(bfh(utxo[2]))
                elif not p2shTransaction:
                    # Legacy: let the dongle attest the previous transaction.
                    txtmp = bitcoinTransaction(bfh(utxo[0]))
                    trustedInput = self.get_client().getTrustedInput(txtmp, utxo[1])
                    trustedInput['sequence'] = sequence
                    chipInputs.append(trustedInput)
                    redeemScripts.append(txtmp.outputs[utxo[1]].script)
                else:
                    # P2SH: untrusted outpoint only.
                    tmp = bfh(utxo[3])[::-1]
                    tmp += bfh(int_to_hex(utxo[1], 4))
                    chipInputs.append({'value' : tmp, 'sequence' : sequence})
                    redeemScripts.append(bfh(utxo[2]))
            # Sign all inputs
            firstTransaction = True
            inputIndex = 0
            rawTx = tx.serialize()
            self.get_client().enableAlternate2fa(False)
            if segwitTransaction:
                # Segwit flow: present all inputs/outputs once for
                # confirmation, then sign each input individually.
                self.get_client().startUntrustedTransaction(True, inputIndex,
                                                            chipInputs, redeemScripts[inputIndex])
                outputData = self.get_client().finalizeInputFull(txOutput)
                outputData['outputData'] = txOutput
                transactionOutput = outputData['outputData']
                if outputData['confirmationNeeded']:
                    outputData['address'] = output
                    self.handler.finished()
                    pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
                    if not pin:
                        raise UserWarning()
                    if pin != 'paired':
                        self.handler.show_message(_("Confirmed. Signing Transaction..."))
                while inputIndex < len(inputs):
                    singleInput = [ chipInputs[inputIndex] ]
                    self.get_client().startUntrustedTransaction(False, 0,
                                                            singleInput, redeemScripts[inputIndex])
                    inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
                    inputSignature[0] = 0x30 # force for 1.4.9+
                    signatures.append(inputSignature)
                    inputIndex = inputIndex + 1
            else:
                # Legacy flow: replay the whole transaction for every input.
                while inputIndex < len(inputs):
                    self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
                                                            chipInputs, redeemScripts[inputIndex])
                    outputData = self.get_client().finalizeInputFull(txOutput)
                    outputData['outputData'] = txOutput
                    if firstTransaction:
                        transactionOutput = outputData['outputData']
                    if outputData['confirmationNeeded']:
                        outputData['address'] = output
                        self.handler.finished()
                        pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
                        if not pin:
                            raise UserWarning()
                        if pin != 'paired':
                            self.handler.show_message(_("Confirmed. Signing Transaction..."))
                    else:
                        # Sign input with the provided PIN
                        inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
                        inputSignature[0] = 0x30 # force for 1.4.9+
                        signatures.append(inputSignature)
                        inputIndex = inputIndex + 1
                    if pin != 'paired':
                        firstTransaction = False
        except UserWarning:
            self.handler.show_error(_('Cancelled by user'))
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.give_error(e, True)
        finally:
            self.handler.finished()
        # Write the collected signatures back into the transaction.
        for i, txin in enumerate(tx.inputs()):
            signingPos = inputs[i][4]
            txin['signatures'][signingPos] = bh2u(signatures[i])
        tx.raw = tx.serialize()
        self.signing = False

    def show_address(self, sequence, txin_type):
        """Display the address at *sequence* on the device's screen."""
        self.signing = True
        client = self.get_client()
        address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
        self.handler.show_message(_("Showing address ..."))
        segwit = Transaction.is_segwit_inputtype(txin_type)
        segwitNative = txin_type == 'p2wpkh'
        try:
            client.getWalletPublicKey(address_path, showOnScreen=True, segwit=segwit, segwitNative=segwitNative)
        except BTChipException as e:
            if e.sw == 0x6985:  # cancelled by user
                pass
            else:
                traceback.print_exc(file=sys.stderr)
                self.handler.show_error(e)
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            self.handler.show_error(e)
        finally:
            self.handler.finished()
        self.signing = False
class LedgerPlugin(HW_PluginBase):
    """Electrum plugin glue for Ledger / BTChip hardware wallets."""

    libraries_available = BTCHIP
    keystore_class = Ledger_KeyStore
    client = None
    DEVICE_IDS = [
        (0x2581, 0x1807),  # HW.1 legacy btchip
        (0x2581, 0x2b7c),  # HW.1 transitional production
        (0x2581, 0x3b7c),  # HW.1 ledger production
        (0x2581, 0x4b7c),  # HW.1 ledger test
        (0x2c97, 0x0000),  # Blue
        (0x2c97, 0x0001),  # Nano-S
    ]

    def __init__(self, parent, config, name):
        self.segwit = config.get("segwit")
        HW_PluginBase.__init__(self, parent, config, name)
        if not self.libraries_available:
            return
        self.device_manager().register_devices(self.DEVICE_IDS)

    def btchip_is_connected(self, keystore):
        """Return True when the dongle answers a firmware-version query."""
        try:
            self.get_client(keystore).getFirmwareVersion()
            return True
        except Exception:
            return False

    def get_btchip_device(self, device):
        """Open the raw HID path and wrap it in a btchip comm object."""
        vendor = device.product_key[0]
        product = device.product_key[1]
        # Ledger protocol (vs legacy HW.1) for production/test HW.1 and
        # all 0x2c97 (Blue / Nano-S) devices.
        is_ledger = (vendor == 0x2c97 or
                     (vendor == 0x2581 and product in (0x3b7c, 0x4b7c)))
        hid_dev = hid.device()
        hid_dev.open_path(device.path)
        hid_dev.set_nonblocking(True)
        return HIDDongleHIDAPI(hid_dev, is_ledger, BTCHIP_DEBUG)

    def create_client(self, device, handler):
        """Build a Ledger_Client for *device*, or None if it cannot open."""
        self.handler = handler
        dongle = self.get_btchip_device(device)
        if dongle is None:
            return None
        return Ledger_Client(dongle)

    def setup_device(self, device_info, wizard, purpose):
        """Initialize a freshly paired device from the install wizard."""
        client = self.device_manager().client_by_id(device_info.device.id_)
        client.handler = self.create_handler(wizard)
        # TODO replace by direct derivation once Nano S > 1.1
        client.get_xpub("m/44'/2'", 'standard')

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch an xpub from the device identified by *device_id*."""
        client = self.device_manager().client_by_id(device_id)
        client.handler = self.create_handler(wizard)
        client.checkDevice()
        return client.get_xpub(derivation, xtype)

    def get_client(self, keystore, force_pair=True):
        """Return the (preflighted) client for *keystore*, or None."""
        # All client interaction should not be in the main GUI thread.
        devmgr = self.device_manager()
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(
                self, keystore.handler, keystore, force_pair)
        if client is not None:
            client.checkDevice()
        return client

    def show_address(self, wallet, address):
        """Ask the device to display *address* for visual verification."""
        keystore = wallet.get_keystore()
        keystore.show_address(wallet.get_address_index(address),
                              wallet.get_txin_type(address))
| 42.832143 | 242 | 0.588093 |
39406ab24188220857c9be319363b88478c15ce1 | 243 | py | Python | behaviors/vigir_behavior_walk_to_template/setup.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-08-25T18:47:52.000Z | 2019-12-04T21:40:28.000Z | behaviors/vigir_behavior_walk_to_template/setup.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 2 | 2017-08-16T16:09:47.000Z | 2020-08-18T17:25:22.000Z | behaviors/vigir_behavior_walk_to_template/setup.py | team-vigir/vigir_behaviors | 6696e7b7aadb24bb5495475065cc7b10d80b7db4 | [
"BSD-3-Clause"
] | 5 | 2015-11-06T21:57:37.000Z | 2022-03-30T10:15:57.000Z | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Build the distutils arguments from the catkin package manifest.
setup_args = generate_distutils_setup(
    packages=['vigir_behavior_walk_to_template'],
    package_dir={'': 'src'},
)
setup(**setup_args)
624ff9aeb8f3147cdc6de4c1e0d5744fc995a01e | 679 | py | Python | reply_engine.py | RITct/Lisa | f355f76ad2c802af4c8cbc0f6bdf72543e5399ac | [
"MIT"
] | null | null | null | reply_engine.py | RITct/Lisa | f355f76ad2c802af4c8cbc0f6bdf72543e5399ac | [
"MIT"
] | null | null | null | reply_engine.py | RITct/Lisa | f355f76ad2c802af4c8cbc0f6bdf72543e5399ac | [
"MIT"
] | null | null | null | from chatterbot import ChatBot
class reply_engine:
    """Wrapper around a ChatterBot instance that produces Lisa's replies."""

    bot = None  # ChatBot instance; populated by init()

    def init(self):
        """Create the ChatBot backed by the local JSON database."""
        self.bot = ChatBot(
            "Lisa",
            storage_adapter="chatterbot.adapters.storage.JsonDatabaseAdapter",
            logic_adapter="chatterbot.adapters.logic.ClosestMatchAdapter",
            io_adapter="chatterbot.adapters.io.NoOutputAdapter",
            database="database/database.db")

    def get_reply(self, data):
        """Return the bot's response to the input text *data*."""
        return self.bot.get_response(data)

    def train(self):
        """Read conversation lines from stdin until '#' and train the bot."""
        try:
            read_line = raw_input  # Python 2
        except NameError:
            read_line = input  # Python 3
        training_data = []
        # Fix: prompt previously read "Input Converstion" (typo).
        print("Input Conversation")
        while True:
            line = read_line()
            if line == "#":  # sentinel ends the conversation
                break
            training_data.append(line)
        self.bot.train(training_data)
if __name__ == "__main__":
r = reply_engine()
r.init()
r.train() | 21.903226 | 72 | 0.706922 |
79406056abb80590dc0ab387cab25bc8240864a5 | 668 | py | Python | startup.py | ruthogunnnaike/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | 7 | 2017-09-09T06:16:16.000Z | 2020-12-23T09:00:23.000Z | startup.py | eshcrow/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | null | null | null | startup.py | eshcrow/SeeSec---IoT-Vulnerablity-Scanner | d2186421607af0ef3351b7d95c2478c4ac307931 | [
"MIT"
] | 6 | 2018-05-12T10:07:42.000Z | 2021-12-06T15:24:17.000Z |
def launch():
    """Bring up every POX component used by the scanner.

    Each component module exposes its own ``launch`` entry point; the
    local name is rebound by each import and then invoked immediately.

    Fix: the debug-logging activation was previously wrapped in a nested
    ``def launch():`` that was never called (it was immediately shadowed
    by the next import), so DEBUG logging never took effect. It now
    follows the same import-then-call pattern as the other components.
    """
    from pox.log.level import launch
    launch(DEBUG=True)
    from pox.misc.firewall import launch
    launch()
    from pox.openflow.keepalive import launch
    launch(interval=300)
    from pox.forwarding.l3_learning import launch
    launch()
    from pox.proto.dhcpd import launch
    launch()
    from pox.proto.dns_spy import launch
    launch()
    from pox.host_tracker.host_tracker import launch
    launch()
    from pox.openflow.discovery import launch
    launch()  # 15 seconds
    from pox.forwarding.l2_pairs import launch
    launch()
| 20.875 | 56 | 0.615269 |
bbe3ccf099d4fb1dedbc0ee4d1dfbffc8e519d44 | 310 | py | Python | autocomplete_light/example_apps/non_admin_add_another/admin.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | 1 | 2015-10-12T21:42:05.000Z | 2015-10-12T21:42:05.000Z | autocomplete_light/example_apps/non_admin_add_another/admin.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | null | null | null | autocomplete_light/example_apps/non_admin_add_another/admin.py | bburan/django-autocomplete-light | 064676061b101d5d47655e8598b21cbaf7716ae8 | [
"MIT"
] | null | null | null | from django.contrib import admin
import autocomplete_light
from .models import NonAdminAddAnotherModel
class NonAdminAddAnotherModelAdmin(admin.ModelAdmin):
    """Admin for NonAdminAddAnotherModel using an autocomplete_light form."""
    form = autocomplete_light.modelform_factory(NonAdminAddAnotherModel)
admin.site.register(NonAdminAddAnotherModel, NonAdminAddAnotherModelAdmin)
| 25.833333 | 74 | 0.867742 |
0f64b8b334c18fc2124a2e14ca24b02da448c33a | 3,689 | py | Python | scripts/anim.py | qguv/binjgb-1 | 00b917c57cf177e101e6ee31887d5d87cc4afbda | [
"MIT"
] | null | null | null | scripts/anim.py | qguv/binjgb-1 | 00b917c57cf177e101e6ee31887d5d87cc4afbda | [
"MIT"
] | null | null | null | scripts/anim.py | qguv/binjgb-1 | 00b917c57cf177e101e6ee31887d5d87cc4afbda | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2016 Ben Smith
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from __future__ import print_function
import argparse
import multiprocessing
import os
import shutil
import sys
import tempfile
import time
import common
# Output locations for rendered animations and screenshots.
OUT_ANIM_DIR = os.path.join(common.OUT_DIR, 'anim')
OUT_SCREENSHOT_DIR = os.path.join(common.OUT_DIR, 'screenshot')
# Frames to emulate per ROM (ffmpeg below encodes at 60 fps, so 2400
# frames ~= 40s of gameplay and 300 frames ~= 5s).
DEFAULT_ANIM_FRAMES = 2400
DEFAULT_SCREENSHOT_FRAMES = 300
# Canned controller input fed to the tester (presumably holds "move
# right" -- named after its contents; confirm against the file).
CONTROLLER_INPUT_FILE = os.path.join(common.SCRIPT_DIR, 'input_move_right.txt')
def ChangeExt(path, new_ext):
  """Return *path* with its extension replaced by *new_ext*."""
  base, _old_ext = os.path.splitext(path)
  return base + new_ext
def ChangeDir(new_dir, path):
  """Return *path* relocated into *new_dir*, keeping only its basename."""
  filename = os.path.basename(path)
  return os.path.join(new_dir, filename)
def MakeDir(dir_name):
  """Create *dir_name* (with parents); report, but swallow, OS errors.

  An already-existing directory raises OSError, which is printed and
  otherwise ignored, matching best-effort directory creation.
  """
  try:
    os.makedirs(dir_name)
  except OSError as err:
    print(err)
def ConvertPPMstoMP4(tempdir, src):
  """Stitch the numbered .ppm frames for *src* into an .mp4 in OUT_ANIM_DIR."""
  frame_pattern = ChangeExt(src, '.%08d.ppm')
  out_path = ChangeDir(OUT_ANIM_DIR, ChangeExt(src, '.mp4'))
  ffmpeg_args = (
      'ffmpeg', '-y',
      '-framerate', '60',
      '-i', frame_pattern,
      '-c:v', 'libx264',
      '-r', '30',
      '-pix_fmt', 'yuv420p',
      out_path,
  )
  common.Run(*ffmpeg_args)
def Run(rom, options):
  """Render one ROM to a screenshot or an animation.

  Returns (rom, elapsed_seconds); invoked asynchronously by main()'s
  worker pool. Tester failures are printed, not raised.
  """
  began = time.time()
  work_dir = None
  try:
    work_dir = tempfile.mkdtemp(prefix='rom_anims')
    ppm_name = ChangeExt(rom, '.ppm')
    try:
      if options.screenshot:
        common.RunTester(rom, DEFAULT_SCREENSHOT_FRAMES,
                         ChangeDir(OUT_SCREENSHOT_DIR, ppm_name),
                         controller_input=CONTROLLER_INPUT_FILE)
      else:
        frames_img = ChangeDir(work_dir, ppm_name)
        common.RunTester(rom, DEFAULT_ANIM_FRAMES, frames_img,
                         controller_input=CONTROLLER_INPUT_FILE,
                         animate=True)
        ConvertPPMstoMP4(work_dir, frames_img)
    except common.Error as err:
      print(str(err))
  finally:
    if work_dir:
      shutil.rmtree(work_dir)
  return rom, time.time() - began
def main(args):
  """Render all matching ROMs in parallel; returns a process exit code."""
  parser = argparse.ArgumentParser()
  parser.add_argument('-l', '--list', action='store_true',
                      help='list matching ROMs')
  parser.add_argument('-j', '--num-processes',
                      type=int, default=multiprocessing.cpu_count(),
                      help='num processes.')
  parser.add_argument('-C', '--dir', help='search for ROMs in dir')
  parser.add_argument('--screenshot', action='store_true',
                      help='Just grab screenshots')
  parser.add_argument('patterns', metavar='pattern', nargs='*',
                      help='test patterns.')
  options = parser.parse_args(args)
  pattern_re = common.MakePatternRE(options.patterns)
  roms = common.GetMatchedRoms(pattern_re, options.dir)
  if options.list:
    # List mode: print the matches and exit without rendering.
    for rom in roms:
      print(rom)
    return 0
  MakeDir(OUT_ANIM_DIR)
  MakeDir(OUT_SCREENSHOT_DIR)
  start_time = time.time()
  pool = multiprocessing.Pool(options.num_processes)
  try:
    # Fan out one Run() per ROM, then poll for completions so progress
    # can be reported as each worker finishes (not in submission order).
    results = [pool.apply_async(Run, (rom, options)) for rom in roms]
    started = 0
    completed = 0
    while results:
      new_results = []
      for result in results:
        if result.ready():
          completed += 1
          # get(0): result is ready, so a zero timeout cannot block.
          rom, duration = result.get(0)
          print('[%d/%d] %s (%.3fs)' % (completed, len(roms), rom, duration))
        else:
          new_results.append(result)
      time.sleep(0.01)
      results = new_results
    pool.close()
  finally:
    # Always tear the pool down, even on KeyboardInterrupt.
    pool.terminate()
    pool.join()
  duration = time.time() - start_time
  print('total time: %.3fs' % duration)
  return 0
if __name__ == '__main__':
  # Ctrl-C is reported but exits with status 0 (no sys.exit on that path).
  try:
    exit_code = main(sys.argv[1:])
  except KeyboardInterrupt as interrupt:
    print(interrupt)
  else:
    sys.exit(exit_code)
| 27.736842 | 79 | 0.641637 |
b90f6c7146a94bc400065daf31a83dca7975e93b | 44,237 | py | Python | ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py | zyclove/ambari | 1032f0f54cb7b312b9a3b37570cd840f4e1e89d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
template = '\n{\n "interpreterSettings": {\n "2CKEKWY8Z": {\n "id": "2CKEKWY8Z",\n "name": "angular",\n "group": "angular",\n "properties": {},\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "angular",\n "class": "org.apache.zeppelin.angular.AngularInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "editOnDblClick": true\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKX8WPU1": {\n "id": "2CKX8WPU1",\n "name": "spark",\n "group": "spark",\n "properties": {\n "spark.executor.memory": "512m",\n "args": "",\n "zeppelin.spark.printREPLOutput": "true",\n "spark.cores.max": "",\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",\n "zeppelin.spark.sql.stacktrace": "false",\n "zeppelin.spark.importImplicit": "true",\n "zeppelin.spark.concurrentSQL": "false",\n "zeppelin.spark.useHiveContext": "true",\n "zeppelin.pyspark.python": "python",\n "zeppelin.dep.localrepo": "local-repo",\n "zeppelin.R.knitr": "true",\n "zeppelin.spark.maxResult": "1000",\n "master": "yarn-client",\n "spark.app.name": "Zeppelin",\n "zeppelin.R.image.width": "100%",\n "zeppelin.R.render.options": "out.format \\u003d \\u0027html\\u0027, comment \\u003d NA, echo \\u003d FALSE, results \\u003d \\u0027asis\\u0027, message \\u003d F, warning \\u003d F",\n "zeppelin.R.cmd": "R"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "spark",\n "class": "org.apache.zeppelin.spark.SparkInterpreter",\n "defaultInterpreter": true,\n "editor": {\n "language": "scala"\n }\n },\n {\n "name": "sql",\n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql"\n }\n },\n {\n "name": "dep",\n "class": "org.apache.zeppelin.spark.DepInterpreter",\n "defaultInterpreter": 
false,\n "editor": {\n "language": "scala"\n }\n },\n {\n "name": "pyspark",\n "class": "org.apache.zeppelin.spark.PySparkInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python"\n }\n },\n {\n "name": "r",\n "class": "org.apache.zeppelin.spark.SparkRInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "r"\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CK8A9MEG": {\n "id": "2CK8A9MEG",\n "name": "jdbc",\n "group": "jdbc",\n "properties": {\n "default.password": "",\n "zeppelin.jdbc.auth.type": "",\n "common.max_count": "1000",\n "zeppelin.jdbc.principal": "",\n "default.user": "gpadmin",\n "default.url": "jdbc:postgresql://localhost:5432/",\n "default.driver": "org.postgresql.Driver",\n "zeppelin.jdbc.keytab.location": "",\n "zeppelin.jdbc.concurrent.use": "true",\n "zeppelin.jdbc.concurrent.max_connection": "10"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "sql",\n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKX6DGQZ": {\n "id": "2CKX6DGQZ",\n "name": "livy",\n "group": "livy",\n "properties": {\n "zeppelin.livy.pull_status.interval.millis": "1000",\n "livy.spark.executor.memory": "",\n "zeppelin.livy.session.create_timeout": "120",\n "zeppelin.livy.principal": "",\n "zeppelin.livy.spark.sql.maxResult": "1000",\n "zeppelin.livy.keytab": "",\n "zeppelin.livy.concurrentSQL": "false",\n "zeppelin.livy.spark.sql.field.truncate": "true",\n "livy.spark.executor.cores": "",\n 
"zeppelin.livy.displayAppInfo": "false",\n "zeppelin.livy.url": "http://localhost:8998",\n "livy.spark.dynamicAllocation.minExecutors": "",\n "livy.spark.driver.cores": "",\n "livy.spark.jars.packages": "",\n "livy.spark.dynamicAllocation.enabled": "",\n "livy.spark.executor.instances": "",\n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "",\n "livy.spark.dynamicAllocation.maxExecutors": "",\n "livy.spark.dynamicAllocation.initialExecutors": "",\n "livy.spark.driver.memory": ""\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "spark",\n "class": "org.apache.zeppelin.livy.LivySparkInterpreter",\n "defaultInterpreter": true,\n "editor": {\n "language": "scala",\n "editOnDblClick": false\n }\n },\n {\n "name": "sql",\n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql",\n "editOnDblClick": false\n }\n },\n {\n "name": "pyspark",\n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python",\n "editOnDblClick": false\n }\n },\n {\n "name": "pyspark3",\n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python",\n "editOnDblClick": false\n }\n },\n {\n "name": "sparkr",\n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "r",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "scoped",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKAY1A8Y": {\n "id": "2CKAY1A8Y",\n "name": "md",\n "group": "md",\n "properties": {\n "markdown.parser.type": "markdown4j"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "md",\n "class": "org.apache.zeppelin.markdown.Markdown",\n "defaultInterpreter": false,\n 
"editor": {\n "language": "markdown",\n "editOnDblClick": true\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CHS8UYQQ": {\n "id": "2CHS8UYQQ",\n "name": "sh",\n "group": "sh",\n "properties": {\n "zeppelin.shell.keytab.location": "",\n "shell.command.timeout.millisecs": "60000",\n "zeppelin.shell.principal": "",\n "zeppelin.shell.auth.type": ""\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "sh",\n "class": "org.apache.zeppelin.shell.ShellInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sh",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n }\n },\n "interpreterBindings": {},\n "interpreterRepositories": [\n {\n "id": "central",\n "type": "default",\n "url": "http://repo1.maven.org/maven2/",\n "releasePolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "snapshotPolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "mirroredRepositories": [],\n "repositoryManager": false\n },\n {\n "id": "local",\n "type": "default",\n "url": "file:///home/zeppelin/.m2/repository",\n "releasePolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "snapshotPolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "mirroredRepositories": [],\n "repositoryManager": false\n }\n ]\n}\n'
template_after_base = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "editor": {\n 
"language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "", \n "default.url": "jdbc:postgresql://localhost:5432/", \n "default.driver": 
"org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2C4U48MY3_spark2": {\n "status": "READY", \n "group": "spark", \n "name": "spark2", \n "id": "2C4U48MY3_spark2", \n "interpreterGroup": [\n {\n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "zeppelin.R.cmd": "R", \n "master": "local[*]", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "perNoteProcess": 
false, \n "remote": true, \n "perNoteSession": false, \n "isExistingProcess": false, \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2C8A4SZ9T_livy2": {\n "status": "READY", \n "group": "livy", \n "name": "livy2", \n "id": "2C8A4SZ9T_livy2", \n "interpreterGroup": [\n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter", \n "name": "spark", \n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter", \n "name": "sql", \n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter", \n "name": "pyspark", \n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter", \n "name": "pyspark3", \n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter", \n "name": "sparkr", \n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }\n }, \n {\n "defaultInterpreter": false, \n "name": "shared", \n "class": "org.apache.zeppelin.livy.LivySharedInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.livy.keytab": "", \n 
"zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.principal": "", \n "zeppelin.livy.session.create_timeout": "120", \n "livy.spark.driver.memory": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "true", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.driver.cores": "", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX6DGQZ": {\n "status": "READY", \n "group": "livy", \n "name": "livy", \n "id": "2CKX6DGQZ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark3", \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }, \n 
"defaultInterpreter": false, \n "name": "sparkr", \n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "shared", \n "class": "org.apache.zeppelin.livy.LivySharedInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.keytab": "", \n "zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.driver.memory": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.driver.cores": "", \n "zeppelin.livy.session.create_timeout": "120", \n "zeppelin.livy.principal": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "false", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.spark.sql.field.truncate": "true", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": 
"warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n }\n ]\n}'
template_after_without_spark_and_livy = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n 
"editor": {\n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "SPARK_HOME": "/usr/hdp/current/spark-client/", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "", \n "default.url": 
"jdbc:postgresql://localhost:5432/", \n "default.driver": "org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n }\n ]\n}'
template_after_kerberos = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "editor": {\n 
"language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "spark.yarn.principal": "", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "spark.yarn.keytab": "", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "SIMPLE", \n "default.url": 
"jdbc:postgresql://localhost:5432/", \n "default.driver": "org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX6DGQZ": {\n "status": "READY", \n "group": "livy", \n "name": "livy", \n "id": "2CKX6DGQZ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark3", \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "sparkr", \n "class": 
"org.apache.zeppelin.livy.LivySparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.keytab": "", \n "zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.driver.memory": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.driver.cores": "", \n "zeppelin.livy.session.create_timeout": "120", \n "zeppelin.livy.principal": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "false", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.spark.sql.field.truncate": "true", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n 
}\n ]\n}' | 1,638.407407 | 15,625 | 0.503086 |
bd152f9a1abf41fccab518aea9751f4edf1ce6d9 | 2,232 | py | Python | tests/test_builtins.py | ybastide/cst-test | 8499188c91e07b6df9196119ad70bab2f072ca0a | [
"MIT"
] | null | null | null | tests/test_builtins.py | ybastide/cst-test | 8499188c91e07b6df9196119ad70bab2f072ca0a | [
"MIT"
] | 1 | 2020-03-12T20:15:11.000Z | 2020-03-12T20:15:11.000Z | tests/test_builtins.py | ybastide/cst-test | 8499188c91e07b6df9196119ad70bab2f072ca0a | [
"MIT"
] | null | null | null | import os
import sys
from tests.check import check_result
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
def test_map_01():
    """A bare ``map(...)`` in a return position gets wrapped in ``list(...)``
    and a ``from builtins import map`` line is inserted at the top."""
    check_result(
        """
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results)
""",
        """
from builtins import map
def foo():
    results = [1, 2, 3]
    return list(map(lambda i: i * 2, results))
""",
    )
def test_map_02():
    """Same as test_map_01, but a trailing inline comment on the ``map`` line
    must survive the rewrite untouched."""
    check_result(
        """
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results) # comment
""",
        """
from builtins import map
def foo():
    results = [1, 2, 3]
    return list(map(lambda i: i * 2, results)) # comment
""",
    )
def test_map_03():
    """When ``from builtins import map`` is already present, the call is the
    Python-3 ``map`` and must be left alone — no ``list(...)`` wrapping."""
    check_result(
        """
from builtins import map
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results)
""",
        """
from builtins import map
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results)
""",
    )
def test_map_04():
    """With existing imports, the new ``builtins`` import is appended after
    them (not at the very top of the file)."""
    check_result(
        """
import os
import re
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results)
""",
        """
import os
import re
from builtins import map
def foo():
    results = [1, 2, 3]
    return list(map(lambda i: i * 2, results))
""",
    )
def test_map_05():
    """An existing ``from builtins import ...`` line is extended in place
    (``map`` merged in alphabetically) rather than duplicated."""
    check_result(
        """
from builtins import str as unicode
def foo():
    results = [1, 2, 3]
    return map(lambda i: i * 2, results)
""",
        """
from builtins import map, str as unicode
def foo():
    results = [1, 2, 3]
    return list(map(lambda i: i * 2, results))
""",
    )
def test_map_06():
    """Attribute access named ``map`` (``foo.map()``) is not the builtin and
    must not be touched."""
    check_result(
        """
print(foo.map())
""",
        """
print(foo.map())
""",
    )
def test_range_01():
    """A printed ``range(5)`` gets wrapped in ``list(...)`` and the
    ``builtins`` import is inserted."""
    check_result(
        """
print(range(5))
""",
        """
from builtins import range
print(list(range(5)))
""",
    )
def test_xrange_01():
    """``xrange`` is renamed to ``range``; an existing ``list(xrange(...))``
    is not double-wrapped, and loop headers are rewritten in place."""
    check_result(
        """
print(list(xrange(5)))
for i in xrange(4):
    print(i)
""",
        """
from builtins import range
print(list(range(5)))
for i in range(4):
    print(i)
""",
    )
| 15.5 | 62 | 0.596774 |
6bcc4121b2aeff419542726ad86826c3f91613a5 | 44,543 | py | Python | haystack/retriever/dense.py | mcschmitz/haystack | 14195bfb09bdf1ddbd4b20a1fb18dc23f61f61b6 | [
"Apache-2.0"
] | null | null | null | haystack/retriever/dense.py | mcschmitz/haystack | 14195bfb09bdf1ddbd4b20a1fb18dc23f61f61b6 | [
"Apache-2.0"
] | null | null | null | haystack/retriever/dense.py | mcschmitz/haystack | 14195bfb09bdf1ddbd4b20a1fb18dc23f61f61b6 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import abstractmethod
from typing import List, Union, Optional
import torch
import numpy as np
from pathlib import Path
from farm.utils import initialize_device_settings
from tqdm.auto import tqdm
from transformers import AutoTokenizer, AutoModel
from torch import nn
from haystack.document_store.base import BaseDocumentStore
from haystack import Document
from haystack.retriever.base import BaseRetriever
from transformers import AutoTokenizer
from farm.infer import Inferencer
from farm.modeling.tokenization import Tokenizer
from farm.modeling.language_model import LanguageModel
from farm.modeling.biadaptive_model import BiAdaptiveModel
from farm.modeling.prediction_head import TextSimilarityHead
from farm.data_handler.processor import TextSimilarityProcessor, InferenceProcessor
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.dataloader import NamedDataLoader
from farm.modeling.optimization import initialize_optimizer
from farm.train import Trainer
from torch.utils.data.sampler import SequentialSampler
logger = logging.getLogger(__name__)
class DensePassageRetriever(BaseRetriever):
"""
Retriever that uses a bi-encoder (one transformer for query, one transformer for passage).
See the original paper for more details:
Karpukhin, Vladimir, et al. (2020): "Dense Passage Retrieval for Open-Domain Question Answering."
(https://arxiv.org/abs/2004.04906).
"""
def __init__(self,
document_store: BaseDocumentStore,
query_embedding_model: Union[Path, str] = "facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model: Union[Path, str] = "facebook/dpr-ctx_encoder-single-nq-base",
single_model_path: Optional[Union[Path, str]] = None,
model_version: Optional[str] = None,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
top_k: int = 10,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
infer_tokenizer_classes: bool = False,
similarity_function: str = "dot_product",
global_loss_buffer_size: int = 150000,
progress_bar: bool = True
):
"""
Init the Retriever incl. the two encoder models from a local or remote model checkpoint.
The checkpoint format matches huggingface transformers' model format
**Example:**
```python
| # remote model from FAIR
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
| passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base")
| # or from local path
| DensePassageRetriever(document_store=your_doc_store,
| query_embedding_model="model_directory/question-encoder",
| passage_embedding_model="model_directory/context-encoder")
```
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param query_embedding_model: Local path or remote name of question encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-question_encoder-single-nq-base"``
:param passage_embedding_model: Local path or remote name of passage encoder checkpoint. The format equals the
one used by hugging-face transformers' modelhub models
Currently available remote names: ``"facebook/dpr-ctx_encoder-single-nq-base"``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param max_seq_len_query: Longest length of each query sequence. Maximum number of tokens for the query text. Longer ones will be cut down."
:param max_seq_len_passage: Longest length of each passage/context sequence. Maximum number of tokens for the passage text. Longer ones will be cut down."
:param top_k: How many documents to return per query.
:param use_gpu: Whether to use gpu or not
:param batch_size: Number of questions or passages to encode at once
:param embed_title: Whether to concatenate title and passage to a text pair that is then used to create the embedding.
This is the approach used in the original paper and is likely to improve performance if your
titles contain meaningful information for retrieval (topic, entities etc.) .
The title is expected to be present in doc.meta["name"] and can be supplied in the documents
before writing them to the DocumentStore like this:
{"text": "my text", "meta": {"name": "my title"}}.
:param use_fast_tokenizers: Whether to use fast Rust tokenizers
:param infer_tokenizer_classes: Whether to infer tokenizer class from the model config / name.
If `False`, the class always loads `DPRQuestionEncoderTokenizer` and `DPRContextEncoderTokenizer`.
:param similarity_function: Which function to apply for calculating the similarity of query and passage embeddings during training.
Options: `dot_product` (Default) or `cosine`
:param global_loss_buffer_size: Buffer size for all_gather() in DDP.
Increase if errors like "encoded data exceeds max_size ..." come up
:param progress_bar: Whether to show a tqdm progress bar or not.
Can be helpful to disable in production deployments to keep the logs clean.
"""
# save init parameters to enable export of component config as YAML
self.set_config(
document_store=document_store, query_embedding_model=query_embedding_model,
passage_embedding_model=passage_embedding_model,
model_version=model_version, max_seq_len_query=max_seq_len_query, max_seq_len_passage=max_seq_len_passage,
top_k=top_k, use_gpu=use_gpu, batch_size=batch_size, embed_title=embed_title,
use_fast_tokenizers=use_fast_tokenizers, infer_tokenizer_classes=infer_tokenizer_classes,
similarity_function=similarity_function, progress_bar=progress_bar,
)
self.document_store = document_store
self.batch_size = batch_size
self.progress_bar = progress_bar
self.top_k = top_k
if document_store is None:
logger.warning("DensePassageRetriever initialized without a document store. "
"This is fine if you are performing DPR training. "
"Otherwise, please provide a document store in the constructor.")
elif document_store.similarity != "dot_product":
logger.warning(f"You are using a Dense Passage Retriever model with the {document_store.similarity} function. "
"We recommend you use dot_product instead. "
"This can be set when initializing the DocumentStore")
self.device, _ = initialize_device_settings(use_cuda=use_gpu)
self.infer_tokenizer_classes = infer_tokenizer_classes
tokenizers_default_classes = {
"query": "DPRQuestionEncoderTokenizer",
"passage": "DPRContextEncoderTokenizer"
}
if self.infer_tokenizer_classes:
tokenizers_default_classes["query"] = None # type: ignore
tokenizers_default_classes["passage"] = None # type: ignore
self.model_type = "default"
if isinstance(passage_embedding_model, nn.Module) and isinstance(query_embedding_model, nn.Module):
self.model_type = "self-written"
# Init & Load Encoders
self.query_tokenizer = Tokenizer.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["query"])
self.query_encoder = LanguageModel.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
language_model_class="DPRQuestionEncoder")
self.passage_tokenizer = Tokenizer.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["passage"])
self.passage_encoder = LanguageModel.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
language_model_class="DPRContextEncoder")
self.processor = TextSimilarityProcessor(query_tokenizer=self.query_tokenizer,
passage_tokenizer=self.passage_tokenizer,
max_seq_len_passage=max_seq_len_passage,
max_seq_len_query=max_seq_len_query,
label_list=["hard_negative", "positive"],
metric="text_similarity_metric",
embed_title=embed_title,
num_hard_negatives=0,
num_positives=1)
prediction_head = TextSimilarityHead(similarity_function=similarity_function, global_loss_buffer_size=global_loss_buffer_size)
self.model = BiAdaptiveModel(
language_model1=self.query_encoder,
language_model2=self.passage_encoder,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
device=self.device,
)
if single_model_path is None:
if self.model_type == "default":
self.query_tokenizer = Tokenizer.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["query"])
self.query_encoder = LanguageModel.load(pretrained_model_name_or_path=query_embedding_model,
revision=model_version,
language_model_class="DPRQuestionEncoder")
self.passage_tokenizer = Tokenizer.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
do_lower_case=True,
use_fast=use_fast_tokenizers,
tokenizer_class=tokenizers_default_classes["passage"])
self.passage_encoder = LanguageModel.load(pretrained_model_name_or_path=passage_embedding_model,
revision=model_version,
language_model_class="DPRContextEncoder")
else:
self.query_tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=query_embedding_model.model_name)
self.query_encoder = query_embedding_model
self.passage_tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=passage_embedding_model.model_name)
self.passage_encoder = passage_embedding_model
self.processor = TextSimilarityProcessor(query_tokenizer=self.query_tokenizer,
passage_tokenizer=self.passage_tokenizer,
max_seq_len_passage=max_seq_len_passage,
max_seq_len_query=max_seq_len_query,
label_list=["hard_negative", "positive"],
metric="text_similarity_metric",
embed_title=embed_title,
num_hard_negatives=0,
num_positives=1)
prediction_head = TextSimilarityHead(similarity_function=similarity_function)
self.model = BiAdaptiveModel(
language_model1=self.query_encoder,
language_model2=self.passage_encoder,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm1_output_types=["per_sequence"],
lm2_output_types=["per_sequence"],
device=self.device,
)
else:
self.processor = TextSimilarityProcessor.load_from_dir(single_model_path)
self.processor.max_seq_len_passage = max_seq_len_passage
self.processor.max_seq_len_query = max_seq_len_query
self.processor.embed_title = embed_title
self.processor.num_hard_negatives = 0
self.processor.num_positives = 1 # during indexing of documents only one embedding is created
self.model = BiAdaptiveModel.load(single_model_path, device=self.device)
self.model.connect_heads_with_processor(self.processor.tasks, require_labels=False)
def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if top_k is None:
top_k = self.top_k
if not self.document_store:
logger.error("Cannot perform retrieve() since DensePassageRetriever initialized with document_store=None")
return []
if index is None:
index = self.document_store.index
query_emb = self.embed_queries(texts=[query])
documents = self.document_store.query_by_embedding(query_emb=query_emb[0], top_k=top_k, filters=filters, index=index)
return documents
    def _get_predictions(self, dicts):
        """
        Feed a preprocessed dataset to the model and get the actual predictions (forward pass + formatting).

        :param dicts: list of dictionaries
        examples:[{'query': "where is florida?"}, {'query': "who wrote lord of the rings?"}, ...]
                [{'passages': [{
                    "title": 'Big Little Lies (TV series)',
                    "text": 'series garnered several accolades. It received..',
                    "label": 'positive',
                    "external_id": '18768923'},
                    {"title": 'Framlingham Castle',
                    "text": 'Castle on the Hill "Castle on the Hill" is a song by English..',
                    "label": 'positive',
                    "external_id": '19930582'}, ...]
        :return: dictionary of embeddings for "passages" and "query"
        """
        # Tokenize/featurize the raw dicts with the FARM processor; baskets are kept so the
        # dataset order matches the input order.
        dataset, tensor_names, _, baskets = self.processor.dataset_from_dicts(
            dicts, indices=[i for i in range(len(dicts))], return_baskets=True
        )

        # Sequential sampling preserves the input order in the returned embeddings.
        data_loader = NamedDataLoader(
            dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
        )
        all_embeddings = {"query": [], "passages": []}
        self.model.eval()

        # When running evaluations etc., we don't want a progress bar for every single query
        if len(dataset) == 1:
            disable_tqdm=True
        else:
            disable_tqdm = not self.progress_bar

        with tqdm(total=len(data_loader)*self.batch_size, unit=" Docs", desc=f"Create embeddings", position=1,
                  leave=False, disable=disable_tqdm) as progress_bar:
            for batch in data_loader:
                # Move all input tensors onto the device selected at init time.
                batch = {key: batch[key].to(self.device) for key in batch}

                # get logits
                with torch.no_grad():
                    # The bi-adaptive model returns (query_emb, passage_emb); one of the two is
                    # None depending on whether the input dicts contained queries or passages.
                    query_embeddings, passage_embeddings = self.model.forward(**batch)[0]
                    if query_embeddings is not None:
                        all_embeddings["query"].append(query_embeddings.cpu().numpy())
                    if passage_embeddings is not None:
                        all_embeddings["passages"].append(passage_embeddings.cpu().numpy())
                progress_bar.update(self.batch_size)

        # Collapse the per-batch arrays into one (n, dim) array per key.
        if all_embeddings["passages"]:
            all_embeddings["passages"] = np.concatenate(all_embeddings["passages"])
        if all_embeddings["query"]:
            all_embeddings["query"] = np.concatenate(all_embeddings["query"])
        return all_embeddings
def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
"""
Create embeddings for a list of queries using the query encoder
:param texts: Queries to embed
:return: Embeddings, one per input queries
"""
queries = [{'query': q} for q in texts]
result = self._get_predictions(queries)["query"]
return result
def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
"""
Create embeddings for a list of passages using the passage encoder
:param docs: List of Document objects used to represent documents / passages in a standardized way within Haystack.
:return: Embeddings of documents / passages shape (batch_size, embedding_dim)
"""
passages = [{'passages': [{
"title": d.meta["name"] if d.meta and "name" in d.meta else "",
"text": d.text,
"label": d.meta["label"] if d.meta and "label" in d.meta else "positive",
"external_id": d.id}]
} for d in docs]
embeddings = self._get_predictions(passages)["passages"]
return embeddings
    def train(self,
              data_dir: str,
              train_filename: str,
              dev_filename: str = None,
              test_filename: str = None,
              max_sample: int = None,
              max_processes: int = 128,
              dev_split: float = 0,
              batch_size: int = 2,
              embed_title: bool = True,
              num_hard_negatives: int = 1,
              num_positives: int = 1,
              n_epochs: int = 3,
              evaluate_every: int = 1000,
              n_gpu: int = 1,
              learning_rate: float = 1e-5,
              epsilon: float = 1e-08,
              weight_decay: float = 0.0,
              num_warmup_steps: int = 100,
              grad_acc_steps: int = 1,
              use_amp: str = None,
              optimizer_name: str = "TransformersAdamW",
              optimizer_correct_bias: bool = True,
              save_dir: str = "../saved_models/dpr",
              query_encoder_save_dir: str = "query_encoder",
              passage_encoder_save_dir: str = "passage_encoder"
              ):
        """
        train a DensePassageRetrieval model

        :param data_dir: Directory where training file, dev file and test file are present
        :param train_filename: training filename
        :param dev_filename: development set filename, file to be used by model in eval step of training
        :param test_filename: test set filename, file to be used by model in test step after training
        :param max_sample: maximum number of input samples to convert. Can be used for debugging a smaller dataset.
        :param max_processes: the maximum number of processes to spawn in the multiprocessing.Pool used in DataSilo.
                              It can be set to 1 to disable the use of multiprocessing or make debugging easier.
        :param dev_split: The proportion of the train set that will sliced. Only works if dev_filename is set to None
        :param batch_size: total number of samples in 1 batch of data
        :param embed_title: whether to concatenate passage title with each passage. The default setting in official DPR embeds passage title with the corresponding passage
        :param num_hard_negatives: number of hard negative passages(passages which are very similar(high score by BM25) to query but do not contain the answer
        :param num_positives: number of positive passages
        :param n_epochs: number of epochs to train the model on
        :param evaluate_every: number of training steps after evaluation is run
        :param n_gpu: number of gpus to train on
        :param learning_rate: learning rate of optimizer
        :param epsilon: epsilon parameter of optimizer
        :param weight_decay: weight decay parameter of optimizer
        :param grad_acc_steps: number of steps to accumulate gradient over before back-propagation is done
        :param use_amp: Whether to use automatic mixed precision (AMP) or not. The options are:
                    "O0" (FP32)
                    "O1" (Mixed Precision)
                    "O2" (Almost FP16)
                    "O3" (Pure FP16).
                    For more information, refer to: https://nvidia.github.io/apex/amp.html
        :param optimizer_name: what optimizer to use (default: TransformersAdamW)
        :param num_warmup_steps: number of warmup steps
        :param optimizer_correct_bias: Whether to correct bias in optimizer
        :param save_dir: directory where models are saved
        :param query_encoder_save_dir: directory inside save_dir where query_encoder model files are saved
        :param passage_encoder_save_dir: directory inside save_dir where passage_encoder model files are saved
        """
        # Reconfigure the shared processor for training (hard negatives / positives are only
        # relevant here; at indexing time they are forced back to 0 and 1 respectively).
        self.processor.embed_title = embed_title
        self.processor.data_dir = Path(data_dir)
        self.processor.train_filename = train_filename
        self.processor.dev_filename = dev_filename
        self.processor.test_filename = test_filename
        self.processor.max_sample = max_sample
        self.processor.dev_split = dev_split
        self.processor.num_hard_negatives = num_hard_negatives
        self.processor.num_positives = num_positives

        # Labels are required during training (contrastive objective on positives/negatives).
        self.model.connect_heads_with_processor(self.processor.tasks, require_labels=True)

        # DataSilo loads and featurizes train/dev/test splits via the processor.
        data_silo = DataSilo(processor=self.processor, batch_size=batch_size, distributed=False, max_processes=max_processes)

        # 5. Create an optimizer
        self.model, optimizer, lr_schedule = initialize_optimizer(
            model=self.model,
            learning_rate=learning_rate,
            optimizer_opts={"name": optimizer_name, "correct_bias": optimizer_correct_bias,
                            "weight_decay": weight_decay, "eps": epsilon},
            schedule_opts={"name": "LinearWarmup", "num_warmup_steps": num_warmup_steps},
            n_batches=len(data_silo.loaders["train"]),
            n_epochs=n_epochs,
            grad_acc_steps=grad_acc_steps,
            device=self.device,
            use_amp=use_amp
        )

        # 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time
        trainer = Trainer(
            model=self.model,
            optimizer=optimizer,
            data_silo=data_silo,
            epochs=n_epochs,
            n_gpu=n_gpu,
            lr_schedule=lr_schedule,
            evaluate_every=evaluate_every,
            device=self.device,
            use_amp=use_amp
        )

        # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai
        trainer.train()

        # Persist both encoders plus their tokenizers so the model can be re-loaded via `load()`.
        self.model.save(Path(save_dir), lm1_name=query_encoder_save_dir, lm2_name=passage_encoder_save_dir)
        self.query_tokenizer.save_pretrained(f"{save_dir}/{query_encoder_save_dir}")
        self.passage_tokenizer.save_pretrained(f"{save_dir}/{passage_encoder_save_dir}")
def save(self, save_dir: Union[Path, str], query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder"):
"""
Save DensePassageRetriever to the specified directory.
:param save_dir: Directory to save to.
:param query_encoder_dir: Directory in save_dir that contains query encoder model.
:param passage_encoder_dir: Directory in save_dir that contains passage encoder model.
:return: None
"""
save_dir = Path(save_dir)
self.model.save(save_dir, lm1_name=query_encoder_dir, lm2_name=passage_encoder_dir)
save_dir = str(save_dir)
self.query_tokenizer.save_pretrained(save_dir + f"/{query_encoder_dir}")
self.passage_tokenizer.save_pretrained(save_dir + f"/{passage_encoder_dir}")
@classmethod
def load(cls,
load_dir: Union[Path, str],
document_store: BaseDocumentStore,
max_seq_len_query: int = 64,
max_seq_len_passage: int = 256,
use_gpu: bool = True,
batch_size: int = 16,
embed_title: bool = True,
use_fast_tokenizers: bool = True,
similarity_function: str = "dot_product",
query_encoder_dir: str = "query_encoder",
passage_encoder_dir: str = "passage_encoder",
infer_tokenizer_classes: bool = False
):
"""
Load DensePassageRetriever from the specified directory.
"""
load_dir = Path(load_dir)
dpr = cls(
document_store=document_store,
query_embedding_model=Path(load_dir) / query_encoder_dir,
passage_embedding_model=Path(load_dir) / passage_encoder_dir,
max_seq_len_query=max_seq_len_query,
max_seq_len_passage=max_seq_len_passage,
use_gpu=use_gpu,
batch_size=batch_size,
embed_title=embed_title,
use_fast_tokenizers=use_fast_tokenizers,
similarity_function=similarity_function,
infer_tokenizer_classes=infer_tokenizer_classes
)
logger.info(f"DPR model loaded from {load_dir}")
return dpr
class EmbeddingRetriever(BaseRetriever):
def __init__(
self,
document_store: BaseDocumentStore,
embedding_model: str,
model_version: Optional[str] = None,
use_gpu: bool = True,
model_format: str = "farm",
pooling_strategy: str = "reduce_mean",
emb_extraction_layer: int = -1,
top_k: int = 10,
progress_bar: bool = True
):
"""
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param embedding_model: Local path or name of model in Hugging Face's model hub such as ``'deepset/sentence_bert'``
:param model_version: The version of model to use from the HuggingFace model hub. Can be tag name, branch name, or commit hash.
:param use_gpu: Whether to use gpu or not
:param model_format: Name of framework that was used for saving the model. Options:
- ``'farm'``
- ``'transformers'``
- ``'sentence_transformers'``
:param pooling_strategy: Strategy for combining the embeddings from the model (for farm / transformers models only).
Options:
- ``'cls_token'`` (sentence vector)
- ``'reduce_mean'`` (sentence vector)
- ``'reduce_max'`` (sentence vector)
- ``'per_token'`` (individual token vectors)
:param emb_extraction_layer: Number of layer from which the embeddings shall be extracted (for farm / transformers models only).
Default: -1 (very last layer).
:param top_k: How many documents to return per query.
:param progress_bar: If true displays progress bar during embedding.
"""
# save init parameters to enable export of component config as YAML
self.set_config(
document_store=document_store, embedding_model=embedding_model, model_version=model_version,
use_gpu=use_gpu, model_format=model_format, pooling_strategy=pooling_strategy,
emb_extraction_layer=emb_extraction_layer, top_k=top_k,
)
self.document_store = document_store
self.embedding_model = embedding_model
self.model_format = model_format
self.model_version = model_version
self.use_gpu = use_gpu
self.pooling_strategy = pooling_strategy
self.emb_extraction_layer = emb_extraction_layer
self.top_k = top_k
self.progress_bar = progress_bar
logger.info(f"Init retriever using embeddings of model {embedding_model}")
self.embedding_encoder = _EmbeddingEncoderFactory.get_embedding_retriever_impl(self, model_format)
if model_format == "farm" or model_format == "transformers":
logger.info(
f"Init retriever using embeddings of model {embedding_model}")
self.embedding_model = Inferencer.load(
embedding_model, revision=model_version, task_type="embeddings", extraction_strategy=self.pooling_strategy,
extraction_layer=self.emb_extraction_layer, gpu=use_gpu, batch_size=4, max_seq_len=512, num_processes=0
)
# Check that document_store has the right similarity function
similarity = document_store.similarity
# If we are using a sentence transformer model
if "sentence" in embedding_model.lower() and similarity != "cosine":
logger.warning(f"You seem to be using a Sentence Transformer with the {similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
elif "dpr" in embedding_model.lower() and similarity != "dot_product":
logger.warning(f"You seem to be using a DPR model with the {similarity} function. "
f"We recommend using dot_product instead. "
f"This can be set when initializing the DocumentStore")
elif model_format == "sentence_transformers":
logger.info(
f"Init retriever using embeddings of model {embedding_model}")
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ImportError("Can't find package `sentence-transformers` \n"
"You can install it via `pip install sentence-transformers` \n"
"For details see https://github.com/UKPLab/sentence-transformers ")
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
if use_gpu:
device = "cuda"
else:
device = "cpu"
self.embedding_model = SentenceTransformer(embedding_model, device=device)
if document_store.similarity != "cosine":
logger.warning(
f"You are using a Sentence Transformer with the {document_store.similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
elif model_format == "self-written":
logger.info(
f"Init retriever using embeddings of model {type(embedding_model).__name__}")
self.embedding_model = embedding_model
else:
raise NotImplementedError
def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:
"""
Scan through documents in DocumentStore and return a small number documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if top_k is None:
top_k = self.top_k
if index is None:
index = self.document_store.index
query_emb = self.embed_queries(texts=[query])
documents = self.document_store.query_by_embedding(query_emb=query_emb[0], filters=filters,
top_k=top_k, index=index)
return documents
def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
"""
Create embeddings for a list of queries.
:param texts: Queries to embed
:return: Embeddings, one per input queries
"""
# for backward compatibility: cast pure str input
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list), "Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
return self.embedding_encoder.embed_queries(texts)
def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
"""
Create embeddings for a list of passages.
if self.model_format == "farm" or self.model_format == "transformers":
# TODO: FARM's `sample_to_features_text` need to fix following warning -
# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
emb = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
emb = [(r["vec"]) for r in emb]
elif self.model_format == "sentence_transformers":
# texts can be a list of strings or a list of [title, text]
# get back list of numpy embedding vectors
emb = self.embedding_model.encode(texts, batch_size=200, show_progress_bar=False)
emb = [r for r in emb]
elif self.model_format == "self-written":
pred = self.embedding_model.predict(texts)
emb = [p for p in pred]
return emb
:param docs: List of documents to embed
:return: Embeddings, one per input passage
"""
return self.embedding_encoder.embed_passages(docs)
class _EmbeddingEncoder(ABC):
    """
    Interface for the model-format-specific encoders used by EmbeddingRetriever.

    FIX: the methods were decorated with @abstractmethod but the class did not inherit
    from ABC, so the abstractness was never enforced. Inheriting ABC makes instantiating
    an incomplete implementation fail fast; all existing subclasses implement both methods.
    """

    @abstractmethod
    def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
        """
        Create embeddings for a list of queries.

        :param texts: Queries to embed
        :return: Embeddings, one per input query
        """
        pass

    @abstractmethod
    def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
        """
        Create embeddings for a list of passages.

        :param docs: List of documents to embed
        :return: Embeddings, one per input passage
        """
        pass
class _EmbeddingEncoderFactory:
    """Maps an EmbeddingRetriever ``model_format`` string to the matching encoder implementation."""

    @staticmethod
    def get_embedding_retriever_impl(retriever: EmbeddingRetriever, model_format: str) -> _EmbeddingEncoder:
        if model_format in ("farm", "transformers"):
            return _DefaultEmbeddingEncoder(retriever)
        if model_format == "sentence_transformers":
            return _SentenceTransformersEmbeddingEncoder(retriever)
        if model_format == "retribert":
            return _RetribertEmbeddingEncoder(retriever)
        raise ValueError(f"Unknown retriever embedding model format {model_format}")
class _DefaultEmbeddingEncoder(_EmbeddingEncoder):
    """Encoder for plain FARM / transformers embedding models, backed by FARM's Inferencer."""

    def __init__(
        self,
        retriever: EmbeddingRetriever
    ):
        self.embedding_model = Inferencer.load(
            retriever.embedding_model, revision=retriever.model_version, task_type="embeddings",
            extraction_strategy=retriever.pooling_strategy,
            extraction_layer=retriever.emb_extraction_layer, gpu=retriever.use_gpu,
            batch_size=4, max_seq_len=512, num_processes=0
        )
        # Sanity-check that the document store's similarity matches the model family.
        model_name = retriever.embedding_model.lower()
        similarity = retriever.document_store.similarity
        if "sentence" in model_name and similarity != "cosine":
            logger.warning(f"You seem to be using a Sentence Transformer with the {similarity} function. "
                           f"We recommend using cosine instead. "
                           f"This can be set when initializing the DocumentStore")
        elif "dpr" in model_name and similarity != "dot_product":
            logger.warning(f"You seem to be using a DPR model with the {similarity} function. "
                           f"We recommend using dot_product instead. "
                           f"This can be set when initializing the DocumentStore")

    def embed(self, texts: Union[List[List[str]], List[str], str]) -> List[np.ndarray]:
        # TODO: FARM's `sample_to_features_text` need to fix following warning -
        # tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
        predictions = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
        return [p["vec"] for p in predictions]

    def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
        return self.embed(texts)

    def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
        return self.embed([d.text for d in docs])  # type: ignore
class _SentenceTransformersEmbeddingEncoder(_EmbeddingEncoder):
    """Encoder backed by the `sentence-transformers` package (imported lazily)."""

    def __init__(
        self,
        retriever: EmbeddingRetriever
    ):
        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise ImportError("Can't find package `sentence-transformers` \n"
                              "You can install it via `pip install sentence-transformers` \n"
                              "For details see https://github.com/UKPLab/sentence-transformers ")
        # pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
        # e.g. 'roberta-base-nli-stsb-mean-tokens'
        device, _ = initialize_device_settings(use_cuda=retriever.use_gpu)
        self.embedding_model = SentenceTransformer(retriever.embedding_model, device=device)
        self.show_progress_bar = retriever.progress_bar
        store = retriever.document_store
        if store.similarity != "cosine":
            logger.warning(
                f"You are using a Sentence Transformer with the {store.similarity} function. "
                f"We recommend using cosine instead. "
                f"This can be set when initializing the DocumentStore")

    def embed(self, texts: Union[List[List[str]], List[str], str]) -> List[np.ndarray]:
        # `encode` accepts plain strings or [title, text] pairs and returns numpy vectors.
        vectors = self.embedding_model.encode(texts, batch_size=200, show_progress_bar=self.show_progress_bar)
        return list(vectors)

    def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
        return self.embed(texts)

    def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
        titled_passages = [[d.meta["name"] if d.meta and "name" in d.meta else "", d.text] for d in docs]  # type: ignore
        return self.embed(titled_passages)
class _RetribertEmbeddingEncoder(_EmbeddingEncoder):
    """Encoder for Retribert-style bi-encoders loaded via transformers' AutoModel."""

    def __init__(
        self,
        retriever: EmbeddingRetriever
    ):
        self.progress_bar = retriever.progress_bar
        if retriever.use_gpu and torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        embedding_tokenizer = AutoTokenizer.from_pretrained(retriever.embedding_model,
                                                            use_fast_tokenizers=True)
        self.embedding_model = AutoModel.from_pretrained(retriever.embedding_model).to(self.device)
        self.processor = InferenceProcessor(tokenizer=embedding_tokenizer,
                                            max_seq_len=embedding_tokenizer.max_len_single_sentence)

    def _embed(self, text_dicts: List[dict], embed_fn) -> np.ndarray:
        """
        Shared batching/inference loop for queries and passages.

        FIX: embed_queries and embed_passages previously duplicated this whole loop,
        differing only in the model method used (`embed_questions` vs `embed_answers`).

        :param text_dicts: dicts of shape {"text": ...} to featurize and embed
        :param embed_fn: bound model method taking input_ids/attention_mask and returning embeddings
        """
        dataloader = self._create_dataloader(text_dicts)
        embeddings: List[np.ndarray] = []
        disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
        for batch in tqdm(dataloader, desc="Creating Embeddings", unit=" Batches", disable=disable_tqdm):
            batch = {key: batch[key].to(self.device) for key in batch}
            with torch.no_grad():
                reps = embed_fn(input_ids=batch["input_ids"],
                                attention_mask=batch["padding_mask"]).cpu().numpy()
            embeddings.append(reps)
        return np.concatenate(embeddings)

    def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
        return self._embed([{"text": q} for q in texts], self.embedding_model.embed_questions)

    def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
        return self._embed([{"text": d.text} for d in docs], self.embedding_model.embed_answers)

    def _create_dataloader(self, text_to_encode: List[dict]) -> NamedDataLoader:
        # Sequential sampling keeps the returned embeddings aligned with the input order.
        dataset, tensor_names, _ = self.processor.dataset_from_dicts(text_to_encode,
                                                                     indices=[i for i in range(len(text_to_encode))])
        dataloader = NamedDataLoader(dataset=dataset, sampler=SequentialSampler(dataset),
                                     batch_size=32, tensor_names=tensor_names)
        return dataloader
| 52.964328 | 171 | 0.618391 |
aaaa134f7b13eed9ccce107e8818099de6c7d49d | 520 | py | Python | main.py | chahatraj/VehicleTracker | cfea1d4a148be87eab024dd6fd92525397afa5b7 | [
"MIT"
] | null | null | null | main.py | chahatraj/VehicleTracker | cfea1d4a148be87eab024dd6fd92525397afa5b7 | [
"MIT"
] | null | null | null | main.py | chahatraj/VehicleTracker | cfea1d4a148be87eab024dd6fd92525397afa5b7 | [
"MIT"
] | null | null | null | import ultrasonic)
import accelerometer
import camera
import time
import gsm
velocity,speed = ultrasonic.ultrasonic()
if(speed>40):
print ‘Rule Break : Out of speed’
x,y,z=accelerometer. accelerometer()
str=time.time() + ‘.jpg’
no_plate=camera.camera(str)
gsm.gsm(no_plate)
opt=input( ‘Do you want to search a particular vehicle? : (y/n)’)
if opt==’y’ :
no_plate_check=input(‘Enter vehicle no : ’)
if no_plate==no_plate_check :
print ‘Found at time ’ + str – ‘.py’
else :
print ‘Not found’ | 24.761905 | 66 | 0.688462 |
7b2617953741fe24eb7a0a83d474fe03b2365bbe | 4,937 | py | Python | tests/extensions/test_pluck.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 7 | 2020-07-24T13:15:37.000Z | 2021-12-11T22:40:07.000Z | tests/extensions/test_pluck.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 418 | 2020-04-27T22:15:27.000Z | 2022-03-31T23:49:34.000Z | tests/extensions/test_pluck.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 6 | 2020-05-28T16:06:53.000Z | 2021-03-16T02:54:15.000Z | import os
from glob import glob
from tempfile import TemporaryDirectory
import pytest
from scrapy import Request
from scrapy.exceptions import StopDownload
from kingfisher_scrapy.base_spider import BaseSpider, CompressedFileSpider
from kingfisher_scrapy.extensions import Pluck
from kingfisher_scrapy.items import PluckedItem
from tests import spider_with_crawler
def test_disabled():
    """A spider without a pluck pointer must not produce any pluck CSV file."""
    with TemporaryDirectory() as directory:
        # No release_pointer/package_pointer: the extension is effectively off.
        spider = spider_with_crawler(settings={'KINGFISHER_PLUCK_PATH': directory})
        extension = Pluck.from_crawler(spider.crawler)

        extension.item_scraped(PluckedItem({'value': '2020-10-01'}), spider)
        extension.spider_closed(spider, 'itemcount')

        assert glob(os.path.join(directory, 'pluck*.csv')) == []
def test_item_scraped():
    """The first plucked value is written; a duplicate item adds no extra row."""
    with TemporaryDirectory() as directory:
        spider = spider_with_crawler(settings={'KINGFISHER_PLUCK_PATH': directory}, release_pointer='/date')
        extension = Pluck.from_crawler(spider.crawler)
        item = PluckedItem({'value': '2020-10-01'})
        csv_path = os.path.join(directory, 'pluck-release-date.csv')

        extension.item_scraped(item, spider)
        with open(csv_path) as f:
            assert f.read() == '2020-10-01,test\n'

        # A second item from the same spider must not append another row.
        extension.item_scraped(item, spider)
        with open(csv_path) as f:
            assert f.read() == '2020-10-01,test\n'
def test_spider_closed_with_items():
    """When an item was plucked, closing the spider keeps the value on disk."""
    with TemporaryDirectory() as directory:
        spider = spider_with_crawler(settings={'KINGFISHER_PLUCK_PATH': directory}, release_pointer='/date')
        extension = Pluck.from_crawler(spider.crawler)

        extension.item_scraped(PluckedItem({'value': '2020-10-01'}), spider)
        extension.spider_closed(spider, 'itemcount')

        with open(os.path.join(directory, 'pluck-release-date.csv')) as f:
            assert f.read() == '2020-10-01,test\n'
def test_spider_closed_without_items():
    """With no plucked items, closing writes the close reason instead."""
    with TemporaryDirectory() as directory:
        spider = spider_with_crawler(settings={'KINGFISHER_PLUCK_PATH': directory}, release_pointer='/date')
        extension = Pluck.from_crawler(spider.crawler)

        extension.spider_closed(spider, 'itemcount')

        with open(os.path.join(directory, 'pluck-release-date.csv')) as f:
            assert f.read() == 'closed: itemcount,test\n'
def test_bytes_received_stop_download():
    """Exceeding KINGFISHER_PLUCK_MAX_BYTES aborts the download."""
    with TemporaryDirectory() as directory:
        settings = {'KINGFISHER_PLUCK_PATH': directory, 'KINGFISHER_PLUCK_MAX_BYTES': 1}
        spider = spider_with_crawler(settings=settings, release_pointer='/date')
        extension = Pluck.from_crawler(spider.crawler)
        request = Request('http://example.com', meta={'file_name': 'test.json'})

        with pytest.raises(StopDownload):
            extension.bytes_received(data=b'12345', spider=spider, request=request)

        assert extension.max_bytes == 1
def test_bytes_received_dont_stop_download():
    """Below the byte limit, bytes are counted and the download continues."""
    with TemporaryDirectory() as directory:
        settings = {'KINGFISHER_PLUCK_PATH': directory, 'KINGFISHER_PLUCK_MAX_BYTES': 10}
        spider = spider_with_crawler(settings=settings, release_pointer='/date')
        extension = Pluck.from_crawler(spider.crawler)
        request = Request('http://example.com', meta={'file_name': 'test.json'})

        extension.bytes_received(data=b'12345', spider=spider, request=request)

        assert extension.total_bytes_received == 5
        assert extension.max_bytes == 10
# Requests the Pluck extension must NOT byte-count: requests with an explicit
# callback, compressed archives (.rar/.zip), spreadsheets needing unflattening,
# rooted JSON paths, and requests marked dont_truncate.
@pytest.mark.parametrize('test_request,spider_class,attributes', [
    (Request('http://example.com', callback=lambda item: item, meta={'file_name': 'test.json'}), BaseSpider, {}),
    (Request('http://example.com', meta={'file_name': 'test.rar'}), CompressedFileSpider, {}),
    (Request('http://example.com', meta={'file_name': 'test.zip'}), CompressedFileSpider, {}),
    (Request('http://example.com', meta={'file_name': 'test.xlsx'}), BaseSpider, {'unflatten': True}),
    (Request('http://example.com', meta={'file_name': 'test.json'}), BaseSpider, {'root_path': 'item'}),
    (Request('http://example.com', meta={'file_name': 'test.json'}), BaseSpider, {'dont_truncate': True}),
])
def test_bytes_received_ignored_requests(test_request, spider_class, attributes):
    """bytes_received leaves the counter at 0 for requests it must ignore."""
    with TemporaryDirectory() as tmpdirname:
        spider = spider_with_crawler(spider_class=spider_class, release_pointer='/date',
                                     settings={'KINGFISHER_PLUCK_PATH': tmpdirname, 'KINGFISHER_PLUCK_MAX_BYTES': 10})
        # Apply per-case spider attributes (unflatten, root_path, dont_truncate).
        for attr, value in attributes.items():
            setattr(spider, attr, value)
        extension = Pluck.from_crawler(spider.crawler)
        extension.bytes_received(data=b'12345', spider=spider, request=test_request)
        assert extension.total_bytes_received == 0
| 42.930435 | 118 | 0.686044 |
e2b5fca93baff01385457bec8f1b901c021aca0a | 4,508 | py | Python | face.py | Lysovenko/bpc | bd396c40e09f38adedec0de27d0ee178fa056888 | [
"Apache-2.0"
] | null | null | null | face.py | Lysovenko/bpc | bd396c40e09f38adedec0de27d0ee178fa056888 | [
"Apache-2.0"
] | null | null | null | face.py | Lysovenko/bpc | bd396c40e09f38adedec0de27d0ee178fa056888 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Serhiy Lysovenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Making a face of the application
"""
from tkinter import Tk, Menu, PhotoImage
from tkinter.ttk import Panedwindow, Frame, Sizegrip, Button
from os.path import dirname, join, isdir
from local_fs import Local_fs
from weakref import ref
from panel import Panel
from settings import Config
from file_consts import *
import buttons
class Face:
    """Main window of the two-panel Tkinter file manager."""

    def __init__(self, root):
        # Persistent user settings (window geometry, panel options, ...).
        self.config = Config("tkc.ini")
        root.title("Tkinter Commander")
        # Route window-close through on_delete so settings are saved first.
        root.protocol("WM_DELETE_WINDOW", self.on_delete)
        self.root = root
        root.geometry(self.config.get("fm_geometry"))
        # Row 1 (the paned area) absorbs all resize space.
        root.grid_columnconfigure(0, weight=1)
        root.grid_rowconfigure(1, weight=1)
        pw = Panedwindow(root, orient="horizontal", takefocus=False)
        frame = Frame(pw)
        self.left = Panel(frame, Local_fs(), self.config)
        pw.add(frame)
        pw.pane(frame, weight=1)
        frame = Frame(pw)
        self.right = Panel(frame, Local_fs(), self.config)
        # Each panel holds a weak reference to its counterpart.
        # NOTE: the "oposite" spelling is the attribute name Panel expects.
        self.right.oposite = ref(self.left)
        self.left.oposite = ref(self.right)
        self.left.activate()
        pw.add(frame)
        pw.pane(frame, weight=1)
        pw.grid(column=0, row=1, columnspan=2, sticky="senw")
        self.add_menu()
        self.add_btns()
        # Set the window icon via the raw Tk interface.
        root.tk.call(
            "wm", "iconphoto", root._w,
            PhotoImage(file=join(dirname(__file__), "data", "favicon.gif")))

    def add_menu(self):
        """Build the menu bar (File / Settings / Help)."""
        top = self.root
        top["menu"] = menubar = Menu(top)
        menu_file = Menu(menubar)
        menu_settings = Menu(menubar)
        menu_help = Menu(menubar)
        menubar.add_cascade(menu=menu_file, label=_("File"))
        menubar.add_cascade(menu=menu_settings, label=_("Settings"))
        menubar.add_cascade(menu=menu_help, label=_("Help"))
        menu_settings.add_command(label=_("Pannel Settings"),
                                  command=self.dlg_panset)

    def add_btns(self):
        """Create the F-key button row and bind the matching key events."""
        root = self.root
        frame = Frame(root)
        for key, text, command in (
                (3, _("F3 View"), self.on_F3),
                (4, _("F4 Edit"), self.on_F4), (5, _("F5 Copy"), self.on_F5),
                (6, _("F6 Move"), self.on_F6),
                (7, _("F7 Make Directory"), self.on_F7),
                (8, _("F8 Remove"), self.on_F8),
                (10, _("F10 Exit"), self.on_F10)):
            btn = Button(frame, text=text, command=command, takefocus=False)
            btn.pack(side="left", fill="x", expand=True)
            # The F-key triggers the same handler as the button.
            root.bind_all("<F%d>" % key, func=command)
        sz = Sizegrip(frame)
        sz.pack(side="right", anchor="se")
        frame.grid(column=0, row=2, columnspan=2, sticky="we")

    def get_panels(self):
        "returns (active, passive) panels"
        if self.left.is_active:
            return self.left, self.right
        return self.right, self.left

    def on_delete(self):
        # Persist geometry and settings before tearing the window down.
        self.config["fm_geometry"] = self.root.geometry()
        self.config.save()
        self.root.destroy()

    def on_F3(self, evt=None):
        # TODO: viewer not implemented yet.
        print("F3")

    def on_F4(self, evt=None):
        # TODO: editor not implemented yet.
        print("F4")

    def on_F5(self, evt=None):
        buttons.copy_button(self)

    def on_F6(self, evt=None):
        # TODO: move not implemented yet.
        print("F6")

    def on_F7(self, evt=None):
        # TODO: mkdir not implemented yet.
        print("F7")

    def on_F8(self, evt=None):
        # TODO: remove not implemented yet.
        print("F8")

    def on_F10(self, evt=None):
        self.on_delete()

    def dlg_panset(self):
        """Open the panel-settings dialog (imported lazily to speed startup)."""
        from dialogs import Dlg_panset
        Dlg_panset(self.root, self.config)
def start_face():
    """Install the ``_`` translation function and run the main Tk loop.

    Uses gettext translations from the bundled ``i18n/locale`` directory when
    present; otherwise installs the default gettext translation.  If gettext
    itself is unavailable, ``_`` falls back to the identity function (``str``).
    """
    try:
        import gettext
    except ImportError:
        # BUG FIX: __builtins__ is a plain dict when this module is imported
        # (it is only the builtins module in __main__), so the original
        # __builtins__.__dict__["_"] = str could raise AttributeError.
        # Assigning through the builtins module works in both cases.
        import builtins
        builtins._ = str
    else:
        localedir = join(dirname(__file__), "i18n", "locale")
        if isdir(localedir):
            gettext.install("TkC", localedir=localedir)
        else:
            gettext.install("TkC")
    root = Tk(className="commander")
    Face(root)
    root.mainloop()
if __name__ == "__main__":
    # Launch the file manager only when executed as a script.
    start_face()
| 31.971631 | 77 | 0.609583 |
e33552e0c85386bc849235ddd212c1a3f9fe7aef | 6,396 | py | Python | tensorflow_examples/lite/model_maker/core/task/audio_classifier_test.py | tansaku/examples | cc121d3354ff7f9814b6eee881dce6e6c55d0e68 | [
"Apache-2.0"
] | null | null | null | tensorflow_examples/lite/model_maker/core/task/audio_classifier_test.py | tansaku/examples | cc121d3354ff7f9814b6eee881dce6e6c55d0e68 | [
"Apache-2.0"
] | null | null | null | tensorflow_examples/lite/model_maker/core/task/audio_classifier_test.py | tansaku/examples | cc121d3354ff7f9814b6eee881dce6e6c55d0e68 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.io import wavfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflow_examples.lite.model_maker.core.data_util import audio_dataloader
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import audio_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
class BrowserFFTWithoutPreprocessing(audio_spec.BrowserFFTSpec):
    """BrowserFFT spec that only crops waveforms, skipping full preprocessing."""

    def preprocess_ds(self, ds, is_training=False, cache_fn=None):
        """Crop each waveform to the expected length; `is_training` is unused."""
        del is_training  # unused

        limit = self.expected_waveform_len

        @tf.function
        def _truncate(wav, label):
            return wav[:limit], label

        cropped = ds.map(_truncate)
        if cache_fn:
            cropped = cache_fn(cropped)
        return cropped
class YAMNetWithoutPreprcessing(audio_spec.YAMNetSpec):
    """YAMNet spec that only crops waveforms to the expected length.

    NOTE(review): unlike the BrowserFFT variant, ``cache_fn`` is accepted but
    ignored here — presumably intentional for these tests; worth confirming.
    (Class name typo is kept: callers reference it.)
    """

    def preprocess_ds(self, ds, is_training=False, cache_fn=None):
        @tf.function
        def _truncate(wav, label):
            return wav[:audio_spec.YAMNetSpec.EXPECTED_WAVEFORM_LENGTH], label

        return ds.map(_truncate)
def write_sample(root,
                 category,
                 file_name,
                 sample_rate,
                 duration_sec,
                 dtype=np.int16):
    """Write a random WAV file under ``root/category`` and return its path.

    Sample values are uniform in [0, 32768) before casting to ``dtype``, so
    the audio content is noise — sufficient for exercising the pipeline.
    """
    category_dir = os.path.join(root, category)
    os.makedirs(category_dir, exist_ok=True)

    num_samples = int(sample_rate * duration_sec)
    samples = (np.random.rand(num_samples) * (1 << 15)).astype(dtype)

    full_path = os.path.join(category_dir, file_name)
    wavfile.write(full_path, sample_rate, samples)
    return full_path
class AudioClassifierTest(tf.test.TestCase):
    """End-to-end tests: train, evaluate, export, and re-evaluate via TFLite."""

    def testBrowserFFT(self):
        self._test_spec(audio_spec.BrowserFFTSpec(),
                        BrowserFFTWithoutPreprocessing())

    def testYAMNet(self):
        self._test_spec(audio_spec.YAMNetSpec(), YAMNetWithoutPreprcessing())

    def testConfusionMatrix(self):
        """Confusion matrix shape/totals for a tiny 2-class dataset."""
        spec = audio_spec.BrowserFFTSpec()
        temp_folder = self.get_temp_dir()
        # 1+2 seconds of "cat", 3+4 seconds of "dog" audio.
        cat1 = write_sample(temp_folder, 'cat', '1.wav', 44100, duration_sec=1)
        cat2 = write_sample(temp_folder, 'cat', '2.wav', 44100, duration_sec=2)
        dog1 = write_sample(temp_folder, 'dog', '1.wav', 44100, duration_sec=3)
        dog2 = write_sample(temp_folder, 'dog', '2.wav', 44100, duration_sec=4)
        index_to_labels = ['cat', 'dog']

        # Prepare data.
        ds = tf.data.Dataset.from_tensor_slices(([cat1, cat2, dog1,
                                                  dog2], [0, 0, 1, 1]))
        data_loader = audio_dataloader.DataLoader(ds, len(ds), index_to_labels,
                                                  spec)

        # Train a floating point model.
        task = audio_classifier.create(data_loader, spec, batch_size=1, epochs=15)
        confusion_matrx = task.confusion_matrix(data_loader)
        # BrowserFFTSpec generates 1 sample for 1 second audio so there are
        # 10 samples in total.
        self.assertEqual(tf.math.reduce_sum(confusion_matrx), 10)
        # confusion_matrix is of shape (truth, prediction)
        # We have 2 classes, 3 cat samples and 7 dog samples.
        self.assertEqual(confusion_matrx.shape, (2, 2))
        self.assertAllEqual(
            tf.math.reduce_sum(confusion_matrx, axis=-1).numpy(), np.array([3, 7]))

    def _test_spec(self, train_spec, tflite_eval_spec):
        """Shared train/evaluate/export flow for one (train, eval) spec pair."""
        temp_folder = self.get_temp_dir()
        cat1 = write_sample(temp_folder, 'cat', '1.wav', 44100, duration_sec=1)
        cat2 = write_sample(temp_folder, 'cat', '2.wav', 44100, duration_sec=2)
        dog1 = write_sample(temp_folder, 'dog', '1.wav', 44100, duration_sec=3)
        dog2 = write_sample(temp_folder, 'dog', '2.wav', 44100, duration_sec=4)
        index_to_labels = ['cat', 'dog']

        # Fixed seeds keep the tiny training run deterministic.
        np.random.seed(123)
        tf.random.set_seed(123)

        # Prepare data.
        ds = tf.data.Dataset.from_tensor_slices(([cat1, cat2, dog1,
                                                  dog2], [0, 0, 1, 1]))
        data_loader = audio_dataloader.DataLoader(ds, len(ds), index_to_labels,
                                                  train_spec)

        # Train a floating point model.
        task = audio_classifier.create(
            data_loader, train_spec, batch_size=1, epochs=15)

        # Evaluate trained model
        _, acc = task.evaluate(data_loader)
        # Better than random guessing.
        self.assertGreater(acc, .5)

        # Export the model to saved model.
        output_path = os.path.join(train_spec.model_dir, 'saved_model')
        task.export(train_spec.model_dir, export_format=ExportFormat.SAVED_MODEL)
        self.assertTrue(os.path.isdir(output_path))
        self.assertNotEqual(len(os.listdir(output_path)), 0)

        # Export the model to TFLite.
        output_path = os.path.join(train_spec.model_dir, 'float.tflite')
        task.export(
            train_spec.model_dir,
            tflite_filename='float.tflite',
            export_format=ExportFormat.TFLITE)
        self.assertTrue(tf.io.gfile.exists(output_path))
        self.assertGreater(os.path.getsize(output_path), 0)

        # Evaluate accuracy on TFLite model.
        # Create a new dataset without preprocessing since preprocessing has been
        # packaged inside TFLite model.
        tflite_dataloader = audio_dataloader.DataLoader(ds, len(ds),
                                                        index_to_labels,
                                                        tflite_eval_spec)

        # Evaluate accuracy on float model.
        result = task.evaluate_tflite(
            output_path,
            tflite_dataloader,
            # Skip yamnet output during TFLite evaluation.
            postprocess_fn=lambda x: x[-1])
        self.assertGreaterEqual(result['accuracy'], .5)
if __name__ == '__main__':
    # Load compressed models from tensorflow_hub
    os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
    # Force TF2 behavior before running the test suite.
    compat.setup_tf_behavior(tf_version=2)
    tf.test.main()
| 36.135593 | 80 | 0.685897 |
74c293a8c32a82861444be732ece9da493e5e4b1 | 11,543 | py | Python | Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/__init__.py | i2tResearch/Ciberseguridad_web | ac3dd934a60628532e3538369cb145d9a8f33e4f | [
"MIT"
] | 9 | 2021-10-01T22:02:58.000Z | 2021-11-09T17:48:45.000Z | Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/__init__.py | i2tResearch/Ciberseguridad_web | ac3dd934a60628532e3538369cb145d9a8f33e4f | [
"MIT"
] | null | null | null | Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pip/__init__.py | i2tResearch/Ciberseguridad_web | ac3dd934a60628532e3538369cb145d9a8f33e4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import optparse
import warnings
import sys
import re
# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
try:
from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning
except ImportError:
from urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
try:
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
except ImportError:
from urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "9.0.1"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).

    Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
    Prints the candidate completions to stdout and exits; returns silently
    when completion was not requested.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''

    subcommands = [cmd for cmd, summary in get_summaries()]
    options = []
    # subcommand: first word on the line that names a known command, if any
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None

    parser = create_main_parser()
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)

        subcommand = commands_dict[subcommand_name]()
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list_all
                    if opt.help != optparse.SUPPRESS_HELP]

        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary (a dash was typed)
        if current.startswith('-') or current.startswith('--'):
            opts = [i.option_list for i in parser.option_groups]
            opts.append(parser.option_list)
            opts = (o for it in opts for o in it)
            subcommands += [i.get_opt_string() for i in opts
                            if i.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def create_main_parser():
    """Build the top-level pip option parser: general options + command list."""
    parser = ConfigOptionParser(
        usage='\n%prog <command> [options]',
        add_help_option=False,
        formatter=UpdatingDefaultsHelpFormatter(),
        name='global',
        prog=get_prog(),
    )
    parser.disable_interspersed_args()

    # Version string includes the install location and the Python version.
    pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    parser.version = 'pip %s from %s (python %s)' % (
        __version__, pip_pkg_dir, sys.version[:3])

    # add the general options
    parser.add_option_group(
        cmdoptions.make_option_group(cmdoptions.general_group, parser))

    parser.main = True  # so the help formatter knows

    # One "<name>  <summary>" line per subcommand for the --help description.
    summary_lines = ['%-27s %s' % (name, summary)
                     for name, summary in get_summaries()]
    parser.description = '\n'.join([''] + summary_lines)

    return parser
def parseopts(args):
    """Split raw CLI args into (command name, remaining args).

    Handles --version and bare/`help` invocations by printing and exiting.
    Raises CommandError for an unknown command (with a did-you-mean hint).
    """
    parser = create_main_parser()

    # Note: parser calls disable_interspersed_args(), so the result of this
    # call is to split the initial args into the general options before the
    # subcommand and everything else.
    # For example:
    #  args: ['--timeout=5', 'install', '--user', 'INITools']
    #  general_options: ['--timeout==5']
    #  args_else: ['install', '--user', 'INITools']
    general_options, args_else = parser.parse_args(args)

    # --version
    if general_options.version:
        sys.stdout.write(parser.version)
        sys.stdout.write(os.linesep)
        sys.exit()

    # pip || pip help -> print_help()
    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
        parser.print_help()
        sys.exit()

    # the subcommand name
    cmd_name = args_else[0]

    if cmd_name not in commands_dict:
        guess = get_similar_commands(cmd_name)

        msg = ['unknown command "%s"' % cmd_name]
        if guess:
            msg.append('maybe you meant "%s"' % guess)

        raise CommandError(' - '.join(msg))

    # all the args without the subcommand
    # NOTE(review): list.remove drops the FIRST occurrence of cmd_name, which
    # could be an earlier option value rather than the command itself — a
    # long-standing quirk, left as-is.
    cmd_args = args[:]
    cmd_args.remove(cmd_name)

    return cmd_name, cmd_args
def check_isolated(args):
    """Return True if the ``--isolated`` flag appears in `args`."""
    # Idiom: a direct membership test replaces the original flag variable.
    return "--isolated" in args
def main(args=None):
    """pip's CLI entry point: dispatch `args` (default sys.argv[1:]) to a command.

    Returns the command's exit status; exits early for completion, --version,
    bare `pip`, and argument errors.
    """
    if args is None:
        args = sys.argv[1:]

    # Configure our deprecation warnings to be sent through loggers
    deprecation.install_warning_logger()

    # Shell-completion hook: prints candidates and exits when requested.
    autocomplete()

    try:
        cmd_name, cmd_args = parseopts(args)
    except PipError as exc:
        sys.stderr.write("ERROR: %s" % exc)
        sys.stderr.write(os.linesep)
        sys.exit(1)

    # Needed for locale.getpreferredencoding(False) to work
    # in pip.utils.encoding.auto_decode
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error as e:
        # setlocale can apparently crash if locale are uninitialized
        logger.debug("Ignoring error %s when setting locale", e)
    command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
    return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
    """One line of `pip freeze` output: a requirement plus freeze metadata."""

    def __init__(self, name, req, editable, comments=()):
        self.name = name
        self.req = req          # requirement string or object; str() is used
        self.editable = editable  # True -> rendered with the "-e " prefix
        self.comments = comments  # comment lines emitted before the req

    # Version suffixes produced by svn checkouts: "-r1234" or "-20200101".
    _rev_re = re.compile(r'-r(\d+)$')
    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')

    @classmethod
    def from_dist(cls, dist, dependency_links):
        """Build a FrozenRequirement from an installed distribution.

        Editable VCS checkouts are frozen as VCS URLs; otherwise a plain
        "name==version" requirement is used, with a legacy fallback that
        reconstructs svn URLs from dependency_links.
        """
        location = os.path.normcase(os.path.abspath(dist.location))
        comments = []
        from pip.vcs import vcs, get_src_requirement
        if dist_is_editable(dist) and vcs.get_backend_name(location):
            editable = True
            try:
                req = get_src_requirement(dist, location)
            except InstallationError as exc:
                # BUG FIX: restored upstream wording — the message had been
                # corrupted to "falling backend to uneditable format".
                logger.warning(
                    "Error when trying to get requirement for VCS system %s, "
                    "falling back to uneditable format", exc
                )
                req = None
            if req is None:
                logger.warning(
                    'Could not determine repository location of %s', location
                )
                comments.append(
                    '## !! Could not determine repository location'
                )
                req = dist.as_requirement()
                editable = False
        else:
            editable = False
            req = dist.as_requirement()
            specs = req.specs
            assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
                'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
                (specs, dist)
            version = specs[0][1]
            ver_match = cls._rev_re.search(version)
            date_match = cls._date_re.search(version)
            # Legacy svn support: versions like "pkg-r1234" or "pkg-20200101"
            # are resolved back to an editable svn URL via dependency_links.
            if ver_match or date_match:
                svn_backend = vcs.get_backend('svn')
                if svn_backend:
                    svn_location = svn_backend().get_location(
                        dist,
                        dependency_links,
                    )
                    if not svn_location:
                        logger.warning(
                            'Warning: cannot find svn location for %s', req)
                        comments.append(
                            '## FIXME: could not find svn URL in dependency_links '
                            'for this package:'
                        )
                    else:
                        comments.append(
                            '# Installing as editable to satisfy requirement %s:' %
                            req
                        )
                        if ver_match:
                            rev = ver_match.group(1)
                        else:
                            rev = '{%s}' % date_match.group(1)
                        editable = True
                        req = '%s@%s#egg=%s' % (
                            svn_location,
                            rev,
                            cls.egg_name(dist)
                        )
        return cls(dist.project_name, req, editable, comments)

    @staticmethod
    def egg_name(dist):
        """Return the egg name without any trailing "-pyX.Y" suffix."""
        name = dist.egg_name()
        match = re.search(r'-py\d\.\d$', name)
        if match:
            name = name[:match.start()]
        return name

    def __str__(self):
        """Render the freeze line(s): comments, then the (possibly -e) req."""
        req = self.req
        if self.editable:
            req = '-e %s' % req
        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
if __name__ == '__main__':
    # Allow running this module directly; propagate the command's exit status.
    sys.exit(main())
| 34.050147 | 80 | 0.601923 |
96ed85674c803b839b1d7dee04cac436916d844a | 59,962 | py | Python | src/oci/ocvp/sddc_client.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/ocvp/sddc_client.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/ocvp/sddc_client.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import ocvp_type_mapping
missing = Sentinel("Missing")
class SddcClient(object):
"""
Use this API to manage your [Oracle Cloud VMware Solution](/iaas/Content/VMware/Concepts/ocvsoverview.htm).
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
the dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20200501',
'service_endpoint_template': 'https://ocvps.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("sddc", config, signer, ocvp_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
def cancel_downgrade_hcx(self, sddc_id, **kwargs):
    """
    Cancel the pending SDDC downgrade from HCX Enterprise to HCX Advanced

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_retry_token: (optional)
        A token that makes the request safely retryable after a timeout or
        server error. Tokens expire after 24 hours but may be invalidated
        earlier by conflicting operations.

    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the resource is
        updated only if this matches its current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/cancel_downgrade_hcx.py.html>`__ to see an example of how to use cancel_downgrade_hcx API.
    """
    resource_path = "/sddcs/{sddcId}/actions/cancelDowngradeHcx"
    method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "if_match",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "cancel_downgrade_hcx got unknown kwargs: {!r}".format(unknown_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "sddcId": sddc_id
    }
    path_params = dict((name, value) for name, value in path_params.items() if value is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retried POSTs need an opc-retry-token to remain idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def change_sddc_compartment(self, sddc_id, change_sddc_compartment_details, **kwargs):
    """
    Moves an SDDC into a different compartment within the same tenancy.
    For information about moving resources between compartments, see
    `Moving Resources to a Different Compartment`__.

    __ https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.ocvp.models.ChangeSddcCompartmentDetails change_sddc_compartment_details: (required)
        Request to change the compartment of the specified SDDC

    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the resource is
        updated only if this matches its current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param str opc_retry_token: (optional)
        A token that makes the request safely retryable after a timeout or
        server error. Tokens expire after 24 hours but may be invalidated
        earlier by conflicting operations.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/change_sddc_compartment.py.html>`__ to see an example of how to use change_sddc_compartment API.
    """
    resource_path = "/sddcs/{sddcId}/actions/changeCompartment"
    method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "change_sddc_compartment got unknown kwargs: {!r}".format(unknown_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "sddcId": sddc_id
    }
    path_params = dict((name, value) for name, value in path_params.items() if value is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retried POSTs need an opc-retry-token to remain idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=change_sddc_compartment_details)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_sddc_compartment_details)
def create_sddc(self, create_sddc_details, **kwargs):
    """
    Creates an Oracle Cloud VMware Solution software-defined data center (SDDC).

    Use the :class:`WorkRequest` operations to track the creation of the SDDC.

    **Important:** You must configure the SDDC's networking resources with the
    security rules detailed in `Security Rules for Oracle Cloud VMware Solution
    SDDCs`__. Otherwise, provisioning the SDDC will fail. The rules are based
    on the requirements set by VMware.

    __ https://docs.cloud.oracle.com/iaas/Content/VMware/Reference/ocvssecurityrules.htm

    :param oci.ocvp.models.CreateSddcDetails create_sddc_details: (required)
        Details for the SDDC.

    :param str opc_retry_token: (optional)
        A token that makes the request safely retryable after a timeout or
        server error. Tokens expire after 24 hours but may be invalidated
        earlier by conflicting operations.

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/create_sddc.py.html>`__ to see an example of how to use create_sddc API.
    """
    resource_path = "/sddcs"
    method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "create_sddc got unknown kwargs: {!r}".format(unknown_kwargs))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retried POSTs need an opc-retry-token to remain idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            header_params=header_params,
            body=create_sddc_details)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_sddc_details)
def delete_sddc(self, sddc_id, **kwargs):
    """
    Deletes the specified SDDC, along with the other resources that were
    created with the SDDC. For example: the Compute instances, DNS records,
    and so on.

    Use the :class:`WorkRequest` operations to track the deletion of the SDDC.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the resource is
        deleted only if this matches its current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/delete_sddc.py.html>`__ to see an example of how to use delete_sddc API.
    """
    resource_path = "/sddcs/{sddcId}"
    method = "DELETE"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "delete_sddc got unknown kwargs: {!r}".format(unknown_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "sddcId": sddc_id
    }
    path_params = dict((name, value) for name, value in path_params.items() if value is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def downgrade_hcx(self, downgrade_hcx_details, sddc_id, **kwargs):
    """
    Downgrade the specified SDDC from HCX Enterprise to HCX Advanced

    :param oci.ocvp.models.DowngradeHcxDetails downgrade_hcx_details: (required)
        The HCX on-premise license keys to be reserved when downgrade from HCX Enterprise to HCX Advanced.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_retry_token: (optional)
        A token that makes the request safely retryable after a timeout or
        server error. Tokens expire after 24 hours but may be invalidated
        earlier by conflicting operations.

    :param str if_match: (optional)
        Etag value for optimistic concurrency control; the resource is
        updated only if this matches its current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/downgrade_hcx.py.html>`__ to see an example of how to use downgrade_hcx API.
    """
    resource_path = "/sddcs/{sddcId}/actions/downgradeHcx"
    method = "POST"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "if_match",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "downgrade_hcx got unknown kwargs: {!r}".format(unknown_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "sddcId": sddc_id
    }
    path_params = dict((name, value) for name, value in path_params.items() if value is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retried POSTs need an opc-retry-token to remain idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=downgrade_hcx_details)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=downgrade_hcx_details)
def get_sddc(self, sddc_id, **kwargs):
    """
    Gets the specified SDDC's information.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ocvp.models.Sddc`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/get_sddc.py.html>`__ to see an example of how to use get_sddc API.
    """
    resource_path = "/sddcs/{sddcId}"
    method = "GET"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "get_sddc got unknown kwargs: {!r}".format(unknown_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "sddcId": sddc_id
    }
    path_params = dict((name, value) for name, value in path_params.items() if value is not missing)
    for name, value in path_params.items():
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            response_type="Sddc")
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Sddc")
def list_sddcs(self, compartment_id, **kwargs):
    """
    Lists the SDDCs in the specified compartment. The list can be
    filtered by display name or availability domain.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str compute_availability_domain: (optional)
        The name of the availability domain that the Compute instances are
        running in. Example: `Uocm:PHX-AD-1`

    :param str display_name: (optional)
        A filter to return only resources that match the given display name exactly.

    :param int limit: (optional)
        For list pagination. The maximum number of results per page. See
        `List Pagination`__.

        __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine

    :param str page: (optional)
        For list pagination. The value of the `opc-next-page` response header
        from the previous "List" call. See `List Pagination`__.

        __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine

    :param str sort_order: (optional)
        The sort order to use, either ascending (`ASC`) or descending (`DESC`).
        The DISPLAYNAME sort order is case sensitive.

        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default
        order for TIMECREATED is descending; for DISPLAYNAME, ascending
        (case sensitive).

        Allowed values are: "timeCreated", "displayName"

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param str lifecycle_state: (optional)
        The lifecycle state of the resource.

        Allowed values are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ocvp.models.SddcCollection`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/list_sddcs.py.html>`__ to see an example of how to use list_sddcs API.
    """
    resource_path = "/sddcs"
    method = "GET"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "compute_availability_domain",
        "display_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id",
        "lifecycle_state"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "list_sddcs got unknown kwargs: {!r}".format(unknown_kwargs))

    # Validate enum-valued arguments against their allowed values.
    enum_checks = (
        ("sort_order", ["ASC", "DESC"]),
        ("sort_by", ["timeCreated", "displayName"]),
        ("lifecycle_state", ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"])
    )
    for arg_name, allowed_values in enum_checks:
        if arg_name in kwargs and kwargs[arg_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(arg_name, allowed_values)
            )

    # Build query parameters, then drop entries the caller did not supply.
    query_params = {
        "compartmentId": compartment_id,
        "computeAvailabilityDomain": kwargs.get("compute_availability_domain", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing)
    }
    query_params = dict((name, value) for name, value in query_params.items() if value is not missing and value is not None)

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="SddcCollection")
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="SddcCollection")
def list_supported_skus(self, compartment_id, **kwargs):
    """
    Lists supported SKUs. Oracle Cloud Infrastructure VMware Solution supports
    the following billing interval SKUs: HOUR, MONTH, ONE_YEAR, and THREE_YEARS.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param int limit: (optional)
        For list pagination. The maximum number of results per page. See
        `List Pagination`__.

        __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine

    :param str page: (optional)
        For list pagination. The value of the `opc-next-page` response header
        from the previous "List" call. See `List Pagination`__.

        __ https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine

    :param str opc_request_id: (optional)
        Unique identifier for the request, for use when contacting Oracle.

    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any set at the
        client level. Pass :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ocvp.models.SupportedSkuSummaryCollection`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/ocvp/list_supported_skus.py.html>`__ to see an example of how to use list_supported_skus API.
    """
    resource_path = "/supportedSkus"
    method = "GET"

    # Fail fast on keyword arguments this operation does not support.
    allowed_kwargs = [
        "retry_strategy",
        "limit",
        "page",
        "opc_request_id"
    ]
    unknown_kwargs = [name for name in kwargs if name not in allowed_kwargs]
    if unknown_kwargs:
        raise ValueError(
            "list_supported_skus got unknown kwargs: {!r}".format(unknown_kwargs))

    # Build query parameters, then drop entries the caller did not supply.
    query_params = {
        "compartmentId": compartment_id,
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing)
    }
    query_params = dict((name, value) for name, value in query_params.items() if value is not missing and value is not None)

    # Build headers, then drop entries the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict((name, value) for name, value in header_params.items() if value is not missing and value is not None)

    # A per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="SupportedSkuSummaryCollection")
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="SupportedSkuSummaryCollection")
def list_supported_vmware_software_versions(self, compartment_id, **kwargs):
    """
    Lists the versions of bundled VMware software supported by the Oracle Cloud
    VMware Solution.

    :param str compartment_id: (required)
        The `OCID`__ of the compartment.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param int limit: (optional)
        For list pagination: the maximum number of results per page.

    :param str page: (optional)
        For list pagination: the value of the `opc-next-page` response header
        from the previous "List" call.

    :param str opc_request_id: (optional)
        Unique identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call, overriding any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        disable retries for this operation.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ocvp.models.SupportedVmwareSoftwareVersionCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/supportedVmwareSoftwareVersions"
    method = "GET"

    # Fail fast on any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "limit",
        "page",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_supported_vmware_software_versions got unknown kwargs: {!r}".format(unrecognized))

    # Build the query string, silently dropping parameters the caller omitted.
    query_params = {
        key: value
        for (key, value) in six.iteritems({
            "compartmentId": compartment_id,
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing)
        })
        if value is not missing and value is not None
    }

    # Build the request headers the same way.
    header_params = {
        key: value
        for (key, value) in six.iteritems({
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        })
        if value is not missing and value is not None
    }

    # A truthy per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="SupportedVmwareSoftwareVersionCollection")
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="SupportedVmwareSoftwareVersionCollection")
def refresh_hcx_license_status(self, sddc_id, **kwargs):
    """
    Refresh HCX on-premise licenses status of the specified SDDC.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without executing the action twice; tokens expire after
        24 hours but may be invalidated earlier by conflicting operations.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the action runs only if the
        value matches the resource's current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call, overriding any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        disable retries for this operation.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/sddcs/{sddcId}/actions/refreshHcxLicenses"
    method = "POST"

    # Fail fast on any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "if_match",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "refresh_hcx_license_status got unknown kwargs: {!r}".format(unrecognized))

    # Validate the path template substitutions before building the URL.
    path_params = {
        key: value
        for (key, value) in six.iteritems({"sddcId": sddc_id})
        if value is not missing
    }
    for (key, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    # Build the request headers, dropping values the caller omitted.
    header_params = {
        key: value
        for (key, value) in six.iteritems({
            "accept": "application/json",
            "content-type": "application/json",
            "opc-retry-token": kwargs.get("opc_retry_token", missing),
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        })
        if value is not missing and value is not None
    }

    # A truthy per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        # A retried POST needs an opc-retry-token so replays stay idempotent.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def update_sddc(self, sddc_id, update_sddc_details, **kwargs):
    """
    Updates the specified SDDC.

    **Important:** Updating an SDDC affects only certain attributes in the
    `Sddc` object and does not affect the VMware environment currently
    running in the SDDC.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param oci.ocvp.models.UpdateSddcDetails update_sddc_details: (required)
        The information to be updated.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the update runs only if the
        value matches the resource's current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call, overriding any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        disable retries for this operation.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.ocvp.models.Sddc`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/sddcs/{sddcId}"
    method = "PUT"

    # Fail fast on any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "update_sddc got unknown kwargs: {!r}".format(unrecognized))

    # Validate the path template substitutions before building the URL.
    path_params = {
        key: value
        for (key, value) in six.iteritems({"sddcId": sddc_id})
        if value is not missing
    }
    for (key, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    # Build the request headers, dropping values the caller omitted.
    header_params = {
        key: value
        for (key, value) in six.iteritems({
            "accept": "application/json",
            "content-type": "application/json",
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        })
        if value is not missing and value is not None
    }

    # A truthy per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_sddc_details,
            response_type="Sddc")
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_sddc_details,
        response_type="Sddc")
def upgrade_hcx(self, sddc_id, **kwargs):
    """
    Upgrade the specified SDDC from HCX Advanced to HCX Enterprise.

    :param str sddc_id: (required)
        The `OCID`__ of the SDDC.

        __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm

    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without executing the action twice; tokens expire after
        24 hours but may be invalidated earlier by conflicting operations.

    :param str if_match: (optional)
        Etag for optimistic concurrency control; the action runs only if the
        value matches the resource's current etag.

    :param str opc_request_id: (optional)
        Unique identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy for this call, overriding any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        disable retries for this operation.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/sddcs/{sddcId}/actions/upgradeHcx"
    method = "POST"

    # Fail fast on any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_retry_token",
        "if_match",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "upgrade_hcx got unknown kwargs: {!r}".format(unrecognized))

    # Validate the path template substitutions before building the URL.
    path_params = {
        key: value
        for (key, value) in six.iteritems({"sddcId": sddc_id})
        if value is not missing
    }
    for (key, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    # Build the request headers, dropping values the caller omitted.
    header_params = {
        key: value
        for (key, value) in six.iteritems({
            "accept": "application/json",
            "content-type": "application/json",
            "opc-retry-token": kwargs.get("opc_retry_token", missing),
            "if-match": kwargs.get("if_match", missing),
            "opc-request-id": kwargs.get("opc_request_id", missing)
        })
        if value is not missing and value is not None
    }

    # A truthy per-call retry strategy wins over the client-level default.
    retry_strategy = kwargs.get('retry_strategy') or self.retry_strategy

    if retry_strategy:
        # A retried POST needs an opc-retry-token so replays stay idempotent.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)
    return self.base_client.call_api(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
| 47.325967 | 267 | 0.641823 |
7c30fa8381d8ea6cfd04719d9e99b071408dfef5 | 721 | py | Python | app/common/forms.py | MalisPierre/django_docker | 9cbd413b898f88fc0a91908a1ba255b197cae249 | [
"MIT"
] | null | null | null | app/common/forms.py | MalisPierre/django_docker | 9cbd413b898f88fc0a91908a1ba255b197cae249 | [
"MIT"
] | null | null | null | app/common/forms.py | MalisPierre/django_docker | 9cbd413b898f88fc0a91908a1ba255b197cae249 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.models import User
class UserConnectionForm(forms.Form):
    """Login form: account name ("pseudo") plus a masked password."""

    pseudo = forms.CharField(label='Nom du compte')
    password = forms.CharField(widget=forms.PasswordInput())
class UserCreationForm(forms.Form):
    """Sign-up form: e-mail address, account name and a masked password."""

    email = forms.CharField(label='email')
    pseudo = forms.CharField(label='Nom du compte')
    password = forms.CharField(widget=forms.PasswordInput())
class ChangePasswordForm(forms.Form):
    """Form carrying only the new password, rendered as a masked input."""

    password = forms.CharField(widget=forms.PasswordInput())
class ChangeEmailForm(forms.Form):
    """Form carrying only the replacement e-mail address."""

    email = forms.CharField(label='email')
class UserConnectionModelForm(forms.ModelForm):
    """ModelForm variant of the login form, backed by Django's auth ``User``.

    Fix: the original final line had dataset-export residue
    (``| 31.347826 | 60 | 0.736477 |``) fused after the ``fields`` list,
    which made the module unparsable; the residue is removed here.
    """

    class Meta:
        model = User
        fields = ['username', 'password']
f10edbf72d06600f6ed53dd193bcad3066bba48f | 3,898 | py | Python | setup.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | setup.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | setup.py | JWDebelius/qiita | 3378e0fabe40a846691600e5de4fb72a3db70dd1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob

# Release version of the qiita-spots distribution.
__version__ = "0.0.1-dev"

# Trove classifiers written as one block string; split into a list below.
classes = """
Development Status :: 2 - Pre-Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""

long_description = """Qiita: Spot Patterns"""

# Drop the blank first/last entries produced by the triple-quoted block
# and strip the surrounding whitespace from each classifier.
classifiers = [s.strip() for s in classes.split('\n') if s]

setup(name='qiita-spots',
      version=__version__,
      long_description=long_description,
      license="BSD",
      description='Qiita: Spot Patterns',
      author="Qiita development team",
      author_email="mcdonadt@colorado.edu",
      url='https://github.com/biocore/qiita',
      test_suite='nose.collector',
      # Python packages shipped with the distribution.
      packages=['qiita_core',
                'qiita_db',
                'qiita_pet',
                'qiita_pet/handlers',
                'qiita_ware'
                ],
      # Non-Python files bundled per package: SQL schema/patches and test
      # fixtures for qiita_db, static web assets and templates for qiita_pet.
      package_data={'qiita_core': ['support_files/config_test.txt'],
                    'qiita_db': [
                        'support_files/*.sql',
                        'support_files/patches/*.sql',
                        'support_files/patches/python_patches/*.py',
                        'support_files/test_data/preprocessed_data/*',
                        'support_files/test_data/processed_data/*',
                        'support_files/test_data/raw_data/*',
                        'support_files/test_data/analysis/*',
                        'support_files/test_data/reference/*',
                        'support_files/test_data/job/*.txt',
                        'support_files/test_data/job/2_test_folder/*',
                        'support_files/test_data/uploads/1/a_folder/*.txt',
                        'support_files/test_data/uploads/1/.hidden_file.txt',
                        'support_files/test_data/uploads/1/uploaded_file.txt',
                        'support_files/test_data/templates/*',
                        'support_files/work_data/*'],
                    'qiita_pet': [
                        'static/css/*.css', 'static/img/*.png',
                        'static/img/*.gif', 'static/img/*.ico',
                        'static/js/*.js', 'static/vendor/css/*.css',
                        'static/vendor/css/images/*.png',
                        'static/vendor/fonts/glyphicons*.*',
                        'static/vendor/images/*.png',
                        'static/vendor/js/*.js',
                        'results/admin/jobname/*.html',
                        'templates/*.html']},
      # Every file under scripts/ is installed as an executable.
      scripts=glob('scripts/*'),
      extras_require={'test': ["nose >= 0.10.1", "pep8", 'mock'],
                      'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
      install_requires=['psycopg2', 'click == 1.0', 'future==0.13.0',
                        'bcrypt', 'pandas >= 0.15', 'numpy >= 1.7',
                        'tornado==3.1.1', 'toredis', 'redis',
                        'ipython[all] >= 2.4.1, < 2.5', 'pyparsing',
                        'h5py >= 2.3.1', 'biom-format', 'natsort', 'networkx',
                        'scikit-bio >= 0.2.3, < 0.3.0', 'wtforms == 2.0.1',
                        'qiime >= 1.9.0, < 1.10.0'],
      classifiers=classifiers
      )
| 44.804598 | 79 | 0.502309 |
21f554b7431f4960095c54accf38a0772caee45e | 2,411 | py | Python | openpyxlzip/worksheet/tests/test_protection.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/worksheet/tests/test_protection.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | openpyxlzip/worksheet/tests/test_protection.py | ankitJoshi03/openpyxlzip | f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2020 openpyxlzip
import pytest
from openpyxlzip.tests.helper import compare_xml
from openpyxlzip.xml.functions import tostring, fromstring
@pytest.fixture
def SheetProtection():
    """Fixture returning the SheetProtection class under test."""
    # Imported lazily so collection does not require the package at load time.
    from ..protection import SheetProtection as protection_cls
    return protection_cls
class TestSheetProtection:
    """Serialization round-trips and truthiness of SheetProtection."""

    def test_ctor(self, SheetProtection):
        """A default-constructed protection serializes all default flags."""
        protection = SheetProtection()
        generated = tostring(protection.to_tree())
        expected = """
<sheetProtection
autoFilter="1" deleteColumns="1" deleteRows="1" formatCells="1"
formatColumns="1" formatRows="1" insertColumns="1" insertHyperlinks="1"
insertRows="1" objects="0" pivotTables="1" scenarios="0"
selectLockedCells="0" selectUnlockedCells="0" sheet="0" sort="1" />
"""
        diff = compare_xml(generated, expected)
        assert diff is None, diff

    def test_other_algorithm(self, SheetProtection):
        """XML using a SHA-512 hashed password round-trips unchanged."""
        source = """
<sheetProtection algorithmName="SHA-512"
hashValue="if3R9NkPcYybPSvhGnDay3dHdlEpnDplQxMFdS6pcOsTx8mvOHMJvO/43khiN7blBWLyRrcQYcHq+ksgEjsFEw=="
saltValue="XuCDcUHMeBxDIehjhnxRuw==" spinCount="100000" sheet="1" objects="1" scenarios="1"
formatCells="0" formatColumns="0" formatRows="0" insertRows="0" deleteColumns="0" sort="1" insertColumns="1"
insertHyperlinks="1" autoFilter="1" deleteRows="0" pivotTables="1" selectUnlockedCells="1"
selectLockedCells="1"/>
"""
        node = fromstring(source)
        protection = SheetProtection.from_tree(node)
        regenerated = tostring(protection.to_tree())
        diff = compare_xml(regenerated, source)
        assert diff is None, diff

    def test_bool(self, SheetProtection):
        """Protection is falsy until explicitly enabled."""
        protection = SheetProtection()
        assert bool(protection) is False
        protection.enable()
        assert bool(protection) is True
def test_ctor_with_password(SheetProtection):
    """A plaintext constructor password is stored as the legacy Excel hash."""
    protection = SheetProtection(password="secret")
    assert protection.password == "DAA7"
@pytest.mark.parametrize("password, already_hashed, value",
                         [
                             ('secret', False, 'DAA7'),
                             ('secret', True, 'secret'),
                         ])
def test_explicit_password(SheetProtection, password, already_hashed, value):
    """set_password hashes the value unless it is flagged as pre-hashed."""
    protection = SheetProtection()
    protection.set_password(password, already_hashed)
    assert protection.password == value
    # Setting a password implicitly enables sheet protection.
    assert protection.sheet == True
| 34.442857 | 117 | 0.647864 |
4697e932fabfda31167bdf81b3896aabbdb6af68 | 53 | py | Python | atlas/testing/acceptance/mixins/run_process.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/testing/acceptance/mixins/run_process.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/testing/acceptance/mixins/run_process.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
from foundations_spec.extensions import run_process | 17.666667 | 51 | 0.886792 |
005be7063523ffbb4907ee081acec486b57bf09b | 8,367 | py | Python | google/cloud/dialogflowcx_v3/services/experiments/transports/base.py | galz10/python-dialogflow-cx | e24bdfd499952199dfbdaa5634061653da8ae1db | [
"Apache-2.0"
] | null | null | null | google/cloud/dialogflowcx_v3/services/experiments/transports/base.py | galz10/python-dialogflow-cx | e24bdfd499952199dfbdaa5634061653da8ae1db | [
"Apache-2.0"
] | null | null | null | google/cloud/dialogflowcx_v3/services/experiments/transports/base.py | galz10/python-dialogflow-cx | e24bdfd499952199dfbdaa5634061653da8ae1db | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3.types import experiment
from google.cloud.dialogflowcx_v3.types import experiment as gcdc_experiment
from google.protobuf import empty_pb2 # type: ignore
try:
    # Advertise the installed google-cloud-dialogflowcx version in the
    # client_info user-agent attached to every API request.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflowcx",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Distribution metadata is unavailable (e.g. a vendored copy); fall back
    # to client info without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ExperimentsTransport(abc.ABC):
    """Abstract transport class for Experiments.

    Handles credential resolution and per-method wrapping shared by all
    concrete transports; the RPC properties at the bottom must be
    implemented by subclasses.
    """

    # Default OAuth scopes requested when the caller supplies none.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/dialogflow",
    )

    DEFAULT_HOST: str = "dialogflow.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods so each RPC carries the retry/timeout
        # defaults and the client_info user-agent metadata.
        self._wrapped_methods = {
            self.list_experiments: gapic_v1.method.wrap_method(
                self.list_experiments, default_timeout=None, client_info=client_info,
            ),
            self.get_experiment: gapic_v1.method.wrap_method(
                self.get_experiment, default_timeout=None, client_info=client_info,
            ),
            self.create_experiment: gapic_v1.method.wrap_method(
                self.create_experiment, default_timeout=None, client_info=client_info,
            ),
            self.update_experiment: gapic_v1.method.wrap_method(
                self.update_experiment, default_timeout=None, client_info=client_info,
            ),
            self.delete_experiment: gapic_v1.method.wrap_method(
                self.delete_experiment, default_timeout=None, client_info=client_info,
            ),
            self.start_experiment: gapic_v1.method.wrap_method(
                self.start_experiment, default_timeout=None, client_info=client_info,
            ),
            self.stop_experiment: gapic_v1.method.wrap_method(
                self.stop_experiment, default_timeout=None, client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Each property below exposes the transport-specific callable for one
    # Experiments RPC (sync or async); concrete subclasses override them.
    @property
    def list_experiments(
        self,
    ) -> Callable[
        [experiment.ListExperimentsRequest],
        Union[
            experiment.ListExperimentsResponse,
            Awaitable[experiment.ListExperimentsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_experiment(
        self,
    ) -> Callable[
        [experiment.GetExperimentRequest],
        Union[experiment.Experiment, Awaitable[experiment.Experiment]],
    ]:
        raise NotImplementedError()

    @property
    def create_experiment(
        self,
    ) -> Callable[
        [gcdc_experiment.CreateExperimentRequest],
        Union[gcdc_experiment.Experiment, Awaitable[gcdc_experiment.Experiment]],
    ]:
        raise NotImplementedError()

    @property
    def update_experiment(
        self,
    ) -> Callable[
        [gcdc_experiment.UpdateExperimentRequest],
        Union[gcdc_experiment.Experiment, Awaitable[gcdc_experiment.Experiment]],
    ]:
        raise NotImplementedError()

    @property
    def delete_experiment(
        self,
    ) -> Callable[
        [experiment.DeleteExperimentRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def start_experiment(
        self,
    ) -> Callable[
        [experiment.StartExperimentRequest],
        Union[experiment.Experiment, Awaitable[experiment.Experiment]],
    ]:
        raise NotImplementedError()

    @property
    def stop_experiment(
        self,
    ) -> Callable[
        [experiment.StopExperimentRequest],
        Union[experiment.Experiment, Awaitable[experiment.Experiment]],
    ]:
        raise NotImplementedError()
__all__ = ("ExperimentsTransport",)
| 36.537118 | 101 | 0.657225 |
305b46b49e1bd63c70736f60b2c4f5924c497d72 | 679 | py | Python | punchline/music/forms.py | ychab/punchline | 0054fd1a0c588173c9963714477b012579c13051 | [
"MIT"
] | null | null | null | punchline/music/forms.py | ychab/punchline | 0054fd1a0c588173c9963714477b012579c13051 | [
"MIT"
] | null | null | null | punchline/music/forms.py | ychab/punchline | 0054fd1a0c588173c9963714477b012579c13051 | [
"MIT"
] | null | null | null | from django import forms
from dal import autocomplete
from .models import Punchline, Song
class SongAdminForm(forms.ModelForm):
class Meta:
model = Song
fields = '__all__'
widgets = {
'album': autocomplete.ModelSelect2(url='admin:album-autocomplete'),
}
class PunchlineAdminForm(forms.ModelForm):
class Meta:
model = Punchline
fields = '__all__'
widgets = {
'artist': autocomplete.ModelSelect2(url='admin:artist-autocomplete'),
'song': autocomplete.ModelSelect2(
url='admin:song-autocomplete',
forward=['artist'],
),
}
| 22.633333 | 81 | 0.589102 |
d40e8235dcca93ae965ddfe6ca8a2306db1cb7db | 288 | py | Python | neuralnetwork/code/simulations/__init__.py | realwsq/MuscleSpindleCircuitsModel | 4418b180559ea1464ee6139161af7e6bf3762c50 | [
"MIT"
] | 5 | 2018-11-13T15:22:14.000Z | 2022-03-23T17:00:38.000Z | neuralnetwork/code/simulations/__init__.py | realwsq/MuscleSpindleCircuitsModel | 4418b180559ea1464ee6139161af7e6bf3762c50 | [
"MIT"
] | null | null | null | neuralnetwork/code/simulations/__init__.py | realwsq/MuscleSpindleCircuitsModel | 4418b180559ea1464ee6139161af7e6bf3762c50 | [
"MIT"
] | 1 | 2020-05-28T15:40:11.000Z | 2020-05-28T15:40:11.000Z | from Simulation import Simulation
from CellsRecording import CellsRecording
from ForwardSimulation import ForwardSimulation
from ForSimMuscleSpindles import ForSimMuscleSpindles
from ForSimSpinalModulation import ForSimSpinalModulation
from CollisionEesNatural import CollisionEesNatural
| 41.142857 | 57 | 0.916667 |
182aa18308fa2a5527008e23a16ed5629d2b4c72 | 3,405 | py | Python | fboss/py/fboss/thrift_clients.py | fakeNetflix/facebook-repo-fboss | b4b64540c779022bcbeff72376549d0addf3f7e7 | [
"BSD-3-Clause"
] | null | null | null | fboss/py/fboss/thrift_clients.py | fakeNetflix/facebook-repo-fboss | b4b64540c779022bcbeff72376549d0addf3f7e7 | [
"BSD-3-Clause"
] | null | null | null | fboss/py/fboss/thrift_clients.py | fakeNetflix/facebook-repo-fboss | b4b64540c779022bcbeff72376549d0addf3f7e7 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2004-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from neteng.fboss.ctrl import FbossCtrl
from neteng.fboss.qsfp import QsfpService
from neteng.fboss.pcap_pubsub import PcapPushSubscriber
from thrift.protocol.THeaderProtocol import THeaderProtocol
from thrift.transport.THeaderTransport import THeaderTransport
from thrift.transport.TSocket import TSocket
#################
# This class wraps the autogenerated thrift class.
# All of the functions in fboss/agent/if/ctrl.thrift get inherited into
# here.
#
# For example:
# with PlainTextFbossAgentClientDontUseInFb(host) as client:
# print(client.getAllPortInfo()) # dump all of the port stats
class PlainTextFbossAgentClientDontUseInFb(FbossCtrl.Client):
    """Plaintext thrift client for the FBOSS agent control interface.

    Inherits every RPC defined in fboss/agent/if/ctrl.thrift.  Example:

        with PlainTextFbossAgentClientDontUseInFb(host) as client:
            print(client.getAllPortInfo())  # dump all of the port stats
    """

    DEFAULT_PORT = 5909
    DEFAULT_TIMEOUT = 5.0

    def __init__(self, host, port=None, timeout=5.0):
        """Open a header-transport thrift connection to *host*.

        :param host: agent hostname or address
        :param port: TCP port; defaults to DEFAULT_PORT when None
        :param timeout: socket timeout in seconds; None selects
            DEFAULT_TIMEOUT (consistent with QsfpServiceClient)
        """
        self.host = host
        if port is None:
            port = self.DEFAULT_PORT
        if timeout is None:
            # Parity with QsfpServiceClient: callers may pass None to get
            # the default instead of crashing in setTimeout() below.
            timeout = self.DEFAULT_TIMEOUT

        self._socket = TSocket(host, port)
        # TSocket.setTimeout() takes a value in milliseconds
        self._socket.setTimeout(timeout * 1000)
        self._transport = THeaderTransport(self._socket)
        self._protocol = THeaderProtocol(self._transport)
        self._transport.open()
        FbossCtrl.Client.__init__(self, self._protocol)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Closing the transport also closes the underlying socket.
        self._transport.close()
class PcapPushSubClient(PcapPushSubscriber.Client):
    """Thrift client for the packet-capture push/subscribe service."""

    DEFAULT_PORT = 5911
    DEFAULT_TIMEOUT = 5.0

    def __init__(self, host, port=None, timeout=5.0):
        """Open a header-transport thrift connection to *host*.

        :param host: service hostname or address
        :param port: TCP port; defaults to DEFAULT_PORT when None
        :param timeout: socket timeout in seconds; None selects
            DEFAULT_TIMEOUT (consistent with QsfpServiceClient)
        """
        self.host = host
        if port is None:
            port = self.DEFAULT_PORT
        if timeout is None:
            # Parity with QsfpServiceClient: accept None instead of raising
            # a TypeError in setTimeout() below.
            timeout = self.DEFAULT_TIMEOUT

        self._socket = TSocket(host, port)
        # TSocket.setTimeout() takes a value in milliseconds
        self._socket.setTimeout(timeout * 1000)
        self._transport = THeaderTransport(self._socket)
        self._protocol = THeaderProtocol(self._transport)
        self._transport.open()
        PcapPushSubscriber.Client.__init__(self, self._protocol)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Closing the transport also closes the underlying socket.
        self._transport.close()
class QsfpServiceClient(QsfpService.Client):
    """Thrift client for the QSFP service; always connects on port 5910."""

    DEFAULT_PORT = 5910
    DEFAULT_TIMEOUT = 10.0

    # The ``port`` argument is accepted for signature parity with the other
    # clients but is intentionally ignored.
    def __init__(self, host, port=None, timeout=None):
        # Reading all 32 populated QSFP ports currently takes about 7.5s,
        # hence the generous 10s default timeout.
        self.host = host
        effective_timeout = timeout or self.DEFAULT_TIMEOUT

        sock = TSocket(host, self.DEFAULT_PORT)
        # TSocket.setTimeout() expects milliseconds.
        sock.setTimeout(effective_timeout * 1000)
        transport = THeaderTransport(sock)

        self._socket = sock
        self._transport = transport
        self._protocol = THeaderProtocol(transport)
        self._transport.open()
        QsfpService.Client.__init__(self, self._protocol)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Closing the transport also closes the underlying socket.
        self._transport.close()
066f7a4ce791cf844d6e0124085c72aa0bfb6d1b | 2,085 | py | Python | ppdet/modeling/architectures/deepsort.py | leakyH/PaddleDetection | aa15eb945711baf248177a02d4d3dd3bd3abc4e8 | [
"Apache-2.0"
] | 7,782 | 2019-10-25T09:39:37.000Z | 2022-03-31T13:44:14.000Z | ppdet/modeling/architectures/deepsort.py | leakyH/PaddleDetection | aa15eb945711baf248177a02d4d3dd3bd3abc4e8 | [
"Apache-2.0"
] | 3,499 | 2019-10-29T12:37:40.000Z | 2022-03-31T14:51:56.000Z | ppdet/modeling/architectures/deepsort.py | leakyH/PaddleDetection | aa15eb945711baf248177a02d4d3dd3bd3abc4e8 | [
"Apache-2.0"
] | 1,874 | 2019-10-28T04:21:58.000Z | 2022-03-31T05:41:21.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
__all__ = ['DeepSORT']
@register
class DeepSORT(BaseArch):
    """DeepSORT multi-object tracking architecture.

    Reference: https://arxiv.org/abs/1703.07402

    Args:
        detector (object): detection model instance (optional; may be None)
        reid (object): appearance-embedding (re-identification) model instance
        tracker (object): track association logic instance
    """

    __category__ = 'architecture'

    def __init__(self,
                 detector='YOLOv3',
                 reid='PCBPyramid',
                 tracker='DeepSORTTracker'):
        super(DeepSORT, self).__init__()
        self.detector = detector
        self.reid = reid
        self.tracker = tracker

    @classmethod
    def from_config(cls, cfg, *args, **kwargs):
        # The detector is optional: a config value of 'None' disables it.
        detector = None if cfg['detector'] == 'None' else create(cfg['detector'])
        return {
            "detector": detector,
            "reid": create(cfg['reid']),
            "tracker": create(cfg['tracker']),
        }

    def _forward(self):
        # Embed the pre-cropped detections; the tracker associates them
        # outside of this forward pass.
        return self.reid(self.inputs['crops'])

    def get_pred(self):
        return self._forward()
650271b415616a2a73af7e25a82b003a2c3203c0 | 4,977 | py | Python | cuppy_sensors_actuators/sensors/Sensors.py | adinaborta/Cuppy | e6099e345e8b3fa4d25fde1e5a6485c6558922e5 | [
"Unlicense"
] | null | null | null | cuppy_sensors_actuators/sensors/Sensors.py | adinaborta/Cuppy | e6099e345e8b3fa4d25fde1e5a6485c6558922e5 | [
"Unlicense"
] | null | null | null | cuppy_sensors_actuators/sensors/Sensors.py | adinaborta/Cuppy | e6099e345e8b3fa4d25fde1e5a6485c6558922e5 | [
"Unlicense"
] | null | null | null | import time
import os
import uuid
from cuppy.settings import MQTT_PORT, MQTT_BROKER_URL, DEBUG, BASE_DIR, MQTT_DEBUG_PRINTS
from cuppy.cuppy.models import MQTTSensorActuatorClient, MQTTCentralClient
from background_task import background
"""Background tasks.
The sensors and actuators and everythign related to mqtt, need to be run in parallel
and as far away from the WSGI side of things as possible.
See django-background-tasks https://django-background-tasks.readthedocs.io/en/latest/
"""
# def read_should_stop(sensor):
# current_value = 0
# try:
# with open(os.path.join(BASE_DIR, "cuppy_sensors_actuators/sensors/stop_" + sensor.value_file), 'r') as f:
# try:
# current_value = int(f.read().strip())
# except:
# current_value = 0
# except:
# with open(os.path.join(BASE_DIR, "cuppy_sensors_actuators/sensors/stop_" + sensor.value_file), 'w') as f:
# f.write("0")
# return current_value
# def write_should_stop(sensor, value):
# with open(os.path.join(BASE_DIR, "cuppy_sensors_actuators/sensors/stop_" + sensor.value_file), 'w') as f:
# f.write(str(value))
class MQTTSensor:
    """Publishes a sensor client's file-backed value to its MQTT topic.

    The value is read every two seconds from a file under
    ``cuppy_sensors_actuators/sensors/`` and pushed to the client's topic
    until the client's ``should_stop_loop`` database flag is raised.
    """

    def __init__(self, p_mqtt_client) -> None:
        """Bind to a persisted client; ``value_file`` names the sensor file."""
        self.mqtt_client = p_mqtt_client
        self.sensor_file = p_mqtt_client.value_file

    @staticmethod
    def get_central_client() -> MQTTCentralClient:
        """Return the singleton central client, creating it if missing."""
        central_client = MQTTCentralClient.objects.all().first()
        if central_client is None:
            central_client = MQTTCentralClient()
            central_client.save()
        return central_client

    def connect_mqtt(self):
        """Install a connect callback and connect asynchronously to the broker."""
        def on_connect(client, userdata, flags, rc):
            if rc == 0:
                print("Connected to MQTT Broker!")
            else:
                print("Failed to connect, return code %d\n", rc)

        self.mqtt_client.mqtt_client.on_connect = on_connect
        self.mqtt_client.mqtt_client.connect_async(MQTT_BROKER_URL, MQTT_PORT)

    def publish(self):
        """Publish the sensor file's value every 2s until asked to stop."""
        should_stop = self.mqtt_client.should_stop_loop
        # Start from a defined value so the first publish cannot hit an
        # unbound name when the sensor file does not exist yet (the original
        # code raised NameError in that case).
        current_value = 0.0
        while not should_stop:
            time.sleep(2)
            try:
                with open(os.path.join(BASE_DIR, "cuppy_sensors_actuators/sensors/" + self.sensor_file), 'r') as f:
                    try:
                        current_value = float(f.read().strip())
                    except ValueError:
                        current_value = 0.0
            except OSError:
                # Sensor file missing/unreadable: create it empty and keep
                # publishing the last known value.
                with open(os.path.join(BASE_DIR, "cuppy_sensors_actuators/sensors/" + self.sensor_file), 'w') as f:
                    f.write("")
            # Re-read the stop flag from the database on every cycle so an
            # external stop request takes effect within one iteration.
            self.mqtt_client.refresh_from_db()
            should_stop = self.mqtt_client.should_stop_loop
            result = self.mqtt_client.mqtt_client.publish(self.mqtt_client.topic, current_value)
            # result behaves like (status, mid); status 0 means queued OK.
            status = result[0]
            if DEBUG and MQTT_DEBUG_PRINTS:
                if status == 0:
                    print(f"Sent `{current_value}` to topic `{self.mqtt_client.topic}`")
                else:
                    print(f"Failed to send message to topic {self.mqtt_client.topic}")
        # Loop ended: stop the network thread and disconnect cleanly.
        if DEBUG:
            print("Stopped sensors")
        self.mqtt_client.mqtt_client.loop_stop()
        if DEBUG:
            def on_disconnect(client, userdata, rc):
                if rc == 0:
                    print("Disconnected to MQTT Broker!")
                else:
                    print("Failed to disconnect, return code %d\n", rc)
            self.mqtt_client.mqtt_client.on_disconnect = on_disconnect
        self.mqtt_client.mqtt_client.disconnect()

    @background(schedule=0)
    def run(client_id):
        """
        Run this method to connect to the broker and start sending data
        """
        try:
            mqtt_client = MQTTSensorActuatorClient.objects.get(client_id=uuid.UUID(client_id))
        except (MQTTSensorActuatorClient.DoesNotExist, ValueError):
            # Unknown or malformed client id: nothing to run.
            return
        mqtt_sensor = MQTTSensor(mqtt_client)
        mqtt_sensor.mqtt_client.mqtt_client.loop_start()
        mqtt_sensor.connect_mqtt()
        mqtt_sensor.publish()
# if __name__ == "__main__":
# run()
@background(schedule=0)
def start_all_sensors():
    """Background task: (re)start the publish loop of every sensor client."""
    if DEBUG:
        print("started sensors")
    sensor_clients = MQTTSensorActuatorClient.objects.filter(is_actuator=False)
    for client in sensor_clients:
        # Clear the stop flag before launching this client's publish loop.
        client.should_stop_loop = False
        client.save()
        client.refresh_from_db()
        MQTTSensor.run(str(client.client_id))
        if DEBUG:
            print(client.topic)
@background(schedule=0)
def stop_all_sensors():
    """Background task: signal every sensor publish loop to terminate."""
    if DEBUG:
        print("Stopping sensors")
    sensor_clients = MQTTSensorActuatorClient.objects.filter(is_actuator=False)
    for client in sensor_clients:
        # Raising the flag makes MQTTSensor.publish exit on its next poll.
        client.should_stop_loop = True
        client.save()
        client.refresh_from_db()
7e29c0cc2e31f83354af9d3052d2653538febf4f | 334 | py | Python | code-everyday-challenge/n69_money_change.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n69_money_change.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n69_money_change.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null |
#the min coins with denom 1,5, 10 that changes m
def main(m,coins):
result = []
while m > 0:
max_coins=max(c for c in coins if c <= m)
m-=max_coins
result.append(max_coins)
return '+'.join(map(str, result))
if __name__ == "__main__":
coins = [1,5,10]
m = 28
print(main(m, coins)) | 18.555556 | 49 | 0.562874 |
ac1685a4d3588bab1d4d83deadc07d0f4d21d78f | 1,973 | py | Python | gui_app/utils/BaseimageUtil.py | cloudconductor/cloud_conductor_gui | 8929815adfc6442daed5d320bfb7942d6917c3ea | [
"Apache-2.0"
] | null | null | null | gui_app/utils/BaseimageUtil.py | cloudconductor/cloud_conductor_gui | 8929815adfc6442daed5d320bfb7942d6917c3ea | [
"Apache-2.0"
] | null | null | null | gui_app/utils/BaseimageUtil.py | cloudconductor/cloud_conductor_gui | 8929815adfc6442daed5d320bfb7942d6917c3ea | [
"Apache-2.0"
] | null | null | null | from ..utils import ApiUtil
from ..utils import StringUtil
from ..utils.ApiUtil import Url
def get_baseimege_list(code, token, id=None):
if StringUtil.isEmpty(code):
return None
if StringUtil.isEmpty(token):
return None
url = Url.baseImageList
data = {
'auth_token': token,
'cloud_id': id
}
list = ApiUtil.requestGet(url, code, data)
return list
def create_baseimage(code, token, form):
if StringUtil.isEmpty(token):
return None
if StringUtil.isEmpty(form):
return None
# -- URL set
url = Url.baseImageCreate
# -- Set the value to the form
data = put_baseimage(token, form)
# -- API call, get a response
response = ApiUtil.requestPost(url, code, data)
return response
def get_baseimage_detail(code, token, id):
if StringUtil.isEmpty(code):
return None
if StringUtil.isEmpty(token):
return None
if StringUtil.isEmpty(id):
return None
url = Url.baseImageDetail(id, Url.url)
data = {
'auth_token': token,
'id': id,
}
baseimage = ApiUtil.requestGet(url, code, data)
return StringUtil.deleteNullDict(baseimage)
def put_baseimage(token, form):
data = {}
# -- Set the value to the form
data = {
'auth_token': token,
'cloud_id': form.get('cloud_id', ''),
'ssh_username': form.get('ssh_username', ''),
'source_image': form.get('source_image', ''),
'platform': form.get('platform', ''),
'platform_version': form.get('platform_version', '')
}
return data
def delete_baseimage(code, token, id):
if StringUtil.isEmpty(id):
return None
if StringUtil.isEmpty(token):
return None
# -- URL set
url = Url.baseImageDelete(id, Url.url)
# -- Set the value to the form
data = {'auth_token': token}
# -- API call, get a response
ApiUtil.requestDelete(url, code, data)
| 21.215054 | 60 | 0.613786 |
8f1e33768defc5587a34945e15bb9e0cfeaeaab2 | 34,549 | py | Python | benchmark/common.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | null | null | null | benchmark/common.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | null | null | null | benchmark/common.py | Semanti1/smarts_baseline | 77dbc350f37ae7735c74a2b8f1585c2818ac3421 | [
"MIT"
] | null | null | null | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import gym
import cv2
import numpy as np
from typing import Dict, Sequence
from collections import defaultdict
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.env import BaseEnv
from ray.rllib.policy import Policy
from ray import logger
from smarts.core.sensors import Observation
from smarts.core.utils.math import vec_2d
from smarts.core.controllers import ActionSpaceType
from smarts.core.scenario import PositionalGoal
# Registry of observation-space builders keyed by feature name.  Each value
# is a callable taking that feature's configuration (look-ahead spec,
# neighbor count, image shape, or an ignored "_") and returning the gym
# space produced by the matching CalObs.cal_<name> extractor.
SPACE_LIB = dict(
    # normalized distance to lane center
    distance_to_center=lambda _: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=(1,)
    ),
    # sine of heading error at look[0] waypoints ahead
    heading_errors=lambda look: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=(look[0],),
    ),
    speed=lambda _: gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(1,)),
    steering=lambda _: gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(1,)),
    goal_relative_pos=lambda _: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=(2,)
    ),
    # 5 features (dist, rel speed, ttc, rel x, rel y) per neighbor sector
    neighbor=lambda neighbor_num: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=(neighbor_num * 5,),
    ),
    img_gray=lambda shape: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=shape
    ),
    lane_its_info=lambda _: gym.spaces.Box(
        low=-float("inf"), high=float("inf"), shape=(16,)
    ),
    # To discover micro information around ego car in 16*16m ogm
    # proximity array around ego car
    proximity=lambda _: gym.spaces.Box(low=-1e10, high=1e10, shape=(8,)),
)
def _cal_angle(vec):
if vec[1] < 0:
base_angle = math.pi
base_vec = np.array([-1.0, 0.0])
else:
base_angle = 0.0
base_vec = np.array([1.0, 0.0])
cos = vec.dot(base_vec) / np.sqrt(vec.dot(vec) + base_vec.dot(base_vec))
angle = math.acos(cos)
return angle + base_angle
def _get_closest_vehicles(ego, neighbor_vehicles, n):
    """Bucket neighbors into *n* equal angular sectors around the ego car.

    Returns a dict mapping sector index -> (vehicle, distance), keeping only
    the nearest vehicle per sector; empty sectors hold (None, 1e10).
    """
    ego_pos = ego.position[:2]
    sector_width = math.pi * 2.0 / n
    nearest = {sector: (None, 1e10) for sector in range(n)}

    for vehicle in neighbor_vehicles:
        v_pos = vehicle.position[:2]
        offset = np.asarray([v_pos[0] - ego_pos[0], v_pos[1] - ego_pos[1]])
        # Sector index from the polar angle of the ego-to-vehicle vector.
        sector = int(_cal_angle(offset) / sector_width)
        distance = np.sqrt(offset.dot(offset))
        if distance < nearest[sector][1]:
            nearest[sector] = (vehicle, distance)

    return nearest
def proximity_detection(OGM):
    """
    Detects other vehicles in the vicinity of the ego vehicle
    hard coded for OGM(64, 64, 0.25)
    """
    regions = (
        (slice(11, 25), slice(23, 27)),  # front left
        (slice(11, 25), slice(27, 37)),  # front center
        (slice(11, 25), slice(37, 41)),  # front right
        (slice(25, 39), slice(23, 27)),  # left
        (slice(25, 39), slice(37, 41)),  # right
        (slice(41, 53), slice(23, 27)),  # back left
        (slice(41, 53), slice(27, 37)),  # back center
        (slice(41, 53), slice(37, 41)),  # back right
    )
    occupied = [OGM[rows, cols].max() > 0 for rows, cols in regions]
    return np.array(occupied, np.float32)
def heading_to_degree(heading):
    """Map a heading in radians (0 rad == up/+y) to degrees in [0, 360)."""
    full_turn = 2 * math.pi
    return np.degrees(heading % full_turn)
def heading_to_vec(heading):
    """Unit vector for *heading* in an x-right / y-up frame (0 rad == +y)."""
    rotated = (heading + math.pi * 0.5) % (2 * math.pi)
    return np.array([math.cos(rotated), math.sin(rotated)])
def trans_ego_center(ego_lane_index, origin_info):
    """Roll 5-lane info so the ego lane sits at the center (index 2).

    Lanes that cannot exist to one side of the ego lane are zeroed.  Works
    on a copy: the original in-place zeroing mutated the caller's array,
    silently suppressing later updates in cal_lane_its_info between the two
    calls it makes (the final output is the same, since the same indices are
    re-zeroed on the last call, but the hidden side effect was a hazard).
    """
    assert len(origin_info) == 5
    # Invalid lane indices per ego lane: ego 0 -> lanes 3,4 ... ego 4 -> 0,1.
    zero_index = [[3, 4], [4], [], [0], [0, 1]][ego_lane_index]
    info = np.array(origin_info, copy=True)
    info[zero_index] = 0
    return np.roll(info, 2 - ego_lane_index)
class ActionSpace:
    """Factory mapping SMARTS controller action types to gym spaces."""

    @staticmethod
    def from_type(space_type):
        """Build the gym space for *space_type* (Continuous or Lane only)."""
        if space_type == ActionSpaceType.Lane:
            # Four discrete lane-level actions.
            return gym.spaces.Discrete(4)
        if space_type == ActionSpaceType.Continuous:
            # (throttle, brake, steering) bounds.
            return gym.spaces.Box(
                low=np.array([0.0, 0.0, -1.0]),
                high=np.array([1.0, 1.0, 1.0]),
                dtype=np.float32,
            )
        raise NotImplementedError
# Module-level crash indicators rewritten by CalObs.cal_lane_its_info on
# every call (declared ``global`` there).  NOTE(review): presumably consumed
# by reward/penalty code outside this file — confirm before relying on them.
lane_crash_flag = False
intersection_crash_flag = False
class CalObs:
    """Namespace of per-feature observation extractors.

    Each ``cal_<name>`` static method maps a SMARTS ``Observation`` (plus
    that feature's configuration value) to the numpy feature whose layout is
    declared under the same ``<name>`` key in ``SPACE_LIB``.
    """

    @staticmethod
    def cal_goal_relative_pos(env_obs: Observation, _):
        """ Return normalized relative position (2-dimensional). """
        # NOTE(review): the normalization below is commented out, so the raw
        # (goal - ego) offset in meters is what is actually returned.
        ego_state = env_obs.ego_vehicle_state
        goal = ego_state.mission.goal
        assert isinstance(goal, PositionalGoal), goal

        ego_pos = ego_state.position[:2]
        goal_pos = goal.position  # the position of mission goal is 2-dimensional.
        vector = np.asarray([goal_pos[0] - ego_pos[0], goal_pos[1] - ego_pos[1]])
        # space = SPACE_LIB["goal_relative_pos"](None)
        # return vector / (space.high - space.low)
        return vector

    @staticmethod
    def cal_distance_to_center(env_obs: Observation, _):
        """ Calculate the signed distance to the center of the current lane. """
        # NOTE(review): normalization by half lane width is disabled below;
        # the returned distance is in meters.
        ego = env_obs.ego_vehicle_state
        waypoint_paths = env_obs.waypoint_paths
        wps = [path[0] for path in waypoint_paths]
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))
        signed_dist_to_center = closest_wp.signed_lateral_error(ego.position)
        # lane_hwidth = closest_wp.lane_width * 0.5
        # norm_dist_from_center = signed_dist_to_center / lane_hwidth

        # dist = np.asarray([norm_dist_from_center])
        dist = np.asarray([signed_dist_to_center])
        return dist

    @staticmethod
    def cal_heading_errors(env_obs: Observation, *args):
        """Sine of ego heading error at ``look_ahead`` waypoints along the
        closest path.

        ``look_type == "continuous"`` samples consecutive waypoints;
        otherwise a fixed sparse index pattern is used.  Indices past the
        end of the path are clamped to the last valid one.
        """
        look_ahead, look_type = args
        ego = env_obs.ego_vehicle_state
        waypoint_paths = env_obs.waypoint_paths
        wps = [path[0] for path in waypoint_paths]
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))
        closest_path = waypoint_paths[closest_wp.lane_index]
        closest_path_len = len(closest_path)

        if look_type == "continuous":
            wp_indices = np.arange(look_ahead)
        else:
            wp_indices = np.array([0, 1, 2, 3, 5, 8, 13, 21, 34, 50])[:look_ahead]

        # argmax returns 0 both when no index overflows and when the first
        # one does; index 0 can never overflow here (paths have >= 1 wp).
        first_larger_value = np.argmax(wp_indices > closest_path_len - 1)
        if first_larger_value == 0:
            pass
        else:
            wp_indices[first_larger_value:] = wp_indices[first_larger_value - 1]

        closest_path_wps = [closest_path[i] for i in wp_indices]

        heading_errors = [
            math.sin(math.radians(wp.relative_heading(ego.heading)))
            for wp in closest_path_wps
        ]
        return np.asarray(heading_errors)

    @staticmethod
    def cal_speed(env_obs: Observation, _):
        """Ego speed converted from m/s to km/h (1-element array)."""
        ego = env_obs.ego_vehicle_state
        res = np.asarray([ego.speed])
        # return res * 3.6 / 120
        return res * 3.6

    @staticmethod
    def cal_steering(env_obs: Observation, _):
        """Steering angle normalized by a quarter turn (pi/2)."""
        ego = env_obs.ego_vehicle_state
        return np.asarray([ego.steering / (0.5 * math.pi)])

    @staticmethod
    def cal_neighbor(env_obs: Observation, closest_neighbor_num):
        """Per-sector neighbor features, flattened.

        For each of ``closest_neighbor_num`` angular sectors around the ego
        (see ``_get_closest_vehicles``): distance, relative speed, TTC
        (capped at 5s), and relative x/y of the closest vehicle; zeros for
        empty sectors.
        """
        ego = env_obs.ego_vehicle_state
        neighbor_vehicle_states = env_obs.neighborhood_vehicle_states
        # dist, speed, ttc, pos
        features = np.zeros((closest_neighbor_num, 5))
        # fill neighbor vehicles into closest_neighboor_num areas
        surrounding_vehicles = _get_closest_vehicles(
            ego, neighbor_vehicle_states, n=closest_neighbor_num
        )

        heading_angle = ego.heading + math.pi / 2.0
        ego_heading_vec = np.asarray([math.cos(heading_angle), math.sin(heading_angle)])
        for i, v in surrounding_vehicles.items():
            if v[0] is None:
                continue
            v = v[0]
            rel_pos = np.asarray(
                list(map(lambda x: x[0] - x[1], zip(v.position[:2], ego.position[:2])))
            )

            rel_dist = np.sqrt(rel_pos.dot(rel_pos))
            v_heading_angle = math.radians(v.heading)
            v_heading_vec = np.asarray(
                [math.cos(v_heading_angle), math.sin(v_heading_angle)]
            )

            ego_heading_norm_2 = ego_heading_vec.dot(ego_heading_vec)
            rel_pos_norm_2 = rel_pos.dot(rel_pos)
            v_heading_norm_2 = v_heading_vec.dot(v_heading_vec)

            # NOTE(review): both "cosin" terms divide by sqrt(|a|^2 + |b|^2)
            # instead of |a| * |b|, so they are not true cosines — confirm
            # whether this damping is intentional before changing it.
            ego_cosin = ego_heading_vec.dot(rel_pos) / np.sqrt(
                ego_heading_norm_2 + rel_pos_norm_2
            )

            v_cosin = v_heading_vec.dot(rel_pos) / np.sqrt(
                v_heading_norm_2 + rel_pos_norm_2
            )

            rel_speed = 0
            if ego_cosin <= 0 and v_cosin > 0:
                rel_speed = 0
            else:
                rel_speed = ego.speed * ego_cosin - v.speed * v_cosin

            ttc = min(rel_dist / max(1e-5, rel_speed), 5.0)

            features[i, :] = np.asarray(
                [rel_dist, rel_speed, ttc, rel_pos[0], rel_pos[1]]
            )

        return features.reshape((-1,))

    @staticmethod
    def cal_ego_lane_dist_and_speed(env_obs: Observation, observe_lane_num):
        """Calculate the distance from ego vehicle to its front vehicles (if have) at observed lanes,
        also the relative speed of the front vehicle which positioned at the same lane.
        """
        ego = env_obs.ego_vehicle_state
        waypoint_paths = env_obs.waypoint_paths
        wps = [path[0] for path in waypoint_paths]
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))

        # Accumulate (waypoint, path index, along-path distance) triples.
        wps_with_lane_dist = []
        for path_idx, path in enumerate(waypoint_paths):
            lane_dist = 0.0
            for w1, w2 in zip(path, path[1:]):
                wps_with_lane_dist.append((w1, path_idx, lane_dist))
                lane_dist += np.linalg.norm(w2.pos - w1.pos)
            wps_with_lane_dist.append((path[-1], path_idx, lane_dist))

        # TTC calculation along each path
        ego_closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))

        wps_on_lane = [
            (wp, path_idx, dist)
            for wp, path_idx, dist in wps_with_lane_dist
            # if wp.lane_id == v.lane_id
        ]

        ego_lane_index = closest_wp.lane_index
        lane_dist_by_path = [1] * len(waypoint_paths)
        ego_lane_dist = [0] * observe_lane_num
        speed_of_closest = 0.0

        for v in env_obs.neighborhood_vehicle_states:
            nearest_wp, path_idx, lane_dist = min(
                wps_on_lane,
                key=lambda tup: np.linalg.norm(tup[0].pos - vec_2d(v.position)),
            )

            if np.linalg.norm(nearest_wp.pos - vec_2d(v.position)) > 2:
                # this vehicle is not close enough to the path, this can happen
                # if the vehicle is behind the ego, or ahead past the end of
                # the waypoints
                continue

            # relative_speed_m_per_s = (ego.speed - v.speed) * 1000 / 3600
            # relative_speed_m_per_s = max(abs(relative_speed_m_per_s), 1e-5)
            dist_wp_vehicle_vector = vec_2d(v.position) - vec_2d(nearest_wp.pos)
            direction_vector = np.array(
                [
                    math.cos(math.radians(nearest_wp.heading)),
                    math.sin(math.radians(nearest_wp.heading)),
                ]
            ).dot(dist_wp_vehicle_vector)

            # Signed along-path distance to the vehicle, normalized to 100m.
            dist_to_vehicle = lane_dist + np.sign(direction_vector) * (
                np.linalg.norm(vec_2d(nearest_wp.pos) - vec_2d(v.position))
            )
            lane_dist = dist_to_vehicle / 100.0

            if lane_dist_by_path[path_idx] > lane_dist:
                if ego_closest_wp.lane_index == v.lane_index:
                    speed_of_closest = (v.speed - ego.speed) / 120.0

            lane_dist_by_path[path_idx] = min(lane_dist_by_path[path_idx], lane_dist)

        # current lane is centre
        flag = observe_lane_num // 2
        ego_lane_dist[flag] = lane_dist_by_path[ego_lane_index]

        max_lane_index = len(lane_dist_by_path) - 1

        if max_lane_index == 0:
            right_sign, left_sign = 0, 0
        else:
            right_sign = -1 if ego_lane_index + 1 > max_lane_index else 1
            left_sign = -1 if ego_lane_index - 1 >= 0 else 1

        ego_lane_dist[flag + right_sign] = lane_dist_by_path[
            ego_lane_index + right_sign
        ]
        ego_lane_dist[flag + left_sign] = lane_dist_by_path[ego_lane_index + left_sign]

        res = np.asarray(ego_lane_dist + [speed_of_closest])
        return res

    @staticmethod
    def cal_lane_its_info(env_obs: Observation, _):
        """
        cal neighbour info includes lane info and intersection info
        """
        # Also rewrites the module-level lane_crash_flag /
        # intersection_crash_flag globals as a side effect.
        # init flag, dist, ttc, headings
        global lane_crash_flag
        global intersection_crash_flag
        lane_crash_flag = False
        intersection_crash_flag = False

        # default 10s
        lane_ttc = np.array([1] * 5, dtype=float)
        # default 100m
        lane_dist = np.array([1] * 5, dtype=float)
        # default 120km/h
        closest_lane_nv_rel_speed = 1

        intersection_ttc = 1
        intersection_distance = 1
        closest_its_nv_rel_speed = 1
        # default 100m
        closest_its_nv_rel_pos = np.array([1, 1])

        wp_paths = env_obs.waypoint_paths
        ego = env_obs.ego_vehicle_state
        neighborhood_vehicle_states = env_obs.neighborhood_vehicle_states
        closest_wps = [path[0] for path in wp_paths]

        # distance of vehicle from center of lane
        ego_closest_wp = min(closest_wps, key=lambda wp: wp.dist_to(ego.position))
        ego_lane_index = ego_closest_wp.lane_index

        # here to set invalid value to 0
        wp_paths_num = len(wp_paths)
        lane_ttc[wp_paths_num:] = 0
        lane_dist[wp_paths_num:] = 0

        # NOTE(review): trans_ego_center zeroes entries of lane_ttc/lane_dist
        # in place here, before the update loops below run — the same indices
        # are re-zeroed in the final call, but be aware of the mutation.
        features = np.concatenate(
            [
                trans_ego_center(ego_lane_index, lane_ttc),
                trans_ego_center(ego_lane_index, lane_dist),
                [
                    closest_lane_nv_rel_speed,
                    intersection_ttc,
                    intersection_distance,
                    closest_its_nv_rel_speed,
                ],
                closest_its_nv_rel_pos,
            ]
        )

        # return if no neighbour vehicle or off the routes(no waypoint paths)
        if not neighborhood_vehicle_states or not wp_paths_num:
            return features

        # merge waypoint paths (consider might not the same length)
        merge_waypoint_paths = []
        for wp_path in wp_paths:
            merge_waypoint_paths += wp_path

        wp_poses = np.array([wp.pos for wp in merge_waypoint_paths])

        # compute neighbour vehicle closest wp
        nv_poses = np.array([nv.position for nv in neighborhood_vehicle_states])
        nv_wp_distance = np.linalg.norm(
            nv_poses[:, :2][:, np.newaxis] - wp_poses, axis=2
        )
        nv_closest_wp_index = np.argmin(nv_wp_distance, axis=1)
        nv_closest_distance = np.min(nv_wp_distance, axis=1)

        # get not in same lane id social vehicles(intersect vehicles and behind vehicles)
        wp_lane_ids = np.array([wp.lane_id for wp in merge_waypoint_paths])
        nv_lane_ids = np.array([nv.lane_id for nv in neighborhood_vehicle_states])
        not_in_same_lane_id = nv_lane_ids[:, np.newaxis] != wp_lane_ids
        not_in_same_lane_id = np.all(not_in_same_lane_id, axis=1)

        # Strip the lane suffix (and a leading "-") to compare edge ids.
        ego_edge_id = ego.lane_id[1:-2] if ego.lane_id[0] == "-" else ego.lane_id[:-2]
        nv_edge_ids = np.array(
            [
                nv.lane_id[1:-2] if nv.lane_id[0] == "-" else nv.lane_id[:-2]
                for nv in neighborhood_vehicle_states
            ]
        )
        not_in_ego_edge_id = nv_edge_ids[:, np.newaxis] != ego_edge_id
        not_in_ego_edge_id = np.squeeze(not_in_ego_edge_id, axis=1)

        is_not_closed_nv = not_in_same_lane_id & not_in_ego_edge_id
        not_closed_nv_index = np.where(is_not_closed_nv)[0]

        # filter sv not close to the waypoints including behind the ego or ahead past the end of the waypoints
        close_nv_index = np.where(nv_closest_distance < 2)[0]

        if not close_nv_index.size:
            pass
        else:
            close_nv = [neighborhood_vehicle_states[i] for i in close_nv_index]

            # calculate waypoints distance to ego car along the routes
            wps_with_lane_dist_list = []
            for wp_path in wp_paths:
                path_wp_poses = np.array([wp.pos for wp in wp_path])
                wp_poses_shift = np.roll(path_wp_poses, 1, axis=0)
                wps_with_lane_dist = np.linalg.norm(
                    path_wp_poses - wp_poses_shift, axis=1
                )
                wps_with_lane_dist[0] = 0
                wps_with_lane_dist = np.cumsum(wps_with_lane_dist)
                wps_with_lane_dist_list += wps_with_lane_dist.tolist()
            wps_with_lane_dist_list = np.array(wps_with_lane_dist_list)

            # get neighbour vehicle closest waypoints index
            nv_closest_wp_index = nv_closest_wp_index[close_nv_index]
            # ego car and neighbour car distance, not very accurate since use the closest wp
            ego_nv_distance = wps_with_lane_dist_list[nv_closest_wp_index]

            # get neighbour vehicle lane index
            nv_lane_index = np.array(
                [merge_waypoint_paths[i].lane_index for i in nv_closest_wp_index]
            )

            # get wp path lane index
            lane_index_list = [wp_path[0].lane_index for wp_path in wp_paths]

            for i, lane_index in enumerate(lane_index_list):
                # get same lane vehicle
                same_lane_nv_index = np.where(nv_lane_index == lane_index)[0]
                if not same_lane_nv_index.size:
                    continue
                same_lane_nv_distance = ego_nv_distance[same_lane_nv_index]
                closest_nv_index = same_lane_nv_index[np.argmin(same_lane_nv_distance)]
                closest_nv = close_nv[closest_nv_index]
                closest_nv_speed = closest_nv.speed
                closest_nv_heading = closest_nv.heading
                # radius to degree
                closest_nv_heading = heading_to_degree(closest_nv_heading)

                closest_nv_pos = closest_nv.position[:2]
                bounding_box = closest_nv.bounding_box

                # map the heading to make it consistent with the position coordination
                map_heading = (closest_nv_heading + 90) % 360
                map_heading_radius = np.radians(map_heading)
                nv_heading_vec = np.array(
                    [np.cos(map_heading_radius), np.sin(map_heading_radius)]
                )
                nv_heading_vertical_vec = np.array(
                    [-nv_heading_vec[1], nv_heading_vec[0]]
                )

                # get four edge center position (consider one vehicle take over two lanes when change lane)
                # maybe not necessary
                closest_nv_front = closest_nv_pos + bounding_box.length * nv_heading_vec
                closest_nv_behind = (
                    closest_nv_pos - bounding_box.length * nv_heading_vec
                )
                closest_nv_left = (
                    closest_nv_pos + bounding_box.width * nv_heading_vertical_vec
                )
                closest_nv_right = (
                    closest_nv_pos - bounding_box.width * nv_heading_vertical_vec
                )
                edge_points = np.array(
                    [
                        closest_nv_front,
                        closest_nv_behind,
                        closest_nv_left,
                        closest_nv_right,
                    ]
                )

                ep_wp_distance = np.linalg.norm(
                    edge_points[:, np.newaxis] - wp_poses, axis=2
                )
                ep_closed_wp_index = np.argmin(ep_wp_distance, axis=1)
                ep_closed_wp_lane_index = set(
                    [merge_waypoint_paths[i].lane_index for i in ep_closed_wp_index]
                    + [lane_index]
                )

                min_distance = np.min(same_lane_nv_distance)

                if ego_closest_wp.lane_index in ep_closed_wp_lane_index:
                    if min_distance < 6:
                        lane_crash_flag = True

                    nv_wp_heading = (
                        closest_nv_heading
                        - heading_to_degree(
                            merge_waypoint_paths[
                                nv_closest_wp_index[closest_nv_index]
                            ].heading
                        )
                    ) % 360

                    # find those car just get from intersection lane into ego lane
                    if nv_wp_heading > 30 and nv_wp_heading < 330:
                        relative_close_nv_heading = (
                            closest_nv_heading - heading_to_degree(ego.heading)
                        )
                        # map nv speed to ego car heading
                        map_close_nv_speed = closest_nv_speed * np.cos(
                            np.radians(relative_close_nv_heading)
                        )
                        closest_lane_nv_rel_speed = min(
                            closest_lane_nv_rel_speed,
                            (map_close_nv_speed - ego.speed) * 3.6 / 120,
                        )
                    else:
                        closest_lane_nv_rel_speed = min(
                            closest_lane_nv_rel_speed,
                            (closest_nv_speed - ego.speed) * 3.6 / 120,
                        )

                relative_speed_m_per_s = ego.speed - closest_nv_speed
                if abs(relative_speed_m_per_s) < 1e-5:
                    relative_speed_m_per_s = 1e-5

                ttc = min_distance / relative_speed_m_per_s
                # normalized into 10s
                ttc /= 10

                for j in ep_closed_wp_lane_index:
                    if min_distance / 100 < lane_dist[j]:
                        # normalize into 100m
                        lane_dist[j] = min_distance / 100

                    if ttc <= 0:
                        continue

                    if j == ego_closest_wp.lane_index:
                        if ttc < 0.1:
                            lane_crash_flag = True

                    if ttc < lane_ttc[j]:
                        lane_ttc[j] = ttc

        # get vehicles not in the waypoints lane
        if not not_closed_nv_index.size:
            pass
        else:
            filter_nv = [neighborhood_vehicle_states[i] for i in not_closed_nv_index]

            nv_pos = np.array([nv.position for nv in filter_nv])[:, :2]
            nv_heading = heading_to_degree(np.array([nv.heading for nv in filter_nv]))
            nv_speed = np.array([nv.speed for nv in filter_nv])

            ego_pos = ego.position[:2]
            ego_heading = heading_to_degree(ego.heading)
            ego_speed = ego.speed
            nv_to_ego_vec = nv_pos - ego_pos

            line_heading = (
                (np.arctan2(nv_to_ego_vec[:, 1], nv_to_ego_vec[:, 0]) * 180 / np.pi)
                - 90
            ) % 360
            nv_to_line_heading = (nv_heading - line_heading) % 360
            ego_to_line_heading = (ego_heading - line_heading) % 360

            # judge two heading whether will intersect
            same_region = (nv_to_line_heading - 180) * (
                ego_to_line_heading - 180
            ) > 0  # both right of line or left of line
            ego_to_nv_heading = ego_to_line_heading - nv_to_line_heading
            valid_relative_angle = (
                (nv_to_line_heading - 180 > 0) & (ego_to_nv_heading > 0)
            ) | ((nv_to_line_heading - 180 < 0) & (ego_to_nv_heading < 0))

            # emit behind vehicles
            valid_intersect_angle = np.abs(line_heading - ego_heading) < 90

            # emit patient vehicles which stay in the intersection
            not_patient_nv = nv_speed > 0.01

            # get valid intersection sv
            intersect_sv_index = np.where(
                same_region
                & valid_relative_angle
                & valid_intersect_angle
                & not_patient_nv
            )[0]

            if not intersect_sv_index.size:
                pass
            else:
                its_nv_pos = nv_pos[intersect_sv_index][:, :2]
                its_nv_speed = nv_speed[intersect_sv_index]
                its_nv_to_line_heading = nv_to_line_heading[intersect_sv_index]
                line_heading = line_heading[intersect_sv_index]
                # ego_to_line_heading = ego_to_line_heading[intersect_sv_index]

                # get intersection closest vehicle
                ego_nv_distance = np.linalg.norm(its_nv_pos - ego_pos, axis=1)
                ego_closest_its_nv_index = np.argmin(ego_nv_distance)
                ego_closest_its_nv_distance = ego_nv_distance[ego_closest_its_nv_index]

                line_heading = line_heading[ego_closest_its_nv_index]
                ego_to_line_heading = (
                    heading_to_degree(ego_closest_wp.heading) - line_heading
                ) % 360

                ego_closest_its_nv_speed = its_nv_speed[ego_closest_its_nv_index]
                its_closest_nv_to_line_heading = its_nv_to_line_heading[
                    ego_closest_its_nv_index
                ]
                # rel speed along ego-nv line
                closest_nv_rel_speed = ego_speed * np.cos(
                    np.radians(ego_to_line_heading)
                ) - ego_closest_its_nv_speed * np.cos(
                    np.radians(its_closest_nv_to_line_heading)
                )

                closest_nv_rel_speed_m_s = closest_nv_rel_speed
                if abs(closest_nv_rel_speed_m_s) < 1e-5:
                    closest_nv_rel_speed_m_s = 1e-5

                ttc = ego_closest_its_nv_distance / closest_nv_rel_speed_m_s

                # NOTE(review): these two updates are repeated in the
                # ttc >= 0 branch below; the duplication looks unintended
                # but is harmless (min is idempotent) — confirm.
                intersection_ttc = min(intersection_ttc, ttc / 10)
                intersection_distance = min(
                    intersection_distance, ego_closest_its_nv_distance / 100
                )

                # transform relative pos to ego car heading coordinate
                rotate_axis_angle = np.radians(90 - ego_to_line_heading)
                closest_its_nv_rel_pos = (
                    np.array(
                        [
                            ego_closest_its_nv_distance * np.cos(rotate_axis_angle),
                            ego_closest_its_nv_distance * np.sin(rotate_axis_angle),
                        ]
                    )
                    / 100
                )

                closest_its_nv_rel_speed = min(
                    closest_its_nv_rel_speed, -closest_nv_rel_speed * 3.6 / 120
                )

                if ttc < 0:
                    pass
                else:
                    intersection_ttc = min(intersection_ttc, ttc / 10)
                    intersection_distance = min(
                        intersection_distance, ego_closest_its_nv_distance / 100
                    )

                    # if to collide in 3s, make it slow down
                    if ttc < 2 or ego_closest_its_nv_distance < 6:
                        intersection_crash_flag = True

        features = np.concatenate(
            [
                trans_ego_center(ego_lane_index, lane_ttc),
                trans_ego_center(ego_lane_index, lane_dist),
                [
                    closest_lane_nv_rel_speed,
                    intersection_ttc,
                    intersection_distance,
                    closest_its_nv_rel_speed,
                ],
                closest_its_nv_rel_pos,
            ]
        )

        return features

    @staticmethod
    def cal_img_gray(env_obs: Observation, *args):
        """Top-down RGB camera frame converted to grayscale in [0, 1] and
        resized to ``args`` == (height, width)."""
        # args = (height, width)
        resize = args

        def rgb2gray(rgb):
            return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

        rgb_ndarray = env_obs.top_down_rgb.data
        gray_scale = (
            cv2.resize(
                rgb2gray(rgb_ndarray), dsize=resize, interpolation=cv2.INTER_CUBIC
            )
            / 255.0
        )
        return gray_scale

    @staticmethod
    def cal_proximity(env_obs: Observation, _):
        """Eight-sector occupancy flags around the ego, from the OGM."""
        proximity = proximity_detection(env_obs.occupancy_grid_map[1])
        return proximity
class SimpleCallbacks(DefaultCallbacks):
    """RLlib episode callbacks that log progress and aggregate custom metrics.

    See example from (>=0.8.6): https://github.com/ray-project/ray/blob/master/rllib/examples
    /custom_metrics_and_callbacks.py
    """

    def on_episode_start(
        self,
        worker: RolloutWorker,
        base_env: BaseEnv,
        policies: Dict[str, Policy],
        episode: MultiAgentEpisode,
        **kwargs,
    ):
        """Initialise per-episode accumulators on ``episode.user_data``."""
        logger.info("episode {} started".format(episode.episode_id))
        # Per-agent speed history; appended to on every step.
        episode.user_data["ego_speed"] = defaultdict(list)
        # Reserved slot for heading-error tracking. Nothing in this class
        # writes to it — presumably consumed elsewhere; confirm before removing.
        episode.user_data["step_heading_error"] = dict()

    def on_episode_step(
        self,
        worker: RolloutWorker,
        base_env: BaseEnv,
        episode: MultiAgentEpisode,
        **kwargs,
    ):
        """Record each agent's current speed from its latest raw observation."""
        ego_speed = episode.user_data["ego_speed"]
        for agent_id, obs in episode._agent_to_last_raw_obs.items():
            if isinstance(obs, list):
                obs = obs[-1]  # frame-stacked observation: keep the latest frame
            if isinstance(obs, dict):
                # -1.0 marks observations that carry no speed entry.
                ego_speed[agent_id].append(obs.get("speed", -1.0))

    def on_episode_end(
        self,
        worker: RolloutWorker,
        base_env: BaseEnv,
        policies: Dict[str, Policy],
        episode: MultiAgentEpisode,
        **kwargs,
    ):
        """Aggregate custom metrics (mean ego speed, distance travelled)."""
        ego_speed = episode.user_data["ego_speed"]
        mean_ego_speed = {
            agent_id: np.mean(speed_hist) for agent_id, speed_hist in ego_speed.items()
        }

        distance_travelled = dict()
        for _id, info in episode._agent_to_last_info.items():
            if info.get("_group_info"):
                # Grouped agents report one info dict per group member.
                for i, _info in enumerate(info["_group_info"]):
                    distance_travelled[f"{_id}:AGENT-{i}"] = np.mean(_info["score"])
            else:
                distance_travelled[_id] = np.mean(info["score"])

        speed_list = [round(v, 3) for v in mean_ego_speed.values()]
        dist_list = [round(v, 3) for v in distance_travelled.values()]

        # max(1, len(...)) guards against division by zero when no agents reported.
        episode.custom_metrics["mean_ego_speed"] = sum(speed_list) / max(
            1, len(speed_list)
        )
        episode.custom_metrics["distance_travelled"] = sum(dist_list) / max(
            1, len(dist_list)
        )
        logger.info(f"episode {episode.episode_id} ended with {episode.length} steps")
class ActionAdapter:
    """Maps policy outputs to SMARTS actions for the supported action spaces."""

    @staticmethod
    def from_type(space_type):
        """Return the adapter function matching the given action-space type."""
        if space_type == ActionSpaceType.Continuous:
            return ActionAdapter.continuous_action_adapter
        if space_type == ActionSpaceType.Lane:
            return ActionAdapter.discrete_action_adapter
        raise NotImplementedError

    @staticmethod
    def continuous_action_adapter(policy_action):
        """Pass through a 3-component continuous action as a numpy array."""
        assert len(policy_action) == 3
        return np.asarray(policy_action)

    @staticmethod
    def discrete_action_adapter(policy_action):
        """Translate a discrete action (index or logits) into a lane command."""
        # Logit/probability vectors are reduced to their argmax index first.
        index = (
            np.argmax(policy_action)
            if isinstance(policy_action, (list, tuple, np.ndarray))
            else policy_action
        )
        commands = {
            0: "keep_lane",
            1: "slow_down",
            2: "change_lane_left",
            3: "change_lane_right",
        }
        # Unknown indices yield None, matching the original fall-through.
        return commands.get(index)
def subscribe_features(**kwargs):
    """Build the observation-space entries for every enabled feature.

    Keyword arguments map feature names to their configs; falsy configs are
    treated as disabled and skipped. Each enabled name is looked up in
    SPACE_LIB to construct its gym space.
    """
    return {
        name: SPACE_LIB[name](config)
        for name, config in kwargs.items()
        if config
    }
def cal_obs(env_obs, space, feature_configs):
    """Compute the observation matching ``space`` from a raw env observation.

    For a Dict space each named feature is produced by the corresponding
    ``CalObs.cal_<name>`` static method (names without a calculator are
    silently skipped). For a Tuple space the computation recurses over the
    paired (observation, sub-space) elements.
    """
    if isinstance(space, gym.spaces.Dict):
        observations = {}
        for feature_name in space.spaces:
            calculator = getattr(CalObs, f"cal_{feature_name}", None)
            if calculator is None:
                continue
            config = feature_configs[feature_name]
            # Non-sequence configs are wrapped so they unpack as one argument.
            call_args = config if isinstance(config, Sequence) else (config,)
            observations[feature_name] = calculator(env_obs, *call_args)
        return observations
    if isinstance(space, gym.spaces.Tuple):
        assert isinstance(env_obs, Sequence)
        return [
            cal_obs(sub_obs, sub_space, feature_configs)
            for sub_obs, sub_space in zip(env_obs, space.spaces)
        ]
    raise TypeError(f"Unexpected space type={type(space)}")
def get_distance_from_center(env_obs):
    """Return the ego vehicle's lateral offset from its lane center.

    The offset is signed and normalized by half the lane width, so 0 means
    centered and +/-1 means at a lane edge.
    """
    ego = env_obs.ego_vehicle_state
    # First waypoint of every candidate path; the nearest one defines the lane.
    first_waypoints = [path[0] for path in env_obs.waypoint_paths]
    nearest_wp = min(first_waypoints, key=lambda wp: wp.dist_to(ego.position))
    signed_offset = nearest_wp.signed_lateral_error(ego.position)
    half_width = 0.5 * nearest_wp.lane_width
    return signed_offset / half_width
| 38.175691 | 110 | 0.591652 |
1f35022201a73b8853f06d4d5298a130b3a2eaf8 | 434 | py | Python | python/FTPServer.py | mlecriva/SPL_Meter | 626d405ef979a36da3100f2b200b5dae9ee094c7 | [
"MIT"
] | 3 | 2019-07-29T21:20:24.000Z | 2019-07-30T08:52:32.000Z | python/FTPServer.py | mlecriva/SPL_Meter | 626d405ef979a36da3100f2b200b5dae9ee094c7 | [
"MIT"
] | null | null | null | python/FTPServer.py | mlecriva/SPL_Meter | 626d405ef979a36da3100f2b200b5dae9ee094c7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# Register a single FTP account.
# SECURITY NOTE(review): credentials, home directory, and bind address are
# hard-coded; "elradfmw" grants full read/write/delete permissions.
authorizer = DummyAuthorizer()
authorizer.add_user(
    "test", "test", "/Users/username", perm="elradfmw")
# Wire the authorizer into the connection handler and restrict passive-mode
# data connections to a fixed port range (useful behind NAT/firewalls).
handler = FTPHandler
handler.authorizer = authorizer
handler.passive_ports = range(60000, 61000)
# Bind on a specific interface/port and block serving requests forever.
server = FTPServer(("172.20.10.3", 2000), handler)
server.serve_forever()
| 25.529412 | 55 | 0.778802 |
22ed5cd61679611f836ae7afe47474b65c7b678a | 1,392 | py | Python | setup.py | greschd/aiida-wannier90 | 747529e8c9fe2df3e65c806a9060a55eb4baa9a0 | [
"MIT"
] | 8 | 2019-10-31T23:51:50.000Z | 2021-03-30T04:48:17.000Z | setup.py | greschd/aiida-wannier90 | 747529e8c9fe2df3e65c806a9060a55eb4baa9a0 | [
"MIT"
] | 84 | 2017-07-17T20:28:15.000Z | 2022-02-16T20:29:10.000Z | setup.py | greschd/aiida-wannier90 | 747529e8c9fe2df3e65c806a9060a55eb4baa9a0 | [
"MIT"
] | 12 | 2017-11-20T08:54:35.000Z | 2022-01-26T12:38:12.000Z | # -*- coding: utf-8 -*-
################################################################################
# Copyright (c), AiiDA team and individual contributors. #
# All rights reserved. #
# This file is part of the AiiDA-wannier90 code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-wannier90 #
# For further information on the license, see the LICENSE.txt file #
################################################################################
import json
import os
from setuptools import setup, find_packages
if __name__ == '__main__':
    # Resolve paths relative to this file so the build works from any CWD.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    def _read(filename):
        """Return the UTF-8 text of *filename* located next to setup.py."""
        with open(os.path.join(base_dir, filename), 'r', encoding='utf-8') as handle:
            return handle.read()

    # Static package metadata (name, version, entry points, ...) lives in
    # setup.json; the README becomes the long description on PyPI.
    setup_kwargs = json.loads(_read('setup.json'))

    setup(
        include_package_data=True,
        setup_requires=['reentry'],
        reentry_register=True,
        long_description=_read('README.md'),
        long_description_content_type='text/markdown',
        packages=find_packages(exclude=['aiida']),
        **setup_kwargs
    )
| 38.666667 | 80 | 0.490661 |
8df634aa7d3a1a254cfd012b72661087336a9337 | 341 | py | Python | plugins/labels/kivy.py | OleksandrBlack/electrum-safecoin | 71a383635f9f2c3b50649376daabb6ba610431c2 | [
"MIT"
] | null | null | null | plugins/labels/kivy.py | OleksandrBlack/electrum-safecoin | 71a383635f9f2c3b50649376daabb6ba610431c2 | [
"MIT"
] | null | null | null | plugins/labels/kivy.py | OleksandrBlack/electrum-safecoin | 71a383635f9f2c3b50649376daabb6ba610431c2 | [
"MIT"
] | 1 | 2020-01-31T22:01:23.000Z | 2020-01-31T22:01:23.000Z | from .labels import LabelsPlugin
from electrum_safecoin.plugins import hook
class Plugin(LabelsPlugin):
    """Kivy GUI glue for the labels plugin."""

    @hook
    def load_wallet(self, wallet, window):
        """Attach the GUI window and start label syncing for the wallet."""
        self.window = window
        self.start_wallet(wallet)

    def on_pulled(self, wallet):
        """Refresh the history view after labels have been pulled."""
        self.print_error('on pulled')
        # NOTE(review): relies on a private Kivy-window API; confirm it stays
        # available across GUI versions.
        self.window._trigger_update_history()
| 22.733333 | 45 | 0.70088 |
d1f4bb98614dba31c964123975c9b9faa08b93bd | 148 | py | Python | faq/tests/__init__.py | Kirembu/django-helpline-faq | 4cd9ef3aa310833bf2fe1cfb9bdc8c67913ab27f | [
"BSD-3-Clause"
] | 46 | 2015-02-01T22:33:00.000Z | 2022-02-27T05:25:11.000Z | faq/tests/__init__.py | jhensley/django-faq | fc680d6be1deaa035e4bb2e752bb57db3eb0e096 | [
"BSD-3-Clause"
] | 2 | 2015-02-28T11:28:33.000Z | 2015-03-15T21:03:37.000Z | faq/tests/__init__.py | jhensley/django-faq | fc680d6be1deaa035e4bb2e752bb57db3eb0e096 | [
"BSD-3-Clause"
] | 23 | 2015-03-12T15:06:27.000Z | 2021-09-30T03:19:15.000Z | from faq.tests.test_admin import *
from faq.tests.test_models import *
from faq.tests.test_templatetags import *
from faq.tests.test_views import *
| 29.6 | 41 | 0.810811 |
937a617088a7e25674a9e2a4c6ab9d381d714435 | 13,377 | py | Python | fedot/utilities/ts_gapfilling.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 1 | 2021-11-09T10:24:38.000Z | 2021-11-09T10:24:38.000Z | fedot/utilities/ts_gapfilling.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | null | null | null | fedot/utilities/ts_gapfilling.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from scipy import interpolate
from fedot.core.data.data import InputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
class SimpleGapFiller:
    """
    Base class used for filling in the gaps in a time series with simple
    methods.

    Methods from the SimpleGapFiller class can be used for comparison with
    more complex models in class ModelGapFiller.

    :param gap_value: value which identifies gap elements in an array
    """

    def __init__(self, gap_value: float = -100.0):
        self.gap_value = gap_value

    def linear_interpolation(self, input_data):
        """
        Restore missing values in an array using linear interpolation.

        :param input_data: array with gaps
        :return: array without gaps
        """
        output_data = np.array(input_data)

        # The indices of the known elements
        non_nan = np.ravel(np.argwhere(output_data != self.gap_value))
        # All known elements in the array
        masked_array = output_data[non_nan]
        interpolant = interpolate.interp1d(non_nan, masked_array)
        x = np.arange(0, len(output_data))
        # NOTE: interp1d does not extrapolate, so a gap before the first or
        # after the last known value raises ValueError (same as before).
        output_data = interpolant(x)
        return output_data

    def local_poly_approximation(self, input_data, degree: int = 2,
                                 n_neighbors: int = 5):
        """
        Restore missing values in an array using a local polynomial fit
        (Savitzky-Golay-style filtering).

        :param input_data: array with gaps
        :param degree: degree of the polynomial function
        :param n_neighbors: number of neighboring known elements of the time
        series that the approximation is based on
        :return: array without gaps
        """
        output_data = np.array(input_data)
        i_gaps = np.ravel(np.argwhere(output_data == self.gap_value))

        # Fill the gaps one by one; values filled earlier become "known" and
        # can support the fits for later gaps.
        for gap_index in i_gaps:
            nearest_indices, nearest_values = self._nearest_known(
                output_data, gap_index, n_neighbors)
            local_coefs = np.polyfit(nearest_indices, nearest_values, degree)
            output_data[gap_index] = np.polyval(local_coefs, gap_index)

        return output_data

    def batch_poly_approximation(self, input_data, degree: int = 3,
                                 n_neighbors: int = 10):
        """
        Restore missing values in an array using batch polynomial
        approximations: a single fit is applied to each whole interval of
        omitted values rather than to individual omissions.

        :param input_data: array with gaps
        :param degree: degree of the polynomial function
        :param n_neighbors: number of neighboring known elements of the
        time series that the approximation is based on
        :return: array without gaps
        """
        output_data = np.array(input_data)

        # Indices of all gap elements, grouped into continuous intervals
        gap_list = np.ravel(np.argwhere(output_data == self.gap_value))
        new_gap_list = self._parse_gap_ids(gap_list)

        for gap in new_gap_list:
            # Anchor the neighbor search at the center of the gap interval
            center_index = int((gap[0] + gap[-1]) / 2)
            nearest_indices, nearest_values = self._nearest_known(
                output_data, center_index, n_neighbors)
            # Approximate the whole interval with one polynomial
            local_coefs = np.polyfit(nearest_indices, nearest_values, degree)
            output_data[gap] = np.polyval(local_coefs, gap)

        return output_data

    def _nearest_known(self, output_data, anchor_index, n_neighbors):
        """
        Return indices and values of the (up to) n_neighbors known elements
        closest to anchor_index.

        Ties in distance are broken by the stable order of np.argsort, which
        matches the previous per-method implementations.
        """
        i_known = np.ravel(np.argwhere(output_data != self.gap_value))
        id_distances = np.abs(i_known - anchor_index)
        closest = np.argsort(id_distances)[:n_neighbors]
        nearest_indices = i_known[closest]
        nearest_values = output_data[nearest_indices]
        return nearest_indices, nearest_values

    def _parse_gap_ids(self, gap_list: list) -> list:
        """
        Group a flat sequence of gap indexes into continuous intervals.

        :param gap_list: sequence of indexes of gaps in the array
        :return: a list of lists, one per continuous run of gap indexes;
        an empty input yields an empty list
        """
        new_gap_list = []
        local_gaps = []
        for gap in gap_list:
            # A jump of more than one index starts a new interval
            if local_gaps and gap - local_gaps[-1] > 1:
                new_gap_list.append(local_gaps)
                local_gaps = []
            local_gaps.append(gap)
        # Bug fix: an empty gap_list used to produce [[]], which made
        # batch_poly_approximation raise IndexError (gap[0] on an empty
        # interval) for a series without gaps. Empty input now returns [].
        if local_gaps:
            new_gap_list.append(local_gaps)
        return new_gap_list
class ModelGapFiller(SimpleGapFiller):
    """
    Class used for filling in the gaps in time series

    :param gap_value: value, which mask gap elements in array
    :param pipeline: TsForecastingPipeline object for filling in the gaps
    """

    def __init__(self, gap_value, pipeline):
        super().__init__(gap_value)
        # Fitted from scratch for every gap interval; see __pipeline_fit_predict.
        self.pipeline = pipeline

    def forward_inverse_filling(self, input_data):
        """
        Method fills in the gaps in the input array using forward and inverse
        directions of predictions

        :param input_data: data with gaps to filling in the gaps in it
        :return: array without gaps
        """
        output_data = np.array(input_data)

        # Gap indices, grouped into continuous intervals (batches)
        gap_list = np.ravel(np.argwhere(output_data == self.gap_value))
        new_gap_list = self._parse_gap_ids(gap_list)

        # Iterately fill in the gaps in the time series
        for batch_index in range(len(new_gap_list)):
            preds = []
            weights = []
            # Two predictions are generated for each gap - forward and backward
            for direction_function in [self._forward, self._inverse]:
                weights_list, predicted_list = direction_function(output_data,
                                                                  batch_index,
                                                                  new_gap_list)
                weights.append(weights_list)
                preds.append(predicted_list)

            preds = np.array(preds)
            weights = np.array(weights)
            # Element-wise weighted average: the forward forecast dominates
            # near the gap's left edge, the inverse one near the right edge
            # (weights arrays have the same shape as preds).
            result = np.average(preds, axis=0, weights=weights)

            gap = new_gap_list[batch_index]
            # Replace gaps in an array with prediction values
            output_data[gap] = result
        return output_data

    def forward_filling(self, input_data):
        """
        Method fills in the gaps in the input array using graph with only
        forward direction (i.e. time series forecasting)

        :param input_data: data with gaps to filling in the gaps in it
        :return: array without gaps
        """
        output_data = np.array(input_data)

        # Gap indices, grouped into continuous intervals
        gap_list = np.ravel(np.argwhere(output_data == self.gap_value))
        new_gap_list = self._parse_gap_ids(gap_list)

        # Iterately fill in the gaps in the time series
        # (earlier gaps are filled before later ones, so each training slice
        # below already contains the previously imputed values)
        for gap in new_gap_list:
            # The entire time series is used for training until the gap
            timeseries_train_part = output_data[:gap[0]]

            # Adaptive prediction interval length
            len_gap = len(gap)

            # Pipeline for the task of filling in gaps
            predicted = self.__pipeline_fit_predict(timeseries_train_part,
                                                    len_gap)

            # Replace gaps in an array with prediction values
            output_data[gap] = predicted
        return output_data

    def _forward(self, timeseries_data, batch_index, new_gap_list):
        """
        The time series method makes a forward forecast based on the part
        of the time series that is located to the left of the gap.

        :param timeseries_data: one-dimensional array of a time series
        :param batch_index: index of the interval (batch) with a gap
        :param new_gap_list: array with nested lists of gap indexes

        :return weights_list: numpy array with prediction weights for
        averaging
        :return predicted_values: numpy array with prediction values in the
        gap
        """
        gap = new_gap_list[batch_index]
        timeseries_train_part = timeseries_data[:gap[0]]

        # Adaptive prediction interval length
        len_gap = len(gap)
        predicted_values = self.__pipeline_fit_predict(timeseries_train_part,
                                                       len_gap)
        # Linearly decreasing weights: highest confidence at the left edge
        # of the gap, lowest at the right edge
        weights_list = np.arange(len_gap, 0, -1)
        return weights_list, predicted_values

    def _inverse(self, timeseries_data, batch_index, new_gap_list):
        """
        The time series method makes an inverse forecast based on the part
        of the time series that is located to the right of the gap.

        :param timeseries_data: one-dimensional array of a time series
        :param batch_index: index of the interval (batch) with a gap
        :param new_gap_list: array with nested lists of gap indexes

        :return weights_list: numpy array with prediction weights for
        averaging
        :return predicted_values: numpy array with prediction values in the
        gap
        """
        gap = new_gap_list[batch_index]

        # If the interval with a gap is the last one in the array
        if batch_index == len(new_gap_list) - 1:
            timeseries_train_part = timeseries_data[(gap[-1] + 1):]
        else:
            next_gap = new_gap_list[batch_index + 1]
            timeseries_train_part = timeseries_data[(gap[-1] + 1):next_gap[0]]
        # Reverse the slice so the "future" of the gap becomes a forecastable
        # "past"; the prediction is flipped back below
        timeseries_train_part = np.flip(timeseries_train_part)

        # Adaptive prediction interval length
        len_gap = len(gap)

        predicted_values = self.__pipeline_fit_predict(timeseries_train_part,
                                                       len_gap)

        predicted_values = np.flip(predicted_values)
        # Linearly increasing weights: highest confidence at the right edge
        # of the gap (mirror of _forward)
        weights_list = np.arange(1, (len_gap + 1), 1)
        return weights_list, predicted_values

    def __pipeline_fit_predict(self, timeseries_train: np.array, len_gap: int):
        """
        The method makes a prediction as a sequence of elements based on a
        training sample. There are two main parts: fit model and predict.

        :param timeseries_train: part of the time series for training the model
        :param len_gap: number of elements in the gap
        :return: array without gaps
        """
        task = Task(TaskTypesEnum.ts_forecasting,
                    TsForecastingParams(forecast_length=len_gap))

        input_data = InputData(idx=np.arange(0, len(timeseries_train)),
                               features=timeseries_train,
                               target=timeseries_train,
                               task=task,
                               data_type=DataTypesEnum.ts)

        # Making predictions for the missing part in the time series
        # (the pipeline is re-fitted from scratch for every gap interval)
        self.pipeline.fit_from_scratch(input_data)

        # "Test data" for making prediction for a specific length
        start_forecast = len(timeseries_train)
        end_forecast = start_forecast + len_gap
        idx_test = np.arange(start_forecast, end_forecast)
        test_data = InputData(idx=idx_test,
                              features=timeseries_train,
                              target=None,
                              task=task,
                              data_type=DataTypesEnum.ts)

        predicted_values = self.pipeline.predict(test_data)
        predicted_values = np.ravel(np.array(predicted_values.predict))
        return predicted_values
| 38.22 | 80 | 0.615086 |
7db61a020a2c5f6a7fd411989693f79cdb2b0218 | 959 | py | Python | autosk_dev_test/utilities/policies_theano.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 2 | 2018-01-18T06:25:21.000Z | 2018-12-11T07:43:09.000Z | autosk_dev_test/utilities/policies_theano.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | 1 | 2016-03-29T07:55:18.000Z | 2016-03-29T07:55:18.000Z | autosk_dev_test/utilities/policies_theano.py | hmendozap/master-arbeit-files | 5c1b90bc4a424313234b84bad405799de6f8d2ed | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Policies modules
"""
__author__ = 'mendozah'
import theano
import theano.tensor as T
# Demo values for the learning-rate policy update below.
initial_lr = 0.1
value_gamma = 0.8
value_epoch = 3.0
value_power = 0.75

# Shared variable holding the current learning rate; mutated by policy_update.
lr = theano.shared(initial_lr, 'lr')
epoch, gamma, powr, step = T.scalars('epoch', 'gm', 'powr', 'step')

# Select which decay schedule to compile ('inv', 'exp', 'step', or 'fixed').
lr_policy = 'fixed'

if lr_policy == 'inv':
    # Inverse decay: (1 + gamma * epoch) ** -power
    decay = T.power(1 + gamma * epoch, -powr)
elif lr_policy == 'exp':
    # Exponential decay: gamma ** epoch
    decay = gamma ** epoch
elif lr_policy == 'step':
    # Step decay: multiply by gamma ** (epoch // step) at each step boundary.
    # NOTE(review): `T.mod_check` — confirm this is the intended modulo op
    # (theano's public API is `T.mod`); also `step` is not in the compiled
    # function's inputs below, so this branch would fail to compile — verify.
    decay = T.switch(T.eq(T.mod_check(epoch, step), 0),
                     T.power(gamma, T.floor_div(epoch, step)),
                     1.0)
elif lr_policy == 'fixed':
    # Constant decay factor of 1.0 (learning rate never changes).
    decay = T.constant(1.0, name='fixed', dtype='float32')

# Compiling updates lr in place: lr <- lr * decay.
# 'warn' tolerates inputs unused by the chosen policy (e.g. all of them
# for 'fixed').
policy_update = theano.function([gamma, epoch, powr],
                                decay,
                                updates=[(lr, lr * decay)],
                                on_unused_input='warn')
policy_update(value_gamma, value_epoch, value_power)
| 25.918919 | 67 | 0.571429 |
db0c073745d24dee662ab71688fe18dafcd9de72 | 4,320 | py | Python | onfido/migrations/0010_convert_raw_fields_to_jsonb.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | onfido/migrations/0010_convert_raw_fields_to_jsonb.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | onfido/migrations/0010_convert_raw_fields_to_jsonb.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-15 22:50
from __future__ import unicode_literals
from django.db import migrations
from ..db.fields import JSONField
class Migration(migrations.Migration):
    """Convert the ``raw`` column on all four models to native JSONField storage."""

    dependencies = [
        ('onfido', '0009_add_is_clear_field'),
    ]

    # The same field definition applies to every model, so the operations
    # list is built from the model names instead of repeating it four times.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='raw',
            field=JSONField(blank=True, help_text='The raw JSON returned from the API.', null=True),
        )
        for model_name in ('applicant', 'check', 'report', 'event')
    ]
# Below is an example of a full parallel conversion that creates a new field
# copies across the data from the old field, drops the old field and renames
# the new field to the original name. Only required if for some reason a
# straight ALTER statement won't work on existing data. For reference, the
# SQL is: ALTER TABLE "onfido_report" ALTER COLUMN "raw" TYPE jsonb USING "raw"::jsonb;
# def migrate_old_to_new(klass):
# """Copy the JSON in the TextField across to the new JSONField."""
# logger.info("Converting {}".format(klass))
# for obj in klass.objects.order_by('-id'):
# obj.new_raw = obj.raw
# logger.info("Saving {}.{}".format(klass, obj.id))
# obj.save(update_fields=['new_raw'])
# def migrate_applicants(apps, schema_editor):
# migrate_old_to_new(apps.get_model("onfido", "Applicant"))
# def migrate_checks(apps, schema_editor):
# migrate_old_to_new(apps.get_model("onfido", "Check"))
# def migrate_reports(apps, schema_editor):
# migrate_old_to_new(apps.get_model("onfido", "Report"))
# def migrate_events(apps, schema_editor):
# migrate_old_to_new(apps.get_model("onfido", "Event"))
# class Migration(migrations.Migration):
# dependencies = [
# ('onfido', '0009_add_is_clear_field'),
# ]
# operations = [
# # create the new fields
# migrations.AddField(
# model_name='applicant',
# name='new_raw',
# field=JSONField(blank=True, help_text='The raw JSON returned from the API.', null=True),
# ),
# migrations.AddField(
# model_name='check',
# name='new_raw',
# field=JSONField(blank=True, help_text='The raw JSON returned from the API.', null=True),
# ),
# migrations.AddField(
# model_name='report',
# name='new_raw',
# field=JSONField(blank=True, help_text='The raw JSON returned from the API.', null=True),
# ),
# migrations.AddField(
# model_name='event',
# name='new_raw',
# field=JSONField(blank=True, help_text='The raw JSON returned from the API.', null=True),
# ),
# # 2. migrate the data across from the old fields
# migrations.RunPython(migrate_applicants),
# migrations.RunPython(migrate_checks),
# migrations.RunPython(migrate_reports),
# migrations.RunPython(migrate_events),
# # 3. remove the old fields
# migrations.RemoveField(model_name='applicant', name='raw'),
# migrations.RemoveField(model_name='check', name='raw'),
# migrations.RemoveField(model_name='report', name='raw'),
# migrations.RemoveField(model_name='event', name='raw'),
# # 4. rename the new field to the same name as the old field
# migrations.RenameField(model_name='applicant', old_name='new_raw', new_name='raw'),
# migrations.RenameField(model_name='check', old_name='new_raw', new_name='raw'),
# migrations.RenameField(model_name='report', old_name='new_raw', new_name='raw'),
# migrations.RenameField(model_name='event', old_name='new_raw', new_name='raw'),
# ]
| 37.241379 | 102 | 0.627083 |
8ca0e29cdfb3e612765d6e67072f93d940a802bc | 231 | py | Python | anyway/database.py | ShaiEdy/anyway | ea669ad089d37fe0933a4596397bcb2d0b4f5de9 | [
"BSD-3-Clause"
] | 1 | 2020-07-16T16:51:17.000Z | 2020-07-16T16:51:17.000Z | anyway/database.py | ShaiEdy/anyway | ea669ad089d37fe0933a4596397bcb2d0b4f5de9 | [
"BSD-3-Clause"
] | 5 | 2020-07-30T08:30:04.000Z | 2021-06-25T15:39:48.000Z | anyway/database.py | ShaiEdy/anyway | ea669ad089d37fe0933a4596397bcb2d0b4f5de9 | [
"BSD-3-Clause"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from . import config
# Module-level engine shared by the app; connection URI comes from config.
# NOTE(review): `convert_unicode` is deprecated/removed in newer SQLAlchemy
# releases — confirm the pinned version still accepts it.
engine = create_engine(config.SQLALCHEMY_DATABASE_URI, convert_unicode=True, echo=False)
# Declarative base class for all ORM models.
Base = declarative_base()
| 28.875 | 88 | 0.844156 |
cf8ad7ccc113d75e6bf78d20339e9500907eefb3 | 4,892 | py | Python | .history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210710134347.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210710134347.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | .history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210710134347.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Global and local Scopes
Scopes and Namespaces
When an object is assigned to a variable # a = 10
that variable points to some object
and we say that the variable (name) is bound to that object
That object can be accessed using that name in various parts of our code
# ### I can't reference that (a) just anywhere in my code!
That variable name and it's binding (name and object) only "exist" in specific parts of our code
The portion of code where that name/binding is defined is called the lexical scope of the variable
These bindings are stored in namespaces
(each scope has its own namespace)
The global scope
The global scope is essentially the module scope
It spans a single file only
There is no concept of a truly global (across all the modules in our app) scope in Python
The only exception to this are some of the built=in globally available objects, such as:
True False None dict print
The built-in global variables can be used anywhere inside our module
including inside any function
Global scopes are nested inside the built-in scope
Built-in Scope
Module 1 name spaces
Scope name var1 0xA345E
space func1 0xFF34A
Module 2
Scope name
space
If I reference a variable name inside a scope and Python does not find it in that scope's namespace
Examples
module1.py Python does not find True or print in the current (module/global) scope
print(True) So, it looks for them in the enclosing scope -> build-in
Finds them there -> True
module2.py Python does not find a or print in the current (module/global) scope
print(a) So, it looks for them in the enclosing scope -> built-in
Find print, but not a -> run-time Name Error
module3.py
print = lambda x: 'hello {0}!'.format(x)
s = print('world') Python finds print in the module scope
So it uses it
s -> hello world
The Local Scope
When we create functions, we can create variable names inside those functions (using assignments)
e.g. a = 10
Variables defined inside a function are not created until the function is called
Every time the function is called, a new scope is created
Variables defined inside the function are assigned to the scope -> Function Local scope
-> Local scope
The actual object the variable references could be different each time the function is called
(this is why recursion works!)
Examples
my_func
def my_func(a,b): a
c = a * b b
return c c
my_func
my_func('z', 2) a-> 'z'
b->2
c->'zz' \
same names, different local scopes
my_func /
my_func(10, 5) a->10
b->5
c->50
Nested scopes
Module scopes are often nested inside Built-in scopes
Local are inside of built-in scopes.
Python looks first at the local scope, then at the Module scope and then the built-in scope
When I call a function a second time, a new local scope is created
Name space lookups
When requesting the object bound to a variable name:
e.g. print(a) # don't worry about "print"
Python will try to find the object bound to the variable
Python will try to find the object bound to the variable # a
in the current local scope first
works up the chain of enclosing scopes
When my_func (var) finishes running, the scope is gone too!
and the reference count of the object var was bound to (referenced) and is decremented
We say that var goes out of scope
Accessing the global scope from the local scope
When retrieving the value of a global variable from inside a function, Python automatically searches the local scope's namespace, and up the chain of all the enclosing scope namespaces
local -> global -> built-in
Modifying a global variables value from inside the function
a = 0
assignment -> Python interprets this as a local variable (at compile-time)>
->The local variable (a) masks the global variable a
/assignment -> Python interprets this as a local variable variable (at compile-time)
/ -> the local variable a masks the global variable a
def my_func(): /
a = 100 </
print(a)
my_func() # 100
print(a) # 0
""" | 29.46988 | 184 | 0.611611 |
486dfb815db32866a7a7ffbfcf796d17e1d56dde | 2,549 | py | Python | examples/nin_cifar10.py | deep-learning-algorithm/PyNet | 354c7ee88a712a1f5069d58a0be4a6cbfaeab861 | [
"Apache-2.0"
] | 8 | 2020-11-22T02:22:55.000Z | 2022-03-16T12:18:03.000Z | examples/nin_cifar10.py | zjZSTU/PyNet | 354c7ee88a712a1f5069d58a0be4a6cbfaeab861 | [
"Apache-2.0"
] | null | null | null | examples/nin_cifar10.py | zjZSTU/PyNet | 354c7ee88a712a1f5069d58a0be4a6cbfaeab861 | [
"Apache-2.0"
] | 4 | 2020-12-10T09:21:56.000Z | 2021-04-19T02:25:01.000Z | # -*- coding: utf-8 -*-
# @Time : 19-6-21 下午2:45
# @Author : zj
from pynet import nn, models, vision
import pynet.models.utils as utils
from pynet.vision.data import cifar
import numpy as np
import time
# Training hyperparameters for the NIN/CIFAR-10 example.
data_path = '~/data/decompress_cifar_10'
epochs = 100
batch_size = 128
# NOTE(review): momentum is declared but not visibly used in nin_train —
# confirm whether the optimizer consumes it elsewhere.
momentum = 0.9
learning_rate = 1e-3
reg = 1e-3  # L2 regularization strength passed to net.update
p_h = 0.5  # dropout keep probability for the NIN model
def nin_train():
    """Train a Network-in-Network model on CIFAR-10 with mini-batch SGD.

    Loads and normalises the data, runs up to ``epochs`` passes, logs the
    mean loss per epoch, and every 20 epochs evaluates train/test accuracy;
    training stops early (saving the parameters) once both accuracies
    exceed 0.995.  Produces a loss plot and an accuracy plot at the end.
    """
    x_train, x_test, y_train, y_test = cifar.load_cifar10(data_path, shuffle=True)
    # Normalisation: scale pixels to roughly [-0.5, 0.5]
    x_train = x_train / 255.0 - 0.5
    x_test = x_test / 255.0 - 0.5
    net = models.nin(in_channels=3, p_h=p_h)
    criterion = nn.CrossEntropyLoss()
    accuracy = vision.Accuracy()
    loss_list = []
    train_list = []
    test_list = []
    best_train_accuracy = 0.995
    best_test_accuracy = 0.995
    # Batch start offsets; the final partial batch is dropped.
    range_list = np.arange(0, x_train.shape[0] - batch_size, step=batch_size)
    for i in range(epochs):
        total_loss = 0
        num = 0
        start = time.time()
        for j in range_list:
            data = x_train[j:j + batch_size]
            labels = y_train[j:j + batch_size]
            scores = net(data)
            loss = criterion(scores, labels)
            total_loss += loss
            num += 1
            # Backpropagate and apply the SGD update with L2 regularisation.
            grad_out = criterion.backward()
            net.backward(grad_out)
            net.update(lr=learning_rate, reg=reg)
        end = time.time()
        print('one epoch need time: %.3f' % (end - start))
        print('epoch: %d loss: %f' % (i + 1, total_loss / num))
        loss_list.append(total_loss / num)
        if (i % 20) == 19:
            # # Halve the learning rate every 20 epochs (currently disabled)
            # learning_rate *= 0.5
            train_accuracy = accuracy.compute_v2(x_train, y_train, net, batch_size=batch_size)
            test_accuracy = accuracy.compute_v2(x_test, y_test, net, batch_size=batch_size)
            train_list.append(train_accuracy)
            test_list.append(test_accuracy)
            print(loss_list)
            print(train_list)
            print(test_list)
            # Early stop + checkpoint once both accuracies beat the targets.
            if train_accuracy > best_train_accuracy and test_accuracy > best_test_accuracy:
                path = 'nin-epochs-%d.pkl' % (i + 1)
                utils.save_params(net.get_params(), path=path)
                break
    draw = vision.Draw()
    draw(loss_list, xlabel='迭代/20次')
    draw.multi_plot((train_list, test_list), ('训练集', '测试集'),
                    title='精度图', xlabel='迭代/20次', ylabel='精度值', save_path='acc.png')
if __name__ == '__main__':
    # Time the full training run end to end.
    start = time.time()
    nin_train()
    end = time.time()
    print('training need time: %.3f' % (end - start))
| 28.322222 | 94 | 0.588466 |
3ce5bc0fec3c2942c8d2572be6c9ed84fba7c1b9 | 769 | py | Python | tests/test_message_handler.py | lucenaproject/lucena2 | 48660adb57ed3aa030bd9376852af1ac24b1f7ca | [
"MIT"
] | null | null | null | tests/test_message_handler.py | lucenaproject/lucena2 | 48660adb57ed3aa030bd9376852af1ac24b1f7ca | [
"MIT"
] | null | null | null | tests/test_message_handler.py | lucenaproject/lucena2 | 48660adb57ed3aa030bd9376852af1ac24b1f7ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from lucena.message_handler import MessageHandler
class TestMessageHandler(unittest.TestCase):
    """Ordering tests for MessageHandler comparison semantics."""

    def setUp(self):
        super(TestMessageHandler, self).setUp()

    def test_more_properties_win(self):
        """A handler matching more properties sorts before a less specific one."""
        specific = MessageHandler({'a': 123, 'b': 999, 'c': 'hello'}, None)
        generic = MessageHandler({'a': 123, 'b': 999}, None)
        self.assertTrue(specific < generic)

    def test_alphabetical_order_win(self):
        """With equal property counts, the alphabetically earlier key wins."""
        earlier = MessageHandler({'a': 123, 'b': 999, 'c': 'hello'}, None)
        later = MessageHandler({'a': 123, 'b': 999, 'd': 'hello'}, None)
        self.assertTrue(earlier < later)
a85c92f6cf48760173f3c378b8ed27dfe86fd158 | 12,153 | py | Python | tests/unit/__init__.py | affiliprint/python-intercom | d7d9de8deba3db1acc3a779c6f258d47620c16f4 | [
"MIT"
] | 77 | 2018-01-12T20:14:37.000Z | 2022-03-21T04:57:24.000Z | tests/unit/__init__.py | affiliprint/python-intercom | d7d9de8deba3db1acc3a779c6f258d47620c16f4 | [
"MIT"
] | 40 | 2018-01-11T01:43:37.000Z | 2022-02-16T19:24:35.000Z | tests/unit/__init__.py | affiliprint/python-intercom | d7d9de8deba3db1acc3a779c6f258d47620c16f4 | [
"MIT"
] | 60 | 2018-03-20T00:38:56.000Z | 2022-02-22T15:49:24.000Z | # -*- coding: utf-8 -*-
import json
import os
from mock import Mock
# Directory of this test package.
DIRPATH = os.path.dirname(__file__)
# Canned HTTP response bodies used by create_response().
FIXTURES = os.path.join(DIRPATH, 'fixtures')
def create_response(status, fixture=None):
    """Return a stub request function producing a canned HTTP response.

    The stub ignores its call arguments and returns a Mock whose
    ``status_code`` is *status*; when *fixture* is given, the file of
    that name inside FIXTURES is read into ``response.content``.
    """
    def request(*args, **kwargs):
        response = Mock()
        response.status_code = status
        if fixture:
            fixture_path = os.path.join(FIXTURES, fixture)
            # Close the fixture file promptly instead of leaking the handle
            # (previously ``open(...).read()`` never closed it).
            with open(fixture_path) as fixture_file:
                response.content = fixture_file.read()
        return response
    return request
def local_response(**params):
    """Return a request stub that echoes its call kwargs as JSON content.

    Keyword arguments given here override same-named call-time kwargs;
    the merged mapping is serialised into ``response.content`` and the
    response always carries status 200.
    """
    def _echo(*args, **kwargs):
        payload = dict(kwargs)
        payload.update(params)
        stub = Mock()
        stub.content = json.dumps(payload)
        stub.status_code = 200
        return stub
    return _echo
def mock_response(content, status_code=200, encoding='utf-8', headers=None):
    """Build a Mock HTTP response; defaults include Intercom rate-limit headers."""
    default_headers = {
        'x-ratelimit-limit': 500,
        'x-ratelimit-remaining': 500,
        'x-ratelimit-reset': 1427932858,
    }
    if headers is None:
        headers = default_headers
    return Mock(content=content,
                status_code=status_code,
                encoding=encoding,
                headers=headers)
def get_user(email="bob@example.com", name="Joe Schmoe"):
    """Return a fully-populated sample Intercom user payload.

    Mirrors the shape of Intercom's user resource: nested company list,
    social profiles, location data and custom attributes.  *email* and
    *name* are parameterised so tests can fabricate distinct users.

    NOTE(review): the literal defines "created_at" twice (1323422442 and
    1401970114); the later value wins in Python -- confirm which was meant.
    """
    return {
        "type": "user",
        "id": "aaaaaaaaaaaaaaaaaaaaaaaa",
        "user_id": 'id-from-customers-app',
        "email": email,
        "name": name,
        "avatar": {
            "type": "avatar",
            "image_url": "https://graph.facebook.com/1/picture?width=24&height=24"
        },
        "app_id": "the-app-id",
        "created_at": 1323422442,
        "custom_attributes": {"a": "b", "b": 2},
        "companies": {
            "type": "company.list",
            "companies": [
                {
                    "type": "company",
                    "company_id": "123",
                    "id": "bbbbbbbbbbbbbbbbbbbbbbbb",
                    "app_id": "the-app-id",
                    "name": "Company 1",
                    "remote_created_at": 1390936440,
                    "created_at": 1401970114,
                    "updated_at": 1401970114,
                    "last_request_at": 1401970113,
                    "monthly_spend": 0,
                    "session_count": 0,
                    "user_count": 1,
                    "tag_ids": [],
                    "custom_attributes": {
                        "category": "Tech"
                    }
                }
            ]
        },
        "session_count": 123,
        "unsubscribed_from_emails": True,
        "last_request_at": 1401970113,
        "created_at": 1401970114,
        "remote_created_at": 1393613864,
        "updated_at": 1401970114,
        "user_agent_data": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "social_profiles": {
            "type": "social_profile.list",
            "social_profiles": [
                {
                    "type": "social_profile",
                    "name": "twitter",
                    "url": "http://twitter.com/abc",
                    "username": "abc",
                    "id": None
                },
                {
                    "type": "social_profile",
                    "name": "twitter",
                    "username": "abc2",
                    "url": "http://twitter.com/abc2",
                    "id": None
                },
                {
                    "type": "social_profile",
                    "name": "facebook",
                    "url": "http://facebook.com/abc",
                    "username": "abc",
                    "id": "1234242"
                },
                {
                    "type": "social_profile",
                    "name": "quora",
                    "url": "http://facebook.com/abc",
                    "username": "abc",
                    "id": "1234242"
                }
            ]
        },
        "location_data": {
            "type": "location_data",
            "city_name": 'Dublin',
            "continent_code": 'EU',
            "country_name": 'Ireland',
            "latitude": '90',
            "longitude": '10',
            "postal_code": 'IE',
            "region_name": 'Europe',
            "timezone": '+1000',
            "country_code": "IRL"
        }
    }
def get_company(name):
    """Return a sample Intercom company payload with the given *name*."""
    return {
        "type": "company",
        "id": "531ee472cce572a6ec000006",
        "name": name,
        "plan": {
            "type": "plan",
            "id": "1",
            "name": "Paid"
        },
        "company_id": "6",
        "remote_created_at": 1394531169,
        "created_at": 1394533506,
        "updated_at": 1396874658,
        "monthly_spend": 49,
        "session_count": 26,
        "user_count": 10,
        "custom_attributes": {
            "paid_subscriber": True,
            "team_mates": 0
        }
    }
def get_event(name="the-event-name"):
    """Return a sample Intercom event payload named *name*."""
    event = {
        "type": "event",
        "event_name": name,
        "created_at": 1389913941,
        "user_id": "314159",
    }
    event["metadata"] = {
        "type": "user",
        "invitee_email": "pi@example.org",
        "invite_code": "ADDAFRIEND",
    }
    return event
def page_of_users(include_next_link=False):
    """First page of a user.list result; optionally carries a next-page URL."""
    pagination = {
        "type": "pages",
        "page": 1,
        "next": ("https://api.intercom.io/users?per_page=50&page=2"
                 if include_next_link else None),
        "per_page": 50,
        "total_pages": 7,
    }
    return {
        "type": "user.list",
        "pages": pagination,
        "users": [get_user("user1@example.com"),
                  get_user("user2@example.com"),
                  get_user("user3@example.com")],
        "total_count": 314,
    }
def users_scroll(include_users=False):  # noqa
    """One page of a Scroll API result, optionally carrying three users."""
    scroll_users = []
    if include_users:
        scroll_users = [get_user("user%d@example.com" % i) for i in (1, 2, 3)]
    return {
        "type": "user.list",
        "scroll_param": "da6bbbac-25f6-4f07-866b-b911082d7",
        "users": scroll_users,
    }
def page_of_events(include_next_link=False):
    """A page of event.list results; optionally carries a next-page URL."""
    next_link = None
    if include_next_link:
        next_link = ("https://api.intercom.io/events?type=user"
                     "&intercom_user_id=55a3b&before=144474756550")  # noqa
    return {
        "type": "event.list",
        "pages": {"next": next_link},
        "events": [get_event("invited-friend"), get_event("bought-sub")],
    }
def page_of_companies(include_next_link=False):
    """First page of a company.list result; optionally carries a next-page URL."""
    page = {
        "type": "company.list",
        "pages": {
            "type": "pages",
            "page": 1,
            "next": None,
            "per_page": 50,
            "total_pages": 7,
        },
        "companies": [get_company(label)
                      for label in ('ACME A', 'ACME B', 'ACME C')],
        "total_count": 3,
    }
    if include_next_link:
        page["pages"]["next"] = "https://api.intercom.io/companies?per_page=50&page=2"
    return page
test_tag = {
"id": "4f73428b5e4dfc000b000112",
"name": "Test Tag",
"segment": False,
"tagged_user_count": 2
}
test_subscription = {
"type": "notification_subscription",
"id": "nsub_123456789",
"created_at": 1410368642,
"updated_at": 1410368642,
"service_type": "web",
"app_id": "3qmk5gyg",
"url": "http://example.com",
"self": "https://api.intercom.io/subscriptions/nsub_123456789",
"topics": ["user.created", "conversation.user.replied", "conversation.admin.replied"],
"active": True,
"metadata": {},
"hub_secret": None,
"mode": "point",
"links": {
"sent": "https://api.intercom.io/subscriptions/nsub_123456789/sent",
"retry": "https://api.intercom.io/subscriptions/nsub_123456789/retry",
"errors": "https://api.intercom.io/subscriptions/nsub_123456789/errors"
},
"notes": []
}
test_user_notification = {
"type": "notification_event",
"id": "notif_123456-56465-546546",
"topic": "user.created",
"app_id": "aaaaaa",
"data": {
"type": "notification_event_data",
"item": {
"type": "user",
"id": "aaaaaaaaaaaaaaaaaaaaaaaa",
"user_id": None,
"email": "joe@example.com",
"name": "Joe Schmoe",
"avatar": {
"type": "avatar",
"image_url": None
},
"app_id": "aaaaa",
"companies": {
"type": "company.list",
"companies": []
},
"location_data": {
},
"last_request_at": None,
"created_at": "1401970114",
"remote_created_at": None,
"updated_at": "1401970114",
"session_count": 0,
"social_profiles": {
"type": "social_profile.list",
"social_profiles": []
},
"unsubscribed_from_emails": False,
"user_agent_data": None,
"tags": {
"type": "tag.list",
"tags": []
},
"segments": {
"type": "segment.list",
"segments": []
},
"custom_attributes": {
}
}
},
"delivery_status": None,
"delivery_attempts": 1,
"delivered_at": 0,
"first_sent_at": 1410188629,
"created_at": 1410188628,
"links": {},
"self": None
}
test_conversation_notification = {
"type": "notification_event",
"id": "notif_123456-56465-546546",
"topic": "conversation.user.created",
"app_id": "aaaaa",
"data": {
"type": "notification_event_data",
"item": {
"type": "conversation",
"id": "123456789",
"created_at": "1410335293",
"updated_at": "1410335293",
"user": {
"type": "user",
"id": "540f1de7112d3d1d51001637",
"name": "Kill Bill",
"email": "bill@bill.bill"
},
"assignee": {
"type": "nobody_admin",
"id": None
},
"conversation_message": {
"type": "conversation_message",
"id": "321546",
"subject": "",
"body": "<p>An important message</p>",
"author": {
"type": "user",
"id": "aaaaaaaaaaaaaaaaaaaaaa",
"name": "Kill Bill",
"email": "bill@bill.bill"
},
"attachments": []
},
"conversation_parts": {
"type": "conversation_part.list",
"conversation_parts": [
{
"type": "conversation_part",
"id": "4412",
"part_type": "comment",
"body": "<p>Hi Jane, it's all great thanks!</p>",
"created_at": 1400857494,
"updated_at": 1400857494,
"notified_at": 1400857587,
"assigned_to": None,
"author": {
"type": "user",
"id": "536e564f316c83104c000020"
},
"attachments": []
}
]
},
"open": None,
"read": True,
"links": {
"conversation_web": "https://app.intercom.io/a/apps/aaaaaa/inbox/all/conversations/123456789"
}
}
},
"delivery_status": None,
"delivery_attempts": 1,
"delivered_at": 0,
"first_sent_at": 1410335293,
"created_at": 1410335293,
"links": {},
"self": "http://example.com/resource/url/"
}
| 29.713936 | 148 | 0.45931 |
5f6f284dc1e541bfb75c4803ab7d153e1f5b3927 | 15,672 | py | Python | project.py | Kajitaku/ud_item_catalog | 9c07d6bee3d2ccb9fb5d2575b7e15dd31f4b67ec | [
"MIT"
] | null | null | null | project.py | Kajitaku/ud_item_catalog | 9c07d6bee3d2ccb9fb5d2575b7e15dd31f4b67ec | [
"MIT"
] | null | null | null | project.py | Kajitaku/ud_item_catalog | 9c07d6bee3d2ccb9fb5d2575b7e15dd31f4b67ec | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, redirect
from flask import make_response, jsonify, url_for, flash, session
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Catalog, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
import random
import string
from datetime import datetime
# Load the Google OAuth client id from the downloaded client secrets file.
CLIENT_ID = json.loads(
    open('client_secrets.json', 'r').read())['web']['client_id']
app = Flask(__name__)
# Connect to the SQLite database and create a module-wide session.
# NOTE(review): this rebinds the name ``session`` imported from flask above.
engine = create_engine('sqlite:///catalogapp.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# show login form
@app.route('/login')
def showLogin():
    """Render the login page with a fresh anti-forgery state token."""
    # Use the OS CSPRNG: this token guards against CSRF, so it must not
    # come from the default (predictable) Mersenne Twister PRNG.
    rng = random.SystemRandom()
    state = ''.join(rng.choice(string.ascii_uppercase + string.digits)
                    for x in xrange(32))
    # Set anti-forgery state token
    login_session['state'] = state
    return render_template('login.html', STATE=state)
# Connect google account with OAuth
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Complete the Google OAuth sign-in handshake.

    Exchanges the one-time authorization code posted by the client for
    credentials, validates the resulting access token against Google,
    stores the user's identity in the Flask session (creating a local
    User row on first login) and returns a small HTML welcome snippet.
    Error paths return JSON with an appropriate HTTP status.
    """
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit when this browser session is already connected.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(
            json.dumps('Current user is already connected.'),
            200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # ADD PROVIDER TO LOGIN SESSION
    login_session['provider'] = 'google'
    # see if user exists, if it doesn't make a new one
    user_id = getUserID(data["email"])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the HTML snippet shown to the user after a successful login.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;'
    output += '-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    print "done!"
    return output
# Disconnect google account with OAuth
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the current user's Google access token.

    Returns JSON: 200 on success, 401 when nobody is connected,
    400 when Google refuses to revoke the token.
    """
    # Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        response = make_response(json.dumps('Successfully disconnected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # BUG FIX: the 400 was previously passed to json.dumps() (as its
        # ``skipkeys`` argument) instead of make_response(), so this failure
        # response went out with HTTP 200.
        response = make_response(json.dumps(
            'Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
def createUser(login_session):
    """Persist a new User from the OAuth session data and return its id."""
    fresh = User(name=login_session['username'],
                 email=login_session['email'],
                 picture=login_session['picture'])
    session.add(fresh)
    session.commit()
    stored = session.query(User).filter_by(email=login_session['email']).one()
    return stored.id
def getUserInfo(user_id):
    """Fetch the User row with primary key *user_id* (raises when absent)."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Return the id of the User with *email*, or None when no such user exists."""
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except NoResultFound:
        # The previous bare ``except:`` swallowed every error (including
        # programming mistakes); only "no matching row" should map to None.
        return None
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
    """Log the current user out, clearing provider-specific session state."""
    if 'provider' in login_session:
        if login_session['provider'] == 'google':
            gdisconnect()
            del login_session['gplus_id']
            del login_session['access_token']
        if login_session['provider'] == 'facebook':
            # NOTE(review): fbdisconnect() is not defined anywhere in this
            # file, so this branch would raise NameError -- confirm whether
            # facebook login is actually wired up.
            fbdisconnect()
            del login_session['facebook_id']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        del login_session['user_id']
        del login_session['provider']
        flash("You have successfully been logged out.")
        return redirect(url_for('showCatalogs'))
    else:
        flash("You were not logged in")
        return redirect(url_for('showCatalogs'))
# Show all catalogs
@app.route('/')
@app.route('/catalog/')
def showCatalogs():
    """Landing page: all catalogs plus the first ten items by creation time."""
    all_catalogs = session.query(Catalog).order_by(asc(Catalog.name))
    recent_items = session.query(Item).order_by(asc(Item.created_at)).limit(10)
    return render_template(
        'catalog/catalogs.html', catalogs=all_catalogs, items=recent_items)
# Show all catalogs json with related items
@app.route('/catalog/json')
def showCatalogsJson():
    """JSON listing of every catalog (serialized via the model's helper)."""
    all_catalogs = session.query(Catalog).order_by(asc(Catalog.name)).all()
    return jsonify(catalogs=[entry.serialize for entry in all_catalogs])
# Create a new catalog
@app.route('/catalog/new/', methods=['GET', 'POST'])
def newcatalog():
    """Create a catalog from the submitted form (login required).

    GET renders the form; POST persists the catalog and redirects to the
    catalog list.
    """
    # check logined or not
    if 'username' not in login_session:
        return redirect('/login')
    if request.method == 'POST':
        # BUG FIX: this previously instantiated ``catalog`` (lowercase), a
        # name that does not exist -- the model class is ``Catalog``, so
        # every POST raised NameError.  The local is also renamed so it no
        # longer shadows this view function.
        new_catalog = Catalog(
            name=request.form['name'], user_id=login_session['user_id'])
        session.add(new_catalog)
        session.commit()
        flash('New catalog %s Successfully Created' % new_catalog.name)
        return redirect(url_for('showCatalogs'))
    else:
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template('catalog/newCatalog.html', catalogs=catalogs)
# Edit a catalog
@app.route('/catalog/<int:catalog_id>/edit/', methods=['GET', 'POST'])
def editcatalog(catalog_id):
    """Rename a catalog (login + ownership required)."""
    # check logined or not
    if 'username' not in login_session:
        return redirect('/login')
    editedCatalog = session.query(Catalog).filter_by(id=catalog_id).one()
    if request.method == 'POST':
        # BUG FIX: ownership was checked with ``is not`` (object identity),
        # which only happens to work for small interned ints; use ``!=``.
        if editedCatalog.user_id != login_session['user_id']:
            flash('Authorization is failed')
            return redirect(url_for('newcatalog'))
        editedCatalog.name = request.form['name']
        session.add(editedCatalog)
        session.commit()
        flash('catalog Successfully Edited')
        return redirect(url_for('showCatalogs'))
    else:
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template(
            'catalog/editCatalog.html',
            catalog=editedCatalog,
            catalogs=catalogs)
# Delete a catalog
@app.route('/catalog/<int:catalog_id>/delete/', methods=['GET', 'POST'])
def deleteCatalog(catalog_id):
    """Delete a catalog (login + ownership required)."""
    # check logined or not
    if 'username' not in login_session:
        return redirect('/login')
    deletedCatalog = session.query(Catalog).filter_by(id=catalog_id).one()
    if request.method == 'POST':
        # BUG FIX: ownership was checked with ``is not`` (object identity),
        # which only happens to work for small interned ints; use ``!=``.
        if deletedCatalog.user_id != login_session['user_id']:
            flash('Authorization is failed')
            return redirect(url_for('newcatalog'))
        session.delete(deletedCatalog)
        session.commit()
        flash('catalog Successfully Deleted')
        return redirect(url_for('showCatalogs'))
    else:
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template(
            'catalog/deleteCatalog.html',
            catalog=deletedCatalog,
            catalogs=catalogs)
# Show catalog's all items
@app.route('/catalog/<int:catalog_id>/item/')
def showItems(catalog_id):
    """List every item belonging to one catalog."""
    current = session.query(Catalog).filter_by(id=catalog_id).one()
    item_query = session.query(Item).filter_by(catalog_id=current.id)
    all_catalogs = session.query(Catalog).order_by(asc(Catalog.name))
    return render_template(
        'item/items.html',
        items=item_query.all(),
        catalog=current,
        catalogs=all_catalogs,
        catalog_item_count=item_query.count())
# Show items json
@app.route('/catalog/<int:catalog_id>/item/json')
def showItemsJson(catalog_id):
    """JSON listing of one catalog's items."""
    catalog_items = session.query(Item).filter_by(catalog_id=catalog_id).all()
    return jsonify(items=[entry.serialize for entry in catalog_items])
# Create a new item
@app.route('/catalog/<int:catalog_id>/item/new/', methods=['GET', 'POST'])
def newItem(catalog_id):
    """Create an item inside a catalog (login required)."""
    # Anonymous visitors are sent to the login page.
    if 'username' not in login_session:
        return redirect('/login')
    if request.method == 'POST':
        now = datetime.now()
        # Local renamed so it no longer shadows this view function.
        created_item = Item(
            title=request.form['title'],
            description=request.form['description'],
            catalog_id=request.form['catalog_id'],
            user_id=login_session['user_id'],
            created_at=now,
            updated_at=now,
        )
        session.add(created_item)
        flash('New Item %s Successfully Created' % created_item.title)
        session.commit()
        return redirect(url_for('showItems', catalog_id=catalog_id))
    else:
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template(
            'item/newItem.html',
            catalogs=catalogs,
            catalog_id=catalog_id)
# show a item
@app.route(
    '/catalog/<int:catalog_id>/item/<int:item_id>/',
    methods=['GET', 'POST'])
def showItem(catalog_id, item_id):
    """Detail page for one item within its catalog."""
    selected = session.query(Item).filter_by(id=item_id).one()
    parent = session.query(Catalog).filter_by(id=catalog_id).one()
    all_catalogs = session.query(Catalog).order_by(asc(Catalog.name))
    return render_template(
        'item/item.html',
        item=selected,
        catalog=parent,
        catalogs=all_catalogs)
# Show item's json
@app.route(
    '/catalog/<int:catalog_id>/item/<int:item_id>/json',
    methods=['GET', 'POST'])
def showItemJson(catalog_id, item_id):
    """JSON representation of one item."""
    selected = session.query(Item).filter_by(id=item_id).one()
    return jsonify(item=selected.serialize)
# Edit a item
@app.route(
    '/catalog/<int:catalog_id>/item/<int:item_id>/edit/',
    methods=['GET', 'POST'])
def editItem(catalog_id, item_id):
    """Edit an item's title and description (login + ownership required)."""
    # check logined or not
    if 'username' not in login_session:
        return redirect('/login')
    editedItem = session.query(Item).filter_by(id=item_id).one()
    if request.method == 'POST':
        # BUG FIX: ownership was checked with ``is not`` (object identity),
        # which only happens to work for small interned ints; use ``!=``.
        if editedItem.user_id != login_session['user_id']:
            flash('Authorization is failed')
            return redirect(url_for('newItem', catalog_id=catalog_id))
        editedItem.title = request.form['title']
        editedItem.description = request.form['description']
        editedItem.updated_at = datetime.now()
        session.add(editedItem)
        session.commit()
        flash('Item Successfully Edited')
        return redirect(
            url_for(
                'showItem',
                catalog_id=editedItem.catalog_id,
                item_id=editedItem.id)
        )
    else:
        catalog = session.query(Catalog).filter_by(id=catalog_id).one()
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template(
            'item/editItem.html',
            item=editedItem,
            catalog=catalog,
            catalogs=catalogs)
# Delete a item
@app.route(
    '/catalog/<int:catalog_id>/item/<int:item_id>/delete/',
    methods=['GET', 'POST'])
def deleteItem(catalog_id, item_id):
    """Delete an item (login + ownership required)."""
    # check logined or not
    if 'username' not in login_session:
        return redirect('/login')
    deletedItem = session.query(Item).filter_by(id=item_id).one()
    if request.method == 'POST':
        # BUG FIX: ownership was checked with ``is not`` (object identity),
        # which only happens to work for small interned ints; use ``!=``.
        if deletedItem.user_id != login_session['user_id']:
            flash('Authorization is failed')
            return redirect(url_for('newItem', catalog_id=catalog_id))
        session.delete(deletedItem)
        session.commit()
        flash('Item Successfully Deleted')
        return redirect(
            url_for('showItems', catalog_id=deletedItem.catalog_id))
    else:
        catalog = session.query(Catalog).filter_by(id=catalog_id).one()
        catalogs = session.query(Catalog).order_by(asc(Catalog.name))
        return render_template(
            'item/deleteItem.html',
            item=deletedItem,
            catalog=catalog,
            catalogs=catalogs)
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key and debug=True are acceptable for
    # local development only; replace both before any real deployment.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
| 36.110599 | 80 | 0.637379 |
2fc8f14ffe3746d00919dcfa911f5a28c8959c2f | 2,511 | py | Python | spider/author_page_thread.py | xingkong906/CitationRecommend | 14009e44b464de855480b081366a72ebae63c76a | [
"Apache-2.0"
] | 3 | 2021-02-27T03:04:27.000Z | 2021-07-11T13:44:02.000Z | spider/author_page_thread.py | xingkong906/CitationRecommend | 14009e44b464de855480b081366a72ebae63c76a | [
"Apache-2.0"
] | 1 | 2021-07-12T00:47:57.000Z | 2021-09-14T13:40:12.000Z | spider/author_page_thread.py | xingkong906/CitationRecommend | 14009e44b464de855480b081366a72ebae63c76a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/python3
"""
-------------------------------------------------
File Name: paper_page_thread.py
Author : Carl
Author_email: xingkong906@outlook.com
date: paper_page_thread.py
Description : 对author_page的多线程
-------------------------------------------------
# If this run wrong, don't ask me , I don't know why;
# If this run right, thank god, and I don't know why.
# Maybe the answer, my friend, is blowing in the wind.
-------------------------------------------------
"""
import threading
from time import time
from spider.author_page import AuthorPage
from util.Log import get_logger
from logging import Logger
class AuthorPageThread(object):
    """Fan author-page links out across a fixed number of worker threads."""

    def __init__(self, things, thread_num=6):
        """Remember the work list and prepare one logger per worker.

        :param things: sequence of author-page links to process
        :param thread_num: number of worker threads to launch
        """
        self.thread_num = thread_num
        self.things = things
        self.loggers = [
            get_logger(__name__ + str(idx), __name__ + str(idx))
            for idx in range(1, thread_num + 1)
        ]

    def get_range(self):
        """Split ``self.things`` into ``thread_num`` contiguous slices.

        Each slice holds ``len(things) // thread_num`` items; the final
        slice absorbs any remainder.
        """
        chunk = int(len(self.things) / self.thread_num)
        slices = []
        start = 0
        for worker in range(self.thread_num):
            if worker == self.thread_num - 1:
                slices.append(self.things[start:])
            else:
                slices.append(self.things[start:start + chunk])
            start += chunk
        return slices

    @staticmethod
    def do_something(things, logger: Logger):
        """Process each author link in *things*, logging progress and errors."""
        for link in things:
            logger.info("开始处理author:" + link)
            try:
                AuthorPage(link).run()
            except Exception as e:
                logger.error("发生错误:%s\t%s" % (link, e))
            logger.info("完成处理author:" + link)

    def start(self):
        """Launch one thread per slice and block until all of them finish."""
        workers = []
        for idx, part in enumerate(self.get_range()):
            worker = threading.Thread(
                target=self.do_something, args=(part, self.loggers[idx]))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
        print("进程执行完成")
print("进程执行完成")
if __name__ == '__main__':
    # for t in threads:
    #     t.setDaemon(True)
    #     t.start()
    #
    # print("all over %s" % ctime())
    # Smoke-test: split a 3-element range across 10 workers and time the run.
    s = time()
    down = AuthorPageThread(range(3), 10)
    print(down.get_range())
    down.start()
    e = time()
    print("The time spent on this program is %f s" % (e - s))
| 27.593407 | 107 | 0.510155 |
648c38dd33234bf03b6961d692af889db8860619 | 2,285 | py | Python | docs/conf.py | brokad/synthpy | 1dbef795a90af1c8902aeb81be7a5572fe2d8f0b | [
"Apache-2.0"
] | 5 | 2020-12-11T08:58:16.000Z | 2021-04-15T15:16:27.000Z | docs/conf.py | brokad/synthpy | 1dbef795a90af1c8902aeb81be7a5572fe2d8f0b | [
"Apache-2.0"
] | null | null | null | docs/conf.py | brokad/synthpy | 1dbef795a90af1c8902aeb81be7a5572fe2d8f0b | [
"Apache-2.0"
] | 2 | 2021-02-08T16:26:14.000Z | 2021-02-10T12:22:09.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import synthpy
# -- Project information -----------------------------------------------------
project = 'Synth'
copyright = '2020 - OpenQuery'
author = 'GetSynth'
# The full version, including alpha/beta/rc tags
release = synthpy.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]
# Generate stub pages automatically for autosummary directives.
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
html_theme_options = {
    "show_prev_next": True,
}
html_title = "Synth - NoSQL Synthetic Data Engine"
html_logo = 'images/getsynth_identicon.png'
html_favicon = 'images/getsynth_favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
64d04505c643441cb53b75ee587aca7db5cf3833 | 9,607 | py | Python | datacube_ows/ogc.py | opendatacube/datacube-wms | 573bd69370e95ce95006f0b77cdcd75f49c3552a | [
"Apache-2.0"
] | 4 | 2017-11-02T04:22:30.000Z | 2018-05-01T14:16:23.000Z | datacube_ows/ogc.py | opendatacube/datacube-wms | 573bd69370e95ce95006f0b77cdcd75f49c3552a | [
"Apache-2.0"
] | 33 | 2018-05-23T01:32:06.000Z | 2018-11-05T01:07:09.000Z | datacube_ows/ogc.py | opendatacube/datacube-wms | 573bd69370e95ce95006f0b77cdcd75f49c3552a | [
"Apache-2.0"
] | 7 | 2017-10-09T00:09:44.000Z | 2018-07-27T00:41:19.000Z | # This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import sys
import traceback
from time import monotonic
from flask import g, render_template, request
from flask_log_request_id import current_request_id
from datacube_ows import __version__
from datacube_ows.cube_pool import cube
from datacube_ows.legend_generator import create_legend_for_style
from datacube_ows.ogc_exceptions import OGCException, WMSException
from datacube_ows.ogc_utils import (capture_headers, get_service_base_url,
lower_get_args, resp_headers)
from datacube_ows.ows_configuration import get_config
from datacube_ows.protocol_versions import supported_versions
from datacube_ows.startup_utils import * # pylint: disable=wildcard-import,unused-wildcard-import
from datacube_ows.wcs1 import WCS_REQUESTS
from datacube_ows.wms import WMS_REQUESTS
# Logging intialisation
_LOG = initialise_logger()
initialise_ignorable_warnings()
# Initialisation of external libraries - controlled by environment variables.
initialise_debugging(_LOG)
initialise_sentry(_LOG)
initialise_aws_credentials(_LOG)
# Prepare parsed configuration object
cfg = parse_config_file()
# Initialise Flask
app = initialise_flask(__name__)
babel = initialise_babel(cfg, app)
# Initialisation of external libraries that depend on Flask
# (controlled by environment variables)
metrics = initialise_prometheus(app, _LOG)
# Protocol/Version lookup table
OWS_SUPPORTED = supported_versions()
# Prometheus Metrics
prometheus_ows_ogc_metric = metrics.histogram(
"ows_ogc",
"Summary by OGC request protocol, version, operation, layer, and HTTP Status",
labels={
'query_request': lambda: request.args.get('request', "NONE").upper(),
'query_service': lambda: request.args.get('service', "NONE").upper(),
'query_version': lambda: request.args.get('version'),
'query_layer': lambda: (request.args.get('query_layers') # WMS GetFeatureInfo
or request.args.get('layers') # WMS
or request.args.get('layer') # WMTS
or request.args.get('coverage') # WCS 1.x
or request.args.get('coverageid') # WCS 2.x
),
'status': lambda r: r.status_code,
}
)
# Flask Routes
@app.route('/')
@prometheus_ows_ogc_metric
def ogc_impl():
    """Root OGC endpoint: route an incoming request to the right service.

    If a "service" query argument is present, dispatch straight to that
    service's handler.  Otherwise guess the service from the "request"
    operation name; if neither is supplied, render the human-readable index
    page listing the supported services.
    """
    #pylint: disable=too-many-branches
    nocase_args = lower_get_args()
    nocase_args = capture_headers(request, nocase_args)
    service = nocase_args.get("service", "").upper()
    if service:
        return ogc_svc_impl(service.lower())
    # create dummy env if not exists
    try:
        # service argument is only required (in fact only defined) by OGC for
        # GetCapabilities requests. As long as we are persisting with a single
        # routing end point for all services, we must derive the service from the request
        # parameter.
        # This is a quick hack to fix #64. Service and operation routing could be
        # handled more elegantly.
        op = nocase_args.get("request", "").upper()
        if op in WMS_REQUESTS:
            return ogc_svc_impl("wms")
        elif op in WCS_REQUESTS:
            return ogc_svc_impl("wcs")
        elif op:
            # Should we return a WMS or WCS exception if there is no service specified?
            # Defaulting to WMS because that's what we already have.
            raise WMSException("Invalid service and/or request", locator="Service and request parameters")
        else:
            # Neither service nor request given: serve the index page.
            cfg = get_config() # pylint: disable=redefined-outer-name
            url = nocase_args.get('Host', nocase_args['url_root'])
            base_url = get_service_base_url(cfg.allowed_urls, url)
            return (render_template(
                "index.html",
                cfg=cfg,
                supported=OWS_SUPPORTED,
                base_url=base_url,
                version=__version__,
            ),
            200,
            resp_headers({"Content-Type": "text/html"}))
    except OGCException as e:
        # Well-formed OGC error: return the protocol-specific error response.
        _LOG.error("Handled Error: %s", repr(e.errors))
        return e.exception_response()
    except Exception as e: # pylint: disable=broad-except
        # Anything else becomes a WMS-flavoured 500 with the traceback attached.
        tb = sys.exc_info()[2]
        ogc_e = WMSException("Unexpected server error: %s" % str(e), http_response=500)
        return ogc_e.exception_response(traceback=traceback.extract_tb(tb))
def ogc_svc_impl(svc):
    """Dispatch a request for a known protocol (``svc``: "wms"/"wmts"/"wcs").

    Validates that the service exists, is activated in config, and matches
    the "service" query argument (if supplied), negotiates a protocol
    version, then hands the request to that version's router.
    """
    svc_support = OWS_SUPPORTED.get(svc)
    nocase_args = lower_get_args()
    nocase_args = capture_headers(request, nocase_args)
    service = nocase_args.get("service", svc).upper()
    # Is service activated in config?
    try:
        if not svc_support:
            raise WMSException(f"Invalid service: {svc}",
                               valid_keys=[
                                   service.service
                                   for service in OWS_SUPPORTED.values()
                                   if service.activated()
                               ],
                               code=WMSException.OPERATION_NOT_SUPPORTED,
                               locator="service parameter")
        if not svc_support.activated():
            raise svc_support.default_exception_class("Invalid service and/or request", locator="Service and request parameters")
        # Does service match path (if supplied)
        if service != svc_support.service_upper:
            raise svc_support.default_exception_class("Invalid service", locator="Service parameter")
        version = nocase_args.get("version")
        version_support = svc_support.negotiated_version(version)
    except OGCException as e:
        return e.exception_response()
    try:
        return version_support.router(nocase_args)
    except OGCException as e:
        return e.exception_response()
    except Exception as e: #pylint: disable=broad-except
        # Unexpected failure inside the router: wrap in the service's own
        # exception type so the error is reported in the right XML dialect.
        tb = sys.exc_info()[2]
        ogc_e = version_support.exception_class("Unexpected server error: %s" % str(e), http_response=500)
        return ogc_e.exception_response(traceback=traceback.extract_tb(tb))
# Explicit per-protocol routes; each pins the service name and delegates to
# the shared dispatcher above.
@app.route('/wms')
@prometheus_ows_ogc_metric
def ogc_wms_impl():
    """Handle requests to the dedicated WMS endpoint."""
    return ogc_svc_impl("wms")
@app.route('/wmts')
@prometheus_ows_ogc_metric
def ogc_wmts_impl():
    """Handle requests to the dedicated WMTS endpoint."""
    return ogc_svc_impl("wmts")
@app.route('/wcs')
@prometheus_ows_ogc_metric
def ogc_wcs_impl():
    """Handle requests to the dedicated WCS endpoint."""
    return ogc_svc_impl("wcs")
@app.route('/ping')
@metrics.summary('ows_heartbeat_pings', "Ping durations", labels={"status": lambda r: r.status})
def ping():
    """Health-check endpoint: 200 when the OWS database is reachable.

    Considers the service "Up" only if at least one row can be read from
    wms.product_ranges; otherwise renders the same template with a 500.
    """
    db_ok = False
    with cube() as dc:
        if dc:
            # pylint: disable=protected-access
            with dc.index._db.give_me_a_connection() as conn:
                results = conn.execute("""
                        SELECT *
                        FROM wms.product_ranges
                        LIMIT 1"""
                )
                for r in results:
                    db_ok = True
    if db_ok:
        return (render_template("ping.html", status="Up"), 200, resp_headers({"Content-Type": "text/html"}))
    else:
        return (render_template("ping.html", status="Down"), 500, resp_headers({"Content-Type": "text/html"}))
@app.route("/legend/<string:layer>/<string:style>/legend.png")
@metrics.histogram('ows_legends', "Legend query durations", labels={
    "layer": lambda: request.path.split("/")[2],
    "style": lambda: request.path.split("/")[3],
    "status": lambda r: r.status,
})
def legend(layer, style, dates=None):
    """Serve the legend PNG for a layer/style combination.

    When *dates* is None the number of dates is taken from the "ndates"
    query argument (default 0); otherwise it is len(dates).  Returns a
    plain-text 404 for unknown layers or styles.
    """
    # pylint: disable=redefined-outer-name
    cfg = get_config()
    product = cfg.product_index.get(layer)
    if not product:
        return ("Unknown Layer", 404, resp_headers({"Content-Type": "text/plain"}))
    if dates is None:
        # NOTE(review): a non-integer "ndates" value would raise ValueError
        # here and surface as a 500 — confirm whether a 400 is preferred.
        args = lower_get_args()
        ndates = int(args.get("ndates", 0))
    else:
        ndates = len(dates)
    try:
        img = create_legend_for_style(product, style, ndates)
    except WMSException as e:
        return (str(e), e.http_response, resp_headers({"Content-Type": "text/plain"}))
    if not img:
        return ("Unknown Style", 404, resp_headers({"Content-Type": "text/plain"}))
    return img
# Flask middleware
@app.after_request
def append_request_id(response):
    """Echo the correlation id back to the client for log tracing."""
    response.headers.add("X-REQUEST-ID", current_request_id())
    return response
@app.before_request
def start_timer():
    """Record the request start time (monotonic clock) for duration logging."""
    # pylint: disable=assigning-non-slot
    g.ogc_start_time = monotonic()
@app.after_request
def log_time_and_request_response(response):
    """Log origin IP, URL, status code and elapsed time for every request."""
    elapsed_ms = int((monotonic() - g.ogc_start_time) * 1000)
    # Candidate sources for the requester's address, most specific first:
    # HTTP_X_REAL_IP is set by a local docker/gunicorn proxy,
    # HTTP_X_FORWARDED_FOR by an ingress/load-balancer, and
    # REMOTE_ADDR is the standard internal address.
    ip = 'Not found'
    for env_key in ('HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR'):
        candidate = request.environ.get(env_key)
        if candidate:
            ip = candidate
            break
    _LOG.info("ip: %s request: %s returned status: %d and took: %d ms",
              ip, request.url, response.status_code, elapsed_ms)
    return response
| 37.381323 | 129 | 0.649943 |
5e3bfea07b0b9494ba94a2e754bc86a459e55cab | 23,015 | py | Python | contrib/devtools/copyright_header.py | wbdeedcoin/wagerr | d746af73ac26cae1ff8336abf9373238b7c352a2 | [
"MIT"
] | 4 | 2018-06-16T20:08:19.000Z | 2018-08-22T15:44:58.000Z | contrib/devtools/copyright_header.py | wbdeedcoin/wagerr | d746af73ac26cae1ff8336abf9373238b7c352a2 | [
"MIT"
] | null | null | null | contrib/devtools/copyright_header.py | wbdeedcoin/wagerr | d746af73ac26cae1ff8336abf9373238b7c352a2 | [
"MIT"
] | 7 | 2018-06-06T18:51:07.000Z | 2018-09-08T15:17:04.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Copyright (c) 2018 The Wagerr Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h',
# univalue:
'src/univalue/test/object.cpp',
'src/univalue/lib/univalue_escapes.h',
# auto generated:
'src/qt/bitcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
    """Return True when *filename* matches INCLUDE and is not EXCLUDE-d."""
    excluded = EXCLUDE_COMPILED.match(filename) is not None
    included = INCLUDE_COMPILED.match(filename) is not None
    return included and not excluded
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
    """Return the list of file paths tracked by git (empty lines removed)."""
    out = subprocess.check_output(GIT_LS_CMD.split(' '))
    return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
    """Return the sorted subset of tracked files this tool applies to."""
    return sorted(filename for filename in call_git_ls()
                  if applies_to_file(filename))
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a regex matching "<copyright> <year(s)> <holder name>"."""
    return re.compile(f"{copyright_style} {year_style} {name}")
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Bitcoin Core developers\n",
"The Bitcoin Core developers \n",
"Bitcoin Core Developers\n",
"the Bitcoin Core developers\n",
"The Bitcoin developers\n",
"Wagerr Dev\n",
"Wagerr Tor\n",
"The Wagerr Core developers\n",
"The Wagerr Core developers \n",
"Wagerr Core Developers\n",
"the Wagerr Core developers\n",
"The Wagerr developers\n",
"The PIVX Core developers\n",
"The PIVX Core developers \n",
"PIVX Core Developers\n",
"the PIVX Core developers\n",
"The PIVX developers\n",
"The Pivx Core developers\n",
"The Pivx Core developers \n",
"Pivx Core Developers\n",
"the Pivx Core developers\n",
"The Pivx developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    """Count every copyright-looking notice in *contents*, any holder."""
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has "Copyright (c) <year[-year]> <holder>"."""
    match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
    return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has "Copyright (c) <year>, <year>, ... <holder>"."""
    match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
    return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True if *contents* has "Copyright <years> <holder>" without "(c)"."""
    match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
    return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full contents of *filename* as a single string.

    Uses a context manager so the file handle is closed deterministically;
    the original opened the file and never closed it, leaking the handle
    until garbage collection.
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.read()
def gather_file_info(filename):
    """Build a dict describing every recognised copyright notice in a file.

    Keys: 'filename', raw 'contents', 'all_copyrights' (count of anything
    that looks like a copyright notice), per-holder boolean maps for each of
    the three recognised styles, and 'classified_copyrights' (how many
    expected holders matched at least one style).
    """
    info = {}
    info['filename'] = filename
    c = read_file(filename)
    info['contents'] = c
    info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
    info['classified_copyrights'] = 0
    info['dominant_style'] = {}
    info['year_list_style'] = {}
    info['without_c_style'] = {}
    for holder_name in EXPECTED_HOLDER_NAMES:
        has_dominant_style = (
            file_has_dominant_style_copyright_for_holder(c, holder_name))
        has_year_list_style = (
            file_has_year_list_style_copyright_for_holder(c, holder_name))
        has_without_c_style = (
            file_has_without_c_style_copyright_for_holder(c, holder_name))
        info['dominant_style'][holder_name] = has_dominant_style
        info['year_list_style'][holder_name] = has_year_list_style
        info['without_c_style'][holder_name] = has_without_c_style
        # Counts matched *holders*, not individual notices.
        if has_dominant_style or has_year_list_style or has_without_c_style:
            info['classified_copyrights'] = info['classified_copyrights'] + 1
    return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line, but only in verbose mode."""
    if verbose:
        for name in filenames:
            print("\t%s" % name)
def print_report(file_infos, verbose):
    """Print the full copyright report to stdout.

    Sections: files examined, a histogram of copyright-notice counts per
    file, per-holder breakdowns for each of the three recognised notice
    styles, and finally files containing unrecognised holder names.
    *verbose* additionally lists the filenames under every category.
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    # Histogram: bucket files by how many copyright notices they contain.
    zero_copyrights = [i['filename'] for i in file_infos if
                       i['all_copyrights'] == 0]
    print("%4d with zero copyrights" % len(zero_copyrights))
    print_filenames(zero_copyrights, verbose)
    one_copyright = [i['filename'] for i in file_infos if
                     i['all_copyrights'] == 1]
    print("%4d with one copyright" % len(one_copyright))
    print_filenames(one_copyright, verbose)
    two_copyrights = [i['filename'] for i in file_infos if
                      i['all_copyrights'] == 2]
    print("%4d with two copyrights" % len(two_copyrights))
    print_filenames(two_copyrights, verbose)
    three_copyrights = [i['filename'] for i in file_infos if
                        i['all_copyrights'] == 3]
    print("%4d with three copyrights" % len(three_copyrights))
    print_filenames(three_copyrights, verbose)
    four_or_more_copyrights = [i['filename'] for i in file_infos if
                               i['all_copyrights'] >= 4]
    print("%4d with four or more copyrights" % len(four_or_more_copyrights))
    print_filenames(four_or_more_copyrights, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
          '"<year>" or "<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        dominant_style = [i['filename'] for i in file_infos if
                          i['dominant_style'][holder_name]]
        if len(dominant_style) > 0:
            print("%4d with '%s'" % (len(dominant_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(dominant_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
          '"<year1>, <year2>, ...":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        year_list_style = [i['filename'] for i in file_infos if
                           i['year_list_style'][holder_name]]
        if len(year_list_style) > 0:
            print("%4d with '%s'" % (len(year_list_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(year_list_style, verbose)
    print('')
    print(SEPARATOR)
    print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
          '"<startYear>-<endYear>":\n')
    for holder_name in EXPECTED_HOLDER_NAMES:
        without_c_style = [i['filename'] for i in file_infos if
                           i['without_c_style'][holder_name]]
        if len(without_c_style) > 0:
            print("%4d with '%s'" % (len(without_c_style),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(without_c_style, verbose)
    print('')
    print(SEPARATOR)
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report over the repository at *base_directory*.

    The working directory is restored in a finally-block; the original left
    the process chdir-ed into the repository if gathering or printing raised.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        filenames = get_filenames_to_examine()
        file_infos = [gather_file_info(f) for f in filenames]
        print_report(file_infos, verbose)
    finally:
        os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """CLI entry point for the "report" subcommand.

    Expects argv = [prog, "report", <base_directory>, ["verbose"]].

    Bug fix: the "unknown argument" error previously printed argv[2] (the
    base directory) instead of argv[3], the argument that was actually
    unrecognised.
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
    """Return the author-date line of every commit touching *filename*.

    NOTE: when git produces no output this returns [''] rather than [],
    because str.split('\\n') never yields an empty list.
    """
    out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
    return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the years (as strings) in which *filename* changed per git log.

    Bug fixes over the original:
    - call_git_log returns [''] (never []) for a file with no history, so
      the empty-history fallback was dead code and the function returned ['']
      for untracked files; empty lines are now filtered out first.
    - The fallback returned an int while the normal path returns strings,
      breaking the string comparisons done by the header-update code; the
      current year is now returned as a string for consistency.
    """
    git_log_lines = [line for line in call_git_log(filename) if line]
    if not git_log_lines:
        # No history (e.g. a new, uncommitted file): assume the current year.
        return [str(datetime.date.today().year)]
    # timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
    return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
    """Return the latest year (per git log) in which *filename* changed."""
    return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the contents of *filename* as a list of lines (newlines kept).

    Uses a context manager so the handle is closed even if reading raises;
    the original relied on an explicit close that was skipped on error.
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Overwrite *filename* with the concatenation of *file_lines*.

    Uses a context manager so the handle is flushed and closed even if the
    write raises; the original's explicit close was skipped on error.
    """
    with open(os.path.abspath(filename), 'w') as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable core-copyright line.

    Returns (None, None) when no line matches.
    """
    for position, candidate in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(candidate) is not None:
            return position, candidate
    return None, None
def parse_year_range(year_range):
    """Split "YYYY" or "YYYY-YYYY" into a (start, end) pair of year strings.

    A single year yields that year for both endpoints.
    """
    start_year, sep, end_year = year_range.partition('-')
    return start_year, (end_year if sep else start_year)
def year_range_to_str(start_year, end_year):
    """Render a year range, collapsing equal endpoints to a single year."""
    if start_year == end_year:
        return start_year
    return f"{start_year}-{end_year}"
def create_updated_copyright_line(line, last_git_change_year):
    """Return *line* with its copyright end year bumped to the last change year.

    Characters before the copyright marker (the comment leader, which varies
    from file to file) and the holder text after the year range are kept
    verbatim.  The line is returned unchanged when it is already up to date.
    """
    marker = 'Copyright (c) '
    pieces = line.split(marker)
    leader = pieces[0]
    tail = pieces[1]
    year_range, *holder_words = tail.split(' ')
    start_year, end_year = parse_year_range(year_range)
    if end_year == last_git_change_year:
        return line
    new_range = year_range_to_str(start_year, last_git_change_year)
    return leader + marker + new_range + ' ' + ' '.join(holder_words)
def update_updatable_copyright(filename):
    """Update the core-copyright end year of *filename* in place.

    Prints a one-line status for every file: no updatable notice found,
    already up to date, or updated to the last git change year.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_git_change_year = get_most_recent_git_change_year(filename)
    new_line = create_updated_copyright_line(line, last_git_change_year)
    if line == new_line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    file_lines[index] = new_line
    write_file_lines(filename, file_lines)
    print_file_action_message(filename,
                              "Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
    """Update copyright end-years for every applicable file in the repo.

    Consistent with exec_report: the working directory is restored in a
    finally-block even if one of the per-file updates raises.
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        for filename in get_filenames_to_examine():
            update_updatable_copyright(filename)
    finally:
        os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
"""
def print_file_action_message(filename, action):
    """Print *filename* left-justified in a 52-char column, then *action*."""
    print("{:<52} {}".format(filename, action))
def update_cmd(argv):
    """CLI entry point for the "update" subcommand: argv = [prog, "update", dir]."""
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    target = argv[2]
    if not os.path.exists(target):
        sys.exit("*** bad base_directory: %s" % target)
    exec_update_header_year(target)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Render a header template into a list of newline-terminated lines.

    *header* is a triple-quoted template whose leading/trailing blank lines
    are stripped and whose first body line contains a "%s" placeholder for
    the year range.
    """
    lines = header.split('\n')[1:-1]
    lines[0] = lines[0] % year_range_to_str(start_year, end_year)
    return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    """Return the C++-style header lines, reversed for repeated index-0 insertion."""
    return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    """Return the Python-style header lines, reversed for repeated fixed-index insertion."""
    return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return (earliest, latest) year in which *filename* changed per git."""
    change_years = get_git_change_years(filename)
    return min(change_years), max(change_years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """Return True if an updatable core copyright line is already present."""
    index, _ = get_updatable_copyright_line(file_lines)
    # "is not None" rather than "!= None": identity comparison is the
    # correct (PEP 8 mandated) way to test for the None sentinel.
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """Return True when the first line is a "#!" interpreter line.

    A bare "#!" with nothing after it (length <= 2) does not count.
    """
    if not file_lines:
        return False
    first = file_lines[0]
    return len(first) > 2 and first.startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python-style core copyright header and rewrite the file.

    The header goes below a "#!" line when one is present.  The header lines
    are supplied pre-reversed, so repeated inserts at a fixed index produce
    the correct final order.
    """
    insert_at = 1 if file_has_hashbang(file_lines) else 0
    for header_line in get_python_header_lines_to_insert(start_year, end_year):
        file_lines.insert(insert_at, header_line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++-style core copyright header at the top and rewrite the file."""
    for header_line in get_cpp_header_lines_to_insert(start_year, end_year):
        file_lines.insert(0, header_line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a copyright header into *filename* using the given style.

    *style* is 'python' or 'cpp'.  Exits the process if the file already has
    an updatable core copyright.  The year range is derived from git history.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    if style == 'python':
        insert_python_header(filename, file_lines, start_year, end_year)
    else:
        insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoin repository.
"""
def insert_cmd(argv):
    """CLI entry point for the "insert" subcommand: argv = [prog, "insert", file]."""
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ('.h', '.cpp', '.cc', '.c', '.py'):
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    # Dispatch: the first CLI argument selects the subcommand; each
    # subcommand validates and consumes the remaining arguments itself.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    if subcommand == 'report':
        report_cmd(sys.argv)
    elif subcommand == 'update':
        update_cmd(sys.argv)
    elif subcommand == 'insert':
        insert_cmd(sys.argv)
| 36.473851 | 92 | 0.601651 |
caf60757ee270b3e7bd23cf83f1d04397d727a47 | 75 | py | Python | backend/wsgi.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | backend/wsgi.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | backend/wsgi.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | # backend/wsgi.py
from surveyapi.app import create_app
app = create_app()
| 15 | 36 | 0.773333 |
7357bd85d97ac9cdde0970631f110a942d40f711 | 182 | py | Python | docker/test_settings.py | uw-it-aca/spotseeker_server | 1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c | [
"Apache-2.0"
] | 5 | 2015-03-12T00:36:33.000Z | 2022-02-24T16:41:25.000Z | docker/test_settings.py | uw-it-aca/spotseeker_server | 1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c | [
"Apache-2.0"
] | 133 | 2016-02-03T23:54:45.000Z | 2022-03-30T21:33:58.000Z | docker/test_settings.py | uw-it-aca/spotseeker_server | 1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c | [
"Apache-2.0"
] | 6 | 2015-01-07T23:21:15.000Z | 2017-12-07T08:26:33.000Z | """ These settings used on automatic unit test runs to reset a customized
setting that cause some unit tests to fail."""
from .settings import *
SPOTSEEKER_SEARCH_FILTERS = []
| 26 | 73 | 0.747253 |
ed6ba534e234239b0d2fad6557e5872e6a9ff2d2 | 1,608 | py | Python | tests/cli/test_cmds_list.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | tests/cli/test_cmds_list.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | tests/cli/test_cmds_list.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from paasta_tools.cli.cmds.list import paasta_list
@mock.patch("paasta_tools.cli.cmds.list.list_services", autospec=True)
def test_list_paasta_list(mock_list_services, capfd):
    """paasta_list prints each service returned by list_services."""
    mock_list_services.return_value = ["service_1", "service_2"]
    args = mock.MagicMock(print_instances=False)
    paasta_list(args)
    captured, _ = capfd.readouterr()
    assert captured == "service_1\nservice_2\n"
@mock.patch("paasta_tools.cli.cmds.list.list_service_instances", autospec=True)
def test_list_paasta_list_instances(mock_list_service_instances, capfd):
    """paasta_list prints each service.instance pair."""
    mock_list_service_instances.return_value = ["service_1.main", "service_2.canary"]
    args = mock.MagicMock(print_instances=True)
    paasta_list(args)
    captured, _ = capfd.readouterr()
    assert captured == "service_1.main\nservice_2.canary\n"
| 34.212766 | 79 | 0.752488 |
e538f174f7dc0889b9703e29da74ebbac15667d1 | 227 | py | Python | weltgeist/__init__.py | samgeen/Weltgeist | c7d52e879bb3473cecbb06651b5e76dac3020da6 | [
"MIT"
] | null | null | null | weltgeist/__init__.py | samgeen/Weltgeist | c7d52e879bb3473cecbb06651b5e76dac3020da6 | [
"MIT"
] | null | null | null | weltgeist/__init__.py | samgeen/Weltgeist | c7d52e879bb3473cecbb06651b5e76dac3020da6 | [
"MIT"
] | null | null | null | # Decides what to import from this folder
# NOTE: I removed "graphics" from this list because pyglet seems to segfault pdb on my machine
from . import analyticsolutions, cooling, gravity, integrator, radiation, sources, units | 75.666667 | 95 | 0.788546 |
ab2c0507c90986273c2b98a56f2e98b04da3ff60 | 1,413 | py | Python | tests/test_qsvm_regressor.py | Qulacs-Osaka/scikit-qulacs | f13d6a36ef0dae1e7d4fdda6f01bcb2346f3cb3e | [
"MIT"
] | 4 | 2021-07-02T03:53:44.000Z | 2022-03-18T02:05:44.000Z | tests/test_qsvm_regressor.py | Qulacs-Osaka/scikit-qulacs | f13d6a36ef0dae1e7d4fdda6f01bcb2346f3cb3e | [
"MIT"
] | 109 | 2021-07-01T07:10:24.000Z | 2022-03-30T08:26:57.000Z | tests/test_qsvm_regressor.py | Qulacs-Osaka/scikit-qulacs | f13d6a36ef0dae1e7d4fdda6f01bcb2346f3cb3e | [
"MIT"
] | 2 | 2021-11-10T07:01:11.000Z | 2021-12-25T21:29:30.000Z | import random
import numpy as np
from numpy.random import RandomState
from sklearn.metrics import mean_squared_error
from skqulacs.circuit import create_ibm_embedding_circuit
from skqulacs.qsvm import QSVR
def func_to_learn(x):
    """Target function: sine of twice the product of the first two features."""
    return np.sin(2 * x[0] * x[1])
def generate_noisy_sine(x_min: float, x_max: float, num_x: int):
    """Generate a reproducible noisy dataset for the regression test.

    Each sample is ``[xa, xb, 0, 0]`` with ``xa``/``xb`` drawn uniformly
    from ``[x_min, x_max)``; the target is ``func_to_learn(sample)`` plus
    Gaussian noise of amplitude 0.05.  Two informative features alone do
    not provide enough quantum complexity, so two dummy zero features pad
    the input to 4 qubits.

    Args:
        x_min: lower bound of the uniform feature range.
        x_max: upper bound of the uniform feature range.
        num_x: number of samples to generate.

    Returns:
        Tuple ``(x_train, y_train)``: a list of ``num_x`` 4-element feature
        lists and a numpy array of ``num_x`` noisy targets.
    """
    seed = 0
    random_state = RandomState(seed)
    x_train = []
    y_train = []
    for _ in range(num_x):
        # Draw from the seeded RandomState.  The previous code sampled from
        # the unseeded global `random` module, so the dataset was not
        # reproducible despite the fixed seed.
        xa = x_min + (x_max - x_min) * random_state.random_sample()
        xb = x_min + (x_max - x_min) * random_state.random_sample()
        xc = 0
        xd = 0
        x_train.append([xa, xb, xc, xd])
        y_train.append(func_to_learn([xa, xb, xc, xd]))
    mag_noise = 0.05
    # Add the noise element-wise.  The previous `y_train += ndarray` hit
    # list.__iadd__ (i.e. extend), appending the noise values instead of
    # adding them and doubling len(y_train).
    y_train = np.asarray(y_train) + mag_noise * random_state.randn(num_x)
    return x_train, y_train
def test_noisy_sine():
    """End-to-end check: QSVR fit on noisy sin(2*x0*x1) reaches a low MSE."""
    x_min, x_max = -0.5, 0.5
    n_train, n_test = 300, 100
    x_train, y_train = generate_noisy_sine(x_min, x_max, n_train)
    x_test, y_test = generate_noisy_sine(x_min, x_max, n_test)

    n_qubit = 6
    embedding = create_ibm_embedding_circuit(n_qubit)
    model = QSVR(embedding)
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)

    assert mean_squared_error(predictions, y_test) < 0.008
# Try the two-feature SVM regressor.
# Fit sin(x1 * x2 * 2).
def main():
    # Entry point when the test module is executed directly (not via pytest).
    test_noisy_sine()
if __name__ == "__main__":
    main()
| 23.949153 | 64 | 0.664544 |
c025fde41143cc9465b5f07e6b5791aa8be04a9f | 19,762 | py | Python | src/plot/base_plot.py | tsikes/Frhodo | de2f6d6c72afa2825a718dde98794ee93c3c61c8 | [
"BSD-3-Clause"
] | 3 | 2021-03-27T16:31:18.000Z | 2021-03-31T03:41:42.000Z | src/plot/base_plot.py | tsikes/Frhodo | de2f6d6c72afa2825a718dde98794ee93c3c61c8 | [
"BSD-3-Clause"
] | 1 | 2021-01-27T06:07:56.000Z | 2021-01-27T06:07:56.000Z | src/plot/base_plot.py | tsikes/Frhodo | de2f6d6c72afa2825a718dde98794ee93c3c61c8 | [
"BSD-3-Clause"
] | 2 | 2020-06-19T16:11:53.000Z | 2021-01-28T17:40:09.000Z | # This file is part of Frhodo. Copyright © 2020, UChicago Argonne, LLC
# and licensed under BSD-3-Clause. See License.txt in the top-level
# directory for license and copyright information.
from qtpy.QtWidgets import QMenu, QAction
from qtpy import QtCore, QtGui
from copy import deepcopy
from matplotlib import figure as mplfigure
from matplotlib.backend_bases import key_press_handler
# This should make plotting backend qt binding indifferent
if QtCore.qVersion().split('.')[0] == '5':
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
elif QtCore.qVersion().split('.')[0] == '4':
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
import matplotlib as mpl
#import matplotlib.style as mplstyle
#mplstyle.use('fast')
#mpl.use("module://mplcairo.qt") # This implements mplcairo, faster/more accurate. Issues with other OSes?
import numpy as np
from convert_units import OoM
from plot.custom_mplscale import *
from plot.custom_mpl_ticker_formatter import *
from timeit import default_timer as timer
class Base_Plot(QtCore.QObject):
    """Qt-hosted matplotlib plot base class.

    Owns the Figure, canvas, navigation toolbar, custom scale registration,
    autoscale state and blitting-based redraw machinery.

    NOTE(review): subclasses are expected to create ``self.ax`` (a list of
    axes, each carrying an ``item`` dict of artists) before ``create_canvas``
    finishes using it — confirm against subclass implementations.
    """
    def __init__(self, parent, widget, mpl_layout):
        super().__init__(parent)
        self.parent = parent
        self.widget = widget
        self.mpl_layout = mpl_layout
        self.fig = mplfigure.Figure()
        # Make the custom 'abslog' and 'bisymlog' scales available to all axes
        mpl.scale.register_scale(AbsoluteLogScale)
        mpl.scale.register_scale(BiSymmetricLogScale)
        # Set plot variables
        self.x_zoom_constraint = False
        self.y_zoom_constraint = False
        self.create_canvas()
        self.NavigationToolbar(self.canvas, self.widget, coordinates=True)
        # AutoScale flags: [x, y]
        self.autoScale = [True, True]
        self.i = 0
        # Connect Signals
        self._draw_event_signal = self.canvas.mpl_connect('draw_event', self._draw_event)
        self.canvas.mpl_connect('button_press_event', lambda event: self.click(event))
        self.canvas.mpl_connect('key_press_event', lambda event: self.key_press(event))
        # self.canvas.mpl_connect('key_release_event', lambda event: self.key_release(event))
        self._draw_event()
    def create_canvas(self):
        """Create the FigureCanvas, attach it to the layout, initialize the
        per-axis scale bookkeeping and capture the blitting background."""
        self.canvas = FigureCanvas(self.fig)
        self.mpl_layout.addWidget(self.canvas)
        self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.canvas.draw()
        # Set scales
        scales = {'linear': True, 'log': 0, 'abslog': 0, 'bisymlog': 0}
        for ax in self.ax:
            # NOTE(review): every axis' 'x' entry references the SAME `scales`
            # dict (only 'y' is deep-copied), so x-scale state is shared
            # across axes — confirm whether this sharing is intentional.
            ax.scale = {'x': scales, 'y': deepcopy(scales)}
            ax.ticklabel_format(scilimits=(-4, 4), useMathText=True)
        # Get background (uses the last axis from the loop above)
        self.background_data = self.canvas.copy_from_bbox(ax.bbox)
    def _find_calling_axes(self, event):
        """Return the axes that `event` belongs to, or None if no match."""
        for axes in self.ax: # identify calling axis
            if axes == event or (hasattr(event, 'inaxes') and event.inaxes == axes):
                return axes
    def set_xlim(self, axes, x):
        """Autoscale the x-limits of `axes` to the data `x`, honoring the
        current x-scale type; no-op when x-autoscale is disabled."""
        if not self.autoScale[0]: return # obey autoscale right click option
        if axes.get_xscale() in ['linear']:
            # range = np.abs(np.max(x) - np.min(x))
            # min = np.min(x) - range*0.05
            # if min < 0:
            # min = 0
            # xlim = [min, np.max(x) + range*0.05]
            xlim = [np.min(x), np.max(x)]
        if 'log' in axes.get_xscale():
            abs_x = np.abs(x)
            abs_x = abs_x[np.nonzero(abs_x)] # exclude 0's
            if axes.get_xscale() in ['log', 'abslog', 'bisymlog']:
                # pad one decade beyond the data's order-of-magnitude range
                min_data = np.ceil(np.log10(np.min(abs_x)))
                max_data = np.floor(np.log10(np.max(abs_x)))
                xlim = [10**(min_data-1), 10**(max_data+1)]
        if np.isnan(xlim).any() or np.isinf(xlim).any():
            pass
        elif xlim != axes.get_xlim(): # if xlim changes
            axes.set_xlim(xlim)
    def set_ylim(self, axes, y):
        """Autoscale the y-limits of `axes` to the data `y`, honoring the
        current y-scale type; no-op when y-autoscale is disabled."""
        if not self.autoScale[1]: return # obey autoscale right click option
        min_data = np.array(y)[np.isfinite(y)].min()
        max_data = np.array(y)[np.isfinite(y)].max()
        if min_data == max_data:
            # degenerate (flat) data: open up a small symmetric window
            min_data -= 10**-1
            max_data += 10**-1
        if axes.get_yscale() == 'linear':
            range = np.abs(max_data - min_data)
            ylim = [min_data - range*0.1, max_data + range*0.1]
        elif axes.get_yscale() in ['log', 'abslog']:
            abs_y = np.abs(y)
            abs_y = abs_y[np.nonzero(abs_y)] # exclude 0's
            abs_y = abs_y[np.isfinite(abs_y)] # exclude nan, inf
            if abs_y.size == 0: # if no data, assign
                ylim = [10**-7, 10**-1]
            else:
                min_data = np.ceil(np.log10(np.min(abs_y)))
                max_data = np.floor(np.log10(np.max(abs_y)))
                ylim = [10**(min_data-1), 10**(max_data+1)]
        elif axes.get_yscale() == 'bisymlog':
            min_sign = np.sign(min_data)
            max_sign = np.sign(max_data)
            if min_sign > 0:
                min_data = np.ceil(np.log10(np.abs(min_data)))
            elif min_data == 0 or max_data == 0:
                pass
            else:
                min_data = np.floor(np.log10(np.abs(min_data)))
            if max_sign > 0:
                max_data = np.floor(np.log10(np.abs(max_data)))
            elif min_data == 0 or max_data == 0:
                pass
            else:
                max_data = np.ceil(np.log10(np.abs(max_data)))
            # TODO: ylim could be incorrect for neg/neg, checked for pos/pos, pos/neg
            ylim = [min_sign*10**(min_data-min_sign), max_sign*10**(max_data+max_sign)]
        if ylim != axes.get_ylim(): # if ylim changes, update
            axes.set_ylim(ylim)
    def update_xylim(self, axes, xlim=[], ylim=[], force_redraw=True):
        """Apply explicit or autoscaled x/y limits to `axes` and, for a
        bisymlog scale, refresh its scaling constant; optionally redraw."""
        data = self._get_data(axes)
        # on creation, there is no data, don't update
        if np.shape(data['x'])[0] < 2 or np.shape(data['y'])[0] < 2:
            return
        for (axis, lim) in zip(['x', 'y'], [xlim, ylim]):
            # Set Limits (eval dispatches to set_xlim/set_ylim or axes.set_*lim)
            if len(lim) == 0:
                eval('self.set_' + axis + 'lim(axes, data["' + axis + '"])')
            else:
                eval('axes.set_' + axis + 'lim(lim)')
            # If bisymlog, also update scaling, C
            if eval('axes.get_' + axis + 'scale()') == 'bisymlog':
                self._set_scale(axis, 'bisymlog', axes)
        ''' # TODO: Do this some day, probably need to create
            annotation during canvas creation
        # Move exponent
        exp_loc = {'x': (.89, .01), 'y': (.01, .96)}
        eval(f'axes.get_{axis}axis().get_offset_text().set_visible(False)')
        ax_max = eval(f'max(axes.get_{axis}ticks())')
        oom = np.floor(np.log10(ax_max)).astype(int)
        axes.annotate(fr'$\times10^{oom}$', xy=exp_loc[axis],
            xycoords='axes fraction')
        '''
        if force_redraw:
            self._draw_event() # force a draw
    def _get_data(self, axes): # NOT Generic
        """Collect the x/y data (or data extents) of the known artist kinds
        stored in `axes.item`; keys checked: exp_data, sim_data,
        weight_unc_fcn, density, qq_data."""
        # get experimental data for axes
        data = {'x': [], 'y': []}
        if 'exp_data' in axes.item:
            data_plot = axes.item['exp_data'].get_offsets().T
            if np.shape(data_plot)[1] > 1:
                data['x'] = data_plot[0,:]
                data['y'] = data_plot[1,:]
            # append sim_x if it exists
            if 'sim_data' in axes.item and hasattr(axes.item['sim_data'], 'raw_data'):
                if axes.item['sim_data'].raw_data.size > 0:
                    data['x'] = np.append(data['x'], axes.item['sim_data'].raw_data[:,0])
        elif 'weight_unc_fcn' in axes.item:
            data['x'] = axes.item['weight_unc_fcn'].get_xdata()
            data['y'] = axes.item['weight_unc_fcn'].get_ydata()
        elif any(key in axes.item for key in ['density', 'qq_data', 'sim_data']):
            name = np.intersect1d(['density', 'qq_data'], list(axes.item.keys()))[0]
            for n, coord in enumerate(['x', 'y']):
                xyrange = np.array([])
                for item in axes.item[name]:
                    if name == 'qq_data':
                        coordData = item.get_offsets()
                        if coordData.size == 0:
                            continue
                        else:
                            coordData = coordData[:,n]
                    elif name == 'density':
                        coordData = eval('item.get_' + coord + 'data()')
                    coordData = np.array(coordData)[np.isfinite(coordData)]
                    if coordData.size == 0:
                        continue
                    # accumulate [min, max] per artist
                    xyrange = np.append(xyrange, [coordData.min(), coordData.max()])
                xyrange = np.reshape(xyrange, (-1,2))
                data[coord] = [np.min(xyrange[:,0]), np.max(xyrange[:,1])]
        return data
    def _set_scale(self, coord, type, event, update_xylim=False):
        """Switch the `coord` ('x'/'y') axis of the event's axes to scale
        `type`, updating the shared-axes scale bookkeeping; for bisymlog a
        data-dependent scaling constant C is computed."""
        def RoundToSigFigs(x, p):
            # round x to p significant figures (vectorized, NaN/0 safe)
            x = np.asarray(x)
            x_positive = np.where(np.isfinite(x) & (x != 0), np.abs(x), 10**(p-1))
            mags = 10 ** (p - 1 - np.floor(np.log10(x_positive)))
            return np.round(x * mags) / mags
        # find correct axes
        axes = self._find_calling_axes(event)
        # for axes in self.ax:
        # if axes == event or (hasattr(event, 'inaxes') and event.inaxes == axes):
        # break
        # Set scale menu boolean
        if coord == 'x':
            shared_axes = axes.get_shared_x_axes().get_siblings(axes)
        else:
            shared_axes = axes.get_shared_y_axes().get_siblings(axes)
        for shared in shared_axes:
            shared.scale[coord] = dict.fromkeys(shared.scale[coord], False) # sets all types: False
            shared.scale[coord][type] = True # set selected type: True
        # Apply selected scale
        if type == 'linear':
            str = 'axes.set_{:s}scale("{:s}")'.format(coord, 'linear')
        elif type == 'log':
            str = 'axes.set_{0:s}scale("{1:s}", nonpos{0:s}="mask")'.format(coord, 'log')
        elif type == 'abslog':
            str = 'axes.set_{:s}scale("{:s}")'.format(coord, 'abslog')
        elif type == 'bisymlog':
            # default string to evaluate
            str = 'axes.set_{0:s}scale("{1:s}")'.format(coord, 'bisymlog')
            data = self._get_data(axes)[coord]
            if len(data) != 0:
                finite_data = np.array(data)[np.isfinite(data)] # ignore nan and inf
                min_data = finite_data.min()
                max_data = finite_data.max()
                if min_data != max_data:
                    # if zero is within total range, find largest pos or neg range
                    if np.sign(max_data) != np.sign(min_data):
                        processed_data = [finite_data[finite_data>=0], finite_data[finite_data<=0]]
                        C = 0
                        for data in processed_data:
                            range = np.abs(data.max() - data.min())
                            if range > C:
                                C = range
                                max_data = data.max()
                    else:
                        C = np.abs(max_data-min_data)
                    C *= 10**(OoM(max_data) + 2) # scaling factor TODO: + 1 looks loglike, + 2 linear like
                    C = RoundToSigFigs(C, 1) # round to 1 significant figure
                    str = 'axes.set_{0:s}scale("{1:s}", C={2:e})'.format(coord, 'bisymlog', C)
        eval(str)
        if type == 'linear' and coord == 'x':
            formatter = MathTextSciSIFormatter(useOffset=False, useMathText=True)
            axes.xaxis.set_major_formatter(formatter)
        elif type == 'linear' and coord == 'y':
            formatter = mpl.ticker.ScalarFormatter(useOffset=False, useMathText=True)
            formatter.set_powerlimits([-3, 4])
            axes.yaxis.set_major_formatter(formatter)
        if update_xylim:
            self.update_xylim(axes)
    def _animate_items(self, bool=True):
        """Set/unset the 'animated' flag on every axis, legend and artist so
        they are excluded from normal draws and drawn via blitting instead."""
        for axis in self.ax:
            axis.xaxis.set_animated(bool)
            axis.yaxis.set_animated(bool)
            if axis.get_legend() is not None:
                axis.get_legend().set_animated(bool)
            for item in axis.item.values():
                if isinstance(item, list):
                    for subItem in item:
                        if isinstance(subItem, dict):
                            subItem['line'].set_animated(bool)
                        else:
                            subItem.set_animated(bool)
                else:
                    item.set_animated(bool)
    def _draw_items_artist(self):
        """Blit: restore the cached background, then redraw only the
        animated artists of every axis."""
        self.canvas.restore_region(self.background_data)
        for axis in self.ax:
            axis.draw_artist(axis.xaxis)
            axis.draw_artist(axis.yaxis)
            for item in axis.item.values():
                if isinstance(item, list):
                    for subItem in item:
                        if isinstance(subItem, dict):
                            axis.draw_artist(subItem['line'])
                        else:
                            axis.draw_artist(subItem)
                else:
                    axis.draw_artist(item)
            if axis.get_legend() is not None:
                axis.draw_artist(axis.get_legend())
        self.canvas.update()
    def set_background(self):
        """Re-capture the blitting background after a full canvas draw.

        The draw_event signal is temporarily disconnected to avoid
        recursing back into _draw_event during canvas.draw()."""
        self.canvas.mpl_disconnect(self._draw_event_signal)
        self.canvas.draw() # for when shock changes. Without signal disconnect, infinite loop
        self._draw_event_signal = self.canvas.mpl_connect('draw_event', self._draw_event)
        self.background_data = self.canvas.copy_from_bbox(self.fig.bbox)
    def _draw_event(self, event=None): # After redraw (new/resizing window), obtain new background
        """draw_event handler: refresh the background and re-blit artists."""
        self._animate_items(True)
        self.set_background()
        self._draw_items_artist()
        #self.canvas.draw_idle()
    def clear_plot(self, ignore=[], draw=True):
        """Blank out scatter, line and text artists on every axis (minus the
        kinds named in `ignore`) and remove legends.

        NOTE(review): `ignore=[]` is a mutable default; harmless here since
        it is never mutated, but worth cleaning up."""
        for axis in self.ax:
            if axis.get_legend() is not None:
                axis.get_legend().remove()
            for item in axis.item.values():
                if hasattr(item, 'set_offsets'): # clears all data points
                    if 'scatter' not in ignore:
                        item.set_offsets(([np.nan, np.nan]))
                elif hasattr(item, 'set_xdata') and hasattr(item, 'set_ydata'):
                    if 'line' not in ignore:
                        item.set_xdata([np.nan, np.nan]) # clears all lines
                        item.set_ydata([np.nan, np.nan])
                elif hasattr(item, 'set_text'): # clears all text boxes
                    if 'text' not in ignore:
                        item.set_text('')
        if draw:
            self._draw_event()
    def click(self, event):
        """Mouse handler: right click opens the scale/autoscale popup menu
        (unless a toolbar mode is active); double click resets the view."""
        if event.button == 3: # if right click
            if not self.toolbar.mode:
                self._popup_menu(event)
            # if self.toolbar._active is 'ZOOM': # if zoom is on, turn off
            # self.toolbar.press_zoom(event) # cancels current zooom
            # self.toolbar.zoom() # turns zoom off
        elif event.dblclick: # if double right click, go to default view
            self.toolbar.home()
    def key_press(self, event):
        """Keyboard handler: Esc cancels zoom/pan, x/y toggle the (currently
        unused) zoom-constraint flags; everything else falls through to
        matplotlib's default key handling."""
        if event.key == 'escape':
            if self.toolbar.mode == 'zoom rect': # if zoom is on, turn off
                self.toolbar.zoom() # turns zoom off
            elif self.toolbar.mode == 'pan/zoom':
                self.toolbar.pan()
        # elif event.key == 'shift':
        elif event.key == 'x': # Does nothing, would like to make sticky constraint zoom/pan
            self.x_zoom_constraint = not self.x_zoom_constraint
        elif event.key == 'y': # Does nothing, would like to make sticky constraint zoom/pan
            self.y_zoom_constraint = not self.y_zoom_constraint
        elif event.key in ['s', 'l', 'L', 'k']: pass
        else:
            key_press_handler(event, self.canvas, self.toolbar)
    # def key_release(self, event):
    # print(event.key, 'released')
    def NavigationToolbar(self, *args, **kwargs):
        """Create the custom toolbar and add it to the layout.

        NOTE(review): the positional args passed from __init__ are ignored;
        the toolbar is built from self.canvas/self.widget directly."""
        ## Add toolbar ##
        self.toolbar = CustomNavigationToolbar(self.canvas, self.widget, coordinates=True)
        self.mpl_layout.addWidget(self.toolbar)
    def _popup_menu(self, event):
        """Build and show the right-click context menu with per-axis x/y
        scale choices and the AutoScale X/Y/All toggles."""
        axes = self._find_calling_axes(event) # find axes calling right click
        if axes is None: return
        pos = self.parent.mapFromGlobal(QtGui.QCursor().pos())
        popup_menu = QMenu(self.parent)
        xScaleMenu = popup_menu.addMenu('x-scale')
        yScaleMenu = popup_menu.addMenu('y-scale')
        for coord in ['x', 'y']:
            menu = eval(coord + 'ScaleMenu')
            for type in axes.scale[coord].keys():
                action = QAction(type, menu, checkable=True)
                if axes.scale[coord][type]: # if it's checked
                    action.setEnabled(False)
                else:
                    action.setEnabled(True)
                menu.addAction(action)
                action.setChecked(axes.scale[coord][type])
                # bind coord/type as defaults so each action keeps its own values
                fcn = lambda event, coord=coord, type=type: self._set_scale(coord, type, axes, True)
                action.triggered.connect(fcn)
        # Create menu for AutoScale options X Y All
        popup_menu.addSeparator()
        autoscale_options = ['AutoScale X', 'AutoScale Y', 'AutoScale All']
        for n, text in enumerate(autoscale_options):
            action = QAction(text, menu, checkable=True)
            if n < len(self.autoScale):
                action.setChecked(self.autoScale[n])
            else:
                action.setChecked(all(self.autoScale))
            popup_menu.addAction(action)
            action.toggled.connect(lambda event, n=n: self._setAutoScale(n, event, axes))
        popup_menu.exec_(self.parent.mapToGlobal(pos))
    def _setAutoScale(self, choice, event, axes):
        """Toggle autoscale for x (0), y (1) or both (index == len), then
        refresh limits if something was enabled."""
        if choice == len(self.autoScale):
            for n in range(len(self.autoScale)):
                self.autoScale[n] = event
        else:
            self.autoScale[choice] = event
        if event: # if something toggled true, update limits
            self.update_xylim(axes)
class CustomNavigationToolbar(NavigationToolbar):
    """Navigation toolbar with the 'Subplots' button removed.

    NOTE(review): the assignment below targets ``NavigationToolbar.toolitems``
    on the *base class*, so it changes the toolbar items for every
    NavigationToolbar instance in the process, not only this subclass.
    """
    # hide buttons
    NavigationToolbar.toolitems = (('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
        # ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        (None, None, None, None),
        ('Save', 'Save the figure', 'filesave', 'save_figure'))
188bb539c01da5b3fa8b6ba2eea0f8a30102db5c | 68,330 | py | Python | python/paddle/fluid/layers/tensor.py | LiYuRio/Paddle | dbd6e2df9d074973b7ee177e2d6b96ed2318008e | [
"Apache-2.0"
] | 1 | 2021-12-27T02:40:41.000Z | 2021-12-27T02:40:41.000Z | python/paddle/fluid/layers/tensor.py | LiYuRio/Paddle | dbd6e2df9d074973b7ee177e2d6b96ed2318008e | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/layers/tensor.py | LiYuRio/Paddle | dbd6e2df9d074973b7ee177e2d6b96ed2318008e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import numpy
import warnings
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import _current_expected_place, convert_np_dtype_to_dtype_, _non_static_mode, _varbase_creator, device_guard, _in_legacy_dygraph, in_dygraph_mode
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from paddle.utils import deprecated
from .utils import check_shape
from paddle import _C_ops
__all__ = [
'create_tensor',
'create_parameter',
'create_global_var',
'cast',
'tensor_array_to_tensor',
'concat',
'sums',
'assign',
'fill_constant_batch_size_like',
'fill_constant',
'argmin',
'argmax',
'argsort',
'ones',
'zeros',
'reverse',
'has_inf',
'has_nan',
'isfinite',
'range',
'linspace',
'zeros_like',
'ones_like',
'diag',
'eye',
'triu',
]
def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the create tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    # Accept every dtype the docstring advertises.  (The previous list
    # contained 'int32' twice and omitted 'int16'.)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)
def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
	:api_attr: Static Graph

    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operator by your self, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            W = paddle.static.create_parameter(shape=[784, 200], dtype='float32')
    """
    # Validate arguments up front so users get targeted error messages
    # instead of a failure deep inside LayerHelper.
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_parameter')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')
    # NOTE: **locals() forwards the user's `name`/`attr` etc. to LayerHelper,
    # so local variable names in this function are part of its behavior.
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)
def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                                  persistable=True, force_cpu=True, name='new_var')
    """
    # Validate arguments first for clear error messages.
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        check_type(item, 'item of shape',
                   (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                    numpy.int64), 'create_global_var')
    check_dtype(dtype, 'dtype', [
        'bool',
        'float16',
        'float32',
        'float64',
        'int8',
        'int16',
        'int32',
        'int64',
        'uint8',
        'uint16',
    ], 'create_global_var')
    # NOTE: **locals() forwards the user's kwargs (e.g. `name`) to LayerHelper.
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    # Fill the variable with `value`; force_cpu pins the fill op to CPU.
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))
    return var
def cast(x, dtype):
    """
    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    # Eager (new dygraph) fast path: call the final-state C++ op directly.
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.final_state_cast(x, dtype)
    # Legacy dygraph fast path.
    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out
    # Static graph: validate inputs, then append a `cast` op to the program.
    check_variable_and_dtype(x, 'x', [
        'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
        'uint8', 'uint16'
    ], 'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8', 'uint16'
    ], 'cast')
    helper = LayerHelper('cast', **locals())
    # Propagate stop_gradient so casting does not accidentally enable grads.
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out
def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """
    # Eager (new dygraph) fast path.
    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            # drop tensors that have a zero-sized dimension
            input = [t for t in input if t.shape.count(0) == 0]
        return _C_ops.final_state_concat(input, axis)
    # Legacy dygraph fast path.
    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _C_ops.concat(input, out, 'axis', axis)
        return out
    # Static graph: validate each tensor's dtype and ensure they all agree.
    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x, 'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat')
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type.")
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')
    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype, 'axis', ['int32', 'int64'], 'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor")
    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            # a Tensor axis is fed through an input, not an attribute
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis
        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out
def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in attr::`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    # Dygraph path: `input` is a plain Python list of tensors; emulate the op
    # with stack/concat and compute the per-tensor sizes along `axis`.
    if _non_static_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes
    # Static graph path: validate, then append the tensor_array_to_tensor op.
    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index
def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)
            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])
            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    # `input` may be a single Variable or a list/tuple of Variables.
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                 ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
             ['float16', 'float32', 'float64', 'int32', 'int64'], 'sums')
    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        # NOTE(review): this out-dtype check omits 'float16' even though the
        # input check accepts it — confirm whether that asymmetry is intended.
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums')
    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out
def assign(input, output=None):
    """
    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
            array = np.array([[1, 1],
                              [3, 4],
                              [1, 3]]).astype(np.int64)
            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            paddle.assign(array, result1) # result1 = [[1, 1], [3 4], [1, 3]]
            result2 = paddle.assign(data) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
            result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray, list, tuple, float,
                                int, bool), 'assign')
    # A user-supplied output means the op writes in place, which requires an
    # inplace-version bump in dygraph mode (see end of function).
    is_inplace = True if output is not None else False

    # Normalize Python scalars and list/tuple literals to a numpy array so
    # the ndarray branch below handles them uniformly.
    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if output is None:
                # Legacy dygraph and eager mode use different tensor classes.
                if _in_legacy_dygraph():
                    output = core.VarBase()
                else:
                    output = core.eager.Tensor()
            _C_ops.assign(input, output)
        else:
            check_dtype(input.dtype, 'input', [
                'float16', 'uint16', 'float32', 'float64', 'int32', 'int64',
                'uint8', 'bool'
            ], 'assign', '(When the type of input in assign is Variable.)')
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype)
            helper.append_op(
                type='assign', inputs={'X': [input]},
                outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32")
            dtype = VarDesc.VarType.FP32
        # Pick the attribute name and Python-level value list that the
        # assign_value op expects for this dtype.
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        # Values are embedded in the program desc as attributes, so large
        # arrays would bloat it; direct users to save/load instead.
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output
def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """
    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is an Tensor, it should be an 1-D Tensor with date type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is an Tensor, it should be an 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of operation.
            if ``out`` is None, a new Tensor will be create to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            # attr shape is a list which doesn't contain  Tensor.
            data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
            data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
            # data1=[[5], [5]] data2=[[5], [5]]

            # attr shape is a list which contains Tensor.
            positive_2 = fluid.layers.fill_constant([1], "int32", 2)
            data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

            # attr shape is a Tensor.
            shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
            data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

            # attr value is a Tensor.
            val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
            data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    # Record the literal value in attrs only for Python scalars; a Variable
    # value is passed through the 'ValueTensor' input instead (static graph)
    # or read out via numpy() (dygraph).
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if _non_static_mode():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _C_ops.fill_constant(out, 'value',
                             float(value), 'force_cpu', force_cpu, 'dtype',
                             out.dtype, 'str_value', attrs['str_value'],
                             'shape', shape)
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'uint8', 'int16', 'int32',
        'int64', 'complex64', 'complex128'
    ], 'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    # NOTE(review): the original code constructed a second, identical
    # LayerHelper("fill_constant", **locals()) here; the helper's kwargs are
    # only consulted for 'name', so the redundant re-construction was removed.
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out
@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tesnor according the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is LoDTensor
    and the input_dim_idx is 0, the output_dim_idx dimension is set to the value
    of the batch_size input by the input, the Stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
            the value of batch_size of input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    # Integer dtypes get an integer str_value so the string form round-trips
    # without a fractional part.
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out
def argmin(x, axis=0):
    """
    :alias_main: paddle.argmin
    :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
    :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the min elements of the input tensor's
    element along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                           [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    # Index output is always int64 regardless of input dtype.
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out
def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the max elements of the input tensor's
    element along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                           [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    # Index output is always int64 regardless of input dtype.
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out
def argsort(input, axis=-1, descending=False, name=None):
    """
    :alias_main: paddle.argsort
    :alias: paddle.argsort,paddle.tensor.argsort,paddle.tensor.search.argsort
    :old_api: paddle.fluid.layers.argsort

    This OP sorts the input along the given axis, and returns sorted output
    data Varibale and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                           [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    # Sorted values keep the input dtype; indices are int64. Neither output
    # participates in gradient computation.
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids
def ones(shape, dtype, force_cpu=False):
    """
    Create a Tensor of the given ``shape`` and ``dtype`` filled with 1.

    The returned Tensor has ``stop_gradient`` set to True, so it does not
    participate in gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of shape is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data0 = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.ones(shape=shape, dtype='int32') #[[1, 1], [1, 1]]
    """
    # Thin wrapper over fill_constant with the fill value pinned to 1.0.
    return fill_constant(
        shape=shape, dtype=dtype, value=1.0, force_cpu=force_cpu)
def zeros(shape, dtype, force_cpu=False, name=None):
    """
    Create a Tensor of the given ``shape`` and ``dtype`` filled with 0.

    The returned Tensor has ``stop_gradient`` set to True, so it does not
    participate in gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    # Thin wrapper over fill_constant with the fill value pinned to 0.0.
    return fill_constant(
        shape=shape, dtype=dtype, value=0.0, force_cpu=force_cpu, name=name)
def reverse(x, axis):
    """
    :alias_main: paddle.reverse
    :alias: paddle.reverse,paddle.tensor.reverse,paddle.tensor.manipulation.reverse
    :old_api: paddle.fluid.layers.reverse

    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    .. code-block:: text

        Case 1:
            Given a LoDTensor:
                x = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
                axis = [0, 1]
            Then:
                output = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]

        Case 2:
            Given a LoDTensorArray:
                x = {[[0, 1], [2, 3]],
                     [[4, 5, 6]],
                     [[7],[8], [9]]}
                axis = 0
            Then:
                output = {[[7],[8], [9]],
                          [[4, 5, 6]],
                          [[0, 1], [2, 3]]}

    Parameters:
        x (Variable): A tensor or LoDTensorArray to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
            If input is a LoDTensorArray, returns a new reversed LoDTensorArray without changing the internal order of each inner tensor.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be apply on each axis in the tuple or list. If input is a LoDTensorArray, the value of axis shall be 0, or a
            list [0] or tuple (0, ) with shape [1].

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]

          # example of LoDTensorArray
          data1 = fluid.layers.assign(np.array([[0, 1, 2]], dtype='float32'))
          data2 = fluid.layers.assign(np.array([[3, 4, 5]], dtype='float32'))
          tensor_array = fluid.layers.create_array(dtype='float32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
          fluid.layers.array_write(data1, i, tensor_array)
          fluid.layers.array_write(data2, i+1, tensor_array)
          reversed_tensor_array = fluid.layers.reverse(tensor_array, 0) # {[[3, 4, 5]], [[0, 1, 2]]}
    """
    check_variable_and_dtype(
        x, 'x', ('float32', 'float64', 'int32', 'int64', 'uint8'), 'reverse')
    check_type(axis, 'axis', (int, tuple, list), 'reverse')
    # Normalize a single axis to a list; the op attr always takes a list.
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out
def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not cover the given file when it has already
            existed. If it's set 'False' and the file is existed, a runtime
            error will be thrown.
    """
    helper = LayerHelper("save", **locals())
    # BUG FIX: operator attributes must be passed via `attrs`; the original
    # `args=` keyword is not recognized by append_op, so `file_path` and
    # `overwrite` never reached the save op.
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not cover the given file when it has already
            existed. If it's set 'False' and the file is existed, a runtime
            error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            normed = fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    # BUG FIX: operator attributes must be passed via `attrs`; the original
    # `args=` keyword is not recognized by append_op, so `file_path` and
    # `overwrite` never reached the save_combine op.
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def load_combine(out, file_path):
    """
    Loads a list of variable from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
    """
    helper = LayerHelper("load_combine", **locals())
    # BUG FIX: the original call used `output=` (append_op expects `outputs=`)
    # and `args=` (attributes go through `attrs=`); with those keywords the
    # op was appended without its outputs or its file_path attribute.
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})
def has_inf(x):
    """
    Test if any of x contains an infinity number

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor storing the output, only a bool value, indicating that whether there is infinity number in x or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[4, 32, 32], dtype="float32")
          res = paddle.fluid.layers.has_inf(data)
          # [False]

    """
    # Fast path: call the C++ op directly in dynamic graph mode.
    if _non_static_mode():
        return _C_ops.isinf(x)

    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    # NOTE(review): the output is created with x's dtype rather than 'bool';
    # preserved as-is — presumably the op itself fixes the output type.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out
def has_nan(x):
    """
    Test if any of x contains a NAN

    Args:
       x (Tensor): The Tensor to be checked.

    Returns:
       Tensor: The tensor variable storing the output, only a bool value, indicating that whether there is NAN in x or not.

    Examples:
        .. code-block:: python

          import paddle
          data = paddle.randn(shape=[2,3], dtype="float32")
          res = paddle.fluid.layers.has_nan(data)
          # [False]

    """
    # Fast path: call the C++ op directly in dynamic graph mode.
    if _non_static_mode():
        return _C_ops.isnan(x)

    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    # NOTE(review): the output is created with x's dtype rather than 'bool';
    # preserved as-is — presumably the op itself fixes the output type.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out
def isfinite(x):
    """
    Test if any of x contains an infinity/NAN number. If all the elements are finite,
    returns true, else false.

    Args:
       x(Tensor): The Tensor to be checked.

    Returns:
        Tensor: The tensor storing the output, contains a bool value.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[4, 6], dtype='float32')
            y = paddle.fluid.layers.isfinite(x)
            print(y)
    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())

    # The reduction result is a single boolean tensor.
    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out
def range(start, end, step, dtype, name=None):
    """
    This OP returns a 1-D Tensor with spaced values within a given interval.

    Values are generated into the half-open interval [``start``, ``end``) with
    the ``step``. (the interval including ``start`` but excluding ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``start`` is a Tensor, it is a 1-D Tensor with shape [1],
            with data type int32, int64, float32, float64.
        end(float|int|Tensor): End of interval. The interval does not include
            this value. If ``end`` is a Tensor, it is a 1-D Tensor with shape
            [1], with data type int32, int64, float32, float64.
        step(float|int|Tensor): Spacing between values. For any out, it is
            the distance between two adjacent values, out[i+1] - out[i]. If
            ``step`` is a Tensor, it is a 1-D Tensor with shape [1], with data
            type int32, int64, float32, float64.
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Raises:
        TypeError: If ``dtype`` is not int32, int64, float32, float64.

    examples:

        .. code-block:: python

            import paddle.fluid as fluid

            out1 = fluid.layers.range(0, 10, 2, 'int32')
            # [0, 2, 4, 6, 8]

            start_var = fluid.layers.fill_constant([1], 'int64', 3)
            out2 = fluid.layers.range(start_var, 7, 1, 'int64')
            # [3, 4, 5, 6]

    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    # Promote start/end/step to 1-D tensors of the target dtype. Scalars are
    # materialized on CPU so shape inference can read their values.
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = cast(start, dtype)

    if not isinstance(end, Variable):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = cast(end, dtype)

    if not isinstance(step, Variable):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = cast(step, dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_arange(start, end, step, dtype,
                                         _current_expected_place())

    if _in_legacy_dygraph():
        out = _C_ops.range(start, end, step)
        out.stop_gradient = True
        return out

    # Static shape is only computable when all three bounds were scalars
    # (i.e. not Variables on entry).
    out_shape = None
    if not isinstance(start, Variable) and not isinstance(
            end, Variable) and not isinstance(step, Variable):
        out_shape = [int(math.ceil((end - start) / step))]

    check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                'range/arange')
    helper = LayerHelper('range', **locals())
    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': out})
    out.stop_gradient = True
    return out
def linspace(start, stop, num, dtype=None, name=None):
    r"""
    This OP return fixed number of evenly spaced values within a given interval.

    Args:
        start(int|float|Tensor): The input :attr:`start` is start variable of range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is start variable of range. It is a scalar, \
            or a Tensor of shape [1] with input data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is given num of the sequence. It is an int scalar, \
            or a Tensor of shape [1] with data type int32.
        dtype(np.dtype|str, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.Default: None.

    Returns:
        Tensor: the output data type will be float32, float64. The 1-D tensor with fixed number of evenly spaced values, \
        the data shape of this tensor is :math:`[num]` . If the :attr:`num` is set 1, the output tensor just has \
        the value with input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle
             data = paddle.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = paddle.linspace(0, 10, 1, 'float32') # [0.0]

    """
    if dtype is None:
        dtype = 'float32'
    # Keep the originals for validation below; the tensor_* variants are
    # what the op actually consumes.
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, Variable):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    # Scalars are materialized on CPU as 1-D constant tensors.
    if not isinstance(start, Variable):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        with device_guard("cpu"):
            tensor_num = fill_constant([1], 'int32', num)
    if _non_static_mode():
        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                               dtype)

    helper = LayerHelper("linspace", **locals())

    start_dtype = convert_dtype(tensor_start.dtype)
    stop_dtype = convert_dtype(tensor_stop.dtype)
    out_dtype = convert_dtype(dtype)
    if isinstance(start, Variable):
        check_dtype(start.dtype, 'start',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(start, 'start', (int, float), 'linspace')

    if isinstance(stop, Variable):
        check_dtype(stop.dtype, 'stop',
                    ['float32', 'float64', 'int32', 'int64'], 'linspace')
    else:
        check_type(stop, 'stop', (int, float), 'linspace')
    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'linspace')
    check_dtype(dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'],
                'linspace')
    # Reject combinations where the inputs' dtype is wider than the requested
    # output dtype (float64 -> float32/int32, int64 -> int32): the narrowing
    # could silently overflow or lose precision.
    if ((stop_dtype == "float64" or start_dtype == "float64") and
            out_dtype in ["float32", "int32"]) or ((stop_dtype == "int64" or
                                                    start_dtype == "int64") and
                                                   out_dtype == "int32"):
        raise ValueError(
            "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
            "which may cause data type overflows. Please reset attr(dtype) of linspace."
            .format(start_dtype, stop_dtype, dtype))

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num},
        attrs={'dtype': dtype},
        outputs={'Out': [out]})
    # When num is a Python int the output length is known statically.
    if isinstance(num, int):
        out.desc.set_shape((num, ))
    return out
def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has identical shape and dtype
    with `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the
            input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If is :attr:`None` , the op will create the
            variable as output, the data type and shape of this variable will
            be same as input :attr:`x`. If is a tensor, the data type and shape
            need to be same as input :attr:`x`. The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor, the element in tensor is related to input
            data type, if the input data type is bool, the output value is
            False, otherwise is zero. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]
    """
    # Bug fix: the op name passed to the dtype check used to be 'ones_like'
    # (copy-pasted from ones_like below), which made validation errors report
    # the wrong operator; it must say 'zeros_like'.
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        # A caller-provided output variable must also be of a supported dtype.
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')
    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    # The result is constant w.r.t. any input, so no gradient flows back.
    out.stop_gradient = True
    return out
@deprecated(since="2.0.0", update_to="paddle.diag")
def diag(diagonal):
    r"""
    :alias_main: paddle.diag
    :alias: paddle.diag,paddle.tensor.diag,paddle.tensor.creation.diag
    :old_api: paddle.fluid.layers.diag

    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable, the output data type is the same as input data type.: The tensor variable storing the square matrix, \
        the diagonal values specified by input :attr:`diagonal`. the output shape is :math:`[N, N]` with two dims.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          # [0, 4, 0]
          # [0, 0, 5]
          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)
    """
    # Validate the input type/dtype up front; both Variables and numpy
    # arrays expose a `.dtype` attribute, so check_dtype works for either.
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    # NOTE: LayerHelper captures **locals() here, so `diagonal` must not be
    # rebound before this call -- keep the statement order as-is.
    helper = LayerHelper("diag", **locals())
    # A raw numpy array is first materialised as a graph Variable.
    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)
    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)
    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})
    # Constant output: no gradient flows back through diag.
    out.stop_gradient = True
    return out
def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype='float32',
        name=None):
    """
    This function constructs a or a batch of 2-D tensor with ones on the diagonal and zeros elsewhere.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int, optional): the number of columns in each batch tensor.
            If None, default: num_rows.
        batch_shape(list, optional): If provided, the returned tensor will have a leading
            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
        dtype(np.dtype|str, optional): The data type of the returned tensor.
            It should be int32, int64, float16, float32, float64, default is 'float32'.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          # [0, 1, 0]
          # [0, 0, 1]]
          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          # [0, 1, 0]]
          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.
    """
    # Normalise a string/numpy dtype to the internal VarDesc enum.
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        # Default to a square matrix.
        num_columns = num_rows
    # Three execution paths: new eager mode, legacy eager mode, static graph.
    if in_dygraph_mode():
        out = _C_ops.final_state_eye(num_rows, num_columns, dtype,
                                     _current_expected_place())
    elif _in_legacy_dygraph():
        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                         num_columns)
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(dtype, 'dtype',
                    ['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
        # NOTE(review): num_rows is only validated on the static-graph path;
        # the dygraph branches above accept it unchecked -- presumably the C++
        # op validates it there. Confirm before relying on the TypeError.
        if not isinstance(num_rows, int) or num_rows < 0:
            raise TypeError("num_rows should be a non-negative int")
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype
            },
            stop_gradient=True)
    if batch_shape is not None:
        # Reshape the 2-D identity to [1, ..., 1, rows, cols] and tile it
        # batch_shape times along the leading dims.
        re_shape = [1] * len(batch_shape)
        re_shape = re_shape + [num_rows, num_columns]
        expand_times = batch_shape + [1, 1]
        if _non_static_mode():
            out = _C_ops.reshape(out, 'shape', re_shape)
            return _C_ops.expand(out, None, 'expand_times', expand_times)
        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        for batch_val in (batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")
        # Imported lazily to avoid a circular import with .nn.
        from .nn import reshape, expand
        out = reshape(x=out, shape=re_shape)
        out = expand(x=out, expand_times=expand_times)
    out.stop_gradient = True
    return out
def ones_like(x, out=None):
    """
    **ones_like**

    Create a tensor filled with ones that has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]
    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')
    helper = LayerHelper("ones_like", **locals())
    if out is not None:
        # A caller-supplied output variable must be of a supported dtype.
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    else:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'value': 1.0})
    return out
@deprecated(since="2.0.0", update_to="paddle.triu")
def triu(input, diagonal=0, name=None):
    """Deprecated shim: forwards to :func:`paddle.tensor.triu`."""
    # Imported lazily so this legacy module does not require paddle 2.x
    # at import time.
    import paddle
    return paddle.tensor.triu(x=input, diagonal=diagonal, name=name)
| 37.585259 | 162 | 0.566311 |
d9dab1f3a5663f6c37df62514f648412d7013960 | 7,282 | py | Python | code/abalation_study.py | fbenites/TRANSLIT | a1caffff2e7fef63ca4dfe8ff32eddbfb95b5678 | [
"CC0-1.0"
] | 4 | 2020-07-19T17:28:42.000Z | 2022-02-18T01:24:23.000Z | code/abalation_study.py | fbenites/TRANSLIT | a1caffff2e7fef63ca4dfe8ff32eddbfb95b5678 | [
"CC0-1.0"
] | 7 | 2021-03-19T11:38:23.000Z | 2022-03-12T00:19:20.000Z | code/abalation_study.py | fbenites/TRANSLIT | a1caffff2e7fef63ca4dfe8ff32eddbfb95b5678 | [
"CC0-1.0"
] | 2 | 2021-06-26T12:35:26.000Z | 2022-03-06T07:38:29.000Z | import shelve
import data_generator
def print_table(headers, rows):
    """Print a LaTeX table skeleton to stdout.

    headers -- list of column header strings (joined with '&')
    rows    -- list of pre-formatted row strings (already '&'-joined)
    """
    print("\\begin{table}\n\\begin{tabular}{"+"".join(["|c|"]*len(headers))+"}\n")
    print("&".join(headers)+"\\\\\n")
    # Bug fix: this loop previously iterated the undefined name `rowHeaders`,
    # which raised NameError on every call; it must iterate the `rows`
    # parameter.
    for row in rows:
        print(row+"\\\\\n")
    print("\\end{tabular}\n\\caption{}\n\\label{}\n\\end{table}\n")
# --- Amazon names: load the raw shelve store (guarded so an interactive
# --- re-run of the script does not reload the data) ---
if "amazon_names" not in globals():
    amazon_names_s = shelve.open("../artefacts/amazon_data.shf")
    amazon_names = dict([(tk,amazon_names_s[tk]) for tk in amazon_names_s])
    amazon_names_s.close()
    nal=list(amazon_names.keys())
#normalize
# Re-key by integer index; each value becomes a list of 'lang_name' strings
# with the English key itself prepended as 'en_<key>'.
amazon_names_normed = {}
for i1,k in enumerate(amazon_names):
    amazon_names_normed[i1]=['en_'+k]+ [tk[0]+'_'+tk[1] for tk in amazon_names[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/amazon_normed.pkl"
if not os.path.exists(fpath_an):
    import pickle
    pickle.dump(amazon_names_normed, open(fpath_an,"wb"))
# 7000 positive / 7000 negative / 7000 similar pairs, seed 42 for reproducibility.
ama_nor_pairs=data_generator.generate_pairs(amazon_names_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
# --- Wikipedia names: identical pipeline to the Amazon block above ---
if "wiki_names_cleaned" not in globals():
    wiki_names_cleaned_s = shelve.open("../artefacts/wiki_names_cleaned.shf")
    wiki_names_cleaned = dict([(tk,wiki_names_cleaned_s[tk]) for tk in wiki_names_cleaned_s])
    wiki_names_cleaned_s.close()
#normalize
wiki_names_cleaned_normed = {}
for i1,k in enumerate(wiki_names_cleaned):
    wiki_names_cleaned_normed[i1]=['en_'+k]+ [tk[0]+'_'+tk[1] for tk in wiki_names_cleaned[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/wiki_normed.pkl"
if not os.path.exists(fpath_an):
    import pickle
    pickle.dump(wiki_names_cleaned_normed, open(fpath_an,"wb"))
wiki_nor_pairs=data_generator.generate_pairs(wiki_names_cleaned_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
# --- JRC names: same pipeline, but without the 'en_<key>' prefix entry ---
if "jrc_names" not in globals():
    jrc_names_s = shelve.open("../artefacts/jrc_names_new.shf")
    jrc_names = dict([(tk,jrc_names_s[tk]) for tk in jrc_names_s])
    jrc_names_s.close()
jrc_names_normed = {}
for i1,k in enumerate(jrc_names):
    jrc_names_normed[i1]= [tk[0]+'_'+tk[1] for tk in jrc_names[k]]
#select 20'000 positive, negative and similar
import os
fpath_an="../artefacts/jrc_names_normed.pkl"
if not os.path.exists(fpath_an):
    import pickle
    pickle.dump(jrc_names_normed, open(fpath_an,"wb"))
jrc_nor_pairs=data_generator.generate_pairs(jrc_names_normed, 42, 7000, 7000, 7000, fpath=fpath_an)
# together corpus
if "gen_pars" not in globals():
    import pickle
    translit_gen_pars = pickle.load(open("../artefacts/gen_pairs.pkl","rb"))
# get only 21k from the translit_gen_pars
import numpy as np
np.random.seed(42)
rids = np.random.permutation(range(len(translit_gen_pars)))[:21000]
translit_pars = [translit_gen_pars[tk] for tk in rids]
# CV on each dataset
from sklearn.model_selection import KFold
datasets_names = ["jrc", "wiki","ama","translit"]
datasets = [ jrc_nor_pairs, wiki_nor_pairs,
             ama_nor_pairs, translit_pars]
# Experiment 1: 10-fold CV on each dataset separately.
scores=[]
for i1,ds in enumerate(datasets):
    kf = KFold(n_splits=10)
    np.random.seed(42)
    score_ds=[]
    for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
        # NOTE(review): `do_classification` is used unqualified here but as
        # `classification_experiments.do_classification` below, and the
        # `classification_experiments` module is never imported in this file.
        # Presumably the script is run interactively with those names already
        # in scope -- verify before running it standalone.
        score_ds.append(do_classification(train_index, test_index, [ds[tk][2] for tk in train_index],
            [ds[tk][2] for tk in test_index],ds))
    scores.append(score_ds)
# Render mean +- variance per dataset and classifier as a LaTeX table.
rows=[]
for tk,tj,tn in zip(np.mean(scores,1),np.var(scores,1),datasets_names ):
    row=tn
    for tl,tm in zip(tk,tj):
        row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
    rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
# train on one test on other
#
# Experiment 2 (baseline): train on translit pairs, test on each other dataset.
scores_base=[]
for i1,ds in enumerate(datasets[:-1]):
    scores_base.append(classification_experiments.do_classification(range(len(translit_pars)), range(len(translit_pars),len(translit_pars)+len(ds)),
        [translit_pars[tk][2] for tk in range(len(translit_pars))],
        [ds[tk][2] for tk in range(len(ds))],translit_pars+ds))
# Experiment 3 (mixed): train on translit pairs plus 9/10 of the target
# dataset, test on the held-out fold.
scores_mixed=[]
for i1,ds in enumerate(datasets):
    kf = KFold(n_splits=10)
    np.random.seed(42)
    score_ds=[]
    for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
        score_ds.append(classification_experiments.do_classification(list(range(len(translit_pars)))+(len(translit_pars)+train_index).tolist(), len(translit_pars)+test_index,
            [translit_pars[tk][2] for tk in range(len(translit_pars))]+[ds[tk][2] for tk in train_index],
            [ds[tk][2] for tk in test_index],translit_pars+ds))
    scores_mixed.append(score_ds)
rows=[]
for tk,tj,tn in zip(np.mean(scores_mixed,1),np.var(scores_mixed,1),datasets_names ):
    row=tn
    for tl,tm in zip(tk,tj):
        row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
    rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
# Experiment 4: learning curve over increasing numbers of translit pairs.
scores_increasing=[]
rids = np.random.permutation(range(len(translit_gen_pars)))
for i in [1000,10000,20000,40000,80000,160000]:
    kf = KFold(n_splits=10)
    np.random.seed(42)
    translit_pars_t = [translit_gen_pars[rids[tk]] for tk in range(i)]
    ds=translit_pars_t
    score_ds=[]
    for train_index, test_index in kf.split(np.random.permutation(range(len(ds)))):
        score_ds.append(classification_experiments.do_classification(train_index, test_index, [ds[tk][2] for tk in train_index],
            [ds[tk][2] for tk in test_index],ds))
    scores_increasing.append(score_ds)
rows=[]
for tk,tj,tn in zip(np.mean(scores_increasing,1),np.var(scores_increasing,1),["1k","10k","20k","40k","80k","160k"] ):
    row=tn
    for tl,tm in zip(tk,tj):
        row+="&"+"{:0.3f}".format(tl)+"$\\pm$"+"{:0.3f}".format(tm)
    rows.append(row)
print_table(["Dataset","RF","SVM","SVM Char"],rows)
# Plot the learning curve (mean with variance error bars) for each classifier.
import matplotlib.pyplot as plt
import numpy as np
ind=[1,2,3,4,5,6]
p0=plt.errorbar(ind, np.mean(scores_increasing,1)[:,0], np.var(scores_increasing,1)[:,0], fmt='-o')
p1=plt.errorbar(ind, np.mean(scores_increasing,1)[:,1], np.var(scores_increasing,1)[:,1], fmt='-o')
p2=plt.errorbar(ind, np.mean(scores_increasing,1)[:,2], np.var(scores_increasing,1)[:,2], fmt='-o')
plt.xticks(ind, ('1k', '10k', '20k', '40k', '80k','160k'))
plt.legend((p0[0],p1[0], p2[0]), ('RF', 'SVM', "SVM-TF-IDF"))
plt.title("Dependency of the Accuracy on Increasing Pair Sample")
plt.savefig("inc_plot.pdf")
| 36.964467 | 174 | 0.602582 |
682392edbe8bdab7b518dfe3a063320260e059da | 989 | py | Python | courses/machine_learning/deepdive/08_image/flowersmodel/setup.py | joydeepmitra/Machine-Learning | ff887fae1e317e79c8e49208fc273c336cb79077 | [
"Apache-2.0"
] | 58 | 2019-05-16T00:12:11.000Z | 2022-03-14T06:12:12.000Z | courses/machine_learning/deepdive/08_image/flowersmodel/setup.py | joydeepmitra/Machine-Learning | ff887fae1e317e79c8e49208fc273c336cb79077 | [
"Apache-2.0"
] | 8 | 2020-01-28T22:26:41.000Z | 2022-02-09T23:48:28.000Z | courses/machine_learning/deepdive/08_image/flowersmodel/setup.py | joydeepmitra/Machine-Learning | ff887fae1e317e79c8e49208fc273c336cb79077 | [
"Apache-2.0"
] | 60 | 2018-12-10T20:59:36.000Z | 2022-02-01T03:18:01.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
# Third-party packages required at install time (none beyond the
# Cloud ML Engine runtime).
REQUIRED_PACKAGES = [
]
# Package the trainer code for submission to Cloud ML Engine.
setup(
    name='flowersmodel',
    version='0.1',
    author = 'Google',
    author_email = 'training-feedback@cloud.google.com',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='Image model in Cloud ML Engine',
    requires=[]
)
| 30.90625 | 74 | 0.744186 |
15b31f79ff32e7d93927a308bb857a3a7ed0406f | 6,692 | py | Python | model/caption.py | Haochen-Luo/fairseq-image-captioning-master | 17ae057de992de869ba0bbb41dd5de0333fc6076 | [
"Apache-2.0"
] | 282 | 2019-11-28T13:44:05.000Z | 2022-03-26T17:34:35.000Z | model/caption.py | Haochen-Luo/fairseq-image-captioning-master | 17ae057de992de869ba0bbb41dd5de0333fc6076 | [
"Apache-2.0"
] | 29 | 2019-11-30T12:24:19.000Z | 2022-02-24T17:40:25.000Z | model/caption.py | Haochen-Luo/fairseq-image-captioning-master | 17ae057de992de869ba0bbb41dd5de0333fc6076 | [
"Apache-2.0"
] | 61 | 2020-01-21T14:35:46.000Z | 2022-03-19T17:01:18.000Z | import torch
import torch.nn.functional as F
from model import modules
from fairseq.models import FairseqEncoder, BaseFairseqModel
from fairseq.models import register_model, register_model_architecture, transformer
def create_padding_mask(src_tokens, src_lengths):
    """Build a boolean padding mask of shape (batch, seq_len).

    Entry [i, t] is True when position t lies at or beyond src_lengths[i],
    i.e. it is padding; real tokens are False.

    src_tokens  -- tensor whose first two dims are (batch, seq_len)
    src_lengths -- per-example valid lengths (tensor or sequence of ints);
                   assumes len(src_lengths) == batch -- TODO confirm callers
    """
    seq_len = src_tokens.shape[1]
    # Vectorized replacement for the original per-row Python loop:
    # a single broadcast comparison yields the same bool mask in one op.
    lengths = torch.as_tensor(src_lengths, device=src_tokens.device)
    positions = torch.arange(seq_len, device=src_tokens.device)
    return positions.unsqueeze(0) >= lengths.unsqueeze(1)
class SimplisticCaptioningEncoder(FairseqEncoder):
    """Pass-through encoder: optionally projects and spatially encodes the
    visual features, but applies no transformer layers of its own."""

    def __init__(self, args):
        super().__init__(dictionary=None)
        # Optional linear projection of the raw visual features.
        if args.no_projection:
            self.feature_projection = None
        else:
            self.feature_projection = modules.FeatureProjection(args)
        # Optional encoding of the bounding-box locations.
        if args.feature_spatial_encoding:
            self.spatial_encoding = modules.SpatialEncoding(args)
        else:
            self.spatial_encoding = None

    def forward(self, src_tokens, src_lengths, src_locations, **kwargs):
        if self.feature_projection is None:
            features = src_tokens
        else:
            features = self.feature_projection(src_tokens)
        if self.spatial_encoding is not None:
            features += self.spatial_encoding(src_locations)
        # Fairseq encoders emit time-major output: B x T x C -> T x B x C.
        encoded = features.transpose(0, 1)
        padding_mask = create_padding_mask(src_tokens, src_lengths)
        return transformer.EncoderOut(
            encoder_out=encoded,
            encoder_padding_mask=padding_mask,
            encoder_embedding=None,
            encoder_states=None)

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder the batch dimension for beam search."""
        return transformer.EncoderOut(
            encoder_out=encoder_out.encoder_out.index_select(1, new_order),
            encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order),
            encoder_embedding=None,
            encoder_states=None)
class TransformerCaptioningEncoder(transformer.TransformerEncoder):
    """Transformer encoder over visual features: a FeatureProjection serves
    as the token-embedding layer, optionally plus spatial encodings."""
    def __init__(self, args):
        # The FeatureProjection replaces the usual token embedding table.
        super().__init__(args, None, modules.FeatureProjection(args))
        self.spatial_encoding = modules.SpatialEncoding(args) \
            if args.feature_spatial_encoding else None
    def forward(self, src_tokens, src_lengths, src_locations, **kwargs):
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.spatial_encoding is not None:
            # Add the spatial (bounding-box location) encoding in place.
            x += self.spatial_encoding(src_locations)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = create_padding_mask(src_tokens, src_lengths)
        # encoder layers
        for layer in self.layers:
            x = layer(x, encoder_padding_mask)
        if self.layer_norm:
            x = self.layer_norm(x)
        return transformer.EncoderOut(encoder_out=x,
                                      encoder_padding_mask=encoder_padding_mask,
                                      encoder_embedding=None,
                                      encoder_states=None)
class CaptioningModel(BaseFairseqModel):
    """Abstract encoder/decoder captioning model.

    Subclasses supply the encoder via do_build_encoder/do_build_model; the
    decoder is always a fairseq TransformerDecoder over the caption
    dictionary.
    """
    @staticmethod
    def add_args(parser):
        # Reuse all transformer CLI options, then add the captioning-specific ones.
        transformer.TransformerModel.add_args(parser)
        parser.add_argument('--features-dim', type=int, default=2048,
                            help='visual features dimension')
        parser.add_argument('--feature-spatial-encoding', default=False, action='store_true',
                            help='use feature spatial encoding')
    @classmethod
    def build_model(cls, args, task):
        # Fill in any transformer defaults the user did not override.
        transformer.base_architecture(args)
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = transformer.DEFAULT_MAX_TARGET_POSITIONS
        captions_dict = task.target_dictionary
        encoder = cls.do_build_encoder(args)
        decoder = cls.do_build_decoder(args, captions_dict)
        return cls.do_build_model(encoder, decoder)
    @classmethod
    def do_build_model(cls, encoder, decoder):
        # Subclasses must construct and return their concrete model type.
        raise NotImplementedError
    @classmethod
    def do_build_encoder(cls, args):
        # Subclasses must construct and return their encoder.
        raise NotImplementedError
    @classmethod
    def do_build_decoder(cls, args, captions_dict):
        # Shared decoder: embeddings over the caption vocabulary feeding a
        # standard transformer decoder.
        decoder_embedding = transformer.Embedding(num_embeddings=len(captions_dict),
                                                  embedding_dim=args.decoder_embed_dim,
                                                  padding_idx=captions_dict.pad())
        return transformer.TransformerDecoder(args, captions_dict, decoder_embedding)
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        # Encode the image features, then decode conditioned on them.
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)
        return decoder_out
    def forward_decoder(self, prev_output_tokens, **kwargs):
        # Decoder-only step, used during incremental generation.
        return self.decoder(prev_output_tokens, **kwargs)
    def max_decoder_positions(self):
        return self.decoder.max_positions()
@register_model('default-captioning-model')
class DefaultCaptioningModel(CaptioningModel):
    """Captioning model whose encoder is a full transformer encoder stack."""
    @classmethod
    def do_build_encoder(cls, args):
        return TransformerCaptioningEncoder(args)
    @classmethod
    def do_build_model(cls, encoder, decoder):
        return DefaultCaptioningModel(encoder, decoder)
@register_model('simplistic-captioning-model')
class SimplisticCaptioningModel(CaptioningModel):
    """Captioning model that feeds (optionally projected) visual features
    straight to the decoder, with no transformer encoder layers."""
    @staticmethod
    def add_args(parser):
        CaptioningModel.add_args(parser)
        parser.add_argument('--no-projection', default=False, action='store_true',
                            help='do not project visual features')
    @classmethod
    def do_build_encoder(cls, args):
        return SimplisticCaptioningEncoder(args)
    @classmethod
    def do_build_model(cls, encoder, decoder):
        return SimplisticCaptioningModel(encoder, decoder)
@register_model_architecture('default-captioning-model', 'default-captioning-arch')
def default_captioning_arch(args):
    # Default architecture: a 3-layer transformer encoder (unless overridden).
    args.encoder_layers = getattr(args, 'encoder_layers', 3)
@register_model_architecture('simplistic-captioning-model', 'simplistic-captioning-arch')
def simplistic_captioning_arch(args):
    # Without a projection layer, the encoder output dimension is the raw
    # feature dimension, so the decoder must be sized to match it.
    if args.no_projection:
        args.encoder_embed_dim = args.features_dim
| 36.369565 | 103 | 0.661835 |
0454cd6e24e2c22fdcf4c0b5b66177bbbad8aa98 | 9,491 | py | Python | testdebug/testBase/testBase.py | telamonian/testdebug | 98b4bc8afaec2c86c859ad4f441934c19fe45be6 | [
"BSD-3-Clause"
] | null | null | null | testdebug/testBase/testBase.py | telamonian/testdebug | 98b4bc8afaec2c86c859ad4f441934c19fe45be6 | [
"BSD-3-Clause"
] | null | null | null | testdebug/testBase/testBase.py | telamonian/testdebug | 98b4bc8afaec2c86c859ad4f441934c19fe45be6 | [
"BSD-3-Clause"
] | null | null | null | import errno
import os
from pathlib import Path
import random
import shutil
from six import print_
import sys
import string
import tarfile
import time
import traceback
from .asserter import Asserter
__all__ = ['TestBase', 'color']
class _TextColor:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
@classmethod
def color(cls, color, s):
colorCode = getattr(cls, color)
return colorCode + s + cls.ENDC
color = _TextColor.color
class TestBase(Asserter):
    """Base class for test suites that can run either under a framework or
    directly via runSimple(); also manages temporary test files and
    tar-archived test data around a suite run."""
    # the dir where test data is stored and tmp test files are written to
    testDirPath = None
    # the file or list of files to read from prior to the test
    testFilePath = None
    # the path to use when writing out a temporary test file
    testOutputPath = None
    # default values for things to stick on the end of temporary test file names
    testOutputDelimiter = '_-_'
    testOutputSuffix = 'writeTest'
    # flag to indicate that a temporary test file has been written
    testOutputWritten = False
    # path to an archive containing test data, if any. The tar should following the naming convention that foo.tar.gz unpacks to foo. The archive will automatically be unpacked and cleaned up as appropriate
    # NB: be careful! Any dir/file foo that happens to be in the directory when we unpack/clean up foo.tar.gz will be deleted
    testTarPath = None
    # set if/when the tarfile at cls.testTarPath is unpacked
    testTarUnpackedFilesPath = None
    @staticmethod
    def getTestOutputPath(testOutputPath, outputDelimiter=None, outputSuffix=None, randPath=None):
        # Returns testOutputPath with the delimiter+suffix appended to the
        # stem and (by default) a random tag, so temp files are identifiable
        # by cleanAll() and unlikely to collide.
        if outputDelimiter is None: outputDelimiter = TestBase.testOutputDelimiter
        if outputSuffix is None: outputSuffix = TestBase.testOutputSuffix
        if randPath is None: randPath = True
        p = Path(testOutputPath)
        # add suffix to output path, if requested
        if outputSuffix:
            p = p.with_name(outputDelimiter.join((p.stem, outputSuffix)) + p.suffix)
        # randomize the output path, if requested
        if randPath:
            p = TestBase.randomizedPath(p)
        return p
    @staticmethod
    def randStr(size=6, chars=string.ascii_uppercase + string.digits):
        # Random alphanumeric tag; SystemRandom uses the OS entropy source.
        return ''.join(random.SystemRandom().choice(chars) for _ in range(size))
    @staticmethod
    def randomizedPath(p, size=6, chars=string.ascii_uppercase + string.digits):
        # Appends a random tag to the file stem, keeping the suffix.
        p = Path(p)
        return p.with_name(p.stem + TestBase.randStr(size=size, chars=chars) + p.suffix)
    @classmethod
    def getTestNames(cls):
        """Get the test methods names from the tests class attr if it exists, or find them in the cls based on their names having the 'test' prefix.
        """
        try:
            for testName in cls.tests:
                yield testName
        except AttributeError:
            for testName in (testName for testName in dir(cls) if testName[:4]=='test' and callable(getattr(cls, testName))):
                yield testName
    @classmethod
    def runSimple(cls, failslow=False):
        """Use this method if you just want to execute your tests as normal Python code rather than unittests within some framework
        Required for catching fatal exceptions when using a debugger (these are apparently swallowed by unittest and its brethren)
        """
        # add an excepthook that flushes any tester output and then pauses for a tic (to finish the flush)
        old_excepthook = sys.excepthook
        def excepthook(type, value, traceback):
            sys.stdout.flush()
            time.sleep(.05)
            old_excepthook(type, value, traceback)
        sys.excepthook = excepthook
        # modify stderr.write so that progress bars won't clobber tester output
        # TODO: simplify, expand to deal with stdout, output from things other than progress bars, etc
        stderrWrite = sys.stderr.write
        def stderrWriteWithConvert(*args, **kwargs):
            # When armed (doConvert), turn a leading '\r' (progress-bar style
            # rewrite) into a newline so it does not overwrite the test line.
            if stderrWriteWithConvert.doConvert:
                if args[0].startswith('\r'):
                    args = ('\n' + args[0],) + args[1:]
                    stderrWriteWithConvert.didConvert = True
                stderrWriteWithConvert.doConvert = False
            return stderrWrite(*args, **kwargs)
        stderrWriteWithConvert.doConvert = False
        stderrWriteWithConvert.didConvert = False
        sys.stderr.write = stderrWriteWithConvert
        print_('running test class: %s' % color('HEADER', cls.__name__), flush=True)
        # test class start time
        testClassStartTime = time.time()
        testPassCount = 0
        testFailCount = 0
        obj = cls()
        # per test class setup/teardown via context manager
        with cls.setUpClassContext():
            for testName in obj.getTestNames():
                testMethod = obj.__getattribute__(testName)
                print_('\t%-60s' % (color('OKBLUE', testName) + '...'), end='', flush=True)
                stderrWriteWithConvert.doConvert = True
                # test case start time
                testCaseStartTime = time.time()
                try:
                    # per test case setup/teardown via context manager
                    with obj:
                        # the actual test code
                        testMethod()
                    extraSpace = '\t\t' if stderrWriteWithConvert.didConvert else ''
                    print_(color('OKGREEN', extraSpace + 'passed') + ', %10.3f seconds' % (time.time() - testCaseStartTime), flush=True)
                    testPassCount += 1
                except Exception as e:
                    extraSpace = '\t\t' if stderrWriteWithConvert.didConvert else ''
                    print_(color('FAIL', extraSpace + 'failed') + ', %10.3f seconds' % (time.time() - testCaseStartTime), end='\n\n', flush=True)
                    # failslow=False: abort on the first failure; otherwise log
                    # the traceback and keep going.
                    if not failslow:
                        raise e
                    else:
                        print_(color('WARNING', traceback.format_exc()))
                    testFailCount += 1
                finally:
                    stderrWriteWithConvert.didConvert = False
        totalTestTime = time.time() - testClassStartTime
        print_('total %.3f seconds\n' % (totalTestTime))
        return testPassCount,testFailCount,totalTestTime
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def __enter__(self):
        self.setUp()
    def __exit__(self, excType, excValue, traceback):
        self.tearDown()
    #### stuff for enabling the ez creation and teardown of test files
    @classmethod
    def setUpClass(cls):
        # untar archived test data, if any
        if cls.testTarPath is not None:
            testTar = tarfile.open(str(cls.testTarPath))
            # assume that the root dir of the first entity in the tarfile is the containing directory for everything in the tarfile
            cls.testTarUnpackedFilesPath = cls.testTarPath.parent / testTar.getnames()[0].split('/')[0]
            # extract the tarfile
            testTar.extractall(str(cls.testTarPath.parent))
        # write out a test file if the .writeTestFile cls method has been overridden in a subclass
        cls.testOutputWritten = cls.writeTestFile(testOutputPath=cls.testOutputPath)
    @classmethod
    def tearDownClass(cls):
        # clean up the test file written at the start of the test suite, if any
        if cls.testOutputWritten:
            cls.cleanTestFile(testOutputPath=cls.testOutputPath)
        # if any archived test data was unpacked, clean that up
        if cls.testTarUnpackedFilesPath is not None:
            # remove the whole tree of the unpacked tarfile
            shutil.rmtree(str(cls.testTarUnpackedFilesPath))
            # for good measure
            cls.testTarUnpackedFilesPath = None
    @classmethod
    def setUpClassContext(cls):
        """Returns a context manager that runs setUpClass on enter and tearDownClass on exit
        """
        class SetUpClassContext(object):
            def __enter__(self):
                cls.setUpClass()
            # NOTE(review): __exit__ is declared as a @classmethod whose first
            # parameter is named `self` -- it still works because the first
            # three remaining args absorb (excType, excValue, traceback) and
            # the body only uses the closed-over `cls`, but this looks like an
            # accident; confirm before relying on it.
            @classmethod
            def __exit__(self, excType, excValue, traceback):
                cls.tearDownClass()
        return SetUpClassContext()
    @classmethod
    def cleanAll(cls, testOutputRoot, dryrun=True, verbose=True):
        """Removes all files whose name ends in testOutputDelimiter + testOutputSuffix recursively in testOutputRoot
        """
        for pth in (Path(p)/Path(f) for p,ds,fs in os.walk(testOutputRoot) for f in fs):
            if pth.stem.split(cls.testOutputDelimiter)[-1].startswith(cls.testOutputSuffix):
                if dryrun:
                    if verbose:
                        print('dryrun, would have removed: %s' % pth)
                else:
                    pth.unlink()
                    if verbose:
                        print('removed: %s' % pth)
    @classmethod
    def cleanTestFile(cls, testOutputPath, doRaise=False):
        # Best-effort removal: a missing file is ignored unless doRaise is set.
        try:
            Path(str(testOutputPath)).unlink()
        except OSError as e:
            # errno.ENOENT = no such file or directory
            if doRaise or e.errno != errno.ENOENT:
                raise e
    @classmethod
    def writeTestFile(cls, testOutputPath):
        """Child versions should return True if a test file is successfully written out
        """
        return False
| 37.219608 | 206 | 0.622906 |
37e3ddde738e3bd481be64da1055982df624d6a7 | 106,361 | py | Python | salt/minion.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | salt/minion.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | salt/minion.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2021-12-02T15:30:00.000Z | 2021-12-02T15:30:00.000Z | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
from salt.ext.six.moves import range
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.six import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
    '''
    Resolves the master_ip and master_uri options

    Returns a dict with 'master_ip' and 'master_uri'. DNS resolution is
    skipped (and 127.0.0.1 used) for a purely local file client.
    '''
    ret = {}
    check_dns = True
    # A local file client that never talks to a master needs no DNS lookup.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False
    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            # Resolution failed: either retry forever at retry_dns intervals,
            # or fall back to localhost when retries are disabled.
            if opts['retry_dns']:
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        pass
            else:
                ret['master_ip'] = '127.0.0.1'
        except SaltSystemExit:
            # The address is structurally invalid, not merely unresolvable:
            # bail out with a distinctive exit code.
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'
    # Warn when the freshly-resolved IP differs from a previously-cached one.
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                          ret['master_ip'])
            )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret
def get_proc_dir(cachedir):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    cachedir
        Path to the minion cache directory (e.g. /var/cache/salt/minion).

    Returns the full path of the ``proc`` sub-directory.
    '''
    fn_ = os.path.join(cachedir, 'proc')
    try:
        # Attempt the makedirs unconditionally and tolerate EEXIST instead
        # of checking isdir() first: this closes the race where another
        # process creates the directory between the check and the call.
        os.makedirs(fn_)
    except OSError as exc:
        # Re-raise anything that is not "already exists as a directory"
        if exc.errno != errno.EEXIST or not os.path.isdir(fn_):
            raise
    return fn_
def parse_args_and_kwargs(func, args, data=None):
    '''
    Deprecated alias for load_args_and_kwargs(); emits a Boron deprecation
    warning and delegates unchanged.
    '''
    deprecation_notice = (
        'salt.minion.parse_args_and_kwargs() has been renamed to '
        'salt.minion.load_args_and_kwargs(). Please change this function call '
        'before the Boron release of Salt.'
    )
    salt.utils.warn_until('Boron', deprecation_notice)
    return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    func
        The function that will ultimately be invoked; its argspec decides
        which keyword names are acceptable.
    args
        Raw argument list from the publication: plain strings and/or dicts
        tagged with ``__kwarg__: True``.
    data
        Optional publication data; when *func* accepts ``**kwargs`` its
        items are injected as ``__pub_<key>`` keyword arguments.

    Returns an ``(_args, _kwargs)`` tuple.  Raises SaltInvocationError if a
    keyword argument is neither in the argspec nor absorbable by **kwargs.
    '''
    argspec = salt.utils.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []
    for arg in args:
        if isinstance(arg, string_types):
            string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False)  # pylint: disable=W0632
            if string_arg:
                # Don't append the version that was just derived from parse_cli
                # above, that would result in a 2nd call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
                _args.append(arg)
            elif string_kwarg:
                # A raw "key=value" string reached us unparsed; callers are
                # expected to have run parse_input() already.
                salt.utils.warn_until(
                    'Boron',
                    'The list of function args and kwargs should be parsed '
                    'by salt.utils.args.parse_input() before calling '
                    'salt.minion.load_args_and_kwargs().'
                )
                if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
                continue
        # if the arg is a dict with __kwarg__ == True, then its a kwarg
        elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in arg.items():
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
            continue
        else:
            _args.append(arg)
    if invalid_kwargs:
        raise SaltInvocationError(
            'The following keyword arguments are not valid: {0}'
            .format(', '.join(invalid_kwargs))
        )
    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in data.items():
            _kwargs['__pub_{0}'.format(key)] = val
    return _args, _kwargs
class SMinion(object):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts['grains'] = salt.loader.grains(opts)
        self.opts = opts
        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            if isinstance(self.opts['master'], list):
                masters = self.opts['master']
                if self.opts['random_master'] is True:
                    shuffle(masters)
                # Walk the master list until one authenticates, then stop.
                # NOTE(review): if every master fails, the loop falls
                # through and gen_modules() is never called -- confirm
                # whether partially-initialized SMinion is intended here.
                for master in masters:
                    self.opts['master'] = master
                    self.opts.update(resolve_dns(opts))
                    try:
                        self.gen_modules()
                        break
                    except SaltClientError:
                        log.warning(('Attempted to authenticate with master '
                                     '{0} and failed'.format(master)))
                        continue
            else:
                if self.opts['random_master'] is True:
                    log.warning('random_master is True but there is only one master specified. Ignoring.')
                self.opts.update(resolve_dns(opts))
                self.gen_modules(initial_load=True)
        else:
            # Masterless: no DNS resolution or master auth needed
            self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion

        Compiles pillar and (re)builds the execution-module, returner,
        state, renderer and matcher tables.  Bound to sys.reload_modules
        so a remote call can refresh everything in place.
        '''
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
        ).compile_pillar()
        self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
        self.function_errors = self.functions['_errors']
        self.functions.pop('_errors')  # Keep the funcs clean
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
    '''
    Shared plumbing for the minion classes: option storage, the local
    (minion-side) event socket pair, and scheduler evaluation.
    '''
    def __init__(self, opts):
        # Options are expected to be fully resolved by the caller
        self.opts = opts

    def _init_context_and_poller(self):
        # One ZeroMQ context/poller pair per minion process
        self.context = zmq.Context()
        self.poller = zmq.Poller()

    def _prepare_minion_event_system(self):
        '''
        Create the local PUB/PULL event socket pair used for the minion's
        own event bus (local processes push on PULL, minion re-publishes
        on PUB).
        '''
        # Prepare the minion event system
        #
        # Start with the publish socket
        self._init_context_and_poller()
        # Hash the minion id so the socket filename is filesystem-safe
        # regardless of characters in the id.
        # NOTE(review): hash_type(self.opts['id']) assumes a Python 2 str;
        # Python 3 hashlib requires bytes -- confirm before porting.
        hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(self.opts['id']).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        # Remove stale socket files from a previous run before binding
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)
        self.epub_sock = self.context.socket(zmq.PUB)
        if self.opts.get('ipc_mode', '') == 'tcp':
            # TCP loopback mode (e.g. platforms without IPC sockets)
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.check_ipc_path_max_len(epull_uri)
        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )
        # Check to make sure the sock_dir is available, create if not
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)
        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise
                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise
        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        # Securely bind the event sockets
        # IPC sockets are created as filesystem entries, so restrict the
        # umask while binding to keep them owner-only.
        if self.opts.get('ipc_mode', '') != 'tcp':
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            # Always restore the original umask
            if self.opts.get('ipc_mode', '') != 'tcp':
                os.umask(old_umask)

    @staticmethod
    def process_schedule(minion, loop_interval):
        '''
        Evaluate the minion's scheduler and return the (possibly lowered)
        poll interval so scheduled jobs fire on time.
        '''
        try:
            minion.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            # A broken schedule entry must never kill the main loop
            log.error(
                'Exception {0} occurred in scheduled job'.format(exc)
            )
        return loop_interval
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None):
        # Start from the on-disk minion config, then let the caller's opts
        # override any conflicting values.
        self.opts = salt.config.minion_config(opts['conf_file'])
        self.opts.update(opts)
        self.opts['grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty on the master side.
        self.opts['pillar'] = {}
        # Remember which optional subsystems the caller asked for.
        self.whitelist = whitelist
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        (Re)build the execution-module table plus whichever optional
        subsystems (returners, states, renderers, matcher) were requested
        at construction time.  Also exposed as sys.reload_modules.
        '''
        self.functions = salt.loader.minion_mods(
            self.opts,
            whitelist=self.whitelist,
            initial_load=initial_load)
        # Each optional subsystem: (requested?, attribute name, builder)
        optional_loaders = (
            (self.mk_returners, 'returners',
             lambda: salt.loader.returners(self.opts, self.functions)),
            (self.mk_states, 'states',
             lambda: salt.loader.states(self.opts, self.functions)),
            (self.mk_rend, 'rend',
             lambda: salt.loader.render(self.opts, self.functions)),
            (self.mk_matcher, 'matcher',
             lambda: Matcher(self.opts, self.functions)),
        )
        for wanted, attr, build in optional_loaders:
            if wanted:
                setattr(self, attr, build())
        self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    # timeout for one of the minions to auth with a master
    MINION_CONNECT_TIMEOUT = 5

    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)

    def minions(self):
        '''
        Return a dict of minion generators bound to the tune_in method
        dict of master -> minion_mapping, the mapping contains:

            opts: options used to create the minion
            last: last auth attempt time
            auth_wait: time to wait for next auth attempt
            minion: minion object
            generator: generator function (non-blocking tune_in)

        A master that fails to connect keeps its entry but without the
        'minion'/'generator' keys; tune_in() retries it with backoff.
        '''
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        ret = {}
        for master in set(self.opts['master']):
            # Each master gets its own opts copy with itself as 'master'
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            ret[master] = {'opts': s_opts,
                           'last': time.time(),
                           'auth_wait': s_opts['acceptance_wait_time']}
            try:
                minion = Minion(s_opts, self.MINION_CONNECT_TIMEOUT, False)
                ret[master]['minion'] = minion
                ret[master]['generator'] = minion.tune_in_no_block()
            except SaltClientError:
                log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(master))
        return ret

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._prepare_minion_event_system()
        self.poller.register(self.epull_sock, zmq.POLLIN)

        # Prepare the minion generators
        minions = self.minions()
        loop_interval = int(self.opts['loop_interval'])
        auth_wait = self.opts['acceptance_wait_time']
        max_wait = self.opts['acceptance_wait_time_max']

        while True:
            package = None

            # Evaluate scheduled jobs on every connected minion
            for minion in minions.values():
                if isinstance(minion, dict):
                    minion = minion['minion']
                if not hasattr(minion, 'schedule'):
                    continue
                loop_interval = self.process_schedule(minion, loop_interval)

            # Drain one event (if any) from the local event bus
            socks = dict(self.poller.poll(1))
            if socks.get(self.epull_sock) == zmq.POLLIN:
                try:
                    package = self.epull_sock.recv(zmq.NOBLOCK)
                except Exception:
                    pass

            masters = list(minions.keys())
            shuffle(masters)
            # Do stuff per minion that we have
            for master in masters:
                minion = minions[master]

                # if we haven't connected yet, lets attempt some more.
                # make sure to keep separate auth_wait times, since these
                # are separate masters
                if 'generator' not in minion:
                    if time.time() - minion['auth_wait'] > minion['last']:
                        minion['last'] = time.time()
                        if minion['auth_wait'] < max_wait:
                            minion['auth_wait'] += auth_wait
                        try:
                            t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False)
                            minions[master]['minion'] = t_minion
                            minions[master]['generator'] = t_minion.tune_in_no_block()
                            minions[master]['auth_wait'] = self.opts['acceptance_wait_time']
                        except SaltClientError:
                            log.error('Error while bringing up minion for multi-master. Is master {0} responding?'.format(master))
                            continue
                    else:
                        continue

                # run scheduled jobs if you have them
                loop_interval = self.process_schedule(minion['minion'], loop_interval)

                # if you have an event to handle, do it on a single minion
                # (first one to not throw an exception)
                if package:
                    # If we need to expand this, we may want to consider a specific header
                    # or another approach entirely.
                    if package.startswith('_minion_mine'):
                        # Mine data is relevant to every master: deliver the
                        # event to each minion in the map, not just the one
                        # currently being iterated.
                        for multi_minion in minions:
                            try:
                                minions[multi_minion]['minion'].handle_event(package)
                            except Exception:
                                pass
                    else:
                        try:
                            minion['minion'].handle_event(package)
                            # Re-publish on the local event bus *before*
                            # clearing the package; sending after clearing
                            # would hand None to zmq and always raise.
                            self.epub_sock.send(package)
                            package = None
                        except Exception:
                            pass

                # have the Minion class run anything it has to run
                next(minion['generator'])
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict

        opts
            Minion configuration dict; grains are loaded into it here.
        timeout
            Seconds to wait while authenticating with a master.
        safe
            Passed through to authenticate(): wait-and-retry on failure.
        '''
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        opts['grains'] = salt.loader.grains(opts)
        # evaluate the master to connect to and authenticate with it
        opts['master'] = self.eval_master(opts,
                                          timeout,
                                          safe)
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment'],
            funcs=self.functions
        ).compile_pillar()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.proc_dir = get_proc_dir(opts['cachedir'])
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        # add default scheduling jobs to the minions scheduler
        if 'mine.update' in self.functions:
            log.info('Added mine.update to scheduler')
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2
                }
            })
        # add master_alive job if enabled
        if self.opts['master_alive_interval'] > 0:
            self.schedule.add_job({
                '__master_alive':
                {
                    'function': 'status.master',
                    'seconds': opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'kwargs': {'master_ip': self.opts['master'],
                               'connected': True}
                }
            })
        self.grains_cache = self.opts['grains']
        # store your hexid to subscribe to zmq, hash since zmq filters are prefix
        # matches this way we can avoid collisions
        # NOTE(review): sha1() requires bytes on Python 3; this assumes a
        # Python 2 str minion id -- confirm before porting.
        self.hexid = hashlib.sha1(self.opts['id']).hexdigest()
        if 'proxy' in self.opts['pillar']:
            log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
                                                                                 self.opts['pillar']['proxy']))
            for p in self.opts['pillar']['proxy']:
                log.debug('Starting {0} proxy.'.format(p))
                # Fork one child per proxy; the parent continues the loop,
                # the child runs the proxy minion and never returns (it
                # exits via clean_die when the proxy stops).
                pid = os.fork()
                if pid > 0:
                    continue
                else:
                    proxyminion = salt.ProxyMinion()
                    proxyminion.start(self.opts['pillar']['proxy'][p])
                    self.clean_die(signal.SIGTERM, None)
        else:
            log.debug('I am {0} and I am not supposed to start any proxies. '
                      '(Likely not a problem)'.format(self.opts['id']))

    # __init__() from MinionBase is called in Minion.eval_master()
    def eval_master(self,
                    opts,
                    timeout=60,
                    safe=True,
                    failed=False):
        '''
        Evaluates and returns the current master address. In standard mode, just calls
        authenticate() with the given master address.

        With master_type=func evaluates the current master address from the given
        module and then calls authenticate().

        With master_type=failover takes the list of masters and loops through them.
        The first one that allows the minion to connect is used to authenticate() and
        then returned. If this function is called outside the minions initialization
        phase (for example from the minions main event-loop when a master connection
        loss was detected), 'failed' should be set to True. The current
        (possibly failed) master will then be removed from the list of masters.
        '''
        # check if master_type was altered from its default
        if opts['master_type'] != 'str':
            # check for a valid keyword
            if opts['master_type'] == 'func':
                # split module and function and try loading the module
                mod, fun = opts['master'].split('.')
                try:
                    master_mod = salt.loader.raw_mod(opts, mod, fun)
                    if not master_mod:
                        raise TypeError
                    # we take whatever the module returns as master address
                    opts['master'] = master_mod[mod + '.' + fun]()
                except TypeError:
                    msg = ('Failed to evaluate master address from '
                           'module \'{0}\''.format(opts['master']))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                log.info('Evaluated master from module: {0}'.format(master_mod))
            # if failover is set, master has to be of type list
            elif opts['master_type'] == 'failover':
                if isinstance(opts['master'], list):
                    log.info('Got list of available master addresses:'
                             ' {0}'.format(opts['master']))
                    if opts['master_shuffle']:
                        shuffle(opts['master'])
                elif opts['__role'] == 'syndic':
                    log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
                # if failed=True, the minion was previously connected
                # we're probably called from the minions main-event-loop
                # because a master connection loss was detected. remove
                # the possibly failed master from the list of masters.
                elif failed:
                    log.info('Removing possibly failed master {0} from list of'
                             ' masters'.format(opts['master']))
                    # create new list of master with the possibly failed one removed
                    opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
                else:
                    msg = ('master_type set to \'failover\' but \'master\' '
                           'is not of type list but of type '
                           '{0}'.format(type(opts['master'])))
                    log.error(msg)
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            else:
                msg = ('Invalid keyword \'{0}\' for variable '
                       '\'master_type\''.format(opts['master_type']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # if we have a list of masters, loop through them and be
        # happy with the first one that allows us to connect
        if isinstance(opts['master'], list):
            conn = False
            # shuffle the masters and then loop through them
            local_masters = copy.copy(opts['master'])
            for master in local_masters:
                opts['master'] = master
                opts.update(resolve_dns(opts))
                # MinionBase.__init__ is (re)run here for each candidate
                super(Minion, self).__init__(opts)
                # on first run, update self.opts with the whole master list
                # to enable a minion to re-use old masters if they get fixed
                if 'master_list' not in self.opts:
                    self.opts['master_list'] = local_masters
                try:
                    # authenticate() returns 'full' when the master is at
                    # capacity and rejected the connection
                    if self.authenticate(timeout, safe) != 'full':
                        conn = True
                        break
                except SaltClientError:
                    msg = ('Master {0} could not be reached, trying '
                           'next master (if any)'.format(opts['master']))
                    log.info(msg)
                    continue
            if not conn:
                # NOTE(review): this path returns None implicitly -- the
                # caller stores it as opts['master']; confirm intended.
                self.connected = False
                msg = ('No master could be reached or all masters denied '
                       'the minions connection attempt.')
                log.error(msg)
            else:
                self.connected = True
                return opts['master']
        # single master sign in
        else:
            opts.update(resolve_dns(opts))
            super(Minion, self).__init__(opts)
            if self.authenticate(timeout, safe) == 'full':
                self.connected = False
                msg = ('master {0} rejected the minions connection because too '
                       'many minions are already connected.'.format(opts['master']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            else:
                self.connected = True
                return opts['master']
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
    def _load_modules(self, force_refresh=False):
        '''
        Return the functions and the returners loaded up from the loader
        module

        force_refresh
            Passed to the grains loader to force a grains recalculation.

        Returns a ``(functions, returners, errors)`` tuple.
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            # NOTE(review): get_memory_info() is the old psutil spelling
            # (renamed memory_info() in psutil 2.x) -- confirm the pinned
            # psutil version before upgrading.
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            # The cap is current usage plus the configured headroom
            mem_limit = rss + vms + self.opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.get('modules_max_memory', -1) > 0:
            # The option is set but cannot be enforced on this system
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')
        self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
        if self.opts.get('multimaster', False):
            # Give each master connection its own opts copy so per-master
            # __opts__ re-injection (see _handle_decoded_payload) works
            s_opts = copy.copy(self.opts)
            functions = salt.loader.minion_mods(s_opts)
        else:
            functions = salt.loader.minion_mods(self.opts)
        returners = salt.loader.returners(self.opts, functions)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')
        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
        return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load)
except Exception:
log.info("fire_master failed: {0}".format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, checks the signature if pub signatures
        are turned on, decrypts it, and runs the encapsulated instructions

        load
            The encrypted payload from the master publisher.
        sig
            Optional detached signature; verified against the cached master
            public key when sign_pub_messages is enabled.
        '''
        # Verify that the signature is valid
        master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
        if sig and self.functions['config.get']('sign_pub_messages'):
            if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
                raise AuthenticationError('Message signature failed to validate.')
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            # decryption of the payload failed, try to re-auth but wait
            # random seconds if set in config with random_reauth_delay
            if 'random_reauth_delay' in self.opts:
                reauth_delay = randint(0, float(self.opts['random_reauth_delay']))
                # This mitigates the issue wherein a long-running job might not return
                # on a master key rotation. However, new commands issued during the re-auth
                # splay period will still fail to return.
                if not salt.utils.minion.running(self.opts):
                    log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
                    time.sleep(reauth_delay)
                else:
                    log.warning('Ignoring re-auth delay because jobs are running')
            # Re-auth against the master, then retry the decryption once
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'arg' not in data:
            return
        # Verify that the publication applies to this minion
        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in data:
            match_func = getattr(self.matcher,
                                 '{0}_match'.format(data['tgt_type']), None)
            if match_func is None:
                # Unknown matcher name: silently drop the publication
                return
            if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                # These matchers accept a configurable key delimiter
                delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(data['tgt'], delimiter=delimiter):
                    return
            elif not match_func(data['tgt']):
                return
        else:
            if not self.matcher.glob_match(data['tgt']):
                return
        # If the minion does not have the function, don't execute,
        # this prevents minions that could not load a minion module
        # from returning a predictable exception
        #if data['fun'] not in self.functions:
        #    return
        if 'user' in data:
            log.info(
                'User {0[user]} Executing command {0[fun]} with jid '
                '{0[jid]}'.format(data)
            )
        else:
            log.info(
                'Executing command {0[fun]} with jid {0[jid]}'.format(data)
            )
        log.debug('Command details {0}'.format(data))
        self._handle_decoded_payload(data)
    def _handle_pub(self, load):
        '''
        Handle public key payloads

        Currently a no-op placeholder: 'pub'-encrypted publications are
        accepted and discarded.
        '''
        pass
    def _handle_clear(self, load):
        '''
        Handle un-encrypted transmissions

        Currently a no-op placeholder: 'clear' publications are accepted
        and discarded.
        '''
        pass
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Dispatches a decrypted publication to a worker (process or thread)
        running _thread_return / _thread_multi_return.
        '''
        if isinstance(data['fun'], string_types):
            if data['fun'] == 'sys.reload_modules':
                # Reload in the dispatcher context so subsequent jobs (and
                # the scheduler) see the refreshed module tables
                self.functions, self.returners, self.function_errors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            target = Minion._thread_multi_return
        else:
            target = Minion._thread_return
        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        # If we are running in multi-master mode, re-inject opts into module funcs
        if instance.opts.get('multimaster', False):
            for func in instance.functions:
                sys.modules[instance.functions[func].__module__].__opts__ = self.opts
        if self.opts['multiprocessing']:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            process = multiprocessing.Process(
                target=target, args=(instance, self.opts, data)
            )
        else:
            process = threading.Thread(
                target=target, args=(instance, self.opts, data),
                name=data['jid']
            )
        process.start()
        if not sys.platform.startswith('win'):
            # NOTE(review): this joins threads as well as processes, which
            # serializes jobs on the threading path; the multiprocessing
            # child daemonizes itself (see _thread_return) so its join is
            # short -- confirm the threading behavior is intentional.
            process.join()
    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        minion_instance
            The dispatching Minion, or None on Windows (the child process
            rebuilds it from opts because instances cannot be pickled).
        opts
            The minion option dict.
        data
            The decoded publication (jid, fun, arg, ret, ...).
        '''
        # this seems awkward at first, but it's a workaround for Windows
        # multiprocessing communication.
        if not minion_instance:
            minion_instance = cls(opts)
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing']:
            # Detach the job process from the dispatcher
            salt.utils.daemonize_if(opts)
        salt.utils.appendproctitle(data['jid'])
        # Record the running job (pid + publication) in the proc dir so
        # other tools can see what this minion is executing
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID {0}'.format(sdata['pid']))
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        if function_name in minion_instance.functions:
            try:
                func = minion_instance.functions[data['fun']]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
                # Reset the module's retcode before the call so a stale
                # value from a previous job is not reported
                sys.modules[func.__module__].__context__['retcode'] = 0
                return_data = func(*args, **kwargs)
                if isinstance(return_data, types.GeneratorType):
                    # Generator returns are streamed to the master as
                    # per-item progress events, then aggregated into the
                    # final return (dict items merge, others append)
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data
                # The module may have set a retcode in its __context__
                ret['retcode'] = sys.modules[func.__module__].__context__.get(
                    'retcode',
                    0
                )
                ret['success'] = True
            except CommandNotFoundError as exc:
                # A shell command the module relies on is missing
                msg = 'Command required for {0!r} not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
            except CommandExecutionError as exc:
                log.error(
                    'A command in {0!r} had a problem: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing {0!r}: {1}'.format(
                        function_name,
                        exc
                    ),
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing {0!r}: {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
            except TypeError as exc:
                # Most commonly bad/missing arguments to the function
                msg = ('TypeError encountered executing {0}: {1}. See '
                       'debug log for more info.').format(function_name, exc)
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
            except Exception:
                # Catch-all: report the traceback as the job return rather
                # than letting the worker die silently
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
        else:
            # Function not loaded; surface any module load errors to help
            # the user diagnose why
            ret['return'] = '{0!r} is not available.'.format(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
            ret['out'] = 'nested'
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            # Multi-master: record which master issued the job
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        minion_instance._return_pub(ret)
        if data['ret']:
            # Additionally deliver the return to any requested returners
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job {0} {1}'.format(
                            data['jid'],
                            exc
                        )
                    )
                    log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
    def _return_pub(self, ret, ret_cmd='_return'):
        '''
        Return the data from the executed command to the master server

        :param dict ret: the job result payload; expected to carry ``jid``
            and ``fun`` (or their ``__jid__``/``__fun__`` variants)
        :param str ret_cmd: master-side command to invoke -- ``'_return'``
            for normal jobs, ``'_syndic_return'`` when forwarding
            aggregated syndic results
        :return: whatever the transport channel returns, or ``''`` on a
            request timeout
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Remove the job's proc file so the job stops showing up as
            # currently running.
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: {0}'.format(jid))
        channel = salt.transport.Channel.factory(self.opts)
        if ret_cmd == '_syndic_return':
            load = {'cmd': ret_cmd,
                    'id': self.opts['id'],
                    'jid': jid,
                    'fun': fun,
                    'load': ret.get('__load__')}
            load['return'] = {}
            # Forward only plain result keys; '__'-prefixed entries are
            # syndic bookkeeping already captured above.
            for key, value in ret.items():
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in list(ret.items()):
                load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], string_types):
                    load['out'] = ret['out']
                else:
                    log.error('Invalid outputter {0}. This is likely a bug.'
                              .format(ret['out']))
            else:
                # No explicit outputter: fall back to the one declared on
                # the executed function, if any.
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, string_types):
                        load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            fn_ = os.path.join(
                self.opts['cachedir'],
                'minion_jobs',
                load['jid'],
                'return.p')
            jdir = os.path.dirname(fn_)
            if not os.path.isdir(jdir):
                os.makedirs(jdir)
            salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
        try:
            ret_val = channel.send(load)
        except SaltReqTimeoutError:
            msg = ('The minion failed to return the job information for job '
                   '{0}. This is often due to the master being shut down or '
                   'overloaded. If the master is running consider increasing '
                   'the worker_threads value.').format(jid)
            log.warn(msg)
            return ''
        log.trace('ret_val = {0}'.format(ret_val))
        return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE'):
self.socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
self.socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def _set_reconnect_ivl(self):
recon_delay = self.opts['recon_default']
if self.opts['recon_randomize']:
recon_delay = randint(self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max']
)
log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
self.opts['recon_default'],
self.opts['recon_default'] + self.opts['recon_max'],
recon_delay)
)
log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
self.opts['recon_default'] + self.opts['recon_max'])
)
self.socket.setsockopt(
zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
)
def _set_ipv4only(self):
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def _setsockopts(self):
if self.opts['zmq_filtering']:
# TODO: constants file for "broadcast"
self.socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
self.socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
else:
self.socket.setsockopt(zmq.SUBSCRIBE, '')
self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
self._set_ipv4only()
self._set_reconnect_ivl_max()
self._set_tcp_keepalive()
@property
def master_pub(self):
'''
Return the master publish port
'''
return 'tcp://{ip}:{port}'.format(ip=self.opts['master_ip'],
port=self.publish_port)
    def authenticate(self, timeout=60, safe=True):
        '''
        Authenticate with the master, this method breaks the functional
        paradigm, it will update the master information from a fresh sign
        in, signing in can occur as often as needed to keep up with the
        revolving master AES key.

        :param int timeout: per-attempt sign-in timeout in seconds
        :param bool safe: passed through to ``auth.sign_in``
        '''
        log.debug(
            'Attempting to authenticate with the Salt Master at {0}'.format(
                self.opts['master_ip']
            )
        )
        auth = salt.crypt.Auth(self.opts)
        self.tok = auth.gen_token('salt')
        acceptance_wait_time = self.opts['acceptance_wait_time']
        acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
        if not acceptance_wait_time_max:
            # Unset/zero max means no back-off growth beyond the base wait.
            acceptance_wait_time_max = acceptance_wait_time
        while True:
            # sign_in returns credentials on success, 'retry' while the key
            # is pending acceptance, or 'full' (returned to the caller).
            creds = auth.sign_in(timeout, safe)
            if creds == 'full':
                return creds
            elif creds != 'retry':
                log.info('Authentication with master at {0} successful!'.format(self.opts['master_ip']))
                break
            log.info('Waiting for minion key to be accepted by the master.')
            if acceptance_wait_time:
                log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
                time.sleep(acceptance_wait_time)
            if acceptance_wait_time < acceptance_wait_time_max:
                # Back-off doubles each retry, capped by the check above.
                acceptance_wait_time += acceptance_wait_time
                log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
        self.aes = creds['aes']
        if self.opts.get('syndic_master_publish_port'):
            self.publish_port = self.opts.get('syndic_master_publish_port')
        else:
            self.publish_port = creds['publish_port']
        self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
def module_refresh(self, force_refresh=False):
'''
Refresh the functions and returners.
'''
self.functions, self.returners, _ = self._load_modules(force_refresh)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Refresh the functions and returners.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
if func == 'delete':
self.schedule.delete_job(name)
elif func == 'add':
self.schedule.add_job(schedule)
elif func == 'modify':
self.schedule.modify_job(name, schedule, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, where)
elif func == 'run_job':
self.schedule.run_job(name, where)
elif func == 'disable_job':
self.schedule.disable_job(name, where)
elif func == 'reload':
self.schedule.reload(schedule)
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
    def clean_die(self, signum, frame):
        '''
        Python does not handle the SIGTERM cleanly, if it is signaled exit
        the minion process cleanly

        :param signum: signal number delivered (unused)
        :param frame: current stack frame (unused)
        '''
        # Flag the main loop to stop, then exit so the signal results in a
        # clean shutdown instead of a traceback.
        self._running = False
        exit(0)
    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        # _running is a tri-state: None (never started), True (running),
        # False (scheduled to stop via clean_die/destroy).
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This {0} was scheduled to stop. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return
        elif self._running is True:
            log.error(
                'This {0} is already running. Not running '
                '{0}.tune_in()'.format(self.__class__.__name__)
            )
            return
        try:
            log.info(
                '{0} is starting as user \'{1}\''.format(
                    self.__class__.__name__,
                    salt.utils.get_user()
                )
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting {0}'.format(
                    self.__class__.__name__
                ),
                exc_info=err
            )
def _mine_send(self, package):
'''
Send mine data to the master
'''
sreq = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
ret = sreq.send(load)
return ret
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)

        Dispatches on the event tag prefix: module/pillar/schedule/grains
        refreshes, environment changes, mine forwarding, forwarding events
        to the master, and master connectivity state changes.
        '''
        log.debug('Handling event {0!r}'.format(package))
        if package.startswith('module_refresh'):
            self.module_refresh()
        elif package.startswith('pillar_refresh'):
            self.pillar_refresh()
        elif package.startswith('manage_schedule'):
            self.manage_schedule(package)
        elif package.startswith('grains_refresh'):
            # Only recompile pillar when the grains actually changed since
            # the last refresh.
            if self.grains_cache != self.opts['grains']:
                self.pillar_refresh(force_refresh=True)
                self.grains_cache = self.opts['grains']
        elif package.startswith('environ_setenv'):
            self.environ_setenv(package)
        elif package.startswith('_minion_mine'):
            self._mine_send(package)
        elif package.startswith('fire_master'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
        elif package.startswith('__master_disconnected'):
            tag, data = salt.utils.event.MinionEvent.unpack(package)
            # if the master disconnect event is for a different master, raise an exception
            # NOTE(review): bare Exception() with no message -- the caller
            # only logs it; a specific exception type would be clearer.
            if data['master'] != self.opts['master']:
                raise Exception()
            if self.connected:
                # we are not connected anymore
                self.connected = False
                # modify the scheduled job to fire only on reconnect
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master_ip': self.opts['master'],
                               'connected': False}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)
                log.info('Connection to master {0} lost'.format(self.opts['master']))
                if self.opts['master_type'] == 'failover':
                    log.info('Trying to tune in to next master from master-list')
                    # if eval_master finds a new master for us, self.connected
                    # will be True again on successfull master authentication
                    self.opts['master'] = self.eval_master(opts=self.opts,
                                                           failed=True)
                    if self.connected:
                        # re-init the subsystems to work with the new master
                        log.info('Re-initialising subsystems for new '
                                 'master {0}'.format(self.opts['master']))
                        # Drop the old sockets/poller and rebuild the zmq
                        # plumbing against the freshly selected master.
                        del self.socket
                        del self.context
                        del self.poller
                        self._init_context_and_poller()
                        self.socket = self.context.socket(zmq.SUB)
                        self._set_reconnect_ivl()
                        self._setsockopts()
                        self.socket.connect(self.master_pub)
                        self.poller.register(self.socket, zmq.POLLIN)
                        self.poller.register(self.epull_sock, zmq.POLLIN)
                        self._fire_master_minion_start()
                        log.info('Minion is ready to receive requests!')
                        # update scheduled job to run with the new master addr
                        schedule = {
                            'function': 'status.master',
                            'seconds': self.opts['master_alive_interval'],
                            'jid_include': True,
                            'maxrunning': 2,
                            'kwargs': {'master_ip': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name='__master_alive',
                                                 schedule=schedule)
        elif package.startswith('__master_connected'):
            # handle this event only once. otherwise it will pollute the log
            if not self.connected:
                log.info('Connection to master {0} re-established'.format(self.opts['master']))
                self.connected = True
                # modify the __master_alive job to only fire,
                # if the connection is lost again
                schedule = {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'kwargs': {'master_ip': self.opts['master'],
                               'connected': True}
                }
                self.schedule.modify_job(name='__master_alive',
                                         schedule=schedule)
    # Main Minion Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the minion
        :rtype : None

        Sets up the master SUB socket plus the local event sockets, fires
        the start event, then polls forever: running scheduled jobs,
        handling master publications, local events, and optional keepalive
        pings.
        '''
        self._pre_tune()
        # Properly exit if a SIGTERM is signalled
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
        self._prepare_minion_event_system()
        self.socket = self.context.socket(zmq.SUB)
        self._set_reconnect_ivl()
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self.poller.register(self.epull_sock, zmq.POLLIN)
        self._fire_master_minion_start()
        log.info('Minion is ready to receive requests!')
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        salt.utils.enable_ctrl_logoff_handler()
        # On first startup execute a state run if configured to do so
        self._state_run()
        loop_interval = int(self.opts['loop_interval'])
        try:
            if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
                if self.opts['grains_refresh_every'] > 1:
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minutes.'.format(
                            self.opts['grains_refresh_every'])
                    )
                else:  # Clean up minute vs. minutes in log message
                    log.debug(
                        'Enabling the grains refresher. Will run every {0} minute.'.format(
                            self.opts['grains_refresh_every'])
                    )
                self._refresh_grains_watcher(
                    abs(self.opts['grains_refresh_every'])
                )
        except Exception as exc:
            log.error(
                'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                    exc)
            )
        # Optional periodic master ping, configured in minutes.
        ping_interval = self.opts.get('ping_interval', 0) * 60
        ping_at = None
        while self._running is True:
            loop_interval = self.process_schedule(self, loop_interval)
            try:
                socks = self._do_poll(loop_interval)
                if ping_interval > 0:
                    # Any activity (or first pass) pushes the next ping out;
                    # only ping after a full quiet interval.
                    if socks or not ping_at:
                        ping_at = time.time() + ping_interval
                    if ping_at < time.time():
                        log.debug('Ping master')
                        self._fire_master('ping', 'minion_ping')
                        ping_at = time.time() + ping_interval
                self._do_socket_recv(socks)
                # Check the event system
                if socks.get(self.epull_sock) == zmq.POLLIN:
                    package = self.epull_sock.recv(zmq.NOBLOCK)
                    try:
                        self.handle_event(package)
                        self.epub_sock.send(package)
                    except Exception:
                        log.debug('Exception while handling events', exc_info=True)
                # Add an extra fallback in case a forked process leeks through
                multiprocessing.active_children()
            except zmq.ZMQError as exc:
                # The interrupt caused by python handling the
                # SIGCHLD. Throws this error with errno == EINTR.
                # Nothing to receive on the zmq socket throws this error
                # with EAGAIN.
                # Both are safe to ignore
                if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                    log.critical('Unexpected ZMQError while polling minion',
                                 exc_info=True)
                continue
            except SaltClientError:
                raise
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence

        This is a generator: it yields True after each recoverable polling
        error so an external driver can interleave its own work between
        iterations.
        '''
        self._pre_tune()
        self._init_context_and_poller()
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self._fire_master_minion_start()
        loop_interval = int(self.opts['loop_interval'])
        # On first startup execute a state run if configured to do so
        self._state_run()
        while self._running is True:
            try:
                socks = self._do_poll(loop_interval)
                self._do_socket_recv(socks)
                # Check the event system
            except zmq.ZMQError:
                # If a zeromq error happens recover
                yield True
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
                yield True
def _do_poll(self, loop_interval):
log.trace('Check main poller timeout {0}'.format(loop_interval))
return dict(self.poller.poll(
loop_interval * 1000)
)
def _do_socket_recv(self, socks):
if socks.get(self.socket) == zmq.POLLIN:
# topic filtering is done at the zmq level, so we just strip it
messages = self.socket.recv_multipart(zmq.NOBLOCK)
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = self.serial.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
payload = self.serial.loads(messages[1])
else:
raise Exception(('Invalid number of messages ({0}) in zeromq pub'
'message from master').format(len(messages_len)))
log.trace('Handling payload')
self._handle_payload(payload)
    def destroy(self):
        '''
        Tear down the minion

        Closes every registered poller socket, the local event sockets,
        the master SUB socket, and finally the zmq context.
        '''
        self._running = False
        if getattr(self, 'poller', None) is not None:
            # poller.sockets is a dict on some pyzmq versions and a list
            # of (socket, flags) pairs on others (presumably version
            # dependent) -- handle both shapes.
            if isinstance(self.poller.sockets, dict):
                for socket in self.poller.sockets.keys():
                    if socket.closed is False:
                        socket.close()
                    self.poller.unregister(socket)
            else:
                for socket in self.poller.sockets:
                    if socket[0].closed is False:
                        socket[0].close()
                    self.poller.unregister(socket[0])
        if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
            self.epub_sock.close()
        if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
            self.epull_sock.close()
        if hasattr(self, 'socket') and self.socket.closed is False:
            self.socket.close()
        if hasattr(self, 'context') and self.context.closed is False:
            self.context.term()
    def __del__(self):
        # Best-effort cleanup of sockets/context when the minion object is
        # garbage collected; destroy() is safe to call repeatedly.
        self.destroy()
class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.

    It subscribes to the upstream master's publish socket and re-publishes
    every job to its own local minions, aggregating their returns and
    events back upstream.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)

    def _handle_aes(self, load, sig=None):
        '''
        Takes the AES encrypted load, decrypts it, and runs the encapsulated
        instructions
        '''
        # If the AES authentication has changed, re-authenticate
        try:
            data = self.crypticle.loads(load)
        except AuthenticationError:
            self.authenticate()
            data = self.crypticle.loads(load)
        # Verify that the publication is valid
        if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
                or 'arg' not in data:
            return
        # Decrement the hop counter so the job eventually stops being
        # forwarded down a chain of syndics.
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        if 'user' in data:
            log.debug(
                'User {0[user]} Executing syndic command {0[fun]} with '
                'jid {0[jid]}'.format(
                    data
                )
            )
        else:
            log.debug(
                'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
                    data
                )
            )
        log.debug('Command details: {0}'.format(data))
        self._handle_decoded_payload(data)

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}
        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]
        # Send out the publication
        self.local.pub(data['tgt'],
                       data['fun'],
                       data['arg'],
                       data['tgt_type'],
                       data['ret'],
                       data['jid'],
                       data['to'],
                       user=data.get('user', ''),
                       **kwargs)

    def _setsockopts(self):
        # no filters for syndication masters, unless we want to maintain a
        # list of all connected minions and update the filter
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        self._set_reconnect_ivl_max()
        self._set_tcp_keepalive()
        self._set_ipv4only()

    def _fire_master_syndic_start(self):
        # Send an event to the master that the minion is live, on both the
        # legacy tag and the namespaced syndic/<id>/start tag.
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            'syndic_start'
        )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
        )

    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence

        Generator: yields True after each recoverable error so an external
        driver (e.g. MultiSyndic) can interleave work.
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self._init_context_and_poller()
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        loop_interval = int(self.opts['loop_interval'])
        self._fire_master_syndic_start()
        while True:
            try:
                socks = dict(self.poller.poll(loop_interval * 1000))
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
            except zmq.ZMQError:
                yield True
            except Exception:
                log.critical(
                    'An exception occurred while polling the minion',
                    exc_info=True
                )
                yield True

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        signal.signal(signal.SIGTERM, self.clean_die)
        log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
        self._init_context_and_poller()
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        self.local.opts['interface'] = self._syndic_interface
        # register the event sub to the poller
        self.poller.register(self.local.event.sub)
        # Start with the publish socket
        # Share the poller with the event object
        self.socket = self.context.socket(zmq.SUB)
        self._setsockopts()
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        # Send an event to the master that the minion is live
        self._fire_master_syndic_start()
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}
                if socks.get(self.socket) == zmq.POLLIN:
                    self._process_cmd_socket()
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()
                if self.event_forward_timeout is not None and \
                        self.event_forward_timeout < time.time():
                    self._forward_events()
            # We don't handle ZMQErrors like the other minions
            # I've put explicit handling around the receive calls
            # in the process_*_socket methods. If we see any other
            # errors they may need some kind of handling so log them
            # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_cmd_socket(self):
        try:
            messages = self.socket.recv_multipart(zmq.NOBLOCK)
            messages_len = len(messages)
            idx = None
            if messages_len == 1:
                idx = 0
            elif messages_len == 2:
                idx = 1
            else:
                raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
            payload = self.serial.loads(messages[idx])
        except zmq.ZMQError as e:
            # Swallow errors for bad wakeups or signals needing processing
            if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
                raise
            # Fix: nothing was received, so 'payload' is unbound -- return
            # instead of falling through to an UnboundLocalError below.
            return
        log.trace('Handling payload')
        self._handle_payload(payload)

    def _reset_event_aggregation(self):
        # jids: per-jid aggregated returns; raw_events: non-job events;
        # event_forward_timeout: deadline for flushing both upstream.
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    def _process_event_socket(self):
        # Drain local events, but never for longer than the configured
        # processing budget, so the main loop stays responsive.
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            if self.event_forward_timeout is None:
                # First buffered event starts the forward deadline clock.
                self.event_forward_timeout = (
                    time.time() + self.opts['syndic_event_forward_timeout']
                )
            if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                if 'master_id' in event['data']:
                    jdict['master_id'] = event['data']['master_id']
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid], '_syndic_return')
        self._reset_event_aggregation()

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        self.poller = None
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~60s attempting to re-auth
with the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
def __init__(self, opts):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# create all of the syndics you need
self.master_syndics = {}
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self.master_syndics[master] = {'opts': s_opts,
'auth_wait': s_opts['acceptance_wait_time'],
'dead_until': 0}
self._connect_to_master(master)
# TODO: do we need all of this?
def _connect_to_master(self, master):
'''
Attempt to connect to master, including back-off for each one
return boolean of wether you connected or not
'''
if master not in self.master_syndics:
log.error('Unable to connect to {0}, not in the list of masters'.format(master))
return False
minion = self.master_syndics[master]
# if we need to be dead for a while, stay that way
if minion['dead_until'] > time.time():
return False
if time.time() - minion['auth_wait'] > minion.get('last', 0):
try:
t_minion = Syndic(minion['opts'],
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
)
self.master_syndics[master]['syndic'] = t_minion
self.master_syndics[master]['generator'] = t_minion.tune_in_no_block()
self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time']
self.master_syndics[master]['dead_until'] = 0
return True
except SaltClientError:
log.error('Error while bring up minion for multi-syndic. Is master {0} responding?'.format(master))
# re-use auth-wait as backoff for syndic
minion['dead_until'] = time.time() + minion['auth_wait']
if minion['auth_wait'] < self.opts['acceptance_wait_time_max']:
minion['auth_wait'] += self.opts['acceptance_wait_time']
return False
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_dict in self.iter_master_options(master_id):
if 'syndic' not in syndic_dict:
continue
if syndic_dict['dead_until'] > time.time():
log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master_id))
continue
try:
getattr(syndic_dict['syndic'], func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
# re-use auth-wait as backoff for syndic
syndic_dict['dead_until'] = time.time() + syndic_dict['auth_wait']
if syndic_dict['auth_wait'] < self.opts['acceptance_wait_time_max']:
syndic_dict['auth_wait'] += self.opts['acceptance_wait_time']
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self.master_syndics.keys())
shuffle(masters)
if master_id not in self.master_syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self.master_syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
# Share the poller with the event object
self.poller = self.local.event.poller
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
loop_interval = int(self.opts['loop_interval'])
self._reset_event_aggregation()
while True:
try:
# Do all the maths in seconds
timeout = loop_interval
if self.event_forward_timeout is not None:
timeout = min(timeout,
self.event_forward_timeout - time.time())
if timeout >= 0:
log.trace('Polling timeout: %f', timeout)
socks = dict(self.poller.poll(timeout * 1000))
else:
# This shouldn't really happen.
# But there's no harm being defensive
log.warning('Negative timeout in syndic main loop')
socks = {}
# check all of your master_syndics, have them do their thing
for master_id, syndic_dict in self.master_syndics.items():
# if not connected, lets try
if 'generator' not in syndic_dict:
# if we couldn't connect, lets try later
if not self._connect_to_master(master_id):
continue
next(syndic_dict['generator'])
# events
if socks.get(self.local.event.sub) == zmq.POLLIN:
self._process_event_socket()
if (self.event_forward_timeout is not None and
self.event_forward_timeout < time.time()):
self._forward_events()
# We don't handle ZMQErrors like the other minions
# I've put explicit handling around the receive calls
# in the process_*_socket methods. If we see any other
# errors they may need some kind of handling so log them
# for now.
except Exception:
log.critical(
'An exception occurred while polling the syndic',
exc_info=True
)
    def _process_event_socket(self):
        '''
        Drain events from the local event bus for at most
        ``syndic_max_event_process_time`` seconds, aggregating job returns
        into ``self.jids`` and other events into ``self.raw_events``.
        '''
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            # First event seen starts the forward countdown
            if self.event_forward_timeout is None:
                self.event_forward_timeout = (
                        time.time() + self.opts['syndic_event_forward_timeout']
                )
            if salt.utils.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    # First return for this jid: record metadata and pull the
                    # original job load from the configured master job cache
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                    )
                if 'master_id' in event['data']:
                    # __'s to make sure it doesn't print out on the master cli
                    jdict['__master_id__'] = event['data']['master_id']
                # one entry per responding minion id
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic')},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub', args=(jid_ret, '_syndic_return'), master_id=jid_ret.get('__master_id__'))
self._reset_event_aggregation()
class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        self.opts = opts
        # Loaded lazily by data_match() when needed
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        # The last {'match': ...} entry in the data wins
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        # Accept unicode targets too (consistent with list_match and
        # compound_match); a non-string target cannot be a glob.
        if not isinstance(tgt, string_types):
            return False
        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        if self.functions is None:
            self.functions = salt.loader.minion_mods(self.opts)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'],
                                        tgt,
                                        delimiter=delimiter,
                                        exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on ip address or CIDR notation
        '''
        num_parts = len(tgt.split('/'))
        if num_parts > 2:
            # Target is not valid CIDR
            return False
        elif num_parts == 2:
            # Target is CIDR
            return salt.utils.network.in_subnet(
                tgt,
                addrs=self.opts['grains'].get('ipv4', [])
            )
        else:
            # Target is an IPv4 address
            import socket
            try:
                socket.inet_aton(tgt)
            except socket.error:
                # Not a valid IPv4 address
                return False
            else:
                return tgt in self.opts['grains'].get('ipv4', [])

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, string_types):
            log.debug('Compound target received that is not a string')
            return False
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'L': 'list',
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        tokens = tgt.split()
        for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, X, I, L, S, E' matcher types, then by hostname glob.
            # Length guard: a bare '@' token previously raised IndexError
            # on match[1]; such a token now falls through to the glob case.
            if len(match) > 1 and match[1] == '@':
                comps = match.split('@')
                matcher = ref.get(comps[0])
                if not matcher:
                    # If an unknown matcher is called at any time, fail out
                    return False
                results.append(
                    str(
                        getattr(self, '{0}_match'.format(matcher))(
                            '@'.join(comps[1:])
                        )
                    )
                )
            elif match in opers:
                # We didn't match a target, so append a boolean operator or
                # subexpression
                if results or match in ['(', ')']:
                    if match == 'not':
                        if results[-1] == 'and':
                            pass
                        elif results[-1] == 'or':
                            pass
                        else:
                            results.append('and')
                    results.append(match)
                else:
                    # seq start with oper, fail
                    if match not in ['(', ')']:
                        return False
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(match)))
        results = ' '.join(results)
        try:
            # results only ever contains 'True'/'False' literals, boolean
            # operators and parentheses built above, so the eval input is
            # constrained — keep it that way.
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False
        return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict
        '''
        # NOTE: Minion.__init__ is deliberately NOT called (hence the W0231
        # disable); this constructor re-implements the relevant setup itself.
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        # print opts['proxymodule']
        # Load the proxy interface module named by the 'proxytype' config key
        fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
        self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
        # Open the connection to the controlled device and derive this
        # minion's id from it
        opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
        opts['id'] = opts['proxyobject'].id(opts)
        opts.update(resolve_dns(opts))
        self.opts = opts
        self.authenticate(timeout, safe)
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment'],
            funcs=self.functions
        ).compile_pillar()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.proc_dir = get_proc_dir(opts['cachedir'])
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        self.grains_cache = self.opts['grains']
        # self._running = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        # Plain passthrough to Minion; kept as an explicit override point.
        return super(ProxyMinion, self)._prep_mod_opts()

    def _load_modules(self, force_refresh=False):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        # Plain passthrough to Minion; kept as an explicit override point.
        return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
| 39.451409 | 127 | 0.535168 |
47e6c449f1d37b09711853d0578517bb712a3710 | 8,195 | py | Python | ionoscloud/models/ip_blocks.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/ip_blocks.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | 6 | 2021-11-26T16:18:51.000Z | 2022-02-18T10:08:49.000Z | ionoscloud/models/ip_blocks.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | 1 | 2021-04-20T09:29:17.000Z | 2021-04-20T09:29:17.000Z | # coding: utf-8
"""
CLOUD API
An enterprise-grade Infrastructure is provided as a Service (IaaS) solution that can be managed through a browser-based \"Data Center Designer\" (DCD) tool or via an easy to use API. The API allows you to perform a variety of management tasks such as spinning up additional servers, adding volumes, adjusting networking, and so forth. It is designed to allow users to leverage the same power and flexibility found within the DCD visual tool. Both tools are consistent with their concepts and lend well to making the experience smooth and intuitive. # noqa: E501
The version of the OpenAPI document: 5.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class IpBlocks(object):
    """Collection model for IP blocks returned by the CLOUD API.

    NOTE: This class mirrors an OpenAPI-generated model; ``openapi_types``
    and ``attribute_map`` are consumed by the API client for
    (de)serialization, so their contents must not be altered by hand.
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'id': 'str',
        'type': 'Type',
        'href': 'str',
        'items': 'list[IpBlock]',
        'offset': 'float',
        'limit': 'float',
        'links': 'PaginationLinks',
    }

    # attribute name -> JSON key in the API payload
    attribute_map = {
        'id': 'id',
        'type': 'type',
        'href': 'href',
        'items': 'items',
        'offset': 'offset',
        'limit': 'limit',
        'links': '_links',
    }

    def __init__(self, id=None, type=None, href=None, items=None, offset=None, limit=None, links=None, local_vars_configuration=None):  # noqa: E501
        """IpBlocks - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self.discriminator = None

        # Backing fields default to None; go through the property setters
        # only for values the caller actually supplied.
        for attr, value in (('id', id), ('type', type), ('href', href),
                            ('items', items), ('offset', offset),
                            ('limit', limit), ('links', links)):
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)

    # Simple pass-through properties over the private backing fields.
    id = property(lambda self: self._id,
                  lambda self, value: setattr(self, '_id', value),
                  doc="The resource's unique identifier")
    type = property(lambda self: self._type,
                    lambda self, value: setattr(self, '_type', value),
                    doc="The type of object that has been created")
    href = property(lambda self: self._href,
                    lambda self, value: setattr(self, '_href', value),
                    doc="URL to the object representation (absolute path)")
    items = property(lambda self: self._items,
                     lambda self, value: setattr(self, '_items', value),
                     doc="Array of items in that collection")
    offset = property(lambda self: self._offset,
                      lambda self, value: setattr(self, '_offset', value),
                      doc="the offset specified in the request (default 0)")
    limit = property(lambda self: self._limit,
                     lambda self, value: setattr(self, '_limit', value),
                     doc="the limit specified in the request (or the "
                         "endpoint's default pagination limit)")
    links = property(lambda self: self._links,
                     lambda self, value: setattr(self, '_links', value),
                     doc="Pagination links")

    def to_dict(self):
        """Returns the model properties as a dict"""
        def convert(value):
            # One level of conversion only (matching the generated code):
            # nested models expose to_dict(); list/dict members are mapped
            # element-wise without deeper recursion.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v
                        for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return dict(
                    (k, v.to_dict() if hasattr(v, 'to_dict') else v)
                    for k, v in value.items()
                )
            return value

        return dict((attr, convert(getattr(self, attr)))
                    for attr in self.openapi_types)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, IpBlocks) and self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not isinstance(other, IpBlocks) or self.to_dict() != other.to_dict()
| 27.046205 | 568 | 0.570958 |
f8e43cd0986078e8447ad7eb02ce9cf35b2c6fd1 | 1,586 | py | Python | ebi/core.py | ray1005yb/ebi | 327121aefaad32f836ddc9344a1f0e16aa7c0c20 | [
"MIT"
] | 22 | 2015-11-20T06:47:26.000Z | 2021-06-02T03:35:43.000Z | ebi/core.py | ray1005yb/ebi | 327121aefaad32f836ddc9344a1f0e16aa7c0c20 | [
"MIT"
] | 10 | 2016-01-22T09:34:59.000Z | 2019-04-03T09:22:07.000Z | ebi/core.py | ray1005yb/ebi | 327121aefaad32f836ddc9344a1f0e16aa7c0c20 | [
"MIT"
] | 9 | 2015-11-30T02:34:27.000Z | 2020-03-11T08:28:44.000Z | from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import boto3
from ebcli.lib import aws as ebaws
from .commands.bgdeploy import apply_args as apply_args_bgdeploy
from .commands.clonedeploy import apply_args as apply_args_clonedeploy
from .commands.create import apply_args as apply_args_create
from .commands.deploy import apply_args as apply_args_deploy
def main():
    """Entry point for the ``ebi`` console script.

    Configures logging, registers the four sub-commands, parses the command
    line, establishes the boto3 / EB CLI session from the parsed profile and
    region, then dispatches to the selected sub-command.
    """
    logger = logging.getLogger('ebi')
    logger.propagate = True
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    # Each command module contributes its own arguments to its subparser.
    for command, apply_args in (('bgdeploy', apply_args_bgdeploy),
                                ('clonedeploy', apply_args_clonedeploy),
                                ('create', apply_args_create),
                                ('deploy', apply_args_deploy)):
        apply_args(subparsers.add_parser(command))

    parsed = parser.parse_args()
    if not hasattr(parsed, 'func'):
        # No sub-command selected: show usage instead of failing.
        parser.print_help()
        return

    session_kwargs = {}
    if parsed.profile:
        session_kwargs['profile_name'] = parsed.profile
    if parsed.region:
        session_kwargs['region_name'] = parsed.region
    boto3.setup_default_session(**session_kwargs)

    # Propagate the effective region/profile to the EB CLI layer.
    session = boto3._get_default_session()
    ebaws.set_region(session._session.get_config_variable('region'))
    ebaws.set_profile(session.profile_name)
    parsed.func(parsed)
| 30.5 | 70 | 0.753468 |
75d0ad44529f87d7e91e1657902e8daad33d7650 | 23,457 | py | Python | src/util/mar.py | yrahul3910/src | a8cf12d634cbe30e64aefeb9ed3cbfc56d55db40 | [
"MIT"
] | null | null | null | src/util/mar.py | yrahul3910/src | a8cf12d634cbe30e64aefeb9ed3cbfc56d55db40 | [
"MIT"
] | null | null | null | src/util/mar.py | yrahul3910/src | a8cf12d634cbe30e64aefeb9ed3cbfc56d55db40 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import pickle
from pdb import set_trace
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from collections import Counter
from sklearn import svm
import matplotlib.pyplot as plt
import time
import os
class MAR(object):
def __init__(self):
self.fea_num = 4000
self.step = 10
self.enough = 30
self.kept=50
self.atleast=100
def create(self,filename):
self.filename=filename
self.name=self.filename.split(".")[0]
self.flag=True
self.hasLabel=True
self.record={"x":[],"pos":[]}
self.body={}
self.est = []
self.est_num = 0
self.last_pos=0
self.last_neg=0
try:
## if model already exists, load it ##
self = self.load()
except:
## otherwise read from file ##
try:
self.loadfile()
self.preprocess()
self.save()
except:
## cannot find file in workspace ##
self.flag=False
self.enable_est=False
return self
### Use previous knowledge, labeled only
    def create_old(self, filename):
        '''
        Import a previously coded CSV from ../workspace/coded/ and append its
        determined rows to self.body as prior knowledge. last_pos/last_neg
        record how many labels were inherited so they are excluded from this
        session's counts; "undetermined" rows are skipped entirely.
        '''
        with open("../workspace/coded/" + str(filename), "r") as csvfile:
            content = [x for x in csv.reader(csvfile, delimiter=',')]
        fields = ["Document Title", "Abstract", "Year", "PDF Link", "code", "time"]
        header = content[0]
        ind0 = header.index("code")
        # Count inherited human decisions
        self.last_pos = len([c[ind0] for c in content[1:] if c[ind0] == "yes"])
        self.last_neg = len([c[ind0] for c in content[1:] if c[ind0] == "no"])
        for field in fields:
            ind = header.index(field)
            if field == "time":
                self.body[field].extend([float(c[ind]) for c in content[1:] if c[ind0] != "undetermined"])
            else:
                self.body[field].extend([c[ind] for c in content[1:] if c[ind0] != "undetermined"])
        # Optional columns fall back to defaults when absent from the CSV
        try:
            ind = header.index("label")
            self.body["label"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
        except:
            self.body["label"].extend(["unknown"] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
        try:
            ind = header.index("fixed")
            self.body["fixed"].extend([c[ind] for c in content[1:] if c[ind0]!="undetermined"])
        except:
            self.body["fixed"].extend([0] * (len([c[ind0] for c in content[1:] if c[ind0]!="undetermined"])))
        # Rebuild features over the combined corpus and persist
        self.preprocess()
        self.save()
    def loadfile(self):
        '''
        Read the candidate CSV from ../workspace/data/ into self.body.

        Columns absent from the CSV are filled with defaults so downstream
        code can rely on every key existing: 'label' falls back to "unknown"
        (and clears self.hasLabel), 'code' to "undetermined", 'time' and
        'fixed' to 0.
        '''
        with open("../workspace/data/" + str(self.filename), "r") as csvfile:
            content = [x for x in csv.reader(csvfile, delimiter=',')]
        fields = ["Document Title", "Abstract", "Year", "PDF Link"]
        header = content[0]
        for field in fields:
            try:
                ind = header.index(field)
                # Python 2: decode byte strings to unicode, dropping bad bytes
                self.body[field] = [c[ind].decode("utf8","ignore") for c in content[1:]]
            except:
                self.body[field] = [""]*(len(content) - 1)
        try:
            ind = header.index("label")
            self.body["label"] = [c[ind] for c in content[1:]]
        except:
            self.hasLabel=False
            self.body["label"] = ["unknown"] * (len(content) - 1)
        try:
            ind = header.index("code")
            self.body["code"] = [c[ind] for c in content[1:]]
        except:
            self.body["code"]=['undetermined']*(len(content) - 1)
        try:
            ind = header.index("time")
            self.body["time"] = [c[ind] for c in content[1:]]
        except:
            self.body["time"]=[0]*(len(content) - 1)
        try:
            ind = header.index("fixed")
            self.body["fixed"] = [c[ind] for c in content[1:]]
        except:
            self.body["fixed"]=[0]*(len(content) - 1)
        return
    def create_lda(self,filename):
        '''
        Same as create(), but replaces the term-frequency features with a
        100-topic LDA document-topic matrix (requires the third-party
        ``lda`` package).
        '''
        self.filename=filename
        self.name=self.filename.split(".")[0]
        self.flag=True
        self.hasLabel=True
        self.record={"x":[],"pos":[]}
        self.body={}
        self.est_num=[]
        self.lastprob=0
        self.offset=0.5
        self.interval=3
        self.last_pos=0
        self.last_neg=0
        try:
            ## if model already exists, load it ##
            return self.load()
        except:
            ## otherwise read from file ##
            try:
                self.loadfile()
                self.preprocess()
                import lda
                from scipy.sparse import csr_matrix
                # Project the tf matrix into a 100-dimensional topic space
                lda1 = lda.LDA(n_topics=100, alpha=0.1, eta=0.01, n_iter=200)
                self.csr_mat = csr_matrix(lda1.fit_transform(self.csr_mat))
                self.save()
            except:
                ## cannot find file in workspace ##
                self.flag=False
        return self
def export_feature(self):
with open("../workspace/coded/feature_" + str(self.name) + ".csv", "wb") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for i in xrange(self.csr_mat.shape[0]):
for j in range(self.csr_mat.indptr[i],self.csr_mat.indptr[i+1]):
csvwriter.writerow([i+1,self.csr_mat.indices[j]+1,self.csr_mat.data[j]])
return
def get_numbers(self):
total = len(self.body["code"]) - self.last_pos - self.last_neg
pos = Counter(self.body["code"])["yes"] - self.last_pos
neg = Counter(self.body["code"])["no"] - self.last_neg
try:
tmp=self.record['x'][-1]
except:
tmp=-1
if int(pos+neg)>tmp:
self.record['x'].append(int(pos+neg))
self.record['pos'].append(int(pos))
self.pool = np.where(np.array(self.body['code']) == "undetermined")[0]
self.labeled = list(set(range(len(self.body['code']))) - set(self.pool))
return pos, neg, total
    def export(self):
        '''
        Write the session to ../workspace/coded/<name>.csv with rows grouped
        by code ("yes" first, then "no", then "undetermined"), each group
        ordered by coding time, most recent first.
        '''
        fields = ["Document Title", "Abstract", "Year", "PDF Link", "label", "code","time"]
        with open("../workspace/coded/" + str(self.name) + ".csv", "wb") as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=',')
            csvwriter.writerow(fields)
            ## sort before export
            time_order = np.argsort(self.body["time"])[::-1]
            yes = [c for c in time_order if self.body["code"][c]=="yes"]
            no = [c for c in time_order if self.body["code"][c] == "no"]
            und = [c for c in time_order if self.body["code"][c] == "undetermined"]
            ##
            for ind in yes+no+und:
                csvwriter.writerow([self.body[field][ind] for field in fields])
        return
    def preprocess(self):
        '''
        Build the document-term matrix: select the self.fea_num terms with
        the highest summed tfidf as the vocabulary (self.voc), then encode
        every document as an L2-normalized term-frequency vector over that
        vocabulary (self.csr_mat).
        '''
        ### Combine title and abstract for training ###########
        content = [self.body["Document Title"][index] + " " + self.body["Abstract"][index] for index in
                   xrange(len(self.body["Document Title"]))]
        #######################################################
        ### Feature selection by tfidf in order to keep vocabulary ###
        tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=True, smooth_idf=False,
                                  sublinear_tf=False,decode_error="ignore")
        tfidf = tfidfer.fit_transform(content)
        # Rank terms by total tfidf weight across the corpus; keep top fea_num
        weight = tfidf.sum(axis=0).tolist()[0]
        kept = np.argsort(weight)[-self.fea_num:]
        # NOTE(review): relies on .keys() and .values() enumerating the
        # vocabulary dict in the same order (true in CPython for an
        # unmodified dict between the two calls)
        self.voc = np.array(tfidfer.vocabulary_.keys())[np.argsort(tfidfer.vocabulary_.values())][kept]
        ##############################################################
        ### Term frequency as feature, L2 normalization ##########
        tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=u'l2', use_idf=False,
                               vocabulary=self.voc,decode_error="ignore")
        # tfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False,
        #                        vocabulary=self.voc,decode_error="ignore")
        self.csr_mat=tfer.fit_transform(content)
        ########################################################
        return
## save model ##
def save(self):
with open("memory/"+str(self.name)+".pickle","w") as handle:
pickle.dump(self,handle)
## load model ##
def load(self):
with open("memory/" + str(self.name) + ".pickle", "r") as handle:
tmp = pickle.load(handle)
return tmp
    def estimate_curve(self, clf, reuse=False, num_neg=0):
        '''
        Estimate how many relevant documents remain (SEMI-style estimator):
        fit a logistic curve to the SVM decision values, repeatedly resample
        presumed positives from the unlabeled pool, and iterate until the
        estimated positive count stops changing.

        Returns (esty, pre): the estimated number of positives beyond the
        inherited ones, and per-document positive probabilities.
        '''
        from sklearn import linear_model
        import random

        def prob_sample(probs):
            # Deterministic "roulette" sampling: walk documents in descending
            # probability, emitting one candidate every time the accumulated
            # probability mass reaches 1.
            order = np.argsort(probs)[::-1]
            count = 0
            can = []
            sample = []
            for i, x in enumerate(probs[order]):
                count = count + x
                can.append(order[i])
                if count >= 1:
                    # sample.append(np.random.choice(can,1)[0])
                    sample.append(can[0])
                    count = 0
                    can = []
            return sample
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # Keep only this session's labels (skip the inherited prefix)
        poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        ###############################################
        # prob = clf.predict_proba(self.csr_mat)[:,:1]
        # Use the SVM margin as the 1-D feature for the logistic fit
        prob1 = clf.decision_function(self.csr_mat)
        prob = np.array([[x] for x in prob1])
        # prob = self.csr_mat
        y = np.array([1 if x == 'yes' else 0 for x in self.body['code']])
        y0 = np.copy(y)
        if len(poses) and reuse:
            # NOTE(review): 'all' shadows the builtin
            all = list(set(poses) | set(negs) | set(self.pool))
        else:
            all = range(len(y))
        pos_num_last = Counter(y0)[1]
        lifes = 1
        life = lifes
        while (True):
            # NOTE(review): integer division under Python 2 when both
            # operands are ints — presumably intended; verify
            C = Counter(y[all])[1]/ num_neg
            es = linear_model.LogisticRegression(penalty='l2', fit_intercept=True, C=C)
            es.fit(prob[all], y[all])
            pos_at = list(es.classes_).index(1)
            pre = es.predict_proba(prob[self.pool])[:, pos_at]
            # Re-seed presumed positives from the pool and re-count
            y = np.copy(y0)
            sample = prob_sample(pre)
            for x in self.pool[sample]:
                y[x] = 1
            pos_num = Counter(y)[1]
            # Stop once the estimate is stable for 'lifes' iterations
            if pos_num == pos_num_last:
                life = life - 1
                if life == 0:
                    break
            else:
                life = lifes
                pos_num_last = pos_num
        esty = pos_num - self.last_pos
        pre = es.predict_proba(prob)[:, pos_at]
        return esty, pre
## Train model ##
    def train(self,pne=True,weighting=True):
        '''
        Fit the active-learning linear SVM on the current labels.

        pne: presume unlabeled documents are non-relevant by adding a random
        sample of the pool as pseudo-negatives. Once self.enough positives
        exist, aggressive undersampling refits on only the hardest negatives.

        Returns (uncertain_id, uncertain_prob, certain_id, certain_prob, clf);
        when self.enable_est is on, the probabilities come from the SEMI
        estimator instead of the SVM.
        '''
        clf = svm.SVC(kernel='linear', probability=True, class_weight='balanced') if weighting else svm.SVC(kernel='linear', probability=True)
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        left = poses
        decayed = list(left) + list(negs)
        unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
        # Sample pseudo-negatives from the pool; leave the pool as-is when it
        # is too small for the requested sample size
        try:
            unlabeled = np.random.choice(unlabeled,size=np.max((len(decayed),2*len(left),self.atleast)),replace=False)
        except:
            pass
        if not pne:
            unlabeled=[]
        # Everything not coded "yes" is treated as "no" for training
        labels=np.array([x if x!='undetermined' else 'no' for x in self.body['code']])
        all_neg=list(negs)+list(unlabeled)
        sample = list(decayed) + list(unlabeled)
        clf.fit(self.csr_mat[sample], labels[sample])
        ## aggressive undersampling ##
        if len(poses)>=self.enough:
            # Keep only the |left| negatives farthest from the positives
            train_dist = clf.decision_function(self.csr_mat[all_neg])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist=-train_dist
            negs_sel = np.argsort(train_dist)[::-1][:len(left)]
            sample = list(left) + list(np.array(all_neg)[negs_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        elif pne:
            # Keep only the most-negative half of the pseudo-negatives
            train_dist = clf.decision_function(self.csr_mat[unlabeled])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
            sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        uncertain_id, uncertain_prob = self.uncertain(clf)
        certain_id, certain_prob = self.certain(clf)
        if self.enable_est:
            if self.last_pos>0 and len(poses)-self.last_pos>0:
                self.est_num, self.est = self.estimate_curve(clf, reuse=True, num_neg=len(sample)-len(left))
            else:
                self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
            return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
        else:
            return uncertain_id, uncertain_prob, certain_id, certain_prob, clf
## reuse
    def train_reuse(self,pne=True):
        '''
        Variant of train() for sessions that imported prior knowledge
        (create_old): only this session's labels are used as positives, and
        pseudo-negative sampling is always enabled.
        '''
        # NOTE(review): the pne argument is immediately overridden here, so
        # callers cannot actually disable presumed-non-relevant sampling.
        pne=True
        clf = svm.SVC(kernel='linear', probability=True)
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # Skip the inherited label prefix (sorted by coding time)
        left = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        if len(left)==0:
            return [], [], self.random(), []
        decayed = list(left) + list(negs)
        unlabeled = np.where(np.array(self.body['code']) == "undetermined")[0]
        try:
            unlabeled = np.random.choice(unlabeled, size=np.max((len(decayed), self.atleast)), replace=False)
        except:
            pass
        if not pne:
            unlabeled = []
        labels = np.array([x if x != 'undetermined' else 'no' for x in self.body['code']])
        all_neg = list(negs) + list(unlabeled)
        sample = list(decayed) + list(unlabeled)
        clf.fit(self.csr_mat[sample], labels[sample])
        ## aggressive undersampling ##
        if len(poses) >= self.enough:
            train_dist = clf.decision_function(self.csr_mat[all_neg])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist=-train_dist
            negs_sel = np.argsort(train_dist)[::-1][:len(left)]
            sample = list(left) + list(np.array(all_neg)[negs_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        elif pne:
            train_dist = clf.decision_function(self.csr_mat[unlabeled])
            pos_at = list(clf.classes_).index("yes")
            if pos_at:
                train_dist = -train_dist
            unlabel_sel = np.argsort(train_dist)[::-1][:int(len(unlabeled) / 2)]
            sample = list(decayed) + list(np.array(unlabeled)[unlabel_sel])
            clf.fit(self.csr_mat[sample], labels[sample])
        uncertain_id, uncertain_prob = self.uncertain(clf)
        certain_id, certain_prob = self.certain(clf)
        if self.enable_est:
            self.est_num, self.est = self.estimate_curve(clf, reuse=False, num_neg=len(sample)-len(left))
            return uncertain_id, self.est[uncertain_id], certain_id, self.est[certain_id], clf
        else:
            return uncertain_id, uncertain_prob, certain_id, certain_prob, clf
## Get suspecious codes
    ## Get suspecious codes
    def susp(self,clf):
        """Return human labels the classifier disagrees with most.

        Only non-"fixed" (not yet double-confirmed) labels are considered.
        :param clf: fitted classifier exposing ``classes_`` and
            ``predict_proba``; may be falsy for the negative side.
        :return: ``(sel_pos, probs_pos, sel_neg, probs_neg)`` -- suspicious
            "yes" ids/probabilities then suspicious "no" ids/probabilities,
            each sorted ascending by the classifier's agreement.
        """
        # Thresholds/limits below are currently unused -- the filtering
        # lines that used them are commented out.
        thres_pos = 1
        thres_neg = 0.5
        length_pos = 10
        length_neg = 10
        poses = np.where(np.array(self.body['code']) == "yes")[0]
        negs = np.where(np.array(self.body['code']) == "no")[0]
        # poses = np.array(poses)[np.argsort(np.array(self.body['time'])[poses])[self.last_pos:]]
        # negs = np.array(negs)[np.argsort(np.array(self.body['time'])[negs])[self.last_neg:]]
        # Drop labels already confirmed twice (fixed == 1).
        poses = np.array(poses)[np.where(np.array(self.body['fixed'])[poses] == 0)[0]]
        negs = np.array(negs)[np.where(np.array(self.body['fixed'])[negs] == 0)[0]]
        if len(poses)>0:
            pos_at = list(clf.classes_).index("yes")
            prob_pos = clf.predict_proba(self.csr_mat[poses])[:,pos_at]
            # se_pos = np.argsort(prob_pos)[:length_pos]
            # Ascending: lowest P(yes) first, i.e. most suspicious "yes".
            se_pos = np.argsort(prob_pos)
            # se_pos = [s for s in se_pos if prob_pos[s]<thres_pos]
            sel_pos = poses[se_pos]
            probs_pos = prob_pos[se_pos]
        else:
            sel_pos = np.array([])
            probs_pos = np.array([])
        if len(negs)>0:
            if clf:
                neg_at = list(clf.classes_).index("no")
                prob_neg = clf.predict_proba(self.csr_mat[negs])[:,neg_at]
                # se_neg = np.argsort(prob_neg)[:length_neg]
                # Ascending: lowest P(no) first, i.e. most suspicious "no".
                se_neg = np.argsort(prob_neg)
                # se_neg = [s for s in se_neg if prob_neg[s]<thres_neg]
                sel_neg = negs[se_neg]
                probs_neg = prob_neg[se_neg]
            else:
                # No classifier yet: return all candidate negatives unranked.
                sel_neg = negs
                probs_neg = np.array([])
        else:
            sel_neg = np.array([])
            probs_neg = np.array([])
        return sel_pos, probs_pos, sel_neg, probs_neg
## BM25 ##
def BM25(self,query):
b=0.75
k1=1.5
### Combine title and abstract for training ###########
content = [self.body["Document Title"][index] + " " + self.body["Abstract"][index] for index in
xrange(len(self.body["Document Title"]))]
#######################################################
### Feature selection by tfidf in order to keep vocabulary ###
tfidfer = TfidfVectorizer(lowercase=True, stop_words="english", norm=None, use_idf=False, smooth_idf=False,
sublinear_tf=False, decode_error="ignore")
tf = tfidfer.fit_transform(content)
d_avg = np.mean(np.sum(tf, axis=1))
score = {}
for word in query:
score[word]=[]
id= tfidfer.vocabulary_[word]
df = sum([1 for wc in tf[:,id] if wc>0])
idf = np.log((len(content)-df+0.5)/(df+0.5))
for i in xrange(len(content)):
score[word].append(idf*tf[i,id]/(tf[i,id]+k1*((1-b)+b*np.sum(tf[0],axis=1)[0,0]/d_avg)))
self.bm = np.sum(score.values(),axis=0)
def BM25_get(self):
ids = self.pool[np.argsort(self.bm[self.pool])[::-1][:self.step]]
scores = self.bm[ids]
return ids, scores
## Get certain ##
def certain(self,clf):
pos_at = list(clf.classes_).index("yes")
prob = clf.predict_proba(self.csr_mat[self.pool])[:,pos_at]
order = np.argsort(prob)[::-1][:self.step]
return np.array(self.pool)[order],np.array(prob)[order]
## Get uncertain ##
def uncertain(self,clf):
pos_at = list(clf.classes_).index("yes")
prob = clf.predict_proba(self.csr_mat[self.pool])[:, pos_at]
train_dist = clf.decision_function(self.csr_mat[self.pool])
order = np.argsort(np.abs(train_dist))[:self.step] ## uncertainty sampling by distance to decision plane
# order = np.argsort(np.abs(prob-0.5))[:self.step] ## uncertainty sampling by prediction probability
return np.array(self.pool)[order], np.array(prob)[order]
## Get random ##
def random(self):
return np.random.choice(self.pool,size=np.min((self.step,len(self.pool))),replace=False)
## Format ##
def format(self,id,prob=[]):
result=[]
for ind,i in enumerate(id):
tmp = {key: self.body[key][i] for key in self.body}
tmp["id"]=str(i)
if prob!=[]:
tmp["prob"]=prob[ind]
result.append(tmp)
return result
## Code candidate studies ##
def code(self,id,label):
if self.body['code'][id] == label:
self.body['fixed'][id] = 1
self.body["code"][id] = label
self.body["time"][id] = time.time()
## Plot ##
def plot(self):
font = {'family': 'normal',
'weight': 'bold',
'size': 20}
plt.rc('font', **font)
paras = {'lines.linewidth': 5, 'legend.fontsize': 20, 'axes.labelsize': 30, 'legend.frameon': False,
'figure.autolayout': True, 'figure.figsize': (16, 8)}
plt.rcParams.update(paras)
fig = plt.figure()
order = np.argsort(np.array(self.body['time'])[self.labeled])
seq = np.array(self.body['code'])[np.array(self.labeled)[order]]
counter = 0
rec = [0]
for s in seq:
if s=='yes':
counter+=1
rec.append(counter)
plt.plot(range(len(rec)), rec)
# plt.plot(self.record['x'], self.record["pos"])
### estimation ####
# if self.enable_est:
# if self.record["pos"][-1] > int(self.est_num/2) and self.record["pos"][-1] < self.est_num:
# est = self.est[self.pool]
# order = np.argsort(est)[::-1]
# xx = [self.record["x"][-1]]
# yy = [self.record["pos"][-1]]
# for x in xrange(int(len(order) / self.step)):
# delta = sum(est[order[x * self.step:(x + 1) * self.step]])
# if delta >= 0.1:
# yy.append(yy[-1] + delta)
# xx.append(xx[-1] + self.step)
# else:
# break
# plt.plot(xx, yy, "-.")
####################
plt.ylabel("Relevant Found")
plt.xlabel("Documents Reviewed")
name=self.name+ "_" + str(int(time.time()))+".png"
dir = "./static/image"
for file in os.listdir(dir):
os.remove(os.path.join(dir, file))
plt.savefig("./static/image/" + name)
plt.close(fig)
return name
def get_allpos(self):
return len([1 for c in self.body["label"] if c=="yes"])-self.last_pos
## Restart ##
def restart(self):
os.remove("./memory/"+self.name+".pickle")
## Get missed relevant docs ##
def get_rest(self):
rest=[x for x in xrange(len(self.body['label'])) if self.body['label'][x]=='yes' and self.body['code'][x]!='yes']
rests={}
# fields = ["Document Title", "Abstract", "Year", "PDF Link"]
fields = ["Document Title"]
for r in rest:
rests[r]={}
for f in fields:
rests[r][f]=self.body[f][r]
return rests
| 37.894992 | 143 | 0.517585 |
893b7894e961d9eca2d140b0194cb67f4bcdb84d | 348 | py | Python | centreOfMass.py | Robbie1977/NRRDtools | e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f | [
"MIT"
] | 1 | 2015-02-23T11:41:45.000Z | 2015-02-23T11:41:45.000Z | centreOfMass.py | Robbie1977/NRRDtools | e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f | [
"MIT"
] | 2 | 2016-04-07T11:07:01.000Z | 2016-06-24T13:23:24.000Z | centreOfMass.py | Robbie1977/NRRDtools | e16f1e49fccadc5f717f55b7c2c3dc49ec96f89f | [
"MIT"
] | null | null | null | import numpy as np
import sys, os
import nrrd
from scipy import ndimage
# Compute and print the integer voxel coordinates of an NRRD image's
# intensity-weighted centre of mass. Usage: python centreOfMass.py in.nrrd
if (len(sys.argv) < 2):
    print('Error: missing arguments!')
    print('e.g. python centreOfMass.py imageIn.nrrd')
else:
    Iin = str(sys.argv[1])
    # header1 is unused; nrrd.read returns (data, header).
    data1, header1 = nrrd.read(Iin)
    # NOTE(review): np.int is removed in modern NumPy -- would need int here.
    print(list(np.array(ndimage.measurements.center_of_mass(data1),dtype=np.int)))
| 23.2 | 82 | 0.689655 |
fdc9f95b8a35d678ffca46744686cebdf5e981fe | 2,106 | py | Python | Examples/FilterProgressReporting/FilterProgressReporting.py | nathantspencer/SimpleElastix | a9641c1197e58a4ff614145e9ba5ca43c2833ebf | [
"Apache-2.0"
] | 350 | 2017-05-22T14:23:55.000Z | 2022-03-26T07:11:28.000Z | Examples/FilterProgressReporting/FilterProgressReporting.py | nathantspencer/SimpleElastix | a9641c1197e58a4ff614145e9ba5ca43c2833ebf | [
"Apache-2.0"
] | 382 | 2017-05-19T06:43:28.000Z | 2022-03-30T16:16:59.000Z | Examples/FilterProgressReporting/FilterProgressReporting.py | nathantspencer/SimpleElastix | a9641c1197e58a4ff614145e9ba5ca43c2833ebf | [
"Apache-2.0"
] | 121 | 2017-05-24T07:26:13.000Z | 2022-03-24T07:20:09.000Z | #!/usr/bin/env python
#=========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================
from __future__ import print_function
import SimpleITK as sitk
import sys
import os
if len ( sys.argv ) < 4:
print( "Usage: "+sys.argv[0]+ " <input> <variance> <output>" )
sys.exit ( 1 )
##! [python director command]
class MyCommand(sitk.Command):
    """SimpleITK command that prints a filter's name and progress when
    invoked (registered below for sitkProgressEvent)."""
    def __init__(self, po):
        # required
        super(MyCommand,self).__init__()
        # The process object (filter) whose progress this command reports.
        self.processObject = po
    def Execute(self):
        """Called by SimpleITK on each registered event."""
        print("{0} Progress: {1:1.2f}".format(self.processObject.GetName(),self.processObject.GetProgress()))
##! [python director command]
# Read input image, blur it with a Gaussian (reporting progress via the
# command/observer mechanisms above), cast back to the original pixel
# type, and write the result.
reader = sitk.ImageFileReader()
reader.SetFileName ( sys.argv[1] )
image = reader.Execute()
# Remember the pixel type so the output matches the input exactly.
pixelID = image.GetPixelID()
gaussian = sitk.DiscreteGaussianImageFilter()
gaussian.SetVariance( float ( sys.argv[2] ) )
##! [python lambda command]
gaussian.AddCommand(sitk.sitkStartEvent, lambda: print("StartEvent"))
gaussian.AddCommand(sitk.sitkEndEvent, lambda: print("EndEvent"))
##! [python lambda command]
cmd = MyCommand(gaussian)
gaussian.AddCommand(sitk.sitkProgressEvent, cmd)
image = gaussian.Execute ( image )
# Gaussian smoothing promotes to float; cast back to the source type.
caster = sitk.CastImageFilter()
caster.SetOutputPixelType( pixelID )
image = caster.Execute( image )
writer = sitk.ImageFileWriter()
writer.SetFileName ( sys.argv[3] )
writer.Execute ( image );
# Optionally display the result unless suppressed via environment.
if ( not "SITK_NOSHOW" in os.environ ):
    sitk.Show( image, "Simple Gaussian" )
| 29.25 | 109 | 0.667142 |
ceac4936401713d7a8c857bb830f1b6e09c31efb | 13,810 | py | Python | visualise.py | simoncrowe/bookchain-network-visualisation | ce46db2596c31745bb8588ca84172a24b2789c10 | [
"BSD-3-Clause"
] | null | null | null | visualise.py | simoncrowe/bookchain-network-visualisation | ce46db2596c31745bb8588ca84172a24b2789c10 | [
"BSD-3-Clause"
] | 1 | 2021-06-01T23:35:33.000Z | 2021-06-01T23:35:33.000Z | visualise.py | simoncrowe/bookchain-network-visualisation | ce46db2596c31745bb8588ca84172a24b2789c10 | [
"BSD-3-Clause"
] | null | null | null | """Consumes queue-router's /traffic API and outputs an OpenGL visualisation
Adapted from: https://github.com/glumpy/glumpy/blob/master/examples/graph.py
"""
from collections import OrderedDict
import configparser
import json
import os
import random
import threading
from glumpy import app, collections
from glumpy.transforms import Position, OrthographicProjection, Viewport
import numpy as np
import redis
from scipy.spatial.distance import cdist
import requests
MODULE_PARENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
config = configparser.ConfigParser()
config.read(os.path.join(MODULE_PARENT_DIRECTORY, 'config.ini'))
QUEUE_ROUTER_HOST = config['QUEUE_ROUTER']['host']
QUEUE_ROUTER_TOKEN = config['QUEUE_ROUTER']['token']
TRAFFIC_API_CALL_INTERVAL = float(
config['QUEUE_ROUTER']['traffic_api_call_interval']
)
TIME_INCREMENT_INTERVAL = float(
config['QUEUE_ROUTER']['time_increment_interval']
)
OPEN_GL_BACKEND = config['DISPLAY']['backend']
FULL_SCREEN = config['DISPLAY']['full_screen'] == 'True'
DISPLAY_WIDTH = int(config['DISPLAY']['width'])
DISPLAY_HEIGHT = int(config['DISPLAY']['height'])
NODES_MARGIN = float(config['DISPLAY']['nodes_margin'])
MAXIMUM_NODES = int(config['NETWORK']['maximum_nodes'])
NODE_SIZE = int(config['NETWORK']['node_size'])
EDGE_WIDTH = float(config['NETWORK']['edge_width'])
ATTRACTION = float(config['NETWORK']['attraction'])
REPULSION = float(config['NETWORK']['repulsion'])
NODE_DISTANCE = float(config['NETWORK']['node_distance'])
CONNECTION_DISPLAY_CURVE_SCALE = float(
config['NETWORK']['connection_display_curve_scale']
)
CONNECTION_DISPLAY_CURVE_ADDEND = float(
config['NETWORK']['connection_display_curve_addend']
)
CONNECTION_DISPLAY_DURATION_SCALE = float(
config['NETWORK']['connection_display_duration_scale']
)
BASE_CONNECTION_DISPLAY_DURATION = float(
config['NETWORK']['base_connection_display_duration']
)
redis_client = redis.Redis()
class RouterClock:
    """Tracks the queue-router's clock locally: seeds from the /time API
    once, then advances by TIME_INCREMENT_INTERVAL on a repeating timer.

    NOTE(review): each tick spawns a new non-daemon threading.Timer, so
    the chain keeps the process alive and drifts slightly -- confirm this
    is acceptable for the visualisation's lifetime.
    """
    # Current best estimate of the router's time (seconds).
    time = None
    def __init__(self):
        self.time = self.get_router_time()
        self.increment_time()
    @staticmethod
    def get_router_time():
        """Fetch the authoritative time from the queue-router API."""
        return requests.get(
            'http://{host}/time?token={token}'.format(
                host=QUEUE_ROUTER_HOST,
                token=QUEUE_ROUTER_TOKEN,
            )
        ).json()
    def increment_time(self):
        """Advance the local clock and schedule the next tick."""
        self.time += TIME_INCREMENT_INTERVAL
        threading.Timer(
            TIME_INCREMENT_INTERVAL,
            self.increment_time
        ).start()
class NodePositionManager:
    """Manages addressed node positions based on router traffic data."""
    # address -> np.float32 [x, y, z] position. Class-level attribute, so
    # all instances share it until prune_inactive rebinds per-instance.
    positions = OrderedDict()
    def get_positions(self, traffic_data):
        """Sync tracked nodes with ``traffic_data`` and return an
        (n, 3) array of their positions, in insertion order."""
        self.prune_inactive(traffic_data)
        self.create_any_new(traffic_data)
        return np.array(list(self.positions.values()))
    def get_connections(self, traffic_data, router_time):
        """Return an (n, n) 0/1 adjacency matrix of currently-visible
        message connections between tracked nodes."""
        connections = np.zeros((len(self.positions), len(self.positions)))
        for from_index, from_address in enumerate(self.positions.keys()):
            from_traffic = traffic_data[from_address]
            for to_index, to_address in enumerate(self.positions.keys()):
                if (
                    from_traffic and
                    to_address in from_traffic and
                    self.should_show_connection(
                        from_traffic[to_address],
                        router_time,
                    )
                ):
                    connections[from_index, to_index] = 1
        return connections
    def set_positions(self, position_array):
        """Write back the physics-updated positions, row i -> i-th node."""
        for index, address in enumerate(self.positions.keys()):
            self.positions[address] = position_array[index]
    def prune_inactive(self, traffic_data):
        """Forget nodes absent from the latest traffic snapshot.

        NOTE(review): rebinds ``self.positions`` to a plain dict (no longer
        an OrderedDict); fine on Python 3.7+ where dicts keep insertion
        order -- confirm target runtime.
        """
        self.positions = {
            address: position for address, position in self.positions.items()
            if address in traffic_data
        }
    def create_any_new(self, traffic_data):
        """Give every newly-seen address a random starting position."""
        for address in traffic_data:
            if address not in self.positions:
                self.positions[address] = self.random_node_position()
    def should_show_connection(self, traffic_data, router_time):
        """True while a message's display window (base duration plus a
        length-dependent bonus) has not yet elapsed."""
        # NOTE(review): the stray second "+" below is a unary plus --
        # harmless, but likely a typo.
        return (
            router_time <
            traffic_data['time'] +
            + BASE_CONNECTION_DISPLAY_DURATION +
            self.get_additional_connection_duration(traffic_data['length'])
        )
    def get_additional_connection_duration(self, length):
        """Gets duration based on number of chars
        using a curve that asymptotes.
        """
        # length / length**2 == 1/length: longer messages get a SMALLER
        # bonus here -- presumably intentional decay; verify against design.
        return (
            (
                (
                    length /
                    (length ** 2)
                )
                * CONNECTION_DISPLAY_CURVE_SCALE
            ) + CONNECTION_DISPLAY_CURVE_ADDEND
        ) * CONNECTION_DISPLAY_DURATION_SCALE
    @staticmethod
    def random_node_position():
        """Random on-screen [x, y, 0] respecting the configured margin.

        NOTE(review): NODES_MARGIN is parsed as float; random.randint
        rejects non-integer bounds on modern Python -- confirm config.
        """
        return np.array(
            [
                random.randint(
                    NODES_MARGIN,
                    DISPLAY_WIDTH - NODES_MARGIN
                ),
                random.randint(
                    NODES_MARGIN,
                    DISPLAY_HEIGHT - NODES_MARGIN
                ),
                0
            ],
            dtype=np.float32
        )
class NetworkVisualisation:
    """Encapsulates the Glumpy objects and display logic."""
    node_position_manger = None
    router_clock = None
    node_count = 0
    window = None
    master_joins = None    # fixed adjacency template for layout springs
    markers = None         # glumpy MarkerCollection (node discs)
    segments = None        # glumpy SegmentCollection (connection lines)
    node_positions = None  # (n, 3) float array, row per tracked node
    connections = None     # (n, n) 0/1 matrix of visible message links
    connection_sources = None
    connection_destinations = None
    joins = None           # active slice of master_joins for n nodes
    join_sources = None
    join_destinations = None
    def __init__(self):
        self.node_position_manger = NodePositionManager()
        self.router_clock = RouterClock()
    def run(self):
        """Create the window, register the draw callback, and start."""
        app.use(OPEN_GL_BACKEND)
        self.window = app.Window(
            width=DISPLAY_WIDTH,
            height=DISPLAY_HEIGHT,
            color=(0, 0, 0, 1),
            fullscreen=FULL_SCREEN,
        )
        self.master_joins = self.get_master_joins(MAXIMUM_NODES)
        self.transform = OrthographicProjection(Position(), aspect=None)
        self.viewport = Viewport()
        @self.window.event
        def on_draw(dt):
            self.update()
        self.initialise()
    def stop(self):
        self.window.close()
    def initialise(self):
        """Build initial graph state and collections, then enter the loop."""
        traffic_data = self.get_node_traffic_data()
        self.initialise_graph_state(traffic_data)
        self.initialise_markers()
        self.initialise_segments()
        app.run()
    def initialise_graph_state(self, traffic_data):
        """Derive positions, connections and layout joins from traffic."""
        self.node_count = len(traffic_data)
        self.node_positions = self.node_position_manger.get_positions(
            traffic_data
        )
        self.connections = self.node_position_manger.get_connections(
            traffic_data,
            self.router_clock.time
        )
        self.connection_sources, self.connection_destinations = np.nonzero(
            self.connections
        )
        self.joins = self.master_joins[:self.node_count, :self.node_count]
        self.join_sources, self.join_destinations = np.nonzero(self.joins)
    def update(self):
        """Per-frame: refresh state, rebuild collections on change,
        step the layout physics, and draw."""
        traffic_data = self.get_node_traffic_data()
        if traffic_data:
            connections_changed, node_count_changed = self.update_graph_state(
                traffic_data
            )
            if node_count_changed:
                self.initialise_markers()
            if connections_changed:
                self.initialise_segments()
            self.stabilise_graph()
            self.window.clear()
            self.segments.draw()
            self.markers.draw()
            # Re-enter app.run after rebuilding collections mid-frame.
            if connections_changed or node_count_changed:
                app.run()
    def update_graph_state(self, traffic_data):
        """Refresh state from traffic; return (connections_changed,
        node_count_changed) flags."""
        latest_node_count = len(traffic_data)
        self.node_positions = self.node_position_manger.get_positions(
            traffic_data
        )
        connections_changed = False
        new_connections = self.node_position_manger.get_connections(
            traffic_data,
            self.router_clock.time
        )
        if (
            self.connections is not None
            and not np.array_equal(new_connections, self.connections)
        ):
            self.connections = new_connections
            self.connection_sources, self.connection_destinations = np.nonzero(
                self.connections
            )
            connections_changed = True
        self.joins = self.master_joins[:latest_node_count, :latest_node_count]
        self.join_sources, self.join_destinations = np.nonzero(self.joins)
        node_count_changed = False
        if self.node_count != latest_node_count:
            self.node_count = latest_node_count
            node_count_changed = True
        return connections_changed, node_count_changed
    def initialise_markers(self):
        """(Re)build the node-disc collection for the current positions."""
        self.markers = collections.MarkerCollection(
            marker='disc',
            transform=self.transform,
            viewport=self.viewport
        )
        self.markers.append(
            self.node_positions,
            size=NODE_SIZE,
            linewidth=0,
            itemsize=1,
            fg_color=(1, 1, 1, 1),
            bg_color=(1, 1, 1, 1)
        )
        self.window.attach(self.transform)
        self.window.attach(self.viewport)
    def initialise_segments(self):
        """(Re)build the connection-line collection; glumpy requires at
        least one segment, so a black dummy is added when none exist."""
        self.segments = collections.SegmentCollection(
            'agg',
            transform=self.transform,
            viewport=self.viewport
        )
        if np.count_nonzero(self.connections):
            self.segments.append(
                self.node_positions[self.connection_sources],
                self.node_positions[self.connection_destinations],
                linewidth=EDGE_WIDTH,
                itemsize=1,
                color=(1, 1, 1, 1)
            )
        else:
            # Add dummy segments
            self.segments.append(
                np.array([np.array([0,0,0], dtype=np.float32)]),
                np.array([np.array([0,0,0], dtype=np.float32)]),
                linewidth=EDGE_WIDTH,
                itemsize=1,
                color=(0.0, 0.0, 0.0, 1)
            )
        self.window.attach(self.transform)
        self.window.attach(self.viewport)
    def stabilise_graph(self):
        """Ensures stable relative positioning of nodes."""
        # positions_x/positions_y/positions are views into
        # self.node_positions, so in-place ops below mutate it directly.
        positions_x = self.node_positions[:, 0]
        positions_y = self.node_positions[:, 1]
        positions = self.node_positions[:, :2]
        # Global nodes centering
        center_x, center_y = self.window.width/2, self.window.height/2
        positions += 0.01 * ([center_x, center_y] - positions)
        nodes_count = len(self.node_positions)
        if nodes_count > 1:
            # Linked nodes attraction
            distances = (
                self.node_positions[self.join_sources] -
                self.node_positions[self.join_destinations]
            )
            L = np.maximum(np.sqrt((distances*distances).sum(axis=1)),1)
            L = (L - NODE_DISTANCE)/L
            distances *= ATTRACTION * L[:,np.newaxis]
            positions_x -= 0.5 * np.bincount(
                self.join_sources,
                distances[:, 0],
                minlength=nodes_count
            )
            positions_y -= .5 * np.bincount(
                self.join_sources,
                distances[:, 1],
                minlength=nodes_count
            )
            positions_x += 0.5 * np.bincount(
                self.join_destinations,
                distances[:, 0],
                minlength=nodes_count
            )
            positions_y += 0.5 * np.bincount(
                self.join_destinations,
                distances[:, 1],
                minlength=nodes_count
            )
            # Global nodes repulsion
            dist = np.maximum(cdist(positions, positions, 'sqeuclidean'), 1)
            distances = np.empty((nodes_count, nodes_count, 2))
            distances[..., 0] = np.subtract.outer(positions_x,positions_x) / dist
            distances[..., 1] = np.subtract.outer(positions_y,positions_y) / dist
            distance_sums = distances.sum(axis=1)
            positions += (
                REPULSION * distance_sums
                / np.sqrt(((distance_sums * distance_sums).sum(axis=0)))
            )
        # Update self.markers and self.segments
        self.markers["position"] = self.node_positions
        if np.count_nonzero(self.connections):
            # Update segment positions if any connections active
            # (4 repeats = 4 vertices per 'agg' segment).
            self.segments["P0"] = np.repeat(
                self.node_positions[self.connection_sources], 4, axis=0
            )
            self.segments["P1"] = np.repeat(
                self.node_positions[self.connection_destinations], 4, axis=0
            )
        # Update node position manager
        self.node_position_manger.set_positions(self.node_positions)
    @staticmethod
    def get_node_traffic_data():
        """Read the cached traffic snapshot (JSON) from Redis."""
        return json.loads(redis_client.get('traffic').decode())
    @staticmethod
    def get_master_joins(max_nodes):
        """Fixed adjacency template used for layout springs.

        NOTE(review): ``max_nodes`` is ignored and the matrix is 11x10
        (not square) -- slicing to (n, n) works only for n <= 10; confirm
        against MAXIMUM_NODES.
        """
        return np.array(
            [
                [0, 1, 1, 0, 1, 0, 1, 0, 0, 0],
                [1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
                [1, 1, 0, 1, 1, 0, 1, 0, 0, 0],
                [0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
                [1, 1, 1, 0, 0, 0, 0, 0, 1, 1],
                [0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
                [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
                [1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
            ]
        )
12db50771487256b50f412ca46c79617ec01840d | 1,269 | py | Python | sprox/_widgetselector.py | carl-wallace/sprox | 69c8639b86318c28bbaad36125232d144d8be380 | [
"MIT"
] | 3 | 2015-07-03T16:31:22.000Z | 2018-04-19T04:26:02.000Z | sprox/_widgetselector.py | carl-wallace/sprox | 69c8639b86318c28bbaad36125232d144d8be380 | [
"MIT"
] | 8 | 2015-02-23T23:01:50.000Z | 2021-07-06T14:10:26.000Z | sprox/_widgetselector.py | carl-wallace/sprox | 69c8639b86318c28bbaad36125232d144d8be380 | [
"MIT"
] | 7 | 2015-06-14T04:07:53.000Z | 2020-04-28T13:50:50.000Z | """
widgetselecter Module
this contains the class which allows the ViewConfig to select the appropriate widget for the given field
Classes:
Name Description
WidgetSelecter Parent Class
SAWidgetSelector Selecter Based on sqlalchemy field types
DatabaseViewWidgetSelector Database View always selects the same widget
TableDefWidgetSelector Table def fields use the same widget
Exceptions:
None
Functions:
None
Copyright (c) 2007 Christopher Perkins
Original Version by Christopher Perkins 2007
Released under MIT license.
"""
try: #pragma: no cover
from tw2.core import Widget
from tw2.forms.widgets import *
except ImportError as e: #pragma: no cover
from tw.api import Widget
from tw.forms.fields import *
from sprox.widgets import *
class WidgetSelector(object):
    """Base selector: maps any field to the generic ``Widget`` class."""
    def select(self, field):
        """Return the widget class used to render ``field``."""
        return Widget
class EntitiesViewWidgetSelector(WidgetSelector):
    """Selector for entity listings: every field renders as a label."""
    def select(self, field):
        """Return ``EntityLabelWidget`` regardless of the field."""
        return EntityLabelWidget
class EntityDefWidgetSelector(WidgetSelector):
    """Selector for entity definition views: one widget for all fields."""
    def select(self, field):
        """Return ``EntityDefWidget`` regardless of the field."""
        return EntityDefWidget
class RecordViewWidgetSelector(WidgetSelector):
    """Selector for single-record views: one widget for all fields."""
    def select(self, field):
        """Return ``RecordFieldWidget`` regardless of the field."""
        return RecordFieldWidget
| 25.38 | 104 | 0.72498 |
1edec41430d61099ab3400a63b0a30b857d6c741 | 158,653 | py | Python | tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py | anna-hope/python-firestore | aa7594c93b2d7480ac4283a1d1abafe76aa7a353 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py | anna-hope/python-firestore | aa7594c93b2d7480ac4283a1d1abafe76aa7a353 | [
"Apache-2.0"
] | null | null | null | tests/unit/gapic/firestore_admin_v1/test_firestore_admin.py | anna-hope/python-firestore | aa7594c93b2d7480ac4283a1d1abafe76aa7a353 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.firestore_admin_v1.services.firestore_admin import (
FirestoreAdminAsyncClient,
)
from google.cloud.firestore_admin_v1.services.firestore_admin import (
FirestoreAdminClient,
)
from google.cloud.firestore_admin_v1.services.firestore_admin import pagers
from google.cloud.firestore_admin_v1.services.firestore_admin import transports
from google.cloud.firestore_admin_v1.types import database
from google.cloud.firestore_admin_v1.types import database as gfa_database
from google.cloud.firestore_admin_v1.types import field
from google.cloud.firestore_admin_v1.types import field as gfa_field
from google.cloud.firestore_admin_v1.types import firestore_admin
from google.cloud.firestore_admin_v1.types import index
from google.cloud.firestore_admin_v1.types import index as gfa_index
from google.cloud.firestore_admin_v1.types import operation as gfa_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS tests."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Replace a localhost default endpoint with a distinct fake host so
    mTLS-endpoint derivation can be observed in tests."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ``.mtls`` into googleapis hosts,
    is idempotent, passes non-googleapis hosts through, and maps None to
    None."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert FirestoreAdminClient._get_default_mtls_endpoint(None) is None
    assert (
        FirestoreAdminClient._get_default_mtls_endpoint(api_endpoint)
        == api_mtls_endpoint
    )
    assert (
        FirestoreAdminClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        FirestoreAdminClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        FirestoreAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        FirestoreAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
    )
@pytest.mark.parametrize(
    "client_class", [FirestoreAdminClient, FirestoreAdminAsyncClient,]
)
def test_firestore_admin_client_from_service_account_info(client_class):
    """from_service_account_info builds a client wired to the credentials
    returned by the (mocked) service_account factory and to the default
    host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "firestore.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.FirestoreAdminGrpcTransport, "grpc"),
        (transports.FirestoreAdminGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_firestore_admin_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport toggles self-signed-JWT credentials exactly when
    always_use_jwt_access=True is passed."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class", [FirestoreAdminClient, FirestoreAdminAsyncClient,]
)
def test_firestore_admin_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json
    alias return a client carrying the factory's credentials and the
    default host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "firestore.googleapis.com:443"
def test_firestore_admin_client_get_transport_class():
    """get_transport_class returns an available transport by default and
    the gRPC transport when asked for "grpc" explicitly."""
    transport = FirestoreAdminClient.get_transport_class()
    available_transports = [
        transports.FirestoreAdminGrpcTransport,
    ]
    assert transport in available_transports
    transport = FirestoreAdminClient.get_transport_class("grpc")
    assert transport == transports.FirestoreAdminGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (FirestoreAdminClient, transports.FirestoreAdminGrpcTransport, "grpc"),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    FirestoreAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminClient),
)
@mock.patch.object(
    FirestoreAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminAsyncClient),
)
def test_firestore_admin_client_client_options(
    client_class, transport_class, transport_name
):
    """Client construction honors ClientOptions and the
    GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE
    environment variables when choosing endpoint and transport."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(FirestoreAdminClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(FirestoreAdminClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (FirestoreAdminClient, transports.FirestoreAdminGrpcTransport, "grpc", "true"),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (FirestoreAdminClient, transports.FirestoreAdminGrpcTransport, "grpc", "false"),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    FirestoreAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminClient),
)
@mock.patch.object(
    FirestoreAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_firestore_admin_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client switches to the
    mTLS endpoint only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    client certificate (explicit or ADC-discovered) is available."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                # Cert usage disabled by env: plain endpoint, no cert source.
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                # Cert usage enabled: the explicit callback and mTLS endpoint win.
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        # NOTE(review): `client` here is the instance left over
                        # from the previous with-block; DEFAULT_ENDPOINT is a
                        # class attribute, so the value is the same either way.
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                # With no cert available at all, "auto" falls back to the
                # plain endpoint regardless of the env flag.
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize(
    "client_class", [FirestoreAdminClient, FirestoreAdminAsyncClient]
)
@mock.patch.object(
    FirestoreAdminClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminClient),
)
@mock.patch.object(
    FirestoreAdminAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(FirestoreAdminAsyncClient),
)
def test_firestore_admin_client_get_mtls_endpoint_and_cert_source(client_class):
    """Exercise get_mtls_endpoint_and_cert_source under each env-var combination."""
    cert_source_cb = mock.Mock()
    # GOOGLE_API_USE_CLIENT_CERTIFICATE == "true": both the provided cert
    # source and the endpoint override are honored.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        endpoint_override = "foo"
        opts = client_options.ClientOptions(
            client_cert_source=cert_source_cb, api_endpoint=endpoint_override
        )
        endpoint, source = client_class.get_mtls_endpoint_and_cert_source(opts)
        assert endpoint == endpoint_override
        assert source == cert_source_cb
    # GOOGLE_API_USE_CLIENT_CERTIFICATE == "false": the cert source is dropped
    # but the endpoint override is kept.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        cert_source_cb = mock.Mock()
        endpoint_override = "foo"
        opts = client_options.ClientOptions(
            client_cert_source=cert_source_cb, api_endpoint=endpoint_override
        )
        endpoint, source = client_class.get_mtls_endpoint_and_cert_source(opts)
        assert endpoint == endpoint_override
        assert source is None
    # GOOGLE_API_USE_MTLS_ENDPOINT == "never": plain endpoint, no cert source.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        endpoint, source = client_class.get_mtls_endpoint_and_cert_source()
        assert endpoint == client_class.DEFAULT_ENDPOINT
        assert source is None
    # GOOGLE_API_USE_MTLS_ENDPOINT == "always": mTLS endpoint, no cert source.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        endpoint, source = client_class.get_mtls_endpoint_and_cert_source()
        assert endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert source is None
    # "auto" (the default patched above) with no default client cert available:
    # fall back to the plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            endpoint, source = client_class.get_mtls_endpoint_and_cert_source()
            assert endpoint == client_class.DEFAULT_ENDPOINT
            assert source is None
    # "auto" with a default client cert available: mTLS endpoint plus the
    # discovered cert source.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=cert_source_cb,
            ):
                endpoint, source = client_class.get_mtls_endpoint_and_cert_source()
                assert endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert source == cert_source_cb
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (FirestoreAdminClient, transports.FirestoreAdminGrpcTransport, "grpc"),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_firestore_admin_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes given via ClientOptions must be forwarded to the transport."""
    scoped_options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=scoped_options, transport=transport_name)
        # The transport must receive exactly the scopes from the options object.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            FirestoreAdminClient,
            transports.FirestoreAdminGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_firestore_admin_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials file named in ClientOptions must reach the transport verbatim."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=file_options, transport=transport_name)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_firestore_admin_client_client_options_from_dict():
    """A plain dict passed as client_options must behave like a ClientOptions object."""
    with mock.patch(
        "google.cloud.firestore_admin_v1.services.firestore_admin.transports.FirestoreAdminGrpcTransport.__init__"
    ) as transport_init:
        transport_init.return_value = None
        client = FirestoreAdminClient(
            client_options={"api_endpoint": "squid.clam.whelk"}
        )
        # The dict's api_endpoint must be used as the transport host.
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            FirestoreAdminClient,
            transports.FirestoreAdminGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            FirestoreAdminAsyncClient,
            transports.FirestoreAdminGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_firestore_admin_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from a file must be the ones used to create the channel."""
    file_options = client_options.ClientOptions(credentials_file="credentials.json")
    # First: the transport itself receives the file path untouched.
    with mock.patch.object(transport_class, "__init__") as transport_init:
        transport_init.return_value = None
        client = client_class(client_options=file_options, transport=transport_name)
        transport_init.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Second: the credentials parsed out of the file — not ADC — must be passed
    # to gRPC channel creation.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        file_creds = ga_credentials.AnonymousCredentials()
        adc_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (adc_creds, None)
        client = client_class(client_options=file_options, transport=transport_name)
        create_channel.assert_called_with(
            "firestore.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            scopes=None,
            default_host="firestore.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [firestore_admin.CreateIndexRequest, dict,])
def test_create_index(request_type, transport: str = "grpc"):
    """create_index forwards the request and wraps the LRO in a future."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Proto3 fields are all optional, so an empty request is valid; the real
    # API is mocked out below.
    req = request_type()
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.create_index(req)
        # Exactly one RPC, carrying the canonical request type.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.CreateIndexRequest()
    # The client wraps the raw Operation in an operation future.
    assert isinstance(response, future.Future)
def test_create_index_empty_call():
    """Calling create_index() with no arguments still sends a default request."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        client.create_index()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.CreateIndexRequest()
@pytest.mark.asyncio
async def test_create_index_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.CreateIndexRequest
):
    """Async create_index forwards the request and yields an operation future."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    req = request_type()  # proto3: an empty request is acceptable
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.create_index(req)
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.CreateIndexRequest()
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_index_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_create_index_async(request_type=dict)
def test_create_index_field_headers():
    """The routing metadata must carry the request's parent field."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Values embedded in the HTTP/1.1 URI must also travel as field headers.
    req = firestore_admin.CreateIndexRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        client.create_index(req)
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == req
    # The routing header must name the parent.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_index_field_headers_async():
    """Async variant: routing metadata must carry the request's parent field."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Values embedded in the HTTP/1.1 URI must also travel as field headers.
    req = firestore_admin.CreateIndexRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.create_index(req)
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == req
    # The routing header must name the parent.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_create_index_flattened():
    """Flattened keyword args must be folded into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.create_index), "__call__") as stub:
        stub.return_value = operations_pb2.Operation(name="operations/op")
        # Pass each flattened field as a keyword argument.
        client.create_index(
            parent="parent_value", index=gfa_index.Index(name="name_value"),
        )
        # The request object sent over the wire must reflect both fields.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent = call_args[0]
        assert sent.parent == "parent_value"
        assert sent.index == gfa_index.Index(name="name_value")
def test_create_index_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.create_index(
            firestore_admin.CreateIndexRequest(),
            parent="parent_value",
            index=gfa_index.Index(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_index_flattened_async():
    """Async: flattened keyword args must be folded into the request message.

    Fix: removed a dead ``call.return_value = operations_pb2.Operation(...)``
    assignment that was immediately overwritten by the FakeUnaryUnaryCall.
    """
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_index), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_index(
            parent="parent_value", index=gfa_index.Index(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].index
        mock_val = gfa_index.Index(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_index_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.create_index(
            firestore_admin.CreateIndexRequest(),
            parent="parent_value",
            index=gfa_index.Index(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [firestore_admin.ListIndexesRequest, dict,])
def test_list_indexes(request_type, transport: str = "grpc"):
    """list_indexes forwards the request and returns a pager with the token."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    req = request_type()  # proto3: an empty request is acceptable
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        stub.return_value = firestore_admin.ListIndexesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_indexes(req)
        # Exactly one RPC, carrying the canonical request type.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.ListIndexesRequest()
    # The response is wrapped in a pager that surfaces the token.
    assert isinstance(response, pagers.ListIndexesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_indexes_empty_call():
    """Calling list_indexes() with no arguments still sends a default request."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        client.list_indexes()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.ListIndexesRequest()
@pytest.mark.asyncio
async def test_list_indexes_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.ListIndexesRequest
):
    """Async list_indexes forwards the request and returns an async pager."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    req = request_type()  # proto3: an empty request is acceptable
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListIndexesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_indexes(req)
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.ListIndexesRequest()
    assert isinstance(response, pagers.ListIndexesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_indexes_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_list_indexes_async(request_type=dict)
def test_list_indexes_field_headers():
    """The routing metadata must carry the request's parent field."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Values embedded in the HTTP/1.1 URI must also travel as field headers.
    req = firestore_admin.ListIndexesRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        stub.return_value = firestore_admin.ListIndexesResponse()
        client.list_indexes(req)
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == req
    # The routing header must name the parent.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_indexes_field_headers_async():
    """Async variant: routing metadata must carry the request's parent field."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Values embedded in the HTTP/1.1 URI must also travel as field headers.
    req = firestore_admin.ListIndexesRequest()
    req.parent = "parent/value"
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListIndexesResponse()
        )
        await client.list_indexes(req)
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == req
    # The routing header must name the parent.
    _, _, call_kwargs = stub.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in call_kwargs["metadata"]
def test_list_indexes_flattened():
    """A flattened parent argument must be folded into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as stub:
        stub.return_value = firestore_admin.ListIndexesResponse()
        # Pass the flattened field as a keyword argument.
        client.list_indexes(parent="parent_value",)
        # The request object sent over the wire must reflect it.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0].parent == "parent_value"
def test_list_indexes_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    with pytest.raises(ValueError):
        client.list_indexes(
            firestore_admin.ListIndexesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_indexes_flattened_async():
    """Async: a flattened parent argument must be folded into the request.

    Fix: removed a dead ``call.return_value = ListIndexesResponse()``
    assignment that was immediately overwritten by the FakeUnaryUnaryCall.
    """
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListIndexesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_indexes(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_indexes_flattened_error_async():
    """Async: mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.list_indexes(
            firestore_admin.ListIndexesRequest(), parent="parent_value",
        )
def test_list_indexes_pager(transport_name: str = "grpc"):
    """Iterating the pager must walk every index across all staged pages.

    Fix: ``AnonymousCredentials`` was passed as a class instead of an
    instance (missing ``()``).
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            firestore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_indexes(request={})
        # The pager must carry the routing metadata for the (empty) parent.
        assert pager._metadata == metadata
        results = list(pager)
        # 3 + 0 + 1 + 2 indexes across the four pages.
        assert len(results) == 6
        assert all(isinstance(i, index.Index) for i in results)
def test_list_indexes_pages(transport_name: str = "grpc"):
    """Each page's raw next_page_token must match the staged responses.

    Fix: ``AnonymousCredentials`` was passed as a class instead of an
    instance (missing ``()``).
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_indexes), "__call__") as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            firestore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,
        )
        pages = list(client.list_indexes(request={}).pages)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_indexes_async_pager():
    """Async iteration over the pager must yield every index across pages.

    Fix: ``AnonymousCredentials`` was passed as a class instead of an
    instance (missing ``()``).
    """
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            firestore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,
        )
        async_pager = await client.list_indexes(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 indexes across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, index.Index) for i in responses)
@pytest.mark.asyncio
async def test_list_indexes_async_pages():
    """Async page iteration must surface each staged next_page_token.

    Fix: ``AnonymousCredentials`` was passed as a class instead of an
    instance (missing ``()``).
    """
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_indexes), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages; RuntimeError marks exhaustion.
        call.side_effect = (
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(), index.Index(),],
                next_page_token="abc",
            ),
            firestore_admin.ListIndexesResponse(indexes=[], next_page_token="def",),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(),], next_page_token="ghi",
            ),
            firestore_admin.ListIndexesResponse(
                indexes=[index.Index(), index.Index(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_indexes(request={})).pages:
            pages.append(page_)
        # The final page has no token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [firestore_admin.GetIndexRequest, dict,])
def test_get_index(request_type, transport: str = "grpc"):
    """get_index forwards the request and surfaces the Index fields."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    req = request_type()  # proto3: an empty request is acceptable
    with mock.patch.object(type(client.transport.get_index), "__call__") as stub:
        stub.return_value = index.Index(
            name="name_value",
            query_scope=index.Index.QueryScope.COLLECTION,
            state=index.Index.State.CREATING,
        )
        response = client.get_index(req)
        # Exactly one RPC, carrying the canonical request type.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.GetIndexRequest()
    # The response must expose the stubbed Index fields unchanged.
    assert isinstance(response, index.Index)
    assert response.name == "name_value"
    assert response.query_scope == index.Index.QueryScope.COLLECTION
    assert response.state == index.Index.State.CREATING
def test_get_index_empty_call():
    """Calling get_index() with no arguments still sends a default request."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_index), "__call__") as stub:
        client.get_index()
        stub.assert_called()
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.GetIndexRequest()
@pytest.mark.asyncio
async def test_get_index_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.GetIndexRequest
):
    """Async get_index forwards the request and surfaces the Index fields."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    req = request_type()  # proto3: an empty request is acceptable
    with mock.patch.object(type(client.transport.get_index), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            index.Index(
                name="name_value",
                query_scope=index.Index.QueryScope.COLLECTION,
                state=index.Index.State.CREATING,
            )
        )
        response = await client.get_index(req)
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == firestore_admin.GetIndexRequest()
    # The response must expose the stubbed Index fields unchanged.
    assert isinstance(response, index.Index)
    assert response.name == "name_value"
    assert response.query_scope == index.Index.QueryScope.COLLECTION
    assert response.state == index.Index.State.CREATING
@pytest.mark.asyncio
async def test_get_index_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_get_index_async(request_type=dict)
def test_get_index_field_headers():
    """Routing metadata (x-goog-request-params) is derived from request.name."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Values that appear in the HTTP/1.1 URI are mirrored into a field
    # header, so give the routed field a non-empty value.
    request = firestore_admin.GetIndexRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_index), "__call__") as rpc:
        rpc.return_value = index.Index()
        client.get_index(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    # The routing header was attached to the outgoing metadata.
    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_get_index_field_headers_async():
    """Routing metadata is derived from request.name on the async path too."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.GetIndexRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_index), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
        await client.get_index(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
def test_get_index_flattened():
    """The flattened `name` keyword is copied into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.get_index), "__call__") as rpc:
        rpc.return_value = index.Index()
        # Invoke with a truthy value for each flattened field.
        client.get_index(name="name_value",)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].name == "name_value"
def test_get_index_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.get_index(
            firestore_admin.GetIndexRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_index_flattened_async():
    """Flattened keyword arguments are copied into the request (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_index), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # bare `index.Index()` assignment here was a dead store, immediately
        # overwritten by the awaitable below; it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_index(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_index_flattened_error_async():
    """Mixing a request object with flattened fields raises ValueError (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.get_index(
            firestore_admin.GetIndexRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [firestore_admin.DeleteIndexRequest, dict,])
def test_delete_index(request_type, transport: str = "grpc"):
    """delete_index issues exactly one RPC and returns None."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        rpc.return_value = None
        response = client.delete_index(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.DeleteIndexRequest()

    # delete_index has no payload.
    assert response is None
def test_delete_index_empty_call():
    """delete_index() with no arguments sends an empty DeleteIndexRequest."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        client.delete_index()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.DeleteIndexRequest()
@pytest.mark.asyncio
async def test_delete_index_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.DeleteIndexRequest
):
    """Async delete_index issues one RPC and resolves to None."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_index(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.DeleteIndexRequest()

    assert response is None
@pytest.mark.asyncio
async def test_delete_index_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_delete_index_async(request_type=dict)
def test_delete_index_field_headers():
    """Routing metadata is derived from request.name for delete_index."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.DeleteIndexRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        rpc.return_value = None
        client.delete_index(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_delete_index_field_headers_async():
    """Routing metadata is derived from request.name (async delete_index)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.DeleteIndexRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_index(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
def test_delete_index_flattened():
    """The flattened `name` keyword is copied into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.delete_index), "__call__") as rpc:
        rpc.return_value = None
        # Invoke with a truthy value for each flattened field.
        client.delete_index(name="name_value",)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].name == "name_value"
def test_delete_index_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.delete_index(
            firestore_admin.DeleteIndexRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_index_flattened_async():
    """Flattened keyword arguments are copied into the request (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_index), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # plain `None` assignment here was a dead store, immediately
        # overwritten by the awaitable below; it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_index(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_index_flattened_error_async():
    """Mixing a request object with flattened fields raises ValueError (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.delete_index(
            firestore_admin.DeleteIndexRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [firestore_admin.GetFieldRequest, dict,])
def test_get_field(request_type, transport: str = "grpc"):
    """get_field issues one RPC and returns the Field faked by the stub."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        rpc.return_value = field.Field(name="name_value",)
        response = client.get_field(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.GetFieldRequest()

    # The faked payload comes back unchanged.
    assert isinstance(response, field.Field)
    assert response.name == "name_value"
def test_get_field_empty_call():
    """get_field() with no arguments sends an empty GetFieldRequest."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        client.get_field()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.GetFieldRequest()
@pytest.mark.asyncio
async def test_get_field_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.GetFieldRequest
):
    """Async get_field issues one RPC and resolves to the faked Field."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            field.Field(name="name_value",)
        )
        response = await client.get_field(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.GetFieldRequest()

    assert isinstance(response, field.Field)
    assert response.name == "name_value"
@pytest.mark.asyncio
async def test_get_field_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_get_field_async(request_type=dict)
def test_get_field_field_headers():
    """Routing metadata is derived from request.name for get_field."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.GetFieldRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        rpc.return_value = field.Field()
        client.get_field(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_get_field_field_headers_async():
    """Routing metadata is derived from request.name (async get_field)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.GetFieldRequest()
    request.name = "name/value"

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(field.Field())
        await client.get_field(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in keyword["metadata"]
def test_get_field_flattened():
    """The flattened `name` keyword is copied into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.get_field), "__call__") as rpc:
        rpc.return_value = field.Field()
        # Invoke with a truthy value for each flattened field.
        client.get_field(name="name_value",)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].name == "name_value"
def test_get_field_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.get_field(
            firestore_admin.GetFieldRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_field_flattened_async():
    """Flattened keyword arguments are copied into the request (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_field), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # bare `field.Field()` assignment here was a dead store, immediately
        # overwritten by the awaitable below; it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(field.Field())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_field(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_field_flattened_error_async():
    """Mixing a request object with flattened fields raises ValueError (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.get_field(
            firestore_admin.GetFieldRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [firestore_admin.UpdateFieldRequest, dict,])
def test_update_field(request_type, transport: str = "grpc"):
    """update_field issues one RPC and wraps the Operation in a Future."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_field(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.UpdateFieldRequest()

    # Long-running operations are surfaced as futures.
    assert isinstance(response, future.Future)
def test_update_field_empty_call():
    """update_field() with no arguments sends an empty UpdateFieldRequest."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        client.update_field()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.UpdateFieldRequest()
@pytest.mark.asyncio
async def test_update_field_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.UpdateFieldRequest
):
    """Async update_field issues one RPC and resolves to an operation future."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_field(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.UpdateFieldRequest()

    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_field_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_update_field_async(request_type=dict)
def test_update_field_field_headers():
    """Routing metadata is derived from the nested request.field.name."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-visible values are mirrored into the x-goog-request-params header;
    # here the routed value lives on a nested message.
    request = firestore_admin.UpdateFieldRequest()
    request.field.name = "field.name/value"

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        client.update_field(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "field.name=field.name/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_update_field_field_headers_async():
    """Routing metadata is derived from request.field.name (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.UpdateFieldRequest()
    request.field.name = "field.name/value"

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_field(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "field.name=field.name/value",) in keyword["metadata"]
def test_update_field_flattened():
    """The flattened `field` keyword is copied into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.update_field), "__call__") as rpc:
        rpc.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke with a truthy value for each flattened field.
        client.update_field(field=gfa_field.Field(name="name_value"),)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].field == gfa_field.Field(name="name_value")
def test_update_field_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.update_field(
            firestore_admin.UpdateFieldRequest(),
            field=gfa_field.Field(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_field_flattened_async():
    """Flattened keyword arguments are copied into the request (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_field), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # plain Operation assignment here was a dead store, immediately
        # overwritten by the awaitable below; it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_field(field=gfa_field.Field(name="name_value"),)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].field
        mock_val = gfa_field.Field(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_field_flattened_error_async():
    """Mixing a request object with flattened fields raises ValueError (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.update_field(
            firestore_admin.UpdateFieldRequest(),
            field=gfa_field.Field(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [firestore_admin.ListFieldsRequest, dict,])
def test_list_fields(request_type, transport: str = "grpc"):
    """list_fields issues one RPC and returns a pager over the response."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        rpc.return_value = firestore_admin.ListFieldsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_fields(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.ListFieldsRequest()

    # The raw response is wrapped in a pager that still exposes its token.
    assert isinstance(response, pagers.ListFieldsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_fields_empty_call():
    """list_fields() with no arguments sends an empty ListFieldsRequest."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        client.list_fields()
        rpc.assert_called()
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.ListFieldsRequest()
@pytest.mark.asyncio
async def test_list_fields_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.ListFieldsRequest
):
    """Async list_fields issues one RPC and resolves to an async pager."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Proto3 makes every field optional, so an empty request is valid.
    request = request_type()

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListFieldsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_fields(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == firestore_admin.ListFieldsRequest()

    assert isinstance(response, pagers.ListFieldsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_fields_async_from_dict():
    """The async path must also accept a plain dict as the request."""
    await test_list_fields_async(request_type=dict)
def test_list_fields_field_headers():
    """Routing metadata is derived from request.parent for list_fields."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.ListFieldsRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        rpc.return_value = firestore_admin.ListFieldsResponse()
        client.list_fields(request)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
@pytest.mark.asyncio
async def test_list_fields_field_headers_async():
    """Routing metadata is derived from request.parent (async list_fields)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-visible values are mirrored into the x-goog-request-params header.
    request = firestore_admin.ListFieldsRequest()
    request.parent = "parent/value"

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListFieldsResponse()
        )
        await client.list_fields(request)

        assert len(rpc.mock_calls)
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0] == request

    _, _, keyword = rpc.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in keyword["metadata"]
def test_list_fields_flattened():
    """The flattened `parent` keyword is copied into the request message."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with mock.patch.object(type(client.transport.list_fields), "__call__") as rpc:
        rpc.return_value = firestore_admin.ListFieldsResponse()
        # Invoke with a truthy value for each flattened field.
        client.list_fields(parent="parent_value",)

        assert len(rpc.mock_calls) == 1
        _, positional, _ = rpc.mock_calls[0]
        assert positional[0].parent == "parent_value"
def test_list_fields_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)

    with pytest.raises(ValueError):
        client.list_fields(
            firestore_admin.ListFieldsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_fields_flattened_async():
    """Flattened keyword arguments are copied into the request (async path)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_fields), "__call__") as call:
        # Designate an appropriate return value for the call.  (A previous
        # plain ListFieldsResponse assignment here was a dead store,
        # immediately overwritten by the awaitable below; it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListFieldsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_fields(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_fields_flattened_error_async():
    """Mixing a request object with flattened fields raises ValueError (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with pytest.raises(ValueError):
        await client.list_fields(
            firestore_admin.ListFieldsRequest(), parent="parent_value",
        )
def test_list_fields_pager(transport_name: str = "grpc"):
    """Iterating the pager walks all pages and yields every Field."""
    client = FirestoreAdminClient(
        # Fix: instantiate the credentials — the class object itself was
        # passed before, unlike every other test in this module.
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_fields), "__call__") as call:
        # Pages of 3 / 0 / 1 / 2 fields; the trailing RuntimeError guards
        # against iterating past the final page.
        call.side_effect = (
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(), field.Field(), field.Field(),],
                next_page_token="abc",
            ),
            firestore_admin.ListFieldsResponse(fields=[], next_page_token="def",),
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(),], next_page_token="ghi",
            ),
            firestore_admin.ListFieldsResponse(fields=[field.Field(), field.Field(),],),
            RuntimeError,
        )

        metadata = tuple() + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_fields(request={})

        assert pager._metadata == metadata

        # Consume the pager; it should surface all 6 Field messages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, field.Field) for i in results)
def test_list_fields_pages(transport_name: str = "grpc"):
    """Verify list_fields exposes raw pages with the expected page tokens."""
    client = FirestoreAdminClient(
        # Instantiate the credentials object; every other test in this file
        # passes an instance, not the class itself.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_fields), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(), field.Field(), field.Field(),],
                next_page_token="abc",
            ),
            firestore_admin.ListFieldsResponse(fields=[], next_page_token="def",),
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(),], next_page_token="ghi",
            ),
            firestore_admin.ListFieldsResponse(fields=[field.Field(), field.Field(),],),
            RuntimeError,
        )
        pages = list(client.list_fields(request={}).pages)
        # The last page carries an empty token, signalling the end of results.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_fields_async_pager():
    """Verify the async list_fields pager yields every field across pages."""
    client = FirestoreAdminAsyncClient(
        # Instantiate the credentials object; every other test in this file
        # passes an instance, not the class itself.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_fields), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(), field.Field(), field.Field(),],
                next_page_token="abc",
            ),
            firestore_admin.ListFieldsResponse(fields=[], next_page_token="def",),
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(),], next_page_token="ghi",
            ),
            firestore_admin.ListFieldsResponse(fields=[field.Field(), field.Field(),],),
            RuntimeError,
        )
        async_pager = await client.list_fields(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 fields across the four mocked pages.
        assert len(responses) == 6
        assert all(isinstance(i, field.Field) for i in responses)
@pytest.mark.asyncio
async def test_list_fields_async_pages():
    """Verify the async pager exposes raw pages with the expected tokens."""
    client = FirestoreAdminAsyncClient(
        # Instantiate the credentials object; every other test in this file
        # passes an instance, not the class itself.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_fields), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(), field.Field(), field.Field(),],
                next_page_token="abc",
            ),
            firestore_admin.ListFieldsResponse(fields=[], next_page_token="def",),
            firestore_admin.ListFieldsResponse(
                fields=[field.Field(),], next_page_token="ghi",
            ),
            firestore_admin.ListFieldsResponse(fields=[field.Field(), field.Field(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_fields(request={})).pages:
            pages.append(page_)
        # The last page carries an empty token, signalling the end of results.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [firestore_admin.ExportDocumentsRequest, dict,]
)
def test_export_documents(request_type, transport: str = "grpc"):
    """export_documents sends the expected request and wraps the LRO result."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.export_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ExportDocumentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_export_documents_empty_call():
    """A bare export_documents() call still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        client.export_documents()
        call.assert_called()
        sent = call.mock_calls[0][1][0]
        assert sent == firestore_admin.ExportDocumentsRequest()
@pytest.mark.asyncio
async def test_export_documents_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.ExportDocumentsRequest
):
    """Async export_documents sends the expected request and wraps the LRO result."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.export_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ExportDocumentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_documents_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_export_documents_async(request_type=dict)
def test_export_documents_field_headers():
    """The request's resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ExportDocumentsRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.export_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_documents_field_headers_async():
    """Async variant: resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ExportDocumentsRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.export_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_export_documents_flattened():
    """Flattened keyword arguments are merged into the outgoing request."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        # Fake the RPC result so the client returns immediately.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke using only the flattened keyword form.
        client.export_documents(name="name_value",)
        # Exactly one RPC went out and the keyword landed on the request.
        assert len(call.mock_calls) == 1
        sent = call.mock_calls[0][1][0]
        assert sent.name == "name_value"
def test_export_documents_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ExportDocumentsRequest()
    with pytest.raises(ValueError):
        client.export_documents(request, name="name_value")
@pytest.mark.asyncio
async def test_export_documents_flattened_async():
    """Flattened keyword arguments reach the request on the async client."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_documents), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable fake is consumed by the async client; the redundant plain
        # return_value assignment that preceded it was dead code.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.export_documents(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_export_documents_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ExportDocumentsRequest()
    with pytest.raises(ValueError):
        await client.export_documents(request, name="name_value")
@pytest.mark.parametrize(
    "request_type", [firestore_admin.ImportDocumentsRequest, dict,]
)
def test_import_documents(request_type, transport: str = "grpc"):
    """import_documents sends the expected request and wraps the LRO result."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.import_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ImportDocumentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_import_documents_empty_call():
    """A bare import_documents() call still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        client.import_documents()
        call.assert_called()
        sent = call.mock_calls[0][1][0]
        assert sent == firestore_admin.ImportDocumentsRequest()
@pytest.mark.asyncio
async def test_import_documents_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.ImportDocumentsRequest
):
    """Async import_documents sends the expected request and wraps the LRO result."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.import_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ImportDocumentsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_documents_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_import_documents_async(request_type=dict)
def test_import_documents_field_headers():
    """The request's resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ImportDocumentsRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.import_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_documents_field_headers_async():
    """Async variant: resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ImportDocumentsRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.import_documents(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_import_documents_flattened():
    """Flattened keyword arguments are merged into the outgoing request."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        # Fake the RPC result so the client returns immediately.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Invoke using only the flattened keyword form.
        client.import_documents(name="name_value",)
        # Exactly one RPC went out and the keyword landed on the request.
        assert len(call.mock_calls) == 1
        sent = call.mock_calls[0][1][0]
        assert sent.name == "name_value"
def test_import_documents_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ImportDocumentsRequest()
    with pytest.raises(ValueError):
        client.import_documents(request, name="name_value")
@pytest.mark.asyncio
async def test_import_documents_flattened_async():
    """Flattened keyword arguments reach the request on the async client."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable fake is consumed by the async client; the redundant plain
        # return_value assignment that preceded it was dead code.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.import_documents(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_import_documents_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ImportDocumentsRequest()
    with pytest.raises(ValueError):
        await client.import_documents(request, name="name_value")
@pytest.mark.parametrize("request_type", [firestore_admin.GetDatabaseRequest, dict,])
def test_get_database(request_type, transport: str = "grpc"):
    """get_database sends the expected request and maps every response field."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = database.Database(
            name="name_value",
            location_id="location_id_value",
            type_=database.Database.DatabaseType.FIRESTORE_NATIVE,
            concurrency_mode=database.Database.ConcurrencyMode.OPTIMISTIC,
            etag="etag_value",
        )
        response = client.get_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.GetDatabaseRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, database.Database)
    assert response.name == "name_value"
    assert response.location_id == "location_id_value"
    assert response.type_ == database.Database.DatabaseType.FIRESTORE_NATIVE
    assert response.concurrency_mode == database.Database.ConcurrencyMode.OPTIMISTIC
    assert response.etag == "etag_value"
def test_get_database_empty_call():
    """A bare get_database() call still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        client.get_database()
        call.assert_called()
        sent = call.mock_calls[0][1][0]
        assert sent == firestore_admin.GetDatabaseRequest()
@pytest.mark.asyncio
async def test_get_database_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.GetDatabaseRequest
):
    """Async get_database sends the expected request and maps every response field."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            database.Database(
                name="name_value",
                location_id="location_id_value",
                type_=database.Database.DatabaseType.FIRESTORE_NATIVE,
                concurrency_mode=database.Database.ConcurrencyMode.OPTIMISTIC,
                etag="etag_value",
            )
        )
        response = await client.get_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.GetDatabaseRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, database.Database)
    assert response.name == "name_value"
    assert response.location_id == "location_id_value"
    assert response.type_ == database.Database.DatabaseType.FIRESTORE_NATIVE
    assert response.concurrency_mode == database.Database.ConcurrencyMode.OPTIMISTIC
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_database_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_get_database_async(request_type=dict)
def test_get_database_field_headers():
    """The request's resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.GetDatabaseRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        call.return_value = database.Database()
        client.get_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_database_field_headers_async():
    """Async variant: resource name is sent as an x-goog-request-params header."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.GetDatabaseRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(database.Database())
        await client.get_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_database_flattened():
    """Flattened keyword arguments are merged into the outgoing request."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        # Fake the RPC result so the client returns immediately.
        call.return_value = database.Database()
        # Invoke using only the flattened keyword form.
        client.get_database(name="name_value",)
        # Exactly one RPC went out and the keyword landed on the request.
        assert len(call.mock_calls) == 1
        sent = call.mock_calls[0][1][0]
        assert sent.name == "name_value"
def test_get_database_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.GetDatabaseRequest()
    with pytest.raises(ValueError):
        client.get_database(request, name="name_value")
@pytest.mark.asyncio
async def test_get_database_flattened_async():
    """Flattened keyword arguments reach the request on the async client."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_database), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable fake is consumed by the async client; the redundant plain
        # return_value assignment that preceded it was dead code.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(database.Database())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_database(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_database_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.GetDatabaseRequest()
    with pytest.raises(ValueError):
        await client.get_database(request, name="name_value")
@pytest.mark.parametrize("request_type", [firestore_admin.ListDatabasesRequest, dict,])
def test_list_databases(request_type, transport: str = "grpc"):
    """list_databases sends the expected request and returns the response type."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = firestore_admin.ListDatabasesResponse()
        response = client.list_databases(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ListDatabasesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, firestore_admin.ListDatabasesResponse)
def test_list_databases_empty_call():
    """A bare list_databases() call still sends a default request.

    Coverage failsafe: request == None and no flattened fields must work.
    """
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport-level callable so no real RPC is attempted.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        client.list_databases()
        call.assert_called()
        sent = call.mock_calls[0][1][0]
        assert sent == firestore_admin.ListDatabasesRequest()
@pytest.mark.asyncio
async def test_list_databases_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.ListDatabasesRequest
):
    """Async list_databases sends the expected request and returns the response type."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListDatabasesResponse()
        )
        response = await client.list_databases(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.ListDatabasesRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, firestore_admin.ListDatabasesResponse)
@pytest.mark.asyncio
async def test_list_databases_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_list_databases_async(request_type=dict)
def test_list_databases_field_headers():
    """The request's parent is sent as an x-goog-request-params header."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ListDatabasesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        call.return_value = firestore_admin.ListDatabasesResponse()
        client.list_databases(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_databases_field_headers_async():
    """Async variant: parent is sent as an x-goog-request-params header."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.ListDatabasesRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListDatabasesResponse()
        )
        await client.list_databases(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_databases_flattened():
    """Flattened keyword arguments are merged into the outgoing request."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        # Fake the RPC result so the client returns immediately.
        call.return_value = firestore_admin.ListDatabasesResponse()
        # Invoke using only the flattened keyword form.
        client.list_databases(parent="parent_value",)
        # Exactly one RPC went out and the keyword landed on the request.
        assert len(call.mock_calls) == 1
        sent = call.mock_calls[0][1][0]
        assert sent.parent == "parent_value"
def test_list_databases_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ListDatabasesRequest()
    with pytest.raises(ValueError):
        client.list_databases(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_databases_flattened_async():
    """Flattened keyword arguments reach the request on the async client."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_databases), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable fake is consumed by the async client; the redundant plain
        # return_value assignment that preceded it was dead code.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            firestore_admin.ListDatabasesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_databases(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_databases_flattened_error_async():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The request-object and flattened-keyword calling conventions are
    # mutually exclusive; the client must reject a call that uses both.
    request = firestore_admin.ListDatabasesRequest()
    with pytest.raises(ValueError):
        await client.list_databases(request, parent="parent_value")
@pytest.mark.parametrize("request_type", [firestore_admin.UpdateDatabaseRequest, dict,])
def test_update_database(request_type, transport: str = "grpc"):
    """update_database sends the expected request and wraps the LRO result."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.update_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.UpdateDatabaseRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_update_database_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        client.update_database()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # Even with no arguments the client must still send a default request.
        assert args[0] == firestore_admin.UpdateDatabaseRequest()
@pytest.mark.asyncio
async def test_update_database_async(
    transport: str = "grpc_asyncio", request_type=firestore_admin.UpdateDatabaseRequest
):
    """Async variant: update_database returns an LRO future."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.update_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == firestore_admin.UpdateDatabaseRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_database_async_from_dict():
    # Exercise the dict-typed request path of the async test above.
    await test_update_database_async(request_type=dict)
def test_update_database_field_headers():
    """The database name must be propagated as an x-goog-request-params header."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.UpdateDatabaseRequest()
    request.database.name = "database.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.update_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "database.name=database.name/value",) in kw[
        "metadata"
    ]
@pytest.mark.asyncio
async def test_update_database_field_headers_async():
    """Async variant: routing header derived from database.name must be sent."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = firestore_admin.UpdateDatabaseRequest()
    request.database.name = "database.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.update_database(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "database.name=database.name/value",) in kw[
        "metadata"
    ]
def test_update_database_flattened():
    """Flattened keyword arguments are copied into the request proto."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_database(
            database=gfa_database.Database(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].database
        mock_val = gfa_database.Database(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_database_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = FirestoreAdminClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_database(
            firestore_admin.UpdateDatabaseRequest(),
            database=gfa_database.Database(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_database_flattened_async():
    """Async variant: flattened keyword arguments populate the request proto."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_database), "__call__") as call:
        # Designate an appropriate return value for the call.  Only the
        # awaitable wrapper matters to the async client; the plain Operation
        # previously assigned first was dead code and has been removed.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_database(
            database=gfa_database.Database(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].database
        mock_val = gfa_database.Database(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_database_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_database(
            firestore_admin.UpdateDatabaseRequest(),
            database=gfa_database.Database(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
def test_credentials_transport_error():
    """Mutually exclusive client-constructor arguments must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.FirestoreAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FirestoreAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.FirestoreAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FirestoreAdminClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.FirestoreAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = FirestoreAdminClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = FirestoreAdminClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.FirestoreAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = FirestoreAdminClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built around a caller-supplied transport must expose it unchanged."""
    custom_transport = transports.FirestoreAdminGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    wrapped_client = FirestoreAdminClient(transport=custom_transport)
    assert wrapped_client.transport is custom_transport
def test_transport_get_channel():
    """Each concrete gRPC transport must expose a usable channel."""
    for transport_cls in (
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Constructing a transport with no credentials must fall back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """When no transport is specified the client falls back to gRPC."""
    default_client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(default_client.transport, transports.FirestoreAdminGrpcTransport)
def test_firestore_admin_base_transport_error():
    """Supplying both a credentials object and a credentials file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.FirestoreAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_firestore_admin_base_transport():
    """Every RPC stub on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.firestore_admin_v1.services.firestore_admin.transports.FirestoreAdminTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.FirestoreAdminTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "create_index",
        "list_indexes",
        "get_index",
        "delete_index",
        "get_field",
        "update_field",
        "list_fields",
        "export_documents",
        "import_documents",
        "get_database",
        "list_databases",
        "update_database",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_firestore_admin_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.firestore_admin_v1.services.firestore_admin.transports.FirestoreAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.FirestoreAdminTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
def test_firestore_admin_base_transport_with_adc():
    """With neither credentials nor a file, the base transport falls back to ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.firestore_admin_v1.services.firestore_admin.transports.FirestoreAdminTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.FirestoreAdminTransport()
        adc.assert_called_once()
def test_firestore_admin_auth_adc():
    """The client requests ADC with the service's default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        FirestoreAdminClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ],
)
def test_firestore_admin_transport_auth_adc(transport_class):
    """Transports pass caller scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.FirestoreAdminGrpcTransport, grpc_helpers),
        (transports.FirestoreAdminGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_firestore_admin_transport_create_channel(transport_class, grpc_helpers):
    """The transport asks its grpc helper for a channel with the expected options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "firestore.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/datastore",
            ),
            scopes=["1", "2"],
            default_host="firestore.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ],
)
def test_firestore_admin_grpc_transport_client_cert_source_for_mtls(transport_class):
    """Explicit ssl_channel_credentials wins; otherwise client_cert_source_for_mtls is used."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_firestore_admin_host_no_port():
    """An endpoint override without a port gets the default gRPC port (443)."""
    options = client_options.ClientOptions(api_endpoint="firestore.googleapis.com")
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "firestore.googleapis.com:443"
def test_firestore_admin_host_with_port():
    """An endpoint override that names a port keeps that port verbatim."""
    options = client_options.ClientOptions(api_endpoint="firestore.googleapis.com:8000")
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "firestore.googleapis.com:8000"
def test_firestore_admin_grpc_transport_channel():
    """A pre-built channel supplied to the transport must be used as-is."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.FirestoreAdminGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` (PEP 8) instead of `== None`: identity check, no custom __eq__.
    assert transport._ssl_channel_credentials is None
def test_firestore_admin_grpc_asyncio_transport_channel():
    """A pre-built aio channel supplied to the async transport must be used as-is."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.FirestoreAdminGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` (PEP 8) instead of `== None`: identity check, no custom __eq__.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ],
)
def test_firestore_admin_transport_channel_mtls_with_client_cert_source(
    transport_class,
):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # Deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.FirestoreAdminGrpcTransport,
        transports.FirestoreAdminGrpcAsyncIOTransport,
    ],
)
def test_firestore_admin_transport_channel_mtls_with_adc(transport_class):
    """Deprecated mTLS args with ADC-sourced SSL credentials still build the channel."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # Deprecated arguments must emit a DeprecationWarning.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_firestore_admin_grpc_lro_client():
    """The sync transport lazily builds and caches an LRO operations client."""
    client = FirestoreAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_firestore_admin_grpc_lro_async_client():
    """The async transport lazily builds and caches an async LRO operations client."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport
    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_collection_group_path():
    """collection_group_path() renders the canonical resource name."""
    actual = FirestoreAdminClient.collection_group_path("squid", "clam", "whelk")
    assert actual == "projects/squid/databases/clam/collectionGroups/whelk"
def test_parse_collection_group_path():
    """Building a collection-group path and parsing it back must round-trip."""
    parts = {
        "project": "octopus",
        "database": "oyster",
        "collection": "nudibranch",
    }
    path = FirestoreAdminClient.collection_group_path(**parts)
    assert FirestoreAdminClient.parse_collection_group_path(path) == parts
def test_database_path():
    """database_path() renders "projects/<project>/databases/<database>"."""
    actual = FirestoreAdminClient.database_path("cuttlefish", "mussel")
    assert actual == "projects/cuttlefish/databases/mussel"
def test_parse_database_path():
    """Building a database path and parsing it back must round-trip."""
    parts = {
        "project": "winkle",
        "database": "nautilus",
    }
    path = FirestoreAdminClient.database_path(**parts)
    assert FirestoreAdminClient.parse_database_path(path) == parts
def test_field_path():
    """field_path() renders the fully-qualified field resource name."""
    actual = FirestoreAdminClient.field_path("scallop", "abalone", "squid", "clam")
    assert actual == (
        "projects/scallop/databases/abalone/collectionGroups/squid/fields/clam"
    )
def test_parse_field_path():
    """Building a field path and parsing it back must round-trip."""
    parts = {
        "project": "whelk",
        "database": "octopus",
        "collection": "oyster",
        "field": "nudibranch",
    }
    path = FirestoreAdminClient.field_path(**parts)
    assert FirestoreAdminClient.parse_field_path(path) == parts
def test_index_path():
    """index_path() renders the fully-qualified index resource name."""
    actual = FirestoreAdminClient.index_path(
        "cuttlefish", "mussel", "winkle", "nautilus"
    )
    assert actual == (
        "projects/cuttlefish/databases/mussel/collectionGroups/winkle/indexes/nautilus"
    )
def test_parse_index_path():
    """Building an index path and parsing it back must round-trip."""
    parts = {
        "project": "scallop",
        "database": "abalone",
        "collection": "squid",
        "index": "clam",
    }
    path = FirestoreAdminClient.index_path(**parts)
    assert FirestoreAdminClient.parse_index_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path() renders "billingAccounts/<id>"."""
    actual = FirestoreAdminClient.common_billing_account_path("whelk")
    assert actual == "billingAccounts/whelk"
def test_parse_common_billing_account_path():
    """Building a billing-account path and parsing it back must round-trip."""
    parts = {
        "billing_account": "octopus",
    }
    path = FirestoreAdminClient.common_billing_account_path(**parts)
    assert FirestoreAdminClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path() renders "folders/<folder>"."""
    assert FirestoreAdminClient.common_folder_path("oyster") == "folders/oyster"
def test_parse_common_folder_path():
    """Building a folder path and parsing it back must round-trip."""
    parts = {
        "folder": "nudibranch",
    }
    path = FirestoreAdminClient.common_folder_path(**parts)
    assert FirestoreAdminClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path() renders "organizations/<org>"."""
    actual = FirestoreAdminClient.common_organization_path("cuttlefish")
    assert actual == "organizations/cuttlefish"
def test_parse_common_organization_path():
    """Building an organization path and parsing it back must round-trip."""
    parts = {
        "organization": "mussel",
    }
    path = FirestoreAdminClient.common_organization_path(**parts)
    assert FirestoreAdminClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path() renders "projects/<project>"."""
    assert FirestoreAdminClient.common_project_path("winkle") == "projects/winkle"
def test_parse_common_project_path():
    """Building a project path and parsing it back must round-trip."""
    parts = {
        "project": "nautilus",
    }
    path = FirestoreAdminClient.common_project_path(**parts)
    assert FirestoreAdminClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path() renders "projects/<project>/locations/<location>"."""
    actual = FirestoreAdminClient.common_location_path("scallop", "abalone")
    assert actual == "projects/scallop/locations/abalone"
def test_parse_common_location_path():
    """Building a location path and parsing it back must round-trip."""
    parts = {
        "project": "squid",
        "location": "clam",
    }
    path = FirestoreAdminClient.common_location_path(**parts)
    assert FirestoreAdminClient.parse_common_location_path(path) == parts
def test_client_with_default_client_info():
    """A caller-supplied client_info is forwarded to _prep_wrapped_messages."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the client constructor...
    with mock.patch.object(
        transports.FirestoreAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        client = FirestoreAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # ...and when constructing the transport class directly.
    with mock.patch.object(
        transports.FirestoreAdminTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = FirestoreAdminClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context manager closes the gRPC channel."""
    client = FirestoreAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the client context manager closes the transport channel."""
    # Renamed from ``transports`` to avoid shadowing the imported
    # ``transports`` module within this function.
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport, close_name in transport_channels.items():
        client = FirestoreAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager calls transport.close() on exit."""
    # Renamed from ``transports`` to avoid shadowing the imported
    # ``transports`` module within this function.
    transport_names = [
        "grpc",
    ]
    for transport in transport_names:
        client = FirestoreAdminClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (FirestoreAdminClient, transports.FirestoreAdminGrpcTransport),
        (FirestoreAdminAsyncClient, transports.FirestoreAdminGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must be built with the exchanged credentials.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| 38.601703 | 124 | 0.690463 |
0bee5aa9fac99a34c397a885b845d21c7415f583 | 944 | py | Python | app/main/forms.py | mungai-joel/pitch | 495b328756d35dcd91e3c4b570c1de8c7e1a5dc2 | [
"MIT"
] | null | null | null | app/main/forms.py | mungai-joel/pitch | 495b328756d35dcd91e3c4b570c1de8c7e1a5dc2 | [
"MIT"
] | null | null | null | app/main/forms.py | mungai-joel/pitch | 495b328756d35dcd91e3c4b570c1de8c7e1a5dc2 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField, SelectField
from wtforms.validators import Required
class PitchForm(FlaskForm):
    # Form used to submit a new pitch: a free-text body plus a fixed category.
    pitch = TextAreaField('Your Pitch', validators=[Required()])
    # my_category = StringField('Category', validators=[Required()])
    my_category = SelectField('Category', choices=[('Interview-Pitch','Interview Pitch'),('Product-Pitch','Product Pitch'),('Promotion-Pitch','Promotion Pitch'),('Business','Business'),('Academic','Academic'),('Political','Political'),('Technology','Technology'),('Health','Health')],validators=[Required()])
    submit = SubmitField('Pitch It!')
class CommentForm(FlaskForm):
    # Form used to post a comment on an existing pitch.
    comment = TextAreaField('Comment', validators=[Required()])
    submit = SubmitField('Post Comment')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Write something about yourself',validators=[Required()])
submit = SubmitField('Submit') | 55.529412 | 308 | 0.733051 |
b553ad9eef0d2ca3c3a7c8b84fa0bc133c09169f | 611 | py | Python | dreifaltigkeit/migrations/0014_auto_20200410_1008.py | normanjaeckel/Dreifaltigkeit2 | e9fb94a6ace8e11da85ba20fd296a283ffeea931 | [
"MIT"
] | null | null | null | dreifaltigkeit/migrations/0014_auto_20200410_1008.py | normanjaeckel/Dreifaltigkeit2 | e9fb94a6ace8e11da85ba20fd296a283ffeea931 | [
"MIT"
] | 6 | 2021-03-18T20:44:28.000Z | 2022-02-10T07:19:35.000Z | dreifaltigkeit/migrations/0014_auto_20200410_1008.py | normanjaeckel/Dreifaltigkeit2 | e9fb94a6ace8e11da85ba20fd296a283ffeea931 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-04-10 08:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations` (Django 2.2.12): alters the
    # Event.content field (German help text / verbose name shown below).
    # NOTE(review): keep the field kwargs exactly as generated so Django does
    # not detect spurious model drift on the next makemigrations run.

    dependencies = [("dreifaltigkeit", "0013_clericalwordaudiofile_hidden")]

    operations = [
        migrations.AlterField(
            model_name="event",
            name="content",
            field=models.TextField(
                blank=True,
                help_text="Beschreibung der Veranstaltung. Kein HTML erlaubt. Links im Markdown-Stil sind mit Einschränkungen möglich, d. h. [Text](URL).",
                verbose_name="Inhalt",
            ),
        )
    ]
| 29.095238 | 155 | 0.610475 |
64b2b237d9fdfbf18752e053dc8210241c0d5cae | 2,279 | py | Python | app.py | mayankrajcu/Authentication-User-and-Login-using-BCrypt-and-WerkZeug-Packages | bf6695b34af000e2f2a0058f78a85cea7603deed | [
"MIT"
] | 1 | 2019-07-23T18:59:31.000Z | 2019-07-23T18:59:31.000Z | Flask/07-User-Authentication/01-Flask-Login/app.py | Sandy1811/demandforecasting | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | [
"Apache-2.0"
] | 8 | 2021-02-08T20:32:03.000Z | 2022-03-11T23:56:31.000Z | Flask/07-User-Authentication/01-Flask-Login/app.py | Sandy1811/demandforecasting | fdb6878d93502773ba8da809c2de1b33c96fb9a0 | [
"Apache-2.0"
] | null | null | null | from myproject import app,db
from flask import render_template, redirect, request, url_for, flash,abort
from flask_login import login_user,login_required,logout_user
from myproject.models import User
from myproject.forms import LoginForm, RegistrationForm
from werkzeug.security import generate_password_hash, check_password_hash
@app.route('/')
def home():
    """Render the public landing page."""
    return render_template('home.html')
@app.route('/welcome')
@login_required
def welcome_user():
    """Page shown to authenticated users; anonymous visitors are redirected to login."""
    return render_template('welcome_user.html')
@app.route('/logout')
@login_required
def logout():
    """Log the current user out, flash a confirmation and return home."""
    logout_user()
    flash('You logged out!')
    return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the originally requested page.

    On GET (or failed validation) the login form is rendered. On a valid
    submission the user is looked up by email, the password is verified, and
    the browser is redirected either to the page saved by Flask-Login in the
    'next' query parameter or to the welcome page.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # Grab the user from our User Models table.
        user = User.query.filter_by(email=form.email.data).first()

        # Bug fix: check for None *before* dereferencing `user` — the original
        # order (`user.check_password(...) and user is not None`) raised
        # AttributeError whenever the email was unknown.
        if user is not None and user.check_password(form.password.data):
            login_user(user)
            flash('Logged in successfully.')

            # If the user was trying to visit a login-protected page, Flask
            # saved that URL as the 'next' query parameter.
            next_url = request.args.get('next')

            # Only follow same-site relative redirects (open-redirect
            # protection); `not next_url` also covers the empty-string case,
            # which would have crashed the original's `next[0]` indexing.
            if not next_url or not next_url.startswith('/'):
                next_url = url_for('welcome_user')
            return redirect(next_url)
    return render_template('login.html', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Display the registration form; on a valid submission create the account."""
    registration = RegistrationForm()
    if not registration.validate_on_submit():
        # GET request or failed validation: (re)display the form.
        return render_template('register.html', form=registration)

    new_user = User(
        email=registration.email.data,
        username=registration.username.data,
        password=registration.password.data,
    )
    db.session.add(new_user)
    db.session.commit()
    flash('Thanks for registering! Now you can login!')
    return redirect(url_for('login'))
if __name__ == '__main__':
    # Development entry point: run Flask's built-in server with the debugger on.
    app.run(debug=True)
| 30.797297 | 80 | 0.652918 |
c8d83832e4ceddc8185f597fd3db45954b2662ea | 1,829 | py | Python | teaching-asistant evaluation.py | turhansel/teaching-asistant-evaluation | a18dbc7f4743e84c0451dc51aa0a599256991e5e | [
"MIT"
] | 2 | 2021-03-17T12:39:46.000Z | 2022-02-23T20:34:43.000Z | teaching-asistant evaluation.py | turhansel/teaching-asistant-evaluation | a18dbc7f4743e84c0451dc51aa0a599256991e5e | [
"MIT"
] | null | null | null | teaching-asistant evaluation.py | turhansel/teaching-asistant-evaluation | a18dbc7f4743e84c0451dc51aa0a599256991e5e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 16:35:19 2020
@author: Turhan
"""
import pandas as pd
# --- Data preparation --------------------------------------------------------
# Column names of the UCI "Teaching Assistant Evaluation" data set.
# (Bug fix: this list was originally also called `sutun` and was later
# shadowed by the user-input vector below; a distinct name avoids that.)
feature_names = ["anadil", "egitmen", "kurs", "donemi",
                 "sinifbuyuklugu", "sinifozelligi"]
veri = pd.read_csv("tae.data", names=feature_names)
girdiler = veri.iloc[:, 0:-1]    # features: every column except the last
hedef = veri.iloc[:, -1:]        # target: the final class column

from sklearn.model_selection import train_test_split
X_egitim, X_test, y_egitim, y_test = train_test_split(girdiler, hedef, test_size=0.30,
                                                      random_state=45)

# --- Modelling ---------------------------------------------------------------
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
# .values.ravel() hands sklearn a 1-D target instead of a (n, 1) DataFrame,
# silencing DataConversionWarning without changing the fitted model.
dtc.fit(X_egitim, y_egitim.values.ravel())

# --- Model evaluation --------------------------------------------------------
tahmin_test = pd.DataFrame(dtc.predict(X_test))
tahmin_egitim = pd.DataFrame(dtc.predict(X_egitim))

from sklearn.metrics import confusion_matrix
cm_egitim = confusion_matrix(y_egitim, tahmin_egitim)
cm_test = confusion_matrix(y_test, tahmin_test)
print(cm_egitim)
print(cm_test)

from sklearn.metrics import accuracy_score
as_egitim = accuracy_score(y_egitim, tahmin_egitim)
as_test = accuracy_score(y_test, tahmin_test)
print("Eğitim Doğruluk Oranı >>>",as_egitim)
print("Test Doğruluk Oranı >>>",as_test)

# --- Deployment: classify one hand-entered sample ----------------------------
sutun = []
anadil = float(input("Ana Dilini Giriniz(inligizce ise 1 değilse 2): "))
sutun.append(anadil)
egitmen = float(input("Egitmen Giriniz(kategorik, 25 kategori): "))
sutun.append(egitmen)
kurs = float(input("Kurs Giriniz(kategorik, 26 kategori): "))
sutun.append(kurs)
donem = float(input("Dönem Giriniz(yaz ise 1 normal ise 2): "))
sutun.append(donem)
sinifbuyuklugu = float(input("Sınıf Büyüklüğünü Giriniz(sayısal): "))
sutun.append(sinifbuyuklugu)

import numpy as np
dizi = np.asarray(sutun)
dizi_rs = dizi.reshape(1, -1)    # sklearn expects a 2-D (1, n_features) array
tahmin = dtc.predict(dizi_rs)
print("Sınınf Özelliği Başarı Düzeyi(1=düşük, 2=orta, 3=yüksek): ", tahmin)
| 29.983607 | 87 | 0.734281 |
a8c0c684252cac1254090495e66acf9d868fef24 | 12,589 | py | Python | Miscellaneous_Tools/JobScript_Creator/JobScript_Creator_Class.py | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 7 | 2019-05-13T10:07:44.000Z | 2022-03-01T16:20:48.000Z | Miscellaneous_Tools/JobScript_Creator/JobScript_Creator_Class.py | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | null | null | null | Miscellaneous_Tools/JobScript_Creator/JobScript_Creator_Class.py | The-Kristina/CellComp | 29ec7690e0d9adb1a6214937ca41fd1dadce18c6 | [
"CNRI-Python",
"RSA-MD",
"Xnet",
"Net-SNMP",
"X11"
] | 3 | 2020-04-23T18:13:20.000Z | 2020-11-11T18:46:48.000Z | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# ----- Job Script Creator for SegClass & Tracking Jobs ----- #
# #
# ----- Creator : Kristina ULICNA ----- #
# #
# ----- Last updated : 13th May 2019 ----- #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ----- Class 'ProcessMovies' with 2 functions 'SegClass' & 'Tracking'
# to write JOB notepads files & submit them to the server automatically:
import os
import datetime
from Movie_Analysis_Pipeline.Single_Movie_Processing.Movie_Frame_Length import FindMovieLengthFromHDF
# You need to be connected to the server!
# Server directory absolute path: "/Volumes/lowegrp/JobServer/jobs/"
# Import-time sanity check: True only when the lab share is mounted
# (on a Mac it appears under /Volumes/lowegrp).
print ("Does server path exist? {}".format(os.path.exists("/Volumes/lowegrp/JobServer/jobs/")))
"""
Text of the Alan's template TRACKING job text file: ()
[job]
complete = False
id = 96b13ff2bc772a080b20d7a20f62b449
user = Alan
priority = 99
time = (2019-08-30)_10-02-51
module = bworker
lib_path = /home/alan/code/BayesianTracker/
func = SERVER_track
device = CPU
params = {'volume': ((0, 1200), (0, 1600), (-100000.0, 100000.0), (0, 2000)), 'path': '/mnt/lowe-sn00/Data/Alan/Anna_to_process/2017_04_24/pos4', 'config': {'GFP': 'MDCK_config_wildtype.json', 'RFP': 'MDCK_config_scribble_sparse.json'}}
"""
class ProcessMovies():
    """Create and submit SegClass / Tracking .job files for one movie position.

    Job files are written into the (mounted) JobServer directory, from which
    the server picks them up automatically. Run SegClass first: Tracking
    consumes the 'segmented.hdf5' produced by segmentation.
    """

    def __init__(self, xml_file=None, pos=8, data_date='17_07_31', exp_type='MDCK_WT_Pure', user='Kristina'):
        """Resolve position / date / experiment / user for this movie.

        Args:
            xml_file (str or None): absolute path to a tracks .xml file. When
                given, user / exp_type / date / position are parsed from its
                path components (".../<user>/<exp_type>/<date>/pos<X>/<dir>/<file>")
                and the explicit keyword arguments are ignored.
            pos, data_date, exp_type, user: used only when xml_file is None.
        """
        if xml_file is not None:
            # Path layout: .../<user>/<exp_type>/<data_date>/pos<pos>/<subdir>/<file>.xml
            parts = str(xml_file).split("/")
            self.pos = parts[-3].split('pos')[-1]
            self.data_date = parts[-4]
            self.exp_type = parts[-5]
            self.user = parts[-6]
        else:
            self.pos = pos
            self.data_date = data_date
            self.exp_type = exp_type
            self.user = user

        # Timestamp strings used in job names / bodies, e.g. "(2019-05-13)_10-02-51".
        # strftime replaces the original's manual zero-padding, same output.
        now = datetime.datetime.now()
        self.current_time = now.strftime("(%Y-%m-%d)_%H-%M-%S")
        self.today_date = now.strftime("%Y-%m-%d")
        self.jobs_dir = '/mnt/lowe-sn00/lowegrp/JobServer/jobs/'

    def SegClass_Old(self, BF=False, GFP=False, RFP=False, ResNet=False):
        """Write & submit a segmentation+classification job (legacy data layout).

        Args:
            BF, GFP, RFP (bool): True when a real .tif movie exists for the
                channel; False substitutes synthetic Gaussian noise.
            ResNet (bool): use the residual UNet instead of the classic one.

        Returns:
            str: the full text written into the .job file.
        """
        # Network architecture tag: 'UNet2D' or 'ResUNet2D'.
        network = 'U'
        if ResNet is True:
            network = 'Res' + network

        job_name = 'JOB_SegClass_{}_{}_{}_pos{}_fixed' \
            .format(self.today_date, self.user, self.data_date, self.pos)
        self.job_file = open('/Volumes/lowegrp/JobServer/jobs/' + job_name + '.job', 'w')

        # Channels with real movies get their .tif filename; absent channels
        # are replaced with Gaussian noise on the server side.
        movie = [BF, GFP, RFP]
        channels = ['BF', 'GFP', 'RFP']
        for order, item in enumerate(movie):
            if item:
                channels[order] += '_pos' + str(self.pos) + '.tif'
            else:
                channels[order] = 'GAUSSIAN_NOISE'

        path = '/mnt/lowe-sn00/Data/{}/{}/{}/pos{}/' \
            .format(str(self.user), str(self.exp_type), str(self.data_date), str(self.pos))
        string = '[job]\ncomplete = False\nid = Kristina_Segment_and_Classif\n'
        string += 'user = ' + str(self.user) + '\npriority = 99\n'
        string += 'time = ' + str(self.current_time) + '\nmodule = jobs\n'
        string += 'func = SERVER_segment_and_classify\ndevice = GPU\ntimeout = 3600\n'
        string += 'params = {"path": "' + str(path) + '", "image_dict": {"brightfield": "' + channels[0] + '", ' \
                  '"gfp": "' + channels[1] + '", "rfp": "' + channels[2] + '"}, "shape": (1200, 1600), "model": "' \
                  + str(network) + 'Net2D_competition", "unet_type": "' + str(network) + 'Net2D"}\n'

        print(string)
        self.job_file.write(string)
        self.job_file.close()
        return string

    def SegClass_New(self):
        """Write & submit a segmentation+classification job for the
        Cells_HeLa / PosX_aligned data layout (brightfield + GFP present,
        RFP replaced by Gaussian noise).

        Returns:
            str: the full text written into the .job file.
        """
        # (Removed the original's dead `'GAUSSIAN_NOISE'` expression and the
        # unused `network` variable — the params string hardcodes UNet2D.)
        job_name = 'JOB_SegClass_{}_{}_{}_pos{}' \
            .format(self.today_date, self.user, self.data_date, self.pos)
        self.job_file = open('/Volumes/lowegrp/JobServer/jobs/' + job_name + '.job', 'w')

        path = '/mnt/lowe-sn00/Data/{}/Cells_HeLa/{}/Pos{}/Pos{}_aligned/' \
            .format(str(self.user), str(self.data_date), str(self.pos), str(self.pos))
        string = '[job]\ncomplete = False\nid = Kristina_Segment_and_Classif\n'
        string += 'user = ' + str(self.user) + '\npriority = 99\n'
        string += 'time = ' + str(self.current_time) + '\nmodule = jobs\n'
        string += 'func = SERVER_segment_and_classify\ndevice = GPU\ntimeout = 3600\n'
        string += 'params = {"path": "' + str(path) + '", "image_dict": {"brightfield": "", "gfp": "", "rfp": "GAUSSIAN_NOISE"}, ' \
                  '"shape": (1200, 1600), "model": "UNet2D_competition", "unet_type": "UNet2D"}\n'

        print(string)
        self.job_file.write(string)
        self.job_file.close()
        return string

    def Tracking(self, to_track_GFP=False, to_track_RFP=False, timeout_seconds=7200, config_number=""):
        """Write & submit a BayesianTracker job for this position.

        Args:
            to_track_GFP, to_track_RFP (bool): channels selected for tracking
                (currently informational — the job params always list both).
            timeout_seconds (int): server-side timeout for the job.
            config_number (int or str): when non-empty, use the experimental
                'MDCK_config_Kristina_Try_<n>.json' config and the matching
                tracker_performance_evaluation output path.

        Returns:
            str: the full text written into the .job file.

        Raises:
            ValueError: if the movie length for self.data_date is unknown.
        """
        # Movie length (frame count) per acquisition date. Resolved before any
        # file is opened so an unknown date cannot leak a handle.
        # (Bug fix: the original fell through with `frame_volume` undefined
        # and crashed later with NameError for any other date.)
        if self.data_date == "17_03_27":
            frame_volume = 1447
        elif self.data_date == "17_07_24":
            frame_volume = 1105
        else:
            raise ValueError("Unknown movie length for data_date '{}'; "
                             "add it to ProcessMovies.Tracking().".format(self.data_date))

        job_name = 'JOB_Tracking_{}_{}_{}_pos{}'.format(self.today_date, self.user, self.data_date, self.pos)
        if config_number != "":
            job_name += "_Config_{}".format(config_number)
        self.job_file = open('/Volumes/lowegrp/JobServer/jobs/' + job_name + '.job', 'w')

        # Channels selected for tracking. (Bug fix: the original deleted from
        # the list while enumerating, which raised IndexError when both flags
        # were False; the list is informational only at the moment.)
        channels = [name for name, wanted in (("GFP", to_track_GFP), ("RFP", to_track_RFP)) if wanted]

        path = '/mnt/lowe-sn00/Data/{}/{}/{}/Pos{}' \
            .format(self.user, self.exp_type, self.data_date, self.pos)
        config = '"MDCK_config_wildtype.json"'
        if config_number != "":
            path = '/mnt/lowe-sn00/Data/{}/{}/{}/pos{}/tracker_performance_evaluation/tracks_try_{}/' \
                .format(self.user, self.exp_type, self.data_date, self.pos, config_number)
            config = "'MDCK_config_Kristina_Try_{}.json'".format(config_number)

        string = '[job]\ncomplete = False\nid = Kristina_Tracking\n'
        string += 'user = ' + str(self.user) + '\npriority = 99\n'
        string += 'time = ' + str(self.current_time) + '\nlib_path = /home/alan/code/BayesianTracker/\n'
        string += 'module = bworker\nfunc = SERVER_track\ndevice = CPU\ntimeout = ' + str(timeout_seconds) + '\n'
        string += 'params = {"path": "' + str(path) + '", "volume":((0,1200),(0,1600),(-100000.0, 100000.0),(0,' \
                  + str(frame_volume + 2) + ')), "config": {"GFP": ' + str(config) + ', "RFP": ' + str(config) + '}}'

        print(string)
        self.job_file.write(string)
        self.job_file.close()
        return string
| 48.984436 | 236 | 0.583525 |
1d3a6d14986cdd475d4a259f304e5640747ba8df | 962 | py | Python | kubernetes/test/test_v1_attached_volume.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_attached_volume.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_attached_volume.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_attached_volume import V1AttachedVolume
class TestV1AttachedVolume(unittest.TestCase):
    """ V1AttachedVolume unit test stubs """

    def setUp(self):
        # No fixtures needed for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1AttachedVolume(self):
        """
        Test V1AttachedVolume
        """
        # Auto-generated stub (swagger-codegen): model construction is left
        # as a FIXME until example attribute values are filled in.
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_attached_volume.V1AttachedVolume()
        pass
if __name__ == '__main__':
    # Allow running this stub directly with `python test_v1_attached_volume.py`.
    unittest.main()
| 21.377778 | 105 | 0.70894 |
f2e484a96fff7582a7ab2c55297a8ccb082729a0 | 564 | py | Python | testagent/parser/exceptions/__init__.py | patriziotufarolo/testagent | daf1a56bd208704dfd61d4c9f779f4ad7ee7d6cd | [
"BSD-3-Clause"
] | 1 | 2016-07-12T15:34:36.000Z | 2016-07-12T15:34:36.000Z | testagent/parser/exceptions/__init__.py | patriziotufarolo/testagent | daf1a56bd208704dfd61d4c9f779f4ad7ee7d6cd | [
"BSD-3-Clause"
] | null | null | null | testagent/parser/exceptions/__init__.py | patriziotufarolo/testagent | daf1a56bd208704dfd61d4c9f779f4ad7ee7d6cd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of cumulus-testagent.
# https://github.com/patriziotufarolo/cumulus-testagent
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, Patrizio Tufarolo <patrizio.tufarolo@studenti.unimi.it>
from testagent.parser.exceptions.collector_parsing import CollectorParsingException
from testagent.parser.exceptions.testcase_parsing import TestCaseParsingException
from testagent.parser.exceptions.testinstance_parsing import TestInstanceParsingException
| 40.285714 | 89 | 0.820922 |
1158ba968d56c57b059ff581d95dc90005942bd2 | 10,157 | py | Python | nutszebra_initialization.py | nutszebra/trainer | 9359c6ed01c5dad832e957e0adc1a41c79967044 | [
"MIT"
] | 5 | 2016-12-25T02:55:28.000Z | 2018-05-30T10:40:36.000Z | nutszebra_initialization.py | nutszebra/trainer | 9359c6ed01c5dad832e957e0adc1a41c79967044 | [
"MIT"
] | null | null | null | nutszebra_initialization.py | nutszebra/trainer | 9359c6ed01c5dad832e957e0adc1a41c79967044 | [
"MIT"
] | 2 | 2017-12-14T19:45:04.000Z | 2019-08-24T03:19:35.000Z | import numpy as np
class Initialization(object):
    """Weight-initialization helpers.

    The samplers (``gauss``, ``uniform``, ``orthonorm``, ``const``) return
    ``numpy.ndarray`` weights; the ``variance_*`` methods return the scalar
    variance prescribed by the LeCun, Xavier (Glorot) and He (ReLU/PReLU)
    initialization schemes.
    """

    def __init__(self):
        pass

    @staticmethod
    def gauss(sizes, variance=0.01, mean=0.0, dtype=np.float32):
        """Sample weights of shape ``sizes`` from N(``mean``, ``variance``).

        Args:
            sizes (tuple): output shape.
            variance (float): variance of the Gaussian distribution.
            mean (float): mean of the Gaussian distribution.
            dtype: NumPy dtype of the returned array.

        Returns:
            numpy.ndarray: Gaussian-distributed weights.
        """
        std = np.sqrt(variance)
        return np.array(np.random.normal(mean, std, sizes), dtype=dtype)

    @staticmethod
    def uniform(sizes, variance=0.01, mean=0.0, dtype=np.float32):
        """Sample weights from a symmetric uniform distribution.

        Var[U[-b, b]] = b^2 / 3, so b = sqrt(3 * variance) yields the
        requested variance; ``mean`` simply shifts the samples.

        Args:
            sizes (tuple): output shape.
            variance (float): variance of the uniform distribution.
            mean (float): mean of the uniform distribution.
            dtype: NumPy dtype of the returned array.

        Returns:
            numpy.ndarray: uniformly distributed weights.
        """
        high = np.sqrt(3 * variance)
        low = -high
        return np.array(mean + np.random.uniform(low, high, sizes), dtype=dtype)

    @staticmethod
    def orthonorm(mean, variance, sizes, random, dtype=np.float32):
        """Orthogonal ("orthonorm") initialization (Saxe et al., 2013).

        Paper: http://arxiv.org/abs/1312.6120

        For a conv layer with n_i input channels, n_o output channels and a
        k*k kernel use sizes=(n_o, k*k*n_i); for a fully-connected layer from
        z inputs to w outputs use sizes=(w, z).

        Args:
            mean (float): mean of the returned weights.
            variance (float): variance of the returned weights.
            sizes (tuple): output shape (rows, cols).
            random (callable): Initialization.gauss or Initialization.uniform.
            dtype: NumPy dtype of the returned array.

        Returns:
            numpy.ndarray: weights from an orthogonal factor, rescaled to the
            requested mean and variance.
        """
        u, _, v = np.linalg.svd(random(sizes, variance=variance, mean=mean), full_matrices=False)
        # SVD yields two orthogonal factors; keep the one whose shape matches.
        q = u if u.shape == sizes else v
        # Rescale to the requested variance, then shift to the requested mean
        # (the shift leaves the variance unchanged).
        q = q / np.sqrt(np.var(q)) * np.sqrt(variance)
        return np.array(q + (mean - np.mean(q)), dtype=dtype)

    @staticmethod
    def const(sizes, constant=0, dtype=np.float32):
        """Return an array of shape ``sizes`` filled with ``constant``.

        Args:
            sizes (tuple): output shape.
            constant (int or float): fill value.
            dtype: NumPy dtype of the returned array.

        Returns:
            numpy.ndarray: constant-valued weights (variance is 0).
        """
        # np.full is the single-allocation equivalent of `constant * np.ones`.
        return np.full(sizes, constant, dtype=dtype)

    @staticmethod
    def variance_lecun(node_input):
        """LeCun initialization variance: 1 / fan_in.

        Paper: Efficient Backprop (http://goo.gl/FXsRAq)

        Args:
            node_input (int): number of connections feeding into the node.

        Returns:
            float: the variance for LeCun initialization.
        """
        return 1.0 / node_input

    @staticmethod
    def variance_xavier(node_input, node_input_next):
        """Xavier (Glorot) initialization variance.

        Paper: Understanding the difficulty of training deep feedforward
        neural networks (http://goo.gl/dJyx2w)

        Pass fan_in only (forward case, node_input_next=None), fan_out only
        (backward case, node_input=None), or both (averaged case).

        Args:
            node_input (int or None): fan-in at layer l.
            node_input_next (int or None): fan-in at layer l+1.

        Returns:
            float: the variance for Xavier initialization.
        """
        if node_input is not None:
            if node_input_next is not None:
                return 2.0 / (node_input + node_input_next)   # averaged case
            else:
                return 1.0 / node_input                       # forward case
        else:
            return 1.0 / node_input_next                      # backward case

    @staticmethod
    def variance_relu(node_input, node_input_next, a=0.0):
        """He initialization variance for ReLU / PReLU units.

        Paper: Delving Deep into Rectifiers (http://arxiv.org/abs/1502.01852)

        ``a`` is the PReLU negative slope (0 for plain ReLU; PReLU is
        typically initialized with a=0.25). Same forward/backward/averaged
        convention as ``variance_xavier``.

        Args:
            node_input (int or None): fan-in at layer l.
            node_input_next (int or None): fan-in at layer l+1.
            a (float): PReLU slope parameter.

        Returns:
            float: the variance for He initialization.
        """
        if node_input is not None:
            if node_input_next is not None:
                return 4.0 / ((1 + a ** 2) * (node_input + node_input_next))  # averaged
            else:
                return 2.0 / ((1 + a ** 2) * node_input)                      # forward
        else:
            return 2.0 / ((1 + a ** 2) * node_input_next)                     # backward
| 29.526163 | 106 | 0.539431 |
2d40155b91d85d6057d93efcb199bfa54f5cac41 | 1,058 | py | Python | sdk/python/pulumi_azure_native/aadiam/v20170401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/aadiam/v20170401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/aadiam/v20170401/__init__.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .diagnostic_setting import *
from .get_diagnostic_setting import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource types with the Pulumi runtime so URNs
    deserialize back to their Python classes.
    (Auto-generated by the Pulumi SDK Generator - do not edit by hand.)
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Map the fully-qualified type token to its resource class.
            if typ == "azure-native:aadiam/v20170401:DiagnosticSetting":
                return DiagnosticSetting(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-native", "aadiam/v20170401", _module_instance)


# Registration happens once, at import time.
_register_module()
| 31.117647 | 97 | 0.687146 |
2188223cbd2ee0711d5d7acb7cbae5ac6d2d7d8d | 550 | py | Python | 2019/day1/fuel.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null | 2019/day1/fuel.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null | 2019/day1/fuel.py | scrubskip/adventofcode2018 | 8149908d1239759597fda575432cf3ec99019dc0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
def main():
    """Sum the recursive fuel requirement over every module mass in the input file.

    Reads one mass per line from day1input.txt.

    Returns:
        int: total fuel requirement (Advent of Code 2019 day 1, part 2).
    """
    # Bug fix: `with` guarantees the file handle is closed — the original
    # opened the file and never closed it. Iterating the handle directly
    # also avoids materializing all lines with readlines().
    requirement_sum = 0
    with open("day1input.txt", "r") as modules:
        for module in modules:
            requirement_sum += get_fuel_requirement_inclusive(module)
    return requirement_sum
def get_fuel_requirement(mass):
    """Fuel for a single mass: truncate(mass / 3) - 2 (may be negative)."""
    return int(int(mass) / 3) - 2


def get_fuel_requirement_inclusive(mass):
    """Fuel for the mass, plus fuel for that fuel, recursively, until <= 0."""
    grand_total = 0
    step = get_fuel_requirement(mass)
    while step > 0:
        grand_total += step
        step = get_fuel_requirement(step)
    return grand_total
if __name__ == "__main__":
    # Entry point: print the total fuel requirement for all modules.
    print(main())
| 18.965517 | 65 | 0.658182 |
155d6aa235fd0459c3b2855dbc58e4697ed98083 | 12,066 | py | Python | tests/python/unittest/test_meta_schedule_integration.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 11 | 2021-11-02T00:49:16.000Z | 2021-11-19T02:17:00.000Z | tests/python/unittest/test_meta_schedule_integration.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 16 | 2021-11-02T00:17:12.000Z | 2021-11-21T20:47:52.000Z | tests/python/unittest/test_meta_schedule_integration.py | psrivas2/relax | 4329af78eb1dc4c4ff8a61d3bf39aa4034e9cb2a | [
"Apache-2.0"
] | 4 | 2021-11-05T18:17:23.000Z | 2021-11-11T06:22:00.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration test for MetaSchedule"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay, te, tir
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tlcbench import load_quantized_bert_base
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument,missing-docstring,invalid-name
# TVMScript stand-in workload: `main` copies the 16-element buffer A into B,
# one element per iteration, inside a block named "matmul".
@tvm.script.ir_module
class MockModule:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None: # type: ignore
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16,), "float32")
        B = T.match_buffer(b, (16,), "float32")
        for i in T.serial(0, 16):
            with T.block("matmul"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]
# pylint: enable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument
def _has_torch():
import importlib.util # pylint: disable=unused-import,import-outside-toplevel
spec = importlib.util.find_spec("torch")
return spec is not None
requires_torch = pytest.mark.skipif(not _has_torch(), reason="torch is not installed")
def test_meta_schedule_apply_history_best_no_current():
    """Outside any ApplyHistoryBest scope, current() must report None."""
    assert ms.ApplyHistoryBest.current() is None
@requires_torch
def test_meta_schedule_integration_extract_from_resnet():
    """Task extraction from resnet_18 yields exactly the expected task names."""
    mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    extracted_tasks = ms.extract_task_from_relay(mod, target="llvm", params=params)
    # Fused operator names produced by Relay's fusion pass for this network.
    expected_task_names = [
        "fused_" + s
        for s in [
            "nn_max_pool2d",
            "nn_adaptive_avg_pool2d",
            "nn_dense_add",
            "nn_conv2d_add",
            "nn_conv2d_add_1",
            "nn_conv2d_add_2",
            "nn_conv2d_add_add_nn_relu",
            "nn_conv2d_add_add_nn_relu_1",
            "nn_conv2d_add_nn_relu",
            "nn_conv2d_add_nn_relu_1",
            "nn_conv2d_add_nn_relu_2",
            "nn_conv2d_add_nn_relu_3",
            "nn_conv2d_add_nn_relu_4",
            "nn_conv2d_add_nn_relu_5",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
            # The two tasks below are purely spatial and are ruled out by AutoScheduler
            "layout_transform",
            "layout_transform_reshape_squeeze",
        ]
    ]
    assert len(extracted_tasks) == len(expected_task_names)
    for t in extracted_tasks:
        assert t.task_name in expected_task_names, t.task_name
@requires_torch
def test_meta_schedule_integration_extract_from_bert_base():
    """Task extraction from bert_base yields the expected weights and shapes."""
    # task name -> (task weight, buffer shapes of the dispatched PrimFunc's
    # parameters, in parameter order).
    expected = {
        "fused_nn_dense_2": (
            12,
            [[64, 3072], [768, 3072], [64, 768]],
        ),
        "fused_nn_dense": (
            48,
            [[64, 768], [768, 768], [64, 768]],
        ),
        "fused_nn_dense_1": (
            12,
            [[64, 768], [3072, 768], [64, 3072]],
        ),
        "fused_subtract_add_sqrt_divide_multiply_add": (
            25,
            [[1, 64, 768], [1, 64, 1], [1, 64, 1], [768], [768], [1, 64, 768]],
        ),
        "fused_nn_batch_matmul": (
            24,
            [[12, 64, 64], [12, 64, 64], [12, 64, 64]],
        ),
        "fused_reshape_add_add": (
            24,
            [[64, 768], [768], [1, 64, 768], [1, 64, 768]],
        ),
        "fused_variance": (
            25,
            [[1, 64, 768], [1, 64, 1], [1, 64, 1]],
        ),
        "fused_mean": (
            25,
            [[1, 64, 768], [1, 64, 1]],
        ),
        "fused_reshape_add_reshape_transpose_reshape": (
            12,
            [[64, 768], [768], [12, 64, 64]],
        ),
        "fused_reshape_add_multiply_fast_erf_multiply_add_multiply_reshape": (
            12,
            [[64, 3072], [3072], [64, 3072]],
        ),
        "fused_nn_fast_softmax": (
            12,
            [[1, 12, 64, 64], [1, 12, 64, 64]],
        ),
        "fused_reshape_add_reshape_transpose_reshape_1": (
            24,
            [[64, 768], [768], [12, 64, 64]],
        ),
        "fused_reshape_divide_add": (
            12,
            [[12, 64, 64], [1, 1, 1, 64], [1, 12, 64, 64]],
        ),
        "fused_reshape_transpose_reshape": (
            12,
            [[12, 64, 64], [64, 768]],
        ),
        "fused_nn_dense_add_fast_tanh": (
            1,
            [[1, 768], [768, 768], [1, 768], [1, 768]],
        ),
        "fused_cast_take_add": (
            1,
            [[1, 64], [30522, 768], [1, 64, 768], [1, 64, 768]],
        ),
        "fused_take": (
            1,
            [[1, 64, 768], [1, 768]],
        ),
        "fused_reshape": (
            12,
            [[1, 12, 64, 64], [12, 64, 64]],
        ),
        "fused_reshape_1": (
            24,
            [[1, 64, 768], [64, 768]],
        ),
    }
    mod, params, _ = get_network(name="bert_base", input_shape=[1, 64])
    extracted_tasks = ms.extract_task_from_relay(mod, target="llvm", params=params)
    assert len(extracted_tasks) == len(expected)
    for t in extracted_tasks:
        prim_func = None
        # Keep the last PrimFunc of the dispatched IRModule for comparison.
        for _, v in t.dispatched[0].functions.items():
            prim_func = v
        shape = [[int(x) for x in prim_func.buffer_map[b].shape] for b in prim_func.params]
        assert t.task_name in expected
        expected_weight, expected_shape = expected[t.task_name]
        assert expected_weight == t.weight, t.task_name
        assert expected_shape == shape, t.task_name
@requires_torch
def test_meta_schedule_integration_extract_from_resnet_with_filter_func():
    """filter_func drops purely-spatial tasks (the two layout_transform ones)."""

    def filter_func(args) -> bool:
        # Accept a task only when some te.ComputeOp in its tensor graph
        # contains a tir.Reduce, i.e. the task is not purely spatial.
        has_complex_op = False
        visited = set()

        def traverse(t):
            nonlocal has_complex_op
            assert t.handle is not None
            if t.handle.value in visited:
                return
            if isinstance(t.op, te.PlaceholderOp):
                pass
            elif isinstance(t.op, te.ComputeOp):
                has_complex_op = has_complex_op or any(
                    [isinstance(e, tir.Reduce) for e in t.op.body]
                )
                for x in t.op.input_tensors:
                    traverse(x)
            visited.add(t.handle.value)

        for t in args:
            traverse(t)
        return has_complex_op

    mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    extracted_tasks = ms.extract_task_from_relay(
        mod,
        target="llvm",
        params=params,
        filter_func=filter_func,
    )
    # Same list as the unfiltered resnet test, minus the two spatial tasks.
    expected_task_names = [
        "fused_" + s
        for s in [
            "nn_max_pool2d",
            "nn_adaptive_avg_pool2d",
            "nn_dense_add",
            "nn_conv2d_add",
            "nn_conv2d_add_1",
            "nn_conv2d_add_2",
            "nn_conv2d_add_add_nn_relu",
            "nn_conv2d_add_add_nn_relu_1",
            "nn_conv2d_add_nn_relu",
            "nn_conv2d_add_nn_relu_1",
            "nn_conv2d_add_nn_relu_2",
            "nn_conv2d_add_nn_relu_3",
            "nn_conv2d_add_nn_relu_4",
            "nn_conv2d_add_nn_relu_5",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
        ]
    ]
    assert len(extracted_tasks) == len(expected_task_names)
    for t in extracted_tasks:
        assert t.task_name in expected_task_names, t.task_name
@requires_torch
def test_meta_schedule_integration_apply_history_best():
    """A committed tuning record is returned by ApplyHistoryBest.query()."""
    mod, _, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    database = ms.database.MemoryDatabase()
    env = ms.ApplyHistoryBest(database)
    target = Target("llvm")
    workload = database.commit_workload(MockModule)
    database.commit_tuning_record(
        ms.database.TuningRecord(
            trace=Schedule(MockModule).trace,
            workload=workload,
            run_secs=[1.0],
            target=target,
            args_info=[],
        )
    )
    mod = env.query(
        task_name="mock-task",
        mod=mod,
        target=target,
        dispatched=[MockModule],
    )
    # The query must hand back the tuned workload's module.
    assert tvm.ir.structural_equal(mod, workload.mod)
@pytest.mark.skip("Too slow on CI")
def extract_task_qbert():
    """Quantized BERT: int8 dense/batch_matmul tasks carry a VNNI schedule_rule."""
    # NOTE(review): the name lacks the 'test_' prefix, so pytest would not
    # collect this even without the skip marker -- presumably intentional.
    mod, params, _ = load_quantized_bert_base(batch_size=1, seq_len=128)
    target = "llvm -mcpu=cascadelake"
    extracted_tasks = ms.extract_task_from_relay(mod, target, params)
    tune_tasks = list(
        filter(
            lambda task: "dense" in task.task_name or "batch_matmul" in task.task_name,
            extracted_tasks,
        )
    )
    # three int8 dense, two int8 bmm, and one fp32 dense
    assert len(tune_tasks) == 6
    for task in tune_tasks:
        relay_func = list(task.mod.functions.values())[0]
        out_type = relay_func.body.checked_type
        if out_type.dtype == "float32":
            # The fp32 dense task is not annotated for VNNI; skip it.
            continue
        mod = ms.default_config.mod(task.dispatched[0])
        sch = tvm.tir.Schedule(mod)
        block = sch.get_block("compute")
        annotations = sch.get(block).annotations
        assert "schedule_rule" in annotations
        assert "vnni" in annotations["schedule_rule"]
@tvm.testing.skip_if_32bit(reason="Apparently the LLVM version on i386 image is too old")
def test_extract_task_arm_conv2d_nchwc():
    """int8 conv2d targeting arm_cpu is extracted as one NCHWc conv2d task."""
    data_shape = (1, 64, 128, 128)
    weight_shape = (32, 64, 1, 1)
    bias_shape = (weight_shape[0],)
    padding = (1, 1)
    data = relay.var("data", shape=data_shape, dtype="int8")
    weight = relay.var("weight", shape=weight_shape, dtype="int8")
    bias = relay.var("bias", shape=bias_shape, dtype="int32")
    conv2d = relay.nn.conv2d(
        data=data,
        weight=weight,
        kernel_size=weight_shape[2:],
        channels=weight_shape[0],
        padding=padding,
        strides=(1, 1),
        out_dtype="int32",
    )
    bias_add = relay.nn.bias_add(conv2d, bias)
    relay_mod = tvm.IRModule.from_expr(bias_add)
    weight_np = np.random.uniform(1, 10, size=weight_shape).astype("int8")
    bias_np = np.random.uniform(1, 10, size=bias_shape).astype("int32")
    params = {"weight": weight_np, "bias": bias_np}
    target = "llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon"
    extracted_tasks = ms.extract_task_from_relay(relay_mod, target, params)
    tune_tasks = list(
        filter(
            lambda task: "conv2d" in task.task_name,
            extracted_tasks,
        )
    )
    assert len(tune_tasks) == 1
    relay_func = list(tune_tasks[0].mod.functions.values())[0]
    out_type = relay_func.body.checked_type
    # Check that the output is in NCHWc layout
    assert list(out_type.shape) == [1, 8, 130, 130, 4]
# Allow running this test file directly (pytest-style collection via TVM).
if __name__ == "__main__":
    tvm.testing.main()
| 33.798319 | 139 | 0.606746 |
aaacf8cf1561b3cb5b5fe29bb82daf18df921ea7 | 33 | py | Python | application/routes/__init__.py | agu3rra/flask-starter | 87c42dbd0d7709abb17c90ff794c18256c428b56 | [
"MIT"
] | null | null | null | application/routes/__init__.py | agu3rra/flask-starter | 87c42dbd0d7709abb17c90ff794c18256c428b56 | [
"MIT"
] | null | null | null | application/routes/__init__.py | agu3rra/flask-starter | 87c42dbd0d7709abb17c90ff794c18256c428b56 | [
"MIT"
] | null | null | null | from .users import users_resource | 33 | 33 | 0.878788 |
ee121e75cc5d6a0582e80feda13c2572333ce08f | 12,095 | py | Python | mojo/public/tools/bindings/generators/mojom_java_generator.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2019-01-16T03:57:39.000Z | 2019-01-16T03:57:39.000Z | mojo/public/tools/bindings/generators/mojom_java_generator.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | mojo/public/tools/bindings/generators/mojom_java_generator.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates java source files from a mojom.Module."""
import argparse
import ast
import os
import re
from jinja2 import contextfilter
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'java'

# Extra bytes added to a packed struct's size (see the 'struct_size' filter
# in Generator.java_filters below).
_HEADER_SIZE = 8

# mojom type spec -> Java type used in generated signatures.
_spec_to_java_type = {
  'b': 'boolean',
  'd': 'double',
  'f': 'float',
  'h:d:c': 'org.chromium.mojo.system.DataPipe.ConsumerHandle',
  'h:d:p': 'org.chromium.mojo.system.DataPipe.ProducerHandle',
  'h:m': 'org.chromium.mojo.system.MessagePipeHandle',
  'h': 'org.chromium.mojo.system.UntypedHandle',
  'h:s': 'org.chromium.mojo.system.SharedBufferHandle',
  'i16': 'short',
  'i32': 'int',
  'i64': 'long',
  'i8': 'byte',
  's': 'String',
  'u16': 'short',
  'u32': 'int',
  'u64': 'long',
  'u8': 'byte',
}

# mojom type spec -> name of the decoder method that reads that type.
_spec_to_decode_method = {
  'b': 'readBoolean',
  'd': 'readDouble',
  'f': 'readFloat',
  'h:d:c': 'readConsumerHandle',
  'h:d:p': 'readProducerHandle',
  'h:m': 'readMessagePipeHandle',
  'h': 'readUntypedHandle',
  'h:s': 'readSharedBufferHandle',
  'i16': 'readShort',
  'i32': 'readInt',
  'i64': 'readLong',
  'i8': 'readByte',
  's': 'readString',
  'u16': 'readShort',
  'u32': 'readInt',
  'u64': 'readLong',
  'u8': 'readByte',
}

# Java primitive type -> its boxed reference type.
_java_primitive_to_boxed_type = {
  'boolean': 'Boolean',
  'byte': 'Byte',
  'double': 'Double',
  'float': 'Float',
  'int': 'Integer',
  'long': 'Long',
  'short': 'Short',
}
def NameToComponent(name):
  """Split a mixed-case identifier into its lower-case word components.

  e.g. 'HTTPEntry2FooBar' -> ['http', 'entry2', 'foo', 'bar'].
  """
  # Two passes insert '_' word boundaries: first between anything and a
  # Title-cased run, then between a non-upper character and an upper one.
  with_boundaries = re.sub('([^_])([A-Z][^A-Z_]+)', r'\1_\2', name)
  with_boundaries = re.sub('([^A-Z_])([A-Z])', r'\1_\2', with_boundaries)
  return [component.lower() for component in with_boundaries.split('_')]
def CapitalizeFirst(string):
  """Upper-case the first character of |string| (must be non-empty)."""
  head, tail = string[0], string[1:]
  return head.upper() + tail
def UpperCamelCase(name):
  """Convert |name| to UpperCamelCase (e.g. 'foo_bar' -> 'FooBar')."""
  return ''.join(CapitalizeFirst(component) for component in NameToComponent(name))
def CamelCase(name):
  """Convert |name| to lowerCamelCase (e.g. 'foo_bar' -> 'fooBar')."""
  upper_form = UpperCamelCase(name)
  return upper_form[0].lower() + upper_form[1:]
def ConstantStyle(name):
  """Convert |name| to CONSTANT_STYLE, dropping a leading 'k' component."""
  components = NameToComponent(name)
  if components[0] == 'k':
    # Drop the Hungarian-style 'k' constant prefix.
    components = components[1:]
  return '_'.join(component.upper() for component in components)
def GetNameForElement(element):
  """Return the Java identifier for a mojom AST |element|.

  Types map to UpperCamelCase, members to lowerCamelCase, and constants to
  CONSTANT_STYLE (enum values qualified by their enum's name).
  """
  if isinstance(element, (mojom.Enum,
                          mojom.Interface,
                          mojom.Struct)):
    return UpperCamelCase(element.name)
  if isinstance(element, mojom.InterfaceRequest):
    # A request is named after the interface it carries.
    return GetNameForElement(element.kind)
  if isinstance(element, (mojom.Method,
                          mojom.Parameter,
                          mojom.Field)):
    return CamelCase(element.name)
  if isinstance(element, mojom.EnumValue):
    return (UpperCamelCase(element.enum_name) + '.' +
            ConstantStyle(element.name))
  if isinstance(element, (mojom.NamedValue,
                          mojom.Constant)):
    return ConstantStyle(element.name)
  # Fixed: the original used '"Unexpected element: " % element', a format
  # string with no conversion specifier, which raised TypeError instead of
  # the intended message.
  raise Exception('Unexpected element: %s' % element)
def GetInterfaceResponseName(method):
  """Name of the generated response class for |method|."""
  return UpperCamelCase('%sResponse' % method.name)
def ParseStringAttribute(attribute):
  """Validate that a mojom attribute value is a string and return it."""
  assert isinstance(attribute, basestring)
  return attribute
def IsArray(kind):
  """True when |kind| is a variable- or fixed-size mojom array."""
  return (isinstance(kind, mojom.Array) or
          isinstance(kind, mojom.FixedArray))
@contextfilter
def DecodeMethod(context, kind, offset, bit):
  """Build the Java decoder call reading a value of |kind| at |offset|
  (bit index |bit| for booleans) from a serialized mojo message."""
  def _DecodeMethodName(kind):
    if IsArray(kind):
      # Array readers are the element reader's name pluralized.
      return _DecodeMethodName(kind.kind) + 's'
    if isinstance(kind, mojom.Enum):
      # Enums are read as int32.
      return _DecodeMethodName(mojom.INT32)
    if isinstance(kind, mojom.InterfaceRequest):
      return "readInterfaceRequest"
    if isinstance(kind, mojom.Interface):
      return "readServiceInterface"
    return _spec_to_decode_method[kind.spec]
  methodName = _DecodeMethodName(kind)
  additionalParams = ''
  if (kind == mojom.BOOL):
    # Booleans are packed as single bits; the reader needs the bit index.
    additionalParams = ', %d' % bit
  if isinstance(kind, mojom.Interface):
    additionalParams = ', %s.BUILDER' % GetJavaType(context, kind)
  if IsArray(kind) and isinstance(kind.kind, mojom.Interface):
    additionalParams = ', %s.BUILDER' % GetJavaType(context, kind.kind)
  return '%s(%s%s)' % (methodName, offset, additionalParams)
@contextfilter
def EncodeMethod(context, kind, variable, offset, bit):
  """Build the Java encoder call writing |variable| of type |kind| at
  |offset| (bit index |bit| for booleans) into a mojo message."""
  additionalParams = ''
  if (kind == mojom.BOOL):
    # Booleans are packed as single bits; the writer needs the bit index.
    additionalParams = ', %d' % bit
  if isinstance(kind, mojom.Interface):
    additionalParams = ', %s.BUILDER' % GetJavaType(context, kind)
  if IsArray(kind) and isinstance(kind.kind, mojom.Interface):
    additionalParams = ', %s.BUILDER' % GetJavaType(context, kind.kind)
  return 'encode(%s, %s%s)' % (variable, offset, additionalParams)
def GetPackage(module):
  """Java package for |module|: the JavaPackage attribute when present,
  otherwise org.chromium.mojom.<namespace>."""
  attributes = module.attributes
  if 'JavaPackage' in attributes:
    return ParseStringAttribute(attributes['JavaPackage'])
  # Default package.
  return "org.chromium.mojom." + module.namespace
def GetNameForKind(context, kind):
  """Java name for |kind|, including enclosing-type qualifiers, prefixed
  with its package when |kind| comes from a different module than the one
  currently being generated."""
  def _GetNameHierachy(kind):
    hierachy = []
    if kind.parent_kind:
      hierachy = _GetNameHierachy(kind.parent_kind)
    hierachy.append(GetNameForElement(kind))
    return hierachy

  module = context.resolve('module')
  elements = []
  if GetPackage(module) != GetPackage(kind.module):
    # Cross-package reference: fully qualify with the defining package.
    elements += [GetPackage(kind.module)]
  elements += _GetNameHierachy(kind)
  return '.'.join(elements)
def GetBoxedJavaType(context, kind):
  """Java reference type for |kind|: primitives map to their boxed class,
  everything else is returned unchanged."""
  unboxed_type = GetJavaType(context, kind, False)
  return _java_primitive_to_boxed_type.get(unboxed_type, unboxed_type)
@contextfilter
def GetJavaType(context, kind, boxed=False):
  """Java type name for a mojom |kind|; |boxed| requests the boxed form."""
  if boxed:
    return GetBoxedJavaType(context, kind)
  if isinstance(kind, (mojom.Struct, mojom.Interface)):
    return GetNameForKind(context, kind)
  if isinstance(kind, mojom.InterfaceRequest):
    return ("org.chromium.mojo.bindings.InterfaceRequest<%s>" %
            GetNameForKind(context, kind.kind))
  if IsArray(kind):
    return "%s[]" % GetJavaType(context, kind.kind)
  if isinstance(kind, mojom.Enum):
    # Enums are represented as plain ints in the generated Java.
    return "int"
  return _spec_to_java_type[kind.spec]
def IsHandle(kind):
  """True when |kind|'s spec string denotes a mojo handle type ('h...')."""
  spec_prefix = kind.spec[0]
  return spec_prefix == 'h'
@contextfilter
def DefaultValue(context, field):
  """Java expression for |field|'s declared default value."""
  assert field.default
  if isinstance(field.kind, mojom.Struct):
    # Struct fields only support the special "default" marker, which means
    # a default-constructed instance.
    assert field.default == "default"
    return "new %s()" % GetJavaType(context, field.kind)
  return "(%s) %s" % (
      GetJavaType(context, field.kind),
      ExpressionToText(context, field.default, kind_spec=field.kind.spec))
@contextfilter
def ConstantValue(context, constant):
  """Java expression (with cast) for a mojom |constant|'s value."""
  return "(%s) %s" % (
      GetJavaType(context, constant.kind),
      ExpressionToText(context, constant.value, kind_spec=constant.kind.spec))
@contextfilter
def NewArray(context, kind, size):
  """Java 'new' expression allocating |size| elements of |kind|'s element
  type; for nested arrays only the outermost dimension is sized."""
  if IsArray(kind.kind):
    return NewArray(context, kind.kind, size) + '[]'
  return 'new %s[%s]' % (GetJavaType(context, kind.kind), size)
@contextfilter
def ExpressionToText(context, token, kind_spec=''):
  """Translate a mojom constant expression |token| into Java source text.

  Named values resolve to their (possibly package-qualified) Java name;
  integer literals get an 'L' suffix and are wrapped into signed-long range.
  """
  def _TranslateNamedValue(named_value):
    entity_name = GetNameForElement(named_value)
    if named_value.parent_kind:
      return GetJavaType(context, named_value.parent_kind) + '.' + entity_name
    # Handle the case where named_value is a module level constant:
    if not isinstance(named_value, mojom.EnumValue):
      entity_name = (GetConstantsMainEntityName(named_value.module) + '.' +
                     entity_name)
    if GetPackage(named_value.module) == GetPackage(context.resolve('module')):
      return entity_name
    return GetPackage(named_value.module) + '.' + entity_name

  if isinstance(token, mojom.NamedValue):
    return _TranslateNamedValue(token)
  if kind_spec.startswith('i') or kind_spec.startswith('u'):
    # Add Long suffix to all integer literals.
    number = ast.literal_eval(token.lstrip('+ '))
    if not isinstance(number, (int, long)):
      raise ValueError('got unexpected type %r for int literal %r' % (
          type(number), token))
    # If the literal is too large to fit a signed long, convert it to the
    # equivalent signed long.
    if number >= 2 ** 63:
      number -= 2 ** 64
    return '%dL' % number
  return token
def IsPointerArrayKind(kind):
  """True for arrays whose elements are object (pointer) kinds."""
  return IsArray(kind) and generator.IsObjectKind(kind.kind)
def GetConstantsMainEntityName(module):
  """Name of the Java class that hosts |module|'s top-level constants."""
  if 'JavaConstantsClassName' in module.attributes:
    return ParseStringAttribute(module.attributes['JavaConstantsClassName'])
  # This constructs the name of the embedding classes for module level constants
  # by extracting the mojom's filename and prepending it to Constants.
  return (UpperCamelCase(module.path.split('/')[-1].rsplit('.', 1)[0]) +
          'Constants')
class Generator(generator.Generator):
  """Generates Java source files from a mojom.Module."""

  # Jinja filters exposed to all java_templates.
  java_filters = {
    "interface_response_name": GetInterfaceResponseName,
    "constant_value": ConstantValue,
    "default_value": DefaultValue,
    "decode_method": DecodeMethod,
    "expression_to_text": ExpressionToText,
    "encode_method": EncodeMethod,
    "is_handle": IsHandle,
    "is_pointer_array_kind": IsPointerArrayKind,
    "is_struct_kind": lambda kind: isinstance(kind, mojom.Struct),
    "java_type": GetJavaType,
    "name": GetNameForElement,
    "new_array": NewArray,
    "struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
  }

  def GetJinjaExports(self):
    """Variables made available to every template."""
    return {
      "module": self.module,
      "package": GetPackage(self.module),
    }

  @UseJinja("java_templates/enum.java.tmpl", filters=java_filters)
  def GenerateEnumSource(self, enum):
    """Template context for one top-level enum."""
    exports = self.GetJinjaExports()
    exports.update({"enum": enum})
    return exports

  @UseJinja("java_templates/struct.java.tmpl", filters=java_filters)
  def GenerateStructSource(self, struct):
    """Template context for one struct."""
    exports = self.GetJinjaExports()
    exports.update({"struct": struct})
    return exports

  @UseJinja("java_templates/interface.java.tmpl", filters=java_filters)
  def GenerateInterfaceSource(self, interface):
    """Template context for one interface (with its client, if declared)."""
    exports = self.GetJinjaExports()
    exports.update({"interface": interface})
    if interface.client:
      # Resolve the client interface by name so the template can emit it.
      for client in self.module.interfaces:
        if client.name == interface.client:
          exports.update({"client": client})
    return exports

  @UseJinja("java_templates/constants.java.tmpl", filters=java_filters)
  def GenerateConstantsSource(self, module):
    """Template context for the module-level constants class."""
    exports = self.GetJinjaExports()
    exports.update({"main_entity": GetConstantsMainEntityName(module),
                    "constants": module.constants})
    return exports

  def GenerateFiles(self, unparsed_args):
    """Emit one .java file per enum/struct/interface, plus constants."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--java_output_directory", dest="java_output_directory")
    args = parser.parse_args(unparsed_args)
    if self.output_dir and args.java_output_directory:
      self.output_dir = os.path.join(args.java_output_directory,
                                     GetPackage(self.module).replace('.', '/'))
    if not os.path.exists(self.output_dir):
      try:
        os.makedirs(self.output_dir)
      except OSError:
        # Ignore errors on directory creation (e.g. it was created
        # concurrently). The original bare 'except:' also swallowed
        # KeyboardInterrupt/SystemExit, which is why it was narrowed.
        pass

    for enum in self.module.enums:
      self.Write(self.GenerateEnumSource(enum),
                 "%s.java" % GetNameForElement(enum))
    for struct in self.module.structs:
      self.Write(self.GenerateStructSource(struct),
                 "%s.java" % GetNameForElement(struct))
    for interface in self.module.interfaces:
      self.Write(self.GenerateInterfaceSource(interface),
                 "%s.java" % GetNameForElement(interface))
    if self.module.constants:
      self.Write(self.GenerateConstantsSource(self.module),
                 "%s.java" % GetConstantsMainEntityName(self.module))

  def GetJinjaParameters(self):
    """Jinja environment settings used when rendering the templates."""
    return {
      'lstrip_blocks': True,
      'trim_blocks': True,
    }

  def GetGlobals(self):
    return {
      'module': self.module,
    }
| 33.228022 | 80 | 0.681852 |
824cf717f576a06322541a36196caa296d66c574 | 6,030 | py | Python | graphs/bidirectional_breadth_first_search.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | graphs/bidirectional_breadth_first_search.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | graphs/bidirectional_breadth_first_search.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | """
https://en.wikipedia.org/wiki/Bidirectional_search
"""
from __future__ import annotations
import time
# A path is a list of (row, col) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

# Moves are tried in this fixed order, which determines tie-breaking.
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: a grid position, the goal it is heading for, and
    a link back to the node it was expanded from (None for the root)."""

    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
    ):
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (row, col), i.e. (y, x).
        self.pos = (pos_y, pos_x)
        self.parent = parent
class BreadthFirstSearch:
    """
    >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
    >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
    (0, 1)
    >>> [x.pos for x in bfs.get_successors(bfs.start)]
    [(1, 0), (0, 1)]
    >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
    (1, 0)
    >>> bfs.retrace_path(bfs.start)
    [(0, 0)]
    >>> bfs.search()  # doctest: +NORMALIZE_WHITESPACE
    [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1),
     (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
    """

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        # Positions already enqueued. Without this, free cells are re-enqueued
        # over and over, the queue grows exponentially with search depth, and
        # the loop never terminates when the goal is unreachable. The
        # first-arrival order (and hence the returned path) is unchanged.
        self.visited = {self.start.pos}
        self.reached = False

    def search(self) -> Path | None:
        """BFS from start to target; returns the path found, or [start] if
        the queue empties without reaching the target."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            for node in self.get_successors(current_node):
                if node.pos not in self.visited:
                    self.visited.add(node.pos)
                    self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """
        Returns a list of successors (both in the grid and free spaces)
        """
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """
        Retrace the path from parents to parents until start node
        """
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """
    >>> bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1,
    ... len(grid[0]) - 1))
    >>> bd_bfs.fwd_bfs.start.pos == bd_bfs.bwd_bfs.target.pos
    True
    >>> bd_bfs.retrace_bidirectional_path(bd_bfs.fwd_bfs.start,
    ... bd_bfs.bwd_bfs.start)
    [(0, 0)]
    >>> bd_bfs.search()  # doctest: +NORMALIZE_WHITESPACE
    [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3),
     (2, 4), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6)]
    """

    def __init__(self, start, goal):
        # Two symmetric searches: one from start toward goal, one from goal
        # toward start; they meet in the middle.
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Advance both frontiers in lockstep until they meet."""
        # Both queues are popped unconditionally each iteration, so the loop
        # must stop as soon as EITHER empties ('and', not 'or'); otherwise a
        # pop(0) on the exhausted queue raises IndexError.
        while self.fwd_bfs.node_queue and self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Retarget each search toward the other's current frontier node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the two half-paths; the meeting cell appears only once."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting cell
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    # Time the unidirectional search.
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    # Time the bidirectional search on the same grid.
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 32.594595 | 85 | 0.536153 |
96cbd9fe9f50214426c78227efa6711647bfb225 | 2,175 | py | Python | tts/plotting_utils.py | flagship-open/emotional_TTS | d153af1c842892e0cd1f19c1e6ea1e6e48a49686 | [
"MIT"
] | null | null | null | tts/plotting_utils.py | flagship-open/emotional_TTS | d153af1c842892e0cd1f19c1e6ea1e6e48a49686 | [
"MIT"
] | null | null | null | tts/plotting_utils.py | flagship-open/emotional_TTS | d153af1c842892e0cd1f19c1e6ea1e6e48a49686 | [
"MIT"
] | 1 | 2021-09-09T06:38:05.000Z | 2021-09-09T06:38:05.000Z | import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
    """Convert an already-drawn matplotlib figure into an (H, W, 3) uint8
    RGB numpy array (callers must invoke fig.canvas.draw() first)."""
    # np.fromstring on binary data is deprecated; np.frombuffer is the
    # supported replacement. .copy() keeps the array writable, matching the
    # old fromstring behavior (frombuffer alone returns a read-only view).
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    return data
def plot_alignment_to_numpy(alignment, info=None):
    """Render an attention-alignment matrix and return it as an RGB array;
    |info| (if given) is appended to the x-axis label."""
    fig, ax = plt.subplots(figsize=(6, 4))
    image = ax.imshow(alignment, aspect='auto', origin='lower',
                      interpolation='none')
    fig.colorbar(image, ax=ax)
    label = 'Decoder timestep'
    if info is not None:
        label = label + '\n\n' + info
    plt.xlabel(label)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()

    fig.canvas.draw()
    data = save_figure_to_numpy(fig)
    plt.close()
    return data
def plot_spectrogram_to_numpy(spectrogram):
    """Draw a spectrogram heatmap and return it as an RGB uint8 array."""
    fig, ax = plt.subplots(figsize=(12, 3))
    image = ax.imshow(spectrogram, aspect="auto", origin="lower",
                      interpolation='none')
    plt.colorbar(image, ax=ax)
    ax.set_xlabel("Frames")
    ax.set_ylabel("Channels")
    plt.tight_layout()

    fig.canvas.draw()
    rgb_array = save_figure_to_numpy(fig)
    plt.close()
    return rgb_array
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
    """Scatter-plot target vs. predicted gate values per frame and return
    the rendered figure as an RGB uint8 array."""
    fig, ax = plt.subplots(figsize=(12, 3))
    series = (
        (gate_targets, 'green', '+', 'target'),
        (gate_outputs, 'red', '.', 'predicted'),
    )
    for values, color, marker, label in series:
        ax.scatter(range(len(values)), values, alpha=0.5,
                   color=color, marker=marker, s=1, label=label)
    ax.set_xlabel("Frames (Green target, Red predicted)")
    ax.set_ylabel("Gate State")
    plt.tight_layout()

    fig.canvas.draw()
    rgb_array = save_figure_to_numpy(fig)
    plt.close()
    return rgb_array
def plot_inference_gate_outputs_to_numpy(gate_outputs):
    """Scatter-plot predicted gate values only (no targets at inference)
    and return the rendered figure as an RGB uint8 array."""
    fig, ax = plt.subplots(figsize=(12, 3))
    frame_indices = range(len(gate_outputs))
    ax.scatter(frame_indices, gate_outputs, alpha=0.5,
               color='red', marker='.', s=1, label='predicted')
    ax.set_xlabel("Frames (Red predicted)")
    ax.set_ylabel("Gate State")
    plt.tight_layout()

    fig.canvas.draw()
    rgb_array = save_figure_to_numpy(fig)
    plt.close()
    return rgb_array
| 28.246753 | 75 | 0.646437 |
a2f0e5a1ef2a980118d0d95b3bb1fff5e4941bff | 2,441 | py | Python | evaluate_test_chemprop.py | cbilodeau2/g2g_optimization | c4afbc141285f4b8241e75c53a1eaa83df7d6ab6 | [
"MIT"
] | 6 | 2020-12-17T20:52:47.000Z | 2022-03-25T12:39:15.000Z | evaluate_test_chemprop.py | cbilodeau2/g2g_optimization | c4afbc141285f4b8241e75c53a1eaa83df7d6ab6 | [
"MIT"
] | null | null | null | evaluate_test_chemprop.py | cbilodeau2/g2g_optimization | c4afbc141285f4b8241e75c53a1eaa83df7d6ab6 | [
"MIT"
] | 1 | 2021-06-27T11:52:15.000Z | 2021-06-27T11:52:15.000Z | import rdkit
import argparse
import pickle
import os
from g2g_optimization.train.decode import decode
from g2g_optimization.train.args import read_args
from g2g_optimization.hgraph import common_atom_vocab
from g2g_optimization.train.evaluate_chemprop import evaluate_chemprop
# Silence RDKit's logger during decoding.
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)

parser = argparse.ArgumentParser()
parser.add_argument('--test', required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--vocab', type=str, default=None)
parser.add_argument('--output_file', type=str, default=None)
parser.add_argument('--stats_file', type=str, default=None)
parser.add_argument('--checkpoint_path', type=str, default=None)
parser.add_argument('--fold_path', default='predictors/chemprop_aqsol/')
# Without an args file, many parameters will revert to default.
parser.add_argument('--args_file', type=str, default=None)
parser.add_argument('--num_decode', type=int, default=20)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--chemprop_path', type=str, default='/data/rsg/chemistry/cbilod/chemprop/')
parser.add_argument('--solvent', type=str, default=None)
args = parser.parse_args()

# A checkpoint directory bundles the vocab/model inputs and receives the
# evaluation outputs; when given, it overrides the individual paths above.
if args.checkpoint_path is not None:
    args.vocab = os.path.join(args.checkpoint_path, 'inputs', 'vocab.txt')
    args.model = os.path.join(args.checkpoint_path, 'models', args.model)
    if not os.path.isdir(os.path.join(args.checkpoint_path, 'eval')):
        os.mkdir(os.path.join(args.checkpoint_path, 'eval'))
    args.output_file = os.path.join(args.checkpoint_path, 'eval', 'decoded_mols.csv')
    args.stats_file = os.path.join(args.checkpoint_path, 'eval', 'stats.pkl')
    args.args_file = os.path.join(args.checkpoint_path, 'input.dat')

if args.args_file is None:
    print('WARNING: You are running without an args_file')
    args_file = {}
else:
    args_file = read_args(args.args_file)

decode(args.test, args.vocab, args.model, args.output_file, args_file,
       atom_vocab=common_atom_vocab,
       num_decode=args.num_decode,  # Will not come from run input
       seed=args.seed)

if args.solvent is None:
    stats, _ = evaluate_chemprop(args.output_file, fold_path=args.fold_path,
                                 chemprop_path=args.chemprop_path)
else:
    # Fixed: this branch previously referenced an undefined name 'out_file'
    # (NameError); the decoded output lives at args.output_file.
    # FIXME: evaluate_chemprop_sol is not imported at the top of this file,
    # so the solvent branch still raises NameError until that import is added.
    stats, _ = evaluate_chemprop_sol(args.output_file, solvent=args.solvent,
                                     fold_path=args.fold_path,
                                     chemprop_path=args.chemprop_path)

with open(args.stats_file, 'wb') as f:
    pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)
| 40.683333 | 124 | 0.76526 |
d4259cffc38c0e709e56f5dc2325abcde43984fd | 4,046 | py | Python | tidy.py | rgosens2/cpp-fqa | a18c33e1f2681b52df2324f772ca039482583537 | [
"BSD-2-Clause"
] | null | null | null | tidy.py | rgosens2/cpp-fqa | a18c33e1f2681b52df2324f772ca039482583537 | [
"BSD-2-Clause"
] | null | null | null | tidy.py | rgosens2/cpp-fqa | a18c33e1f2681b52df2324f772ca039482583537 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import os, subprocess
# Every .fqa source in the working directory maps to a generated .html page;
# fqa.html is the extra index page with no .fqa counterpart.
fqas = [f for f in os.listdir(".") if f.endswith(".fqa")]
htmls = [f[:-4] + ".html" for f in fqas] + ["fqa.html"]
# Pages to process; narrow to a single page (e.g. ['function.html']) when
# debugging.
files = htmls # ['function.html']
# doesn't help with bitexact since we use a new fqa2html anyway.
# bitexactexc = [line.split()[0] for line in open('..\\post-me-list.txt').read().split('\n') if len(line)]
# print bitexactexc
def getoutput(cmd):
"""commands.getoutput doesn't work on Win32"""
tmp = "_out_.txt"
if os.system(cmd + " > " + tmp):
raise Exception(cmd + " FAILED")
f = open(tmp)
r = f.read()
f.close()
return r
#######################################
# NOTE_: we have cleaned up some of the html and now get no warnings from tidy
# on most of the files. web-vs-c++.html gives 20 warnings because of Russian characters.
# tidy does only error checking here which is a good thing because it will replace
# those characters and they will be unreadable:
# Character codes 128 to 159 (U+0080 to U+009F) are not allowed in HTML;
# even if they were, they would likely be unprintable control characters.
# Tidy assumed you wanted to refer to a character with the same byte value in the
# specified encoding and replaced that reference with the Unicode equivalent.
# See: tidy -f qqq.txt -o xxx.html web-vs-c++.html
# NOTE_: web-vs-c++.fqa was UTF-8 with BOM which tidy also mangles. We have changed it
# to UTF-8 like all the other files.
# NOTE_: use -utf8 to leave char codes >= 128
# DONE: we are getting a clean bill of health now from tidy:
# friend.html: No warnings or errors were found.
# why.html: No warnings or errors were found.
# inheritance-multiple.html: No warnings or errors were found.
# function.html: No warnings or errors were found.
# inheritance-basics.html: No warnings or errors were found.
# io.html: No warnings or errors were found.
# defective.html: No warnings or errors were found.
# web-vs-c++.html: No warnings or errors were found.
# const.html: No warnings or errors were found.
# templates.html: No warnings or errors were found.
# index.html: No warnings or errors were found.
# class.html: No warnings or errors were found.
# mixing.html: No warnings or errors were found.
# changelog.html: No warnings or errors were found.
# picture.html: No warnings or errors were found.
# ref.html: No warnings or errors were found.
# inheritance-virtual.html: No warnings or errors were found.
# exceptions.html: No warnings or errors were found.
# inheritance-abstract.html: No warnings or errors were found.
# ctors.html: No warnings or errors were found.
# inheritance-mother.html: No warnings or errors were found.
# faq.html: No warnings or errors were found.
# heap.html: No warnings or errors were found.
# disclaimers.html: No warnings or errors were found.
# dtor.html: No warnings or errors were found.
# inline.html: No warnings or errors were found.
# web-vs-fqa.html: No warnings or errors were found.
# linking.html: No warnings or errors were found.
# inheritance-proper.html: No warnings or errors were found.
# assign.html: No warnings or errors were found.
# operator.html: No warnings or errors were found.
# fqa.html: No warnings or errors were found.
#######################################
def tidy(f):
o = getoutput('tidy -e -utf8 %s 2>&1 | grep "errors were found"' % (f))
if " 0 errors were found" or "No warnings or errors were found" in o:
print(f + ":", o[:-1])
else:
raise Exception("ERRORS FOUND IN %s: %s" % (f, o[:-1]))
# RG: this fucks up correct <p>
for f in files:
fd = open(f)
contents = fd.read()
fd.close()
tidyisms = ["ul", "pre", "h2"]
for t in tidyisms:
# RG: fucks up and leaves one trailing </p> in index.html and fqa.html
contents = contents.replace("<p>\n<%s>" % t, "<%s>" % t)
contents = contents.replace("</%s>\n</p>\n" % t, "</%s>\n" % t)
fd = open(f, "w")
fd.write(contents)
fd.close()
tidy(f)
# print "WARNING!! i'm not tidying post-me files for bitexact.py!! FIXME!!"
| 41.71134 | 106 | 0.675729 |
8d792883b811ff5b90a406aa72b634cefea2d72a | 3,393 | py | Python | my_instagram/models.py | kd-kinuthiadavid/testing-deploy-ig-clone | 529676e654b0a9b304a4ada6b6664eb439c8cb4c | [
"MIT"
] | null | null | null | my_instagram/models.py | kd-kinuthiadavid/testing-deploy-ig-clone | 529676e654b0a9b304a4ada6b6664eb439c8cb4c | [
"MIT"
] | null | null | null | my_instagram/models.py | kd-kinuthiadavid/testing-deploy-ig-clone | 529676e654b0a9b304a4ada6b6664eb439c8cb4c | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
import datetime as dt
# Create your models here.
# class Image(models.Model):
# '''
# Image model
# '''
# image = models.ImageField(upload_to='gallery/')
# image_url = models.TextField()
# name = models.CharField(max_length=30)
# description = models.TextField(max_length=100)
# category = models.ManyToManyField(category)
# post_date = models.DateTimeField(auto_now=True)
# location = models.ForeignKey(Location)
class Profile(models.Model):
profile_photo = models.ImageField(upload_to='profile_pictures/')
user = models.OneToOneField(User, on_delete=models.CASCADE)
bio = models.TextField(max_length=200, blank=True)
email = models.CharField(max_length=100, null=True, blank=True)
status = models.CharField(max_length=100, blank=True)
following = models.ManyToManyField(User, related_name="follows", blank=True)
followers = models.ManyToManyField(User, related_name="followed_by", blank=True)
def __str__(self):
return self.user.username
@classmethod
def get_profiles(cls):
profiles = cls.objects.all()
return profiles
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
def update_profile(self):
self.update()
@classmethod
def find_profile(cls, id):
profile = cls.objects.get(id=id)
return profile
class Image(models.Model):
'''
Image model
'''
image = models.ImageField(upload_to='images/')
image_name = models.CharField(max_length=30, blank=True)
image_caption = models.TextField(max_length=100, blank=True)
user = models.ForeignKey(User, related_name="posted_by", on_delete=models.CASCADE, null=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
liker = models.ForeignKey(User, related_name='liked_by', on_delete=models.CASCADE, null=True)
post_date = models.DateTimeField(auto_now=True)
def __str__(self):
return self.image_name
@classmethod
def search_by_image_caption(cls, search_term):
images = cls.objects.filter(image_caption__icontains=search_term)
return images
@classmethod
def get_all(cls):
images = cls.objects.all()
return images
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_caption(self):
self.image_caption.update()
@classmethod
def get_image_by_id(cls, id):
image = cls.object.get(id=id)
return image
class Comment(models.Model):
content = models.TextField(max_length=150)
user = models.ForeignKey(User, related_name='commented_by', on_delete=models.CASCADE, null=True)
image = models.ForeignKey(Image, related_name='comment_for', on_delete=models.CASCADE, null=True)
def __str__(self):
return self.content
@classmethod
def get_comments(cls):
comments = cls.objects.all()
return comments
class Likes(models.Model):
likes = models.IntegerField()
image = models.ForeignKey(Image, related_name='likes_for', on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, related_name='who_is_liking', on_delete=models.CASCADE, null=True)
def __str__(self):
return str(self.likes)
| 28.754237 | 101 | 0.690245 |
77a67ed73a92c1713e5dd02f4cb1331af583d5cc | 1,356 | py | Python | fleet-rec/fleetrec/core/reader.py | ZHUI/Fleet | 382c04c40866cfa8ead3d996eaefbfb4575362bd | [
"Apache-2.0"
] | 1 | 2020-04-28T08:04:07.000Z | 2020-04-28T08:04:07.000Z | fleet-rec/fleetrec/core/reader.py | ZHUI/Fleet | 382c04c40866cfa8ead3d996eaefbfb4575362bd | [
"Apache-2.0"
] | null | null | null | fleet-rec/fleetrec/core/reader.py | ZHUI/Fleet | 382c04c40866cfa8ead3d996eaefbfb4575362bd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import abc
import os
import paddle.fluid.incubate.data_generator as dg
import yaml
from fleetrec.core.utils import envs
class Reader(dg.MultiSlotDataGenerator):
__metaclass__ = abc.ABCMeta
def __init__(self, config):
dg.MultiSlotDataGenerator.__init__(self)
if os.path.exists(config) and os.path.isfile(config):
with open(config, 'r') as rb:
_config = yaml.load(rb.read(), Loader=yaml.FullLoader)
else:
raise ValueError("reader config only support yaml")
envs.set_global_envs(_config)
@abc.abstractmethod
def init(self):
pass
@abc.abstractmethod
def generate_sample(self, line):
pass
| 29.478261 | 74 | 0.713864 |
9f289bf0c17e3e28950602c5a48f12d744029b0a | 12,566 | py | Python | flow_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | flow_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | flow_sdk/model/container/workload_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: workload.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from flow_sdk.model.container import container_pb2 as flow__sdk_dot_model_dot_container_dot_container__pb2
from flow_sdk.model.container import volume_pb2 as flow__sdk_dot_model_dot_container_dot_volume__pb2
from flow_sdk.model.container import deployment_strategy_pb2 as flow__sdk_dot_model_dot_container_dot_deployment__strategy__pb2
from flow_sdk.model.container import local_object_reference_pb2 as flow__sdk_dot_model_dot_container_dot_local__object__reference__pb2
from flow_sdk.model.container import deployment_status_pb2 as flow__sdk_dot_model_dot_container_dot_deployment__status__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='workload.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0eworkload.proto\x12\tcontainer\x1a(flow_sdk/model/container/container.proto\x1a%flow_sdk/model/container/volume.proto\x1a\x32\x66low_sdk/model/container/deployment_strategy.proto\x1a\x35\x66low_sdk/model/container/local_object_reference.proto\x1a\x30\x66low_sdk/model/container/deployment_status.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe3\x04\n\x08Workload\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04kind\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\t\x12\x14\n\x0cresourceName\x18\x05 \x01(\t\x12.\n\ncontainers\x18\x06 \x03(\x0b\x32\x1a.container.ContainerConfig\x12\x10\n\x08replicas\x18\x07 \x01(\x05\x12\"\n\x07volumes\x18\x08 \x03(\x0b\x32\x11.container.Volume\x12,\n\x0b\x61nnotations\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06labels\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdnsPolicy\x18\x0b \x01(\t\x12\x15\n\rrestartPolicy\x18\x0c \x01(\t\x12\x39\n\x12\x64\x65ploymentStrategy\x18\r \x01(\x0b\x32\x1d.container.DeploymentStrategy\x12\x39\n\x10imagePullSecrets\x18\x0e \x03(\x0b\x32\x1f.container.LocalObjectReference\x12\x35\n\x10\x64\x65ploymentStatus\x18\x0f \x01(\x0b\x32\x1b.container.DeploymentStatus\x12\x14\n\x0cresourceSpec\x18\x10 \x01(\t\x12\x0f\n\x07\x63reator\x18\x11 \x01(\t\x12\x19\n\x11\x63reationTimestamp\x18\x12 \x01(\t\x12\r\n\x05state\x18\x13 \x01(\t\x12\x19\n\x11transitionMessage\x18\x14 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[flow__sdk_dot_model_dot_container_dot_container__pb2.DESCRIPTOR,flow__sdk_dot_model_dot_container_dot_volume__pb2.DESCRIPTOR,flow__sdk_dot_model_dot_container_dot_deployment__strategy__pb2.DESCRIPTOR,flow__sdk_dot_model_dot_container_dot_local__object__reference__pb2.DESCRIPTOR,flow__sdk_dot_model_dot_container_dot_deployment__status__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_WORKLOAD = _descriptor.Descriptor(
name='Workload',
full_name='container.Workload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='container.Workload.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kind', full_name='container.Workload.kind', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='container.Workload.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='container.Workload.namespace', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceName', full_name='container.Workload.resourceName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containers', full_name='container.Workload.containers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replicas', full_name='container.Workload.replicas', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='volumes', full_name='container.Workload.volumes', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotations', full_name='container.Workload.annotations', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='container.Workload.labels', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dnsPolicy', full_name='container.Workload.dnsPolicy', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restartPolicy', full_name='container.Workload.restartPolicy', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStrategy', full_name='container.Workload.deploymentStrategy', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='imagePullSecrets', full_name='container.Workload.imagePullSecrets', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStatus', full_name='container.Workload.deploymentStatus', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceSpec', full_name='container.Workload.resourceSpec', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='container.Workload.creator', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creationTimestamp', full_name='container.Workload.creationTimestamp', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='container.Workload.state', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transitionMessage', full_name='container.Workload.transitionMessage', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=298,
serialized_end=909,
)
_WORKLOAD.fields_by_name['containers'].message_type = flow__sdk_dot_model_dot_container_dot_container__pb2._CONTAINERCONFIG
_WORKLOAD.fields_by_name['volumes'].message_type = flow__sdk_dot_model_dot_container_dot_volume__pb2._VOLUME
_WORKLOAD.fields_by_name['annotations'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['labels'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['deploymentStrategy'].message_type = flow__sdk_dot_model_dot_container_dot_deployment__strategy__pb2._DEPLOYMENTSTRATEGY
_WORKLOAD.fields_by_name['imagePullSecrets'].message_type = flow__sdk_dot_model_dot_container_dot_local__object__reference__pb2._LOCALOBJECTREFERENCE
_WORKLOAD.fields_by_name['deploymentStatus'].message_type = flow__sdk_dot_model_dot_container_dot_deployment__status__pb2._DEPLOYMENTSTATUS
DESCRIPTOR.message_types_by_name['Workload'] = _WORKLOAD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Workload = _reflection.GeneratedProtocolMessageType('Workload', (_message.Message,), {
'DESCRIPTOR' : _WORKLOAD,
'__module__' : 'workload_pb2'
# @@protoc_insertion_point(class_scope:container.Workload)
})
_sym_db.RegisterMessage(Workload)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 57.378995 | 1,522 | 0.769378 |
421a0283967bbb1b0bfd73857a8948f77cc06b13 | 2,848 | py | Python | gen_browser_header/main/GenHeader.py | zwzw911/gen-browser-header | bf0f1d3a71ba68842e889ec5e0718672796b0ecb | [
"MIT"
] | null | null | null | gen_browser_header/main/GenHeader.py | zwzw911/gen-browser-header | bf0f1d3a71ba68842e889ec5e0718672796b0ecb | [
"MIT"
] | null | null | null | gen_browser_header/main/GenHeader.py | zwzw911/gen-browser-header | bf0f1d3a71ba68842e889ec5e0718672796b0ecb | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
import gen_browser_header.main.GenUA as gen_ua
import gen_browser_header.self.SelfEnum as self_enum
import gen_browser_header.helper.Helper as gbh_helper
def gen_header(setting, url, num=None):
'''
:param setting:
:param url: 根据url生成host
:param num:
:return:
'''
ua = []
if num is not None:
# 如果只需要一个header,优选返回firefox的ua
if num == 1:
# print(setting.browser_type)
if self_enum.BrowserType.FireFox in setting.browser_type:
# print('num =1 browse=ff')
ua += gen_ua.generate_firefox_ua(setting=setting, num=1)
elif self_enum.BrowserType.Chrome in setting.browser_type:
# print('num =1 browse=ch')
ua += gen_ua.generate_chrome_ua(setting=setting, num=1)
# 如果需要多个header
else:
# 如果可以产生ff的ua,先产生
if self_enum.BrowserType.FireFox in setting.browser_type:
ua += gen_ua.generate_firefox_ua(setting=setting, num=num)
# 如果生成的ff的ua数量不满足,再尝试生成chrome的ua
if len(ua) < num:
if self_enum.BrowserType.Chrome in setting.browser_type:
ua += gen_ua.generate_chrome_ua(setting=setting, num=num)
# num = None,生成最大数量的ua
else:
if self_enum.BrowserType.FireFox in setting.browser_type:
ua += gen_ua.generate_firefox_ua(setting=setting)
if self_enum.BrowserType.Chrome in setting.browser_type:
ua += gen_ua.generate_chrome_ua(setting=setting)
header = []
host = gbh_helper.extract_host_from_url(url)
for single_ua in ua:
# setting.header_no_ua['User-Agent'] = single_ua
# tmp_header = setting.header_no_ua
# tmp_header['User-Agent'] = single_ua
if 'Firefox' in single_ua:
header.append({**setting.firefox_header_no_ua,
**{'User-Agent': single_ua},
**{'Host': host}
})
elif 'Chrome' in single_ua:
header.append({**setting.chrome_header_no_ua,
**{'User-Agent': single_ua},
**{'Host': host}
})
return header
if __name__ == '__main__':
import gen_browser_header.setting.Setting as setting
cur_setting = setting.GbhSetting()
cur_setting.proxy_ip = ['10.11.12.13:9090']
cur_setting.browser_type = {self_enum.BrowserType.FireFox}
cur_setting.firefox_ver = {'min': 74, 'max': 75}
cur_setting.os_type = {self_enum.OsType.Win64}
cur_setting.chrome_type = {self_enum.ChromeType.Stable}
cur_setting.chrome_max_release_year = 1
r = gen_header(setting=cur_setting, url='https://packaging.python.org/tutorials/packaging-projects/')
print(r)
| 37.973333 | 105 | 0.61236 |
8bbd4f5800edb493ba615ce8225500e8ab10781a | 14,368 | py | Python | google/cloud/securitycenter_v1p1beta1/proto/notification_config_pb2.py | tmatsuo/python-securitycenter | dbb9311676003f452295848c49cbe97cb22c47b5 | [
"Apache-2.0"
] | null | null | null | google/cloud/securitycenter_v1p1beta1/proto/notification_config_pb2.py | tmatsuo/python-securitycenter | dbb9311676003f452295848c49cbe97cb22c47b5 | [
"Apache-2.0"
] | null | null | null | google/cloud/securitycenter_v1p1beta1/proto/notification_config_pb2.py | tmatsuo/python-securitycenter | dbb9311676003f452295848c49cbe97cb22c47b5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/securitycenter_v1p1beta1/proto/notification_config.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/securitycenter_v1p1beta1/proto/notification_config.proto",
package="google.cloud.securitycenter.v1p1beta1",
syntax="proto3",
serialized_options=b"\n)com.google.cloud.securitycenter.v1p1beta1P\001ZSgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1p1beta1;securitycenter\252\002%Google.Cloud.SecurityCenter.V1P1Beta1\312\002%Google\\Cloud\\SecurityCenter\\V1p1beta1\352\002(Google::Cloud::SecurityCenter::V1p1beta1\352A@\n\033pubsub.googleapis.com/Topic\022!projects/{project}/topics/{topic}",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nEgoogle/cloud/securitycenter_v1p1beta1/proto/notification_config.proto\x12%google.cloud.securitycenter.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto"\xb6\x04\n\x12NotificationConfig\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12W\n\nevent_type\x18\x03 \x01(\x0e\x32\x43.google.cloud.securitycenter.v1p1beta1.NotificationConfig.EventType\x12\x36\n\x0cpubsub_topic\x18\x04 \x01(\tB \xfa\x41\x1d\n\x1bpubsub.googleapis.com/Topic\x12\x1c\n\x0fservice_account\x18\x05 \x01(\tB\x03\xe0\x41\x03\x12\x65\n\x10streaming_config\x18\x06 \x01(\x0b\x32I.google.cloud.securitycenter.v1p1beta1.NotificationConfig.StreamingConfigH\x00\x1a!\n\x0fStreamingConfig\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t"4\n\tEventType\x12\x1a\n\x16\x45VENT_TYPE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46INDING\x10\x01:}\xea\x41z\n0securitycenter.googleapis.com/NotificationConfig\x12\x46organizations/{organization}/notificationConfigs/{notification_config}B\x0f\n\rnotify_configB\xc0\x02\n)com.google.cloud.securitycenter.v1p1beta1P\x01ZSgoogle.golang.org/genproto/googleapis/cloud/securitycenter/v1p1beta1;securitycenter\xaa\x02%Google.Cloud.SecurityCenter.V1P1Beta1\xca\x02%Google\\Cloud\\SecurityCenter\\V1p1beta1\xea\x02(Google::Cloud::SecurityCenter::V1p1beta1\xea\x41@\n\x1bpubsub.googleapis.com/Topic\x12!projects/{project}/topics/{topic}b\x06proto3',
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_api_dot_resource__pb2.DESCRIPTOR,
],
)
_NOTIFICATIONCONFIG_EVENTTYPE = _descriptor.EnumDescriptor(
name="EventType",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.EventType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="EVENT_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="FINDING",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=573,
serialized_end=625,
)
_sym_db.RegisterEnumDescriptor(_NOTIFICATIONCONFIG_EVENTTYPE)
_NOTIFICATIONCONFIG_STREAMINGCONFIG = _descriptor.Descriptor(
name="StreamingConfig",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.StreamingConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="filter",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.StreamingConfig.filter",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=538,
serialized_end=571,
)
_NOTIFICATIONCONFIG = _descriptor.Descriptor(
name="NotificationConfig",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="description",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.description",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="event_type",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.event_type",
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="pubsub_topic",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.pubsub_topic",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\372A\035\n\033pubsub.googleapis.com/Topic",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="service_account",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.service_account",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\340A\003",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="streaming_config",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.streaming_config",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[_NOTIFICATIONCONFIG_STREAMINGCONFIG,],
enum_types=[_NOTIFICATIONCONFIG_EVENTTYPE,],
serialized_options=b"\352Az\n0securitycenter.googleapis.com/NotificationConfig\022Forganizations/{organization}/notificationConfigs/{notification_config}",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="notify_config",
full_name="google.cloud.securitycenter.v1p1beta1.NotificationConfig.notify_config",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=203,
serialized_end=769,
)
_NOTIFICATIONCONFIG_STREAMINGCONFIG.containing_type = _NOTIFICATIONCONFIG
_NOTIFICATIONCONFIG.fields_by_name[
"event_type"
].enum_type = _NOTIFICATIONCONFIG_EVENTTYPE
_NOTIFICATIONCONFIG.fields_by_name[
"streaming_config"
].message_type = _NOTIFICATIONCONFIG_STREAMINGCONFIG
_NOTIFICATIONCONFIG_EVENTTYPE.containing_type = _NOTIFICATIONCONFIG
_NOTIFICATIONCONFIG.oneofs_by_name["notify_config"].fields.append(
_NOTIFICATIONCONFIG.fields_by_name["streaming_config"]
)
_NOTIFICATIONCONFIG.fields_by_name[
"streaming_config"
].containing_oneof = _NOTIFICATIONCONFIG.oneofs_by_name["notify_config"]
DESCRIPTOR.message_types_by_name["NotificationConfig"] = _NOTIFICATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NotificationConfig = _reflection.GeneratedProtocolMessageType(
"NotificationConfig",
(_message.Message,),
{
"StreamingConfig": _reflection.GeneratedProtocolMessageType(
"StreamingConfig",
(_message.Message,),
{
"DESCRIPTOR": _NOTIFICATIONCONFIG_STREAMINGCONFIG,
"__module__": "google.cloud.securitycenter_v1p1beta1.proto.notification_config_pb2",
"__doc__": """The config for streaming-based notifications, which send each event as
soon as it is detected.
Attributes:
filter:
Expression that defines the filter to apply across
create/update events of assets or findings as specified by the
event type. The expression is a list of zero or more
restrictions combined via logical operators ``AND`` and
``OR``. Parentheses are supported, and ``OR`` has higher
precedence than ``AND``. Restrictions have the form ``<field>
<operator> <value>`` and may have a ``-`` character in front
of them to indicate negation. The fields map to those defined
in the corresponding resource. The supported operators are:
- ``=`` for all value types. - ``>``, ``<``, ``>=``, ``<=``
for integer values. - ``:``, meaning substring matching, for
strings. The supported value types are: - string literals
in quotes. - integer literals without quotes. - boolean
literals ``true`` and ``false`` without quotes.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1p1beta1.NotificationConfig.StreamingConfig)
},
),
"DESCRIPTOR": _NOTIFICATIONCONFIG,
"__module__": "google.cloud.securitycenter_v1p1beta1.proto.notification_config_pb2",
"__doc__": """Security Command Center notification configs. A notification config
is a Security Command Center resource that contains the configuration
to send notifications for create/update events of findings, assets and
etc.
Attributes:
name:
The relative resource name of this notification config. See: h
ttps://cloud.google.com/apis/design/resource_names#relative_re
source_name Example: “organizations/{organization_id}/notifica
tionConfigs/notify_public_bucket”.
description:
The description of the notification config (max of 1024
characters).
event_type:
The type of events the config is for, e.g. FINDING.
pubsub_topic:
The Pub/Sub topic to send notifications to. Its format is
“projects/[project_id]/topics/[topic]”.
service_account:
Output only. The service account that needs
“pubsub.topics.publish” permission to publish to the Pub/Sub
topic.
notify_config:
The config for triggering notifications.
streaming_config:
The config for triggering streaming-based notifications.
""",
# @@protoc_insertion_point(class_scope:google.cloud.securitycenter.v1p1beta1.NotificationConfig)
},
)
_sym_db.RegisterMessage(NotificationConfig)
_sym_db.RegisterMessage(NotificationConfig.StreamingConfig)
DESCRIPTOR._options = None
_NOTIFICATIONCONFIG.fields_by_name["pubsub_topic"]._options = None
_NOTIFICATIONCONFIG.fields_by_name["service_account"]._options = None
_NOTIFICATIONCONFIG._options = None
# @@protoc_insertion_point(module_scope)
| 41.889213 | 1,451 | 0.676782 |
7b21abb4b5602a83cde9cf644352c3e7c51915e4 | 2,265 | py | Python | blender/arm/logicnode/transform/LN_rotate_object.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/transform/LN_rotate_object.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/transform/LN_rotate_object.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import *
class RotateObjectNode(ArmLogicTreeNode):
    """Rotates the given object.

    The two value inputs are interpreted according to ``property0``:
    Euler angles, angle/axis (in radians or degrees), or quaternion.
    """
    bl_idname = 'LNRotateObjectNode'
    bl_label = 'Rotate Object'
    arm_section = 'rotation'
    arm_version = 1

    def init(self, context):
        """Create the node sockets (labels are adjusted by on_property_update)."""
        super().init(context)
        self.add_input('ArmNodeSocketAction', 'In')
        self.add_input('ArmNodeSocketObject', 'Object')
        self.add_input('NodeSocketVector', 'Euler Angles')
        self.add_input('NodeSocketFloat', 'Angle / W')
        self.add_output('ArmNodeSocketAction', 'Out')

    def on_property_update(self, context):
        """ called by the EnumProperty, used to update the node socket labels"""
        if self.property0 == "Quaternion":
            self.inputs[2].name = "Quaternion XYZ"
            self.inputs[3].name = "Quaternion W"
        elif self.property0 == "Euler Angles":
            self.inputs[2].name = "Euler Angles"
            self.inputs[3].name = "[unused for Euler input]"
        elif self.property0.startswith("Angle Axies"):
            # "Angle Axies" (sic) is the identifier stored in existing node
            # setups, so the misspelling must be kept for compatibility.
            self.inputs[2].name = "Axis"
            self.inputs[3].name = "Angle"
        else:
            # Fixed: the original message referenced a stale file name
            # ("action_set_rotation.py") and misspelled "consistency".
            raise ValueError('No nodesocket labels for current input mode: check self-consistency of LN_rotate_object.py')

    def draw_buttons(self, context, layout):
        # this block is here to ensure backwards compatibility and warn the user.
        # delete it (only keep the "else" part) when the 'old version' of the node will be considered removed.
        # (note: please also update the corresponding haxe file when doing so)
        if len(self.inputs) < 4:
            row = layout.row(align=True)
            row.label(text="Node has been updated with armory 2020.09. Please consider deleting and recreating it.")
        else:
            layout.prop(self, 'property0')

    # Enum identifiers (including the historical "Angle Axies" spelling) are
    # persisted in saved files and read by the Haxe runtime: do not rename.
    property0: EnumProperty(
        items = [('Euler Angles', 'Euler Angles', 'Euler Angles'),
                 ('Angle Axies (Radians)', 'Angle Axies (Radians)', 'Angle Axies (Radians)'),
                 ('Angle Axies (Degrees)', 'Angle Axies (Degrees)', 'Angle Axies (Degrees)'),
                 ('Quaternion', 'Quaternion', 'Quaternion')],
        name='', default='Euler Angles',
        update = on_property_update)
| 45.3 | 125 | 0.625607 |
506f6318b9c59e3de825b7c8b782e01811ad6bfe | 1,053 | py | Python | skdecide/builders/discrete_optimization/vrp/solver/greedy_vrp.py | galleon/bug-free-invention | 37bcea112da39d1390ff2b30951b36ee5dbc0e6d | [
"MIT"
] | null | null | null | skdecide/builders/discrete_optimization/vrp/solver/greedy_vrp.py | galleon/bug-free-invention | 37bcea112da39d1390ff2b30951b36ee5dbc0e6d | [
"MIT"
] | null | null | null | skdecide/builders/discrete_optimization/vrp/solver/greedy_vrp.py | galleon/bug-free-invention | 37bcea112da39d1390ff2b30951b36ee5dbc0e6d | [
"MIT"
] | null | null | null | from skdecide.builders.discrete_optimization.generic_tools.do_solver import SolverDO, ResultStorage
from skdecide.builders.discrete_optimization.generic_tools.do_problem import ParamsObjectiveFunction, \
build_aggreg_function_and_params_objective
from skdecide.builders.discrete_optimization.vrp.vrp_model import VrpProblem, trivial_solution
class GreedyVRPSolver(SolverDO):
    """Baseline VRP solver: returns the trivial solution of the problem."""

    def __init__(self, vrp_model: VrpProblem, params_objective_function: ParamsObjectiveFunction = None):
        self.vrp_model = vrp_model
        # Build the objective-aggregation machinery once, up front.
        (self.aggreg_sol,
         self.aggreg_dict,
         self.params_objective_function) = build_aggreg_function_and_params_objective(
            problem=self.vrp_model,
            params_objective_function=params_objective_function,
        )

    def solve(self, **kwargs):
        """Compute the trivial solution and wrap it in a ResultStorage."""
        solution, _ = trivial_solution(self.vrp_model)
        # Re-evaluate fitness through the aggregation function (the fitness
        # returned by trivial_solution is intentionally discarded).
        fitness = self.aggreg_sol(solution)
        return ResultStorage(
            list_solution_fits=[(solution, fitness)],
            mode_optim=self.params_objective_function.sense_function,
        )
| 55.421053 | 107 | 0.745489 |
7b2184ba75f3cf5c13363e12598623b4554af002 | 406 | py | Python | .github/write_target_url.py | Social-Tech-Collab/socialtechcollaborative | 01c540b4c378ceec5bf777ede4004e88e21a44c8 | [
"MIT"
] | null | null | null | .github/write_target_url.py | Social-Tech-Collab/socialtechcollaborative | 01c540b4c378ceec5bf777ede4004e88e21a44c8 | [
"MIT"
] | 12 | 2020-05-07T23:37:34.000Z | 2022-02-26T08:27:13.000Z | .github/write_target_url.py | socialtechus/socialtechus.github.io | 999218f1802409cbc8dcc28f414aabbf554e8641 | [
"MIT"
] | 1 | 2020-05-16T06:42:06.000Z | 2020-05-16T06:42:06.000Z | import requests
import json
# Load the GitHub event payload that the workflow dumped to disk.
with open("payload.json") as file:
    payload = json.load(file)

# Fetch the pull request, then its statuses.
pr_url = payload['pull_request']['url']
pr_content = json.loads(requests.get(pr_url).content)
status_url = pr_content['statuses_url']
# NOTE(review): assumes the first entry is the most recent status and that it
# carries the deploy-preview URL — confirm against the CI provider.
latest_status = json.loads(requests.get(status_url).content)[0]

# Fixed: removed a no-op expression statement (`latest_status['target_url']`)
# and renamed the misleading variable `test` to `payload`.
with open("target_url.txt", "w") as file:
    file.write(latest_status['target_url'])
| 27.066667 | 63 | 0.736453 |
c47ee9fa3860af5f82b56cee6a848e2bef1ecb1b | 4,618 | py | Python | saas/blueking/component/apis/sops.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 7 | 2021-08-13T03:48:16.000Z | 2021-12-20T15:31:38.000Z | saas/blueking/component/apis/sops.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 456 | 2021-08-16T02:13:57.000Z | 2022-03-30T10:02:49.000Z | saas/blueking/component/apis/sops.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | 17 | 2021-08-10T04:08:46.000Z | 2022-03-14T14:24:36.000Z | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from ..base import ComponentAPI
class CollectionsSOPS(object):
    """Collections of SOPS APIS"""

    # One row per exposed endpoint: (attribute name, HTTP method, component
    # API path, description).  Paths and descriptions are kept verbatim from
    # the previous hand-written assignments.
    _API_SPECS = (
        ('create_periodic_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/create_periodic_task/', u'通过流程模板新建周期任务'),
        ('create_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/create_task/', u'通过流程模板新建任务'),
        ('get_periodic_task_info', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_periodic_task_info/', u'查询业务下的某个周期任务详情'),
        ('get_periodic_task_list', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_periodic_task_list/', u'查询业务下的周期任务列表'),
        ('get_task_detail', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_task_detail/', u'查询任务执行详情'),
        ('get_task_node_detail', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_task_node_detail/', u'查询任务节点执行详情'),
        ('get_task_status', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_task_status/', u'查询任务或任务节点执行状态'),
        ('get_template_info', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_template_info/', u'查询单个模板详情'),
        ('get_template_list', 'GET',
         '/api/c/compapi{bk_api_ver}/sops/get_template_list/', u'查询模板列表'),
        ('import_common_template', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/import_common_template/', u'导入公共流程'),
        ('modify_constants_for_periodic_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/modify_constants_for_periodic_task/', u'修改周期任务的全局参数'),
        ('modify_cron_for_periodic_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/modify_cron_for_periodic_task/', u'修改周期任务的调度策略'),
        ('node_callback', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/node_callback/', u'回调任务节点'),
        ('operate_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/operate_task/', u'操作任务'),
        ('query_task_count', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/query_task_count/', u'查询任务分类统计总数'),
        ('set_periodic_task_enabled', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/set_periodic_task_enabled/', u'设置周期任务是否激活'),
        ('start_task', 'POST',
         '/api/c/compapi{bk_api_ver}/sops/start_task/', u'开始执行任务'),
    )

    def __init__(self, client):
        """Bind one ComponentAPI per endpoint in ``_API_SPECS`` onto self.

        The resulting attributes (``self.create_task`` etc.) are identical to
        the previous one-assignment-per-endpoint version; the table removes
        the duplicated constructor boilerplate and gives a single place to
        add or amend endpoints.
        """
        self.client = client
        for name, method, path, description in self._API_SPECS:
            setattr(self, name, ComponentAPI(
                client=self.client,
                method=method,
                path=path,
                description=description,
            ))
| 43.566038 | 115 | 0.622347 |
c12b10b256019728b02ce7424c177b1a978eb082 | 2,176 | py | Python | ports/esp32/boards/TTGO_LORA/modules/bios.py | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | 1 | 2021-10-07T16:17:28.000Z | 2021-10-07T16:17:28.000Z | ports/esp32/boards/TTGO_LORA/modules/bios.py | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | 1 | 2021-05-22T15:33:56.000Z | 2021-05-23T13:33:05.000Z | ports/esp32/boards/TTGO_LORA/modules/bios.py | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | null | null | null | # Copyright 2020 LeMaRiva|tech lemariva.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from machine import Pin, SPI
from sx127x import SX127x
from ssd1306 import Display
class Lora(SX127x):
    """SX127x LoRa radio driver preconfigured for this board's wiring.

    Thin wrapper around SX127x that owns the SPI bus used to talk to the
    transceiver.  Defaults target the TTGO LoRa (ESP32) pin mapping and the
    868 MHz EU band — adjust ``lora_config['frequency']`` for other regions.
    """
    # ESP32 GPIO numbers for the SPI bus and radio control lines.
    # NOTE(review): assumed to match the TTGO LoRa board revision in use —
    # verify against the board schematic before changing.
    pins_config = { 'miso' : 19,
                    'mosi' : 27,
                    'ss' : 18,
                    'sck' : 5,
                    'dio_0' : 26,
                    'reset' : 14,
                    'led' : 2,
    }
    # Default radio parameters passed through to the SX127x driver.
    lora_config = { 'frequency' : 868E6,
                    'tx_power_level' : 2,
                    'signal_bandwidth': 125E3,
                    'spreading_factor': 8,
                    'coding_rate' : 5,
                    'preamble_length' : 8,
                    'implicit_header' : False,
                    'sync_word' : 0x12,
                    'enable_CRC' : False,
                    'invert_IQ' : False, }
    def __init__(self,
                 parameters = lora_config,
                 pins = pins_config):
        # Set up the SPI bus (MicroPython `machine` API) on the pins given by
        # `pins`; SPI mode 0 (polarity=0, phase=0), MSB first.
        self._spi = SPI(baudrate = 10000000,
                        polarity = 0,
                        phase = 0,
                        bits = 8,
                        firstbit = SPI.MSB,
                        sck = Pin(pins['sck'], Pin.OUT, Pin.PULL_DOWN),
                        mosi = Pin(pins['mosi'], Pin.OUT, Pin.PULL_UP),
                        miso = Pin(pins['miso'], Pin.IN, Pin.PULL_UP))
        # Hand the bus, pin map and radio parameters to the SX127x base class.
        super().__init__(self._spi, pins, parameters)
class Bios:
    """Top-level board services: the LoRa radio plus the SSD1306 display."""

    def __init__(self,
                 lora_config=None):
        """Initialize the radio and the display.

        ``lora_config`` defaults to ``Lora.lora_config``.  Fixed: the default
        used to be the mutable class dict itself (evaluated once at function
        definition); a ``None`` sentinel resolves it at call time instead.
        """
        if lora_config is None:
            lora_config = Lora.lora_config
        self.lora = Lora(lora_config)
        self.display = Display()
| 35.672131 | 77 | 0.497702 |
1da80cd45289568a51cb0696e6a524e1d5061e4d | 988 | py | Python | bin/scrape_software_versions.py | nf-core/scflow | 5b128d6fdf1b35af28159bc76d1cd7432f6647e2 | [
"MIT"
] | 9 | 2020-12-09T11:53:02.000Z | 2021-11-18T15:53:48.000Z | bin/scrape_software_versions.py | combiz/nf-core-scflow | 374726161fe2d681c835274d332f043d4d38df5b | [
"MIT"
] | 16 | 2020-03-12T15:16:01.000Z | 2021-07-23T17:00:42.000Z | bin/scrape_software_versions.py | nf-core/scflow | 5b128d6fdf1b35af28159bc76d1cd7432f6647e2 | [
"MIT"
] | 4 | 2021-04-22T14:54:45.000Z | 2021-08-30T13:44:29.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
# Collect every "<software>.version.txt" file from the working directory into
# a {software: version} mapping; the pipeline's own file is reported under the
# canonical project name.
results = {}
for fname in os.listdir("."):
    if not fname.endswith(".version.txt"):
        continue
    with open(fname) as handle:
        version = handle.read().strip()
    software = fname.replace(".version.txt", "")
    if software == "pipeline":
        software = "nf-core/scflow"
    results[software] = version

# Emit the MultiQC custom-content YAML (with embedded HTML) on stdout.
print(
    """
id: 'software_versions'
section_name: 'nf-core/scflow Software Versions'
section_href: 'https://github.com/nf-core/scflow'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
    <dl class="dl-horizontal">
"""
)
for software, version in sorted(results.items()):
    print("        <dt>{}</dt><dd><samp>{}</samp></dd>".format(software, version))
print("    </dl>")

# Persist the same mapping as a tab-separated file.
with open("software_versions.tsv", "w") as handle:
    for software, version in sorted(results.items()):
        handle.write("{}\t{}\n".format(software, version))
| 26.702703 | 74 | 0.644737 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.