hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace6682e4618491045072c6d1f46935e508f022c | 7,398 | py | Python | custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py | Alfiegerner/hacs_waste_collection_schedule | 32323cda498cb3cabe692dd1bc255ba8637dc88d | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py | Alfiegerner/hacs_waste_collection_schedule | 32323cda498cb3cabe692dd1bc255ba8637dc88d | [
"MIT"
] | null | null | null | custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py | Alfiegerner/hacs_waste_collection_schedule | 32323cda498cb3cabe692dd1bc255ba8637dc88d | [
"MIT"
] | null | null | null | import datetime
import logging
from pathlib import Path
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
TITLE = "ICS"
DESCRIPTION = "Source for ICS based schedules."
URL = None
TEST_CASES = {
"Dortmund, Dudenstr. 5": {
"url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4"
},
"Leipzig, Sandgrubenweg 27": {
"url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027"
},
"Ludwigsburg": {
"url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics"
},
"Esslingen, Bahnhof": {
"url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe"
},
"Test File": {
# Path is used here to allow to call the Source from any location.
# This is not required in a yaml configuration!
"file": Path(__file__)
.resolve()
.parents[1]
.joinpath("test/test.ics")
},
"Test File (recurring)": {
# Path is used here to allow to call the Source from any location.
# This is not required in a yaml configuration!
"file": Path(__file__)
.resolve()
.parents[1]
.joinpath("test/recurring.ics")
},
"München, Bahnstr. 11": {
"url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D=2021&cHash=f7364ba8bdb286cd823297ae66e24181"
},
"Buxtehude, Am Berg": {
"url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics"
},
# "Hausmüllinfo: ASR Chemnitz": {
# "url": "https://asc.hausmuell.info/ics/ics.php",
# "method": "POST",
# "params": {
# "hidden_id_egebiet": 439087,
# "input_ort": "Chemnitz",
# "input_str": "Straße der Nationen",
# "input_hnr": 2,
# "hidden_send_btn": "ics",
# # "hiddenYear": 2021,
# "hidden_id_ort": 10,
# "hidden_id_ortsteil": 0,
# "hidden_id_str": 17814,
# "hidden_id_hnr": 5538100,
# "hidden_kalenderart": "privat",
# "showBinsBio": "on",
# "showBinsRest": "on",
# "showBinsRest_rc": "on",
# "showBinsPapier": "on",
# "showBinsOrganic": "on",
# "showBinsXmas": "on",
# "showBinsDsd": "on",
# "showBinsProb": "on",
# },
# "year_field": "hiddenYear",
# },
"Abfall Zollernalbkreis, Ebingen": {
"url": "https://www.abfallkalender-zak.de",
"params": {
"city": "2,3,4",
"street": "3",
"types[]": [
"restmuell",
"gelbersack",
"papiertonne",
"biomuell",
"gruenabfall",
"schadstoffsammlung",
"altpapiersammlung",
"schrottsammlung",
"weihnachtsbaeume",
"elektrosammlung",
],
"go_ics": "Download",
},
"year_field": "year",
},
}
# Impersonate a regular desktop browser; some schedule providers reject
# requests with non-browser user agents.
HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}

_LOGGER = logging.getLogger(__name__)
class Source:
    """ICS waste-collection source.

    Fetches an ICS calendar either from a URL (GET or POST) or from a local
    file and converts it into a list of Collection entries.
    """

    def __init__(
        self,
        url=None,
        file=None,
        offset=None,
        params=None,
        year_field=None,
        method="GET",
    ):
        """Initialize the source.

        Args:
            url: URL of the ICS file; may contain the wildcard ``{%Y}`` which
                is replaced with the current year at fetch time.
            file: path to a local ICS file. Exactly one of url/file must be
                given.
            offset: day offset handed to the ICS converter.
            params: URL query (GET) or form (POST) parameters.
            year_field: key in ``params`` to overwrite with the current year.
            method: HTTP method used to fetch the URL, "GET" or "POST".

        Raises:
            RuntimeError: if both or neither of url/file are specified.
        """
        self._url = url
        self._file = file
        if bool(self._url is not None) == bool(self._file is not None):
            raise RuntimeError("Specify either url or file")
        self._ics = ICS(offset)
        self._params = params
        self._year_field = year_field  # replace this field in params with current year
        self._method = method  # The method to send the params

    def fetch(self):
        """Fetch the schedule and return a list of Collection entries."""
        if self._url is not None:
            if "{%Y}" in self._url or self._year_field is not None:
                # url contains wildcard or params contains year field
                now = datetime.datetime.now()

                # replace year in url
                url = self._url.replace("{%Y}", str(now.year))

                # replace year in params
                if self._year_field is not None:
                    if self._params is None:
                        raise RuntimeError("year_field specified without params")
                    self._params[self._year_field] = str(now.year)

                entries = self.fetch_url(url, self._params)

                if now.month == 12:
                    # also get data for next year if we are already in December
                    url = self._url.replace("{%Y}", str(now.year + 1))
                    # BUGFIX: only touch params when a year field is
                    # configured; params is None in the url-wildcard-only
                    # case and item assignment would raise a TypeError.
                    if self._year_field is not None:
                        self._params[self._year_field] = str(now.year + 1)
                    try:
                        # BUGFIX: params must be passed to fetch_url, not as a
                        # second argument to list.extend (which accepts only a
                        # single iterable).
                        entries.extend(self.fetch_url(url, self._params))
                    except Exception:
                        # ignore if fetch for next year fails
                        pass
                return entries
            else:
                return self.fetch_url(self._url, self._params)
        elif self._file is not None:
            return self.fetch_file(self._file)

    def fetch_url(self, url, params=None):
        """Download the ICS file and convert it.

        Returns the converted entries, or the string "error" on failure
        (unknown HTTP method or non-2xx response) — kept for backward
        compatibility with existing callers.
        """
        # get ics file
        if self._method == "GET":
            r = requests.get(url, params=params, headers=HEADERS)
        elif self._method == "POST":
            r = requests.post(url, data=params, headers=HEADERS)
        else:
            _LOGGER.error(
                "Error: unknown method to fetch URL, use GET or POST; got %s"
                % self._method
            )
            return "error"
        r.encoding = "utf-8"  # requests doesn't guess the encoding correctly

        # check the return code
        if not r.ok:
            _LOGGER.error(
                "Error: the response is not ok; need code 200, but got code %s"
                % r.status_code
            )
            return "error"

        return self._convert(r.text)

    def fetch_file(self, file):
        """Read a local ICS file and convert it."""
        # BUGFIX: use a context manager so the file handle is always closed.
        with open(file) as f:
            return self._convert(f.read())

    def _convert(self, data):
        """Convert raw ICS text into a list of Collection entries."""
        dates = self._ics.convert(data)

        entries = []
        for d in dates:
            entries.append(Collection(d[0], d[1]))
        return entries
| 39.351064 | 886 | 0.574885 |
ace66894d8955a515c82cd30d1c4c958d7fbe83b | 12,647 | py | Python | dfwinreg/fake.py | ict/dfwinreg | 275a1e3108b437841e9d442fd696ccec1bb0a60b | [
"Apache-2.0"
] | null | null | null | dfwinreg/fake.py | ict/dfwinreg | 275a1e3108b437841e9d442fd696ccec1bb0a60b | [
"Apache-2.0"
] | null | null | null | dfwinreg/fake.py | ict/dfwinreg | 275a1e3108b437841e9d442fd696ccec1bb0a60b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Fake Windows Registry objects implementation."""
from __future__ import unicode_literals
import collections
import os
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dtfabric.runtime import fabric as dtfabric_fabric
from dfwinreg import definitions
from dfwinreg import errors
from dfwinreg import interface
from dfwinreg import key_paths
class FakeWinRegistryFile(interface.WinRegistryFile):
  """Fake implementation of a Windows Registry file.

  Keys are held in an in-memory tree of FakeWinRegistryKey objects rooted
  at a key named after the key path prefix.
  """

  def __init__(self, ascii_codepage='cp1252', key_path_prefix=''):
    """Initializes a Windows Registry file.

    Args:
      ascii_codepage (str): ASCII string codepage.
      key_path_prefix (str): Windows Registry key path prefix.
    """
    super(FakeWinRegistryFile, self).__init__(
        ascii_codepage=ascii_codepage, key_path_prefix=key_path_prefix)
    self._root_key = None

  def AddKeyByPath(self, key_path, registry_key):
    """Adds a Windows Registry key for a specific key path.

    Intermediate keys that do not exist yet are created on the fly.

    Args:
      key_path (str): Windows Registry key path to add the key.
      registry_key (WinRegistryKey): Windows Registry key.

    Raises:
      KeyError: if the subkey already exists.
      ValueError: if the Windows Registry key cannot be added.
    """
    if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
      raise ValueError('Key path does not start with: {0:s}'.format(
          definitions.KEY_PATH_SEPARATOR))

    if not self._root_key:
      self._root_key = FakeWinRegistryKey(self._key_path_prefix)

    # Walk the path segments, creating missing intermediate keys as needed.
    current_key = self._root_key
    for path_segment in key_paths.SplitKeyPath(key_path):
      next_key = current_key.GetSubkeyByName(path_segment)
      if next_key is None:
        next_key = FakeWinRegistryKey(path_segment)
        current_key.AddSubkey(next_key)
      current_key = next_key

    current_key.AddSubkey(registry_key)

  def Close(self):
    """Closes the Windows Registry file."""
    return

  def GetKeyByPath(self, key_path):
    """Retrieves the key for a specific path.

    Args:
      key_path (str): Windows Registry key path.

    Returns:
      WinRegistryKey: Windows Registry key or None if not available.
    """
    # Accept either a path carrying the key path prefix or an absolute
    # path starting with the path separator; reject anything else.
    if key_path.upper().startswith(self._key_path_prefix_upper):
      relative_key_path = key_path[self._key_path_prefix_length:]
    elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):
      relative_key_path = key_path
    else:
      return None

    current_key = self._root_key
    for path_segment in key_paths.SplitKeyPath(relative_key_path):
      if current_key is None:
        break
      current_key = current_key.GetSubkeyByName(path_segment)

    return current_key

  def GetRootKey(self):
    """Retrieves the root key.

    Returns:
      WinRegistryKey: Windows Registry key or None if not available.
    """
    return self._root_key

  def Open(self, file_object):
    """Opens the Windows Registry file using a file-like object.

    Args:
      file_object (file): file-like object.

    Returns:
      bool: True if successful or False if not.
    """
    return True
class FakeWinRegistryKey(interface.WinRegistryKey):
  """Fake implementation of a Windows Registry key."""

  def __init__(
      self, name, class_name=None, key_path='', last_written_time=None,
      offset=None, subkeys=None, values=None):
    """Initializes a Windows Registry key.

    Subkeys and values with duplicate names are silently ignored.

    Args:
      name (str): name of the Windows Registry key.
      key_path (Optional[str]): Windows Registry key path.
      class_name (Optional[str]): class name of the Windows Registry key.
      last_written_time (Optional[int]): last written time, formatted as
          a FILETIME timestamp.
      offset (Optional[int]): offset of the key within the Windows Registry
          file.
      subkeys (Optional[list[FakeWinRegistryKey]]): list of subkeys.
      values (Optional[list[FakeWinRegistryValue]]): list of values.
    """
    super(FakeWinRegistryKey, self).__init__(key_path=key_path)
    self._class_name = class_name
    self._last_written_time = last_written_time
    self._name = name
    self._offset = offset
    # OrderedDict preserves insertion order for GetSubkeyByIndex and the
    # GetSubkeys/GetValues generators; mapping keys are stored upper cased
    # to make name lookups case insensitive.
    self._subkeys = collections.OrderedDict()
    self._values = collections.OrderedDict()

    self._BuildKeyHierarchy(subkeys, values)

  @property
  def class_name(self):
    """str: class name of the key or None if not available."""
    return self._class_name

  @property
  def last_written_time(self):
    """dfdatetime.DateTimeValues: last written time."""
    # Fall back to a semantic "Not set" time when no FILETIME was given.
    if self._last_written_time is None:
      return dfdatetime_semantic_time.SemanticTime('Not set')

    return dfdatetime_filetime.Filetime(timestamp=self._last_written_time)

  @property
  def name(self):
    """str: name of the key."""
    return self._name

  @property
  def number_of_subkeys(self):
    """int: number of subkeys within the key."""
    return len(self._subkeys)

  @property
  def number_of_values(self):
    """int: number of values within the key."""
    return len(self._values)

  @property
  def offset(self):
    """int: offset of the key within the Windows Registry file or None."""
    return self._offset

  def _BuildKeyHierarchy(self, subkeys, values):
    """Builds the Windows Registry key hierarchy.

    Args:
      subkeys (list[FakeWinRegistryKey]): list of subkeys.
      values (list[FakeWinRegistryValue]): list of values.
    """
    if subkeys:
      for registry_key in subkeys:
        name = registry_key.name.upper()
        if name in self._subkeys:
          # Duplicate subkey names are silently ignored (see class docstring).
          continue
        self._subkeys[name] = registry_key

        # Fix up the subkey's path so it reflects its place in the hierarchy.
        # pylint: disable=protected-access
        registry_key._key_path = key_paths.JoinKeyPath([
            self._key_path, registry_key.name])

    if values:
      for registry_value in values:
        name = registry_value.name.upper()
        if name in self._values:
          # Duplicate value names are silently ignored (see class docstring).
          continue
        self._values[name] = registry_value

  def AddSubkey(self, registry_key):
    """Adds a subkey.

    Args:
      registry_key (WinRegistryKey): Windows Registry subkey.

    Raises:
      KeyError: if the subkey already exists.
    """
    name = registry_key.name.upper()
    if name in self._subkeys:
      raise KeyError(
          'Subkey: {0:s} already exists.'.format(registry_key.name))

    self._subkeys[name] = registry_key
    # Keep the subkey's path consistent with its new parent.
    key_path = key_paths.JoinKeyPath([self._key_path, registry_key.name])
    registry_key._key_path = key_path  # pylint: disable=protected-access

  def AddValue(self, registry_value):
    """Adds a value.

    Args:
      registry_value (WinRegistryValue): Windows Registry value.

    Raises:
      KeyError: if the value already exists.
    """
    name = registry_value.name.upper()
    if name in self._values:
      raise KeyError(
          'Value: {0:s} already exists.'.format(registry_value.name))

    self._values[name] = registry_value

  def GetSubkeyByIndex(self, index):
    """Retrieves a subkey by index.

    Args:
      index (int): index of the subkey.

    Returns:
      WinRegistryKey: Windows Registry subkey or None if not found.

    Raises:
      IndexError: if the index is out of bounds.
    """
    # Index is in insertion order; negative indices are rejected.
    subkeys = list(self._subkeys.values())

    if index < 0 or index >= len(subkeys):
      raise IndexError('Index out of bounds.')

    return subkeys[index]

  def GetSubkeyByName(self, name):
    """Retrieves a subkey by name.

    The lookup is case insensitive.

    Args:
      name (str): name of the subkey.

    Returns:
      WinRegistryKey: Windows Registry subkey or None if not found.
    """
    return self._subkeys.get(name.upper(), None)

  def GetSubkeyByPath(self, key_path):
    """Retrieves a subkey by path.

    Args:
      key_path (str): path of the subkey.

    Returns:
      WinRegistryKey: Windows Registry subkey or None if not found.
    """
    subkey = self
    for path_segment in key_paths.SplitKeyPath(key_path):
      subkey = subkey.GetSubkeyByName(path_segment)
      if not subkey:
        break

    return subkey

  def GetSubkeys(self):
    """Retrieves all subkeys within the key.

    Returns:
      generator[WinRegistryKey]: Windows Registry subkey generator.
    """
    return iter(self._subkeys.values())

  def GetValueByName(self, name):
    """Retrieves a value by name.

    The lookup is case insensitive.

    Args:
      name (str): name of the value or an empty string for the default value.

    Returns:
      WinRegistryValue: Windows Registry value or None if not found.
    """
    return self._values.get(name.upper(), None)

  def GetValues(self):
    """Retrieves all values within the key.

    Returns:
      generator[WinRegistryValue]: Windows Registry value generator.
    """
    return iter(self._values.values())
class FakeWinRegistryValue(interface.WinRegistryValue):
  """Fake implementation of a Windows Registry value."""

  _DATA_TYPE_FABRIC_DEFINITION_FILE = os.path.join(
      os.path.dirname(__file__), 'dtfabric.yaml')

  # The dtfabric data type definitions are read once at class definition
  # time and used to build the integer data type maps below.
  with open(_DATA_TYPE_FABRIC_DEFINITION_FILE, 'rb') as file_object:
    _DATA_TYPE_FABRIC_DEFINITION = file_object.read()

  _DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric(
      yaml_definition=_DATA_TYPE_FABRIC_DEFINITION)

  _INT32_BIG_ENDIAN = _DATA_TYPE_FABRIC.CreateDataTypeMap('int32be')
  _INT32_LITTLE_ENDIAN = _DATA_TYPE_FABRIC.CreateDataTypeMap('int32le')
  _INT64_LITTLE_ENDIAN = _DATA_TYPE_FABRIC.CreateDataTypeMap('int64le')

  def __init__(self, name, data=b'', data_type=definitions.REG_NONE, offset=0):
    """Initializes a Windows Registry value.

    Args:
      name (str): name of the Windows Registry value.
      data (Optional[bytes]): value data.
      data_type (Optional[int]): value data type.
      offset (Optional[int]): offset of the value within the Windows Registry
          file.
    """
    super(FakeWinRegistryValue, self).__init__()
    self._data = data
    self._data_type = data_type
    self._data_size = len(data)
    self._name = name
    self._offset = offset

  @property
  def data(self):
    """bytes: value data as a byte string."""
    return self._data

  @property
  def data_type(self):
    """int: data type."""
    return self._data_type

  @property
  def name(self):
    """str: name of the value."""
    return self._name

  @property
  def offset(self):
    """int: offset of the value within the Windows Registry file."""
    return self._offset

  def GetDataAsObject(self):
    """Retrieves the data as an object.

    Returns:
      object: data as a Python type or None if not available.

    Raises:
      WinRegistryValueError: if the value data cannot be read.
    """
    if not self._data:
      return None

    # _STRING_VALUE_TYPES is presumably defined on the parent
    # interface.WinRegistryValue class -- not visible in this module.
    if self._data_type in self._STRING_VALUE_TYPES:
      try:
        return self._data.decode('utf-16-le')

      # AttributeError is raised when self._data has no decode method.
      except AttributeError as exception:
        raise errors.WinRegistryValueError((
            'Unsupported data type: {0!s} of value: {1!s} with error: '
            '{2!s}').format(type(self._data), self._name, exception))

      except UnicodeError as exception:
        raise errors.WinRegistryValueError(
            'Unable to decode data of value: {0!s} with error: {1!s}'.format(
                self._name, exception))

    # Fixed-size integer types are only decoded when the data size matches;
    # otherwise the raw bytes are returned at the end of the method.
    elif (self._data_type == definitions.REG_DWORD and
          self._data_size == 4):
      return self._INT32_LITTLE_ENDIAN.MapByteStream(self._data)

    elif (self._data_type == definitions.REG_DWORD_BIG_ENDIAN and
          self._data_size == 4):
      return self._INT32_BIG_ENDIAN.MapByteStream(self._data)

    elif (self._data_type == definitions.REG_QWORD and
          self._data_size == 8):
      return self._INT64_LITTLE_ENDIAN.MapByteStream(self._data)

    elif self._data_type == definitions.REG_MULTI_SZ:
      try:
        utf16_string = self._data.decode('utf-16-le')
        # Drop the empty strings produced by the NUL terminators.
        return list(filter(None, utf16_string.split('\x00')))

      # AttributeError is raised when self._data has no decode method.
      except AttributeError as exception:
        raise errors.WinRegistryValueError((
            'Unsupported data type: {0!s} of value: {1!s} with error: '
            '{2!s}').format(type(self._data), self._name, exception))

      except UnicodeError as exception:
        raise errors.WinRegistryValueError(
            'Unable to read data from value: {0!s} with error: {1!s}'.format(
                self._name, exception))

    # Unknown or mismatched types: return the raw bytes unchanged.
    return self._data
| 29.618267 | 79 | 0.686645 |
ace668fe045eda6e33c49b406e7559a6901098a4 | 3,220 | py | Python | Anchors/Reposition top and bottom Anchors in Combining Accents.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | 1 | 2022-01-09T04:28:36.000Z | 2022-01-09T04:28:36.000Z | Anchors/Reposition top and bottom Anchors in Combining Accents.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | Anchors/Reposition top and bottom Anchors in Combining Accents.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Reposition top & bottom Anchors in Combining Accents
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
On all layers in selected glyphs, repositions top/bottom anchors for stacking in all top/bottom combining marks in line with the italic angle of the respective master. Keeps the anchor's y height, only moves horizontally.
"""
from Foundation import NSPoint
import math
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
# NOTE(review): this variable appears unused; the main loop re-derives the
# selection from thisFont.selectedLayers -- confirm before removing.
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def italicize( thisPoint, italicAngle=0.0, pivotalY=0.0 ):
	"""
	Returns the italicized position of an NSPoint 'thisPoint'
	for a given angle 'italicAngle' and the pivotal height 'pivotalY',
	around which the italic slanting is executed, usually half x-height.
	Usage: myPoint = italicize(myPoint,10,xHeight*0.5)
	"""
	# Horizontal shear: shift x proportionally to the vertical distance
	# from the pivotal height, by the tangent of the italic angle.
	slantFactor = math.tan( math.radians( italicAngle ) )
	shiftedX = thisPoint.x + slantFactor * ( thisPoint.y - pivotalY )
	# y is left untouched; only the horizontal position is slanted:
	return NSPoint( shiftedX, thisPoint.y )
def process( thisLayer ):
	"""
	Moves the top/bottom anchor of thisLayer horizontally into line with its
	_top/_bottom counterpart, slanted along the master's italic angle.
	Only the x coordinate changes; the anchor keeps its y height.
	"""
	for underscoreName in ("_top", "_bottom"):
		markAnchor = thisLayer.anchors[underscoreName]
		if not markAnchor:
			continue

		# look for the matching anchor without underscore (_top -> top):
		baseName = underscoreName[1:]
		baseAnchor = thisLayer.anchors[baseName]
		if not baseAnchor:
			# creating the missing anchor is deliberately not attempted
			continue

		# remember where the anchor was, so we can report a move:
		previousPosition = baseAnchor.position

		# align x with the underscore anchor, keep y; slant if italic:
		alignedPosition = NSPoint( markAnchor.position.x, baseAnchor.position.y )
		masterItalicAngle = thisLayer.associatedFontMaster().italicAngle
		if masterItalicAngle:
			baseAnchor.position = italicize( alignedPosition, masterItalicAngle, markAnchor.position.y )
		else:
			baseAnchor.position = alignedPosition

		# report only if the anchor actually moved:
		if baseAnchor.position != previousPosition:
			print("  Moved %s on layer '%s'" % ( baseName, thisLayer.name ))
# Suspend UI redraws while batch-editing for speed.
thisFont.disableUpdateInterface() # suppresses UI updates in Font View

# Collect combining marks among the selected glyphs:
# nonspacing marks, plus small-cap combining accents ("comb.sc").
selectedCombiningMarks = [ g for g in [l.parent for l in thisFont.selectedLayers] if g.category=="Mark" and (g.subCategory=="Nonspacing" or "comb.sc" in g.name) ]

if selectedCombiningMarks:
	Glyphs.clearLog()
	for thisMark in selectedCombiningMarks:
		thisMark.beginUndo() # begin undo grouping
		print("Processing %s" % thisMark.name)
		# reposition anchors on every layer (all masters) of the mark:
		for thisLayer in thisMark.layers:
			process( thisLayer )
		thisMark.endUndo() # end undo grouping
else:
	# nothing suitable selected: tell the user instead of failing silently
	Message(
		title="No Comb Marks in Selection",
		message="No combining marks selected. Select the combining marks you want to process and try again.",
		OKButton="OK, got it!"
	)

thisFont.enableUpdateInterface() # re-enables UI updates in Font View
ace66919f0006c201008a3f51c1923afe75e3e68 | 21,776 | py | Python | suncasa/tasks/task_importeovsa.py | wyq24/suncasa | e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a | [
"BSD-2-Clause"
] | 2 | 2018-02-12T09:34:23.000Z | 2019-07-16T18:25:12.000Z | suncasa/tasks/task_importeovsa.py | wulinhui1/suncasa-src | 1f94aaabaf6a3911fa532648ec6676a221553436 | [
"BSD-2-Clause"
] | 26 | 2016-11-09T17:11:45.000Z | 2021-08-20T13:41:50.000Z | suncasa/tasks/task_importeovsa.py | wulinhui1/suncasa-src | 1f94aaabaf6a3911fa532648ec6676a221553436 | [
"BSD-2-Clause"
] | 17 | 2016-10-27T18:35:46.000Z | 2021-08-03T05:33:57.000Z | import os
import numpy as np
import numpy.ma as ma
import scipy.constants as constants
import time
import aipy
from taskinit import tb, casalog
from split_cli import split_cli as split
from suncasa.eovsa import impteovsa as ipe
from astropy.time import Time
from eovsapy import util
# idbdir = os.getenv('EOVSAIDB')
#
# if not idbdir:
# print('Environmental variable for EOVSA idb path not defined')
# print('Use default path on pipeline')
# idbdir = '/data1/eovsa/fits/IDB/'
def udb_corr_external(filelist, udbcorr_path, use_exist_udbcorr=False):
    """Run EOVSA udb_corr calibration on IDB files in an external (non-CASA)
    Python interpreter, by generating a helper python script plus a tcsh
    wrapper and exchanging the file list through a pickle file.

    Args:
        filelist: list of IDB file paths to calibrate.
        udbcorr_path: working directory for the generated scripts, the pickle
            exchange file and the calibrated output files.
        use_exist_udbcorr: if True and a previous result pickle exists, reuse
            it instead of re-running the external calibration.

    Returns:
        list: calibrated file paths.

    Raises:
        ValueError: if the external udb_corr run produced no results.
    """
    import pickle
    udbcorr_script = os.path.join(udbcorr_path, 'udbcorr_ext.py')
    if os.path.exists(udbcorr_script):
        # remove any stale helper script from a previous run
        os.system('rm -rf {}'.format(udbcorr_script))
    udbcorr_file = os.path.join(udbcorr_path, 'udbcorr_tmp.pickle')
    if use_exist_udbcorr and os.path.exists(udbcorr_file):
        # reuse the results of a previous external run
        with open(udbcorr_file, 'rb') as sf:
            filelist = pickle.load(sf)
    else:
        if os.path.exists(udbcorr_file):
            os.system('rm -rf {}'.format(udbcorr_file))
        # hand the input file list to the external process via pickle
        with open(udbcorr_file, 'wb') as sf:
            pickle.dump(filelist, sf)

        # generate the helper python script: it strips CASA entries from
        # sys.path, calls pipeline_cal.udb_corr on every file (errors are
        # skipped best-effort), and writes the results back to the pickle.
        fi = open(udbcorr_script, 'wb')
        fi.write('import pickle \n')
        fi.write('import pipeline_cal as pc \n')
        fi.write('import sys \n')
        fi.write('syspath = sys.path \n')
        fi.write("sys.path = [l for l in syspath if 'casa' not in l] \n")
        fi.write("with open('{}', 'rb') as sf: \n".format(udbcorr_file))
        fi.write('    filelist = pickle.load(sf) \n')
        fi.write('filelist_tmp = [] \n')
        fi.write('for ll in filelist: \n')
        fi.write("    try: \n")
        fi.write("        filelist_tmp.append(pc.udb_corr(ll, outpath='{}/', calibrate=True, desat=True)) \n".format(
            udbcorr_path))
        fi.write("    except: \n")
        fi.write("        pass \n")
        fi.write('filelist = filelist_tmp \n')
        fi.write("with open('{}', 'wb') as sf: \n".format(udbcorr_file))
        fi.write('    pickle.dump(filelist,sf) \n')
        fi.close()

        # generate the tcsh wrapper that runs the helper script with the
        # system (non-CASA) anaconda python
        udbcorr_shellscript = os.path.join(udbcorr_path, 'udbcorr_ext.csh')
        if os.path.exists(udbcorr_shellscript):
            os.system('rm -rf {}'.format(udbcorr_shellscript))
        fi = open(udbcorr_shellscript, 'wb')
        fi.write('#! /bin/tcsh -f \n')
        fi.write(' \n')
        # fi.write('setenv PYTHONPATH "/home/user/test_svn/python:/common/python/current:/common/python" \n')
        fi.write('source /home/user/.cshrc \n')
        fi.write('/common/anaconda2/bin/python {} \n'.format(udbcorr_script))
        fi.close()

        os.system('/bin/tcsh {}'.format(udbcorr_shellscript))

        # read back the calibrated file list written by the external process
        with open(udbcorr_file, 'rb') as sf:
            filelist = pickle.load(sf)
    if filelist == []:
        raise ValueError('udb_corr failed to return any results. Please check your calibration.')
    return filelist
def trange2filelist(trange=[], verbose=False):
    '''This finds all solar IDB files within a timerange;
    Required inputs:
    trange - can be 1) a single string or Time() object in UTC: use the entire day, e.g., '2017-08-01' or Time('2017-08-01')
    if just a date, find all scans withing the same date in local time.
    if a complete time stamp, find the local date first (which may be different from that provided,
    and return all scans within that day
    2) a range of Time(), e.g., Time(['2017-08-01 00:00','2017-08-01 23:00'])
    3) None -- use current date Time.now()
    '''
    # NOTE(review): the mutable default trange=[] is shared across calls; it
    # is only tested for truthiness here, but trange=None would be safer --
    # confirm before changing the signature.
    import dump_tsys as dtsys
    if trange:
        if type(trange) == list or type(trange) == str:
            try:
                trange = Time(trange)
            except:
                print('trange format not recognised. Abort....')
                return None
    else:
        print('Please give a time range. Abort....')
        return None
    # if type(trange) == Time:
    try:
        # if single Time object, the following line would report an error
        nt = len(trange)
        if len(trange) > 1:
            # more than one value
            trange = Time([trange[0], trange[-1]])
        else:
            # single value in a list
            trange = Time(np.array([-1.0, 1.0]) * 5 / 24. / 60. + trange[0].mjd, format='mjd')
    except:
        # scalar Time object: expand to a +/- 5 minute window around it
        trange = Time(np.array([-1.0, 1.0]) * 5 / 24. / 60. + trange.mjd, format='mjd')

    t1 = trange[0].datetime
    t2 = trange[1].datetime
    daydelta = (t2.date() - t1.date()).days
    if t1.date() != t2.date():
        # End day is different than start day, so read and concatenate two fdb files
        info = dtsys.rd_fdb(trange[0])
        for ll in range(daydelta):
            info2 = dtsys.rd_fdb(Time(trange[0].mjd + ll + 1, format='mjd'))
            if info2:
                for key in info.keys():
                    info.update({key: np.append(info[key], info2[key])})
    else:
        # Both start and end times are on the same day
        info = dtsys.rd_fdb(trange[0])

    # select solar NormalObserving scans whose start timestamp (ST_TS, in
    # LabVIEW epoch seconds .lv) falls inside the requested window
    sidx = np.where(
        np.logical_and(info['SOURCEID'] == 'Sun', info['PROJECTID'] == 'NormalObserving') & np.logical_and(
            info['ST_TS'].astype(np.float) >= trange[0].lv,
            info['ST_TS'].astype(np.float) <= trange[
                1].lv))
    filelist = info['FILE'][sidx]
    if verbose:
        print(
            '{} file found in the time range from {} to {}: '.format(len(filelist), t1.strftime('%Y-%m-%d %H:%M:%S UT'),
                                                                     t2.strftime('%Y-%m-%d %H:%M:%S UT')))
    # prepend the IDB data directory for the observing date
    idbdir = util.get_idbdir(t1.strftime('%Y-%m-%d'))
    inpath = '{}/{}/'.format(idbdir, trange[0].datetime.strftime("%Y%m%d"))
    filelist = [inpath + ll for ll in filelist]
    return filelist
def importeovsa_iter(filelist, timebin, width, visprefix, nocreatms, modelms, doscaling, keep_nsclms, fileidx):
    """Convert one EOVSA IDB file (miriad format) into a CASA measurement set.

    Designed to be mapped over ``fileidx`` (serially or via a
    ``multiprocessing.Pool``): every other argument is shared between calls
    and only ``fileidx`` selects which file of ``filelist`` to process.

    :param filelist: list of IDB file paths; ``filelist[fileidx]`` is read.
    :param timebin: time-averaging interval handed to CASA ``split`` ('0s' = none).
    :param width: channel-averaging width handed to CASA ``split`` (1 = none).
    :param visprefix: output directory/prefix for the measurement set(s).
    :param nocreatms: if False, build a fresh template MS from the IDB file;
        if True, copy the supplied ``modelms`` instead.
    :param modelms: path of the template MS used when ``nocreatms`` is True.
    :param doscaling: if True, also write an MS whose cross-correlations are
        normalized by the geometric mean of the two auto-correlations.
    :param keep_nsclms: if True (with ``doscaling``), keep the non-scaled MS too.
    :param fileidx: index into ``filelist`` of the file to convert.
    :return: ``[True, msfile, durtim]`` or, when ``doscaling`` is set,
        ``[True, msfile, msfile_scl, durtim]``; ``durtim`` is the scan
        duration in minutes.
    """
    from taskinit import tb, casalog
    filename = filelist[fileidx]
    uv = aipy.miriad.UV(filename)
    # try:
    msname0 = list(filename.split('/')[-1])
    msname = visprefix + ''.join(msname0) + '.ms'
    # try:
    # uv.select('antennae', 0, 1, include=True)
    # uv.select('polarization', -5, -5, include=True)
    if 'antlist' in uv.vartable:
        ants = uv['antlist'].replace('\x00', '')
        # list(...) is required on Python 3, where map() returns an iterator;
        # antlist.index(...) is called in the read loop below.
        antlist = list(map(int, ants.split()))
    else:
        antlist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
    good_idx = np.where(uv['sfreq'] > 0)[0]
    nf = len(good_idx)
    npol = uv['npol']
    nants = uv['nants']
    source_id = uv['source'].replace('\x00', '')
    sfreq = uv['sfreq'][good_idx]
    sdf = uv['sdf'][good_idx]
    ra, dec = uv['ra'], uv['dec']
    # Integer division: nbl/npairs are used as array dimensions below, and a
    # float shape raises TypeError on Python 3.
    nbl = nants * (nants - 1) // 2
    bl2ord = ipe.bl_list2(nants)
    npairs = nbl + nants
    timesall = []
    uv.rewind()
    for preamble, data in uv.all():
        uvw, t, (i, j) = preamble
        timesall.append(t)
    timesjd = np.unique(timesall)
    # except:
    # pass
    uv.select('clear', -1, -1, include=True)
    times = ipe.jd2mjds(np.asarray(timesjd))
    inttime = np.median((times - np.roll(times, 1))[1:]) / 60  ## time in minutes
    inttimed = inttime / (24 * 60)  ## time in days
    # np.int / np.float aliases were removed in NumPy >= 1.24; use builtins.
    time_steps = np.round((times[-1] - times[0]) / inttime / 60).astype(int) + 1
    # time_steps = len(timesall) / (npairs * npol)
    if len(times) != time_steps:
        ### This is to solve the timestamp glitch in idb files.
        ### The timestamps are supposed to be evenly spaced.
        ### However, some idb files may miss a few timestamps in the evenly-spaced time grid.
        ### This step maps the data onto the evenly-spaced time grid.
        timesnew = np.linspace(times[0], times[-1], time_steps)
        timesnew[np.hstack([[0], np.cumsum(np.round(np.diff(times) / 60 / inttime))]).astype(int)] = times
        times = timesnew
    durtim = int(np.round((times[-1] - times[0]) / 60 + inttime))  ## time in minutes
    time0 = time.time()
    flag = np.ones((npol, nf, time_steps, npairs), dtype=bool)
    out = np.zeros((npol, nf, time_steps, npairs), dtype=np.complex64)  # Cross-correlations
    uvwarray = np.zeros((3, time_steps, npairs), dtype=float)
    chan_band = ipe.get_band(sfreq=sfreq, sdf=sdf, date=Time(uv['time'], format='jd'))
    nband = len(chan_band)
    uv.rewind()
    l = -1
    for preamble, data in uv.all():
        uvw, t, (i0, j0) = preamble
        i = antlist.index(i0 + 1)
        j = antlist.index(j0 + 1)
        if i > j:
            # Reverse order of indices
            j = antlist.index(i0 + 1)
            i = antlist.index(j0 + 1)
        # Assumes uv['pol'] is one of -5, -6, -7, -8
        k = -5 - uv['pol']
        l += 1
        mask0 = data.mask
        data = ma.masked_array(ma.masked_invalid(data), fill_value=0.0)
        try:
            tidx = np.where(np.abs(timesjd - t) < inttimed)[0][0]
        except:
            # Fallback: infer the time index from the record counter.
            # Floor division is required here — tidx indexes an array axis.
            tidx = l // (npairs * npol)
        out[k, :, tidx, bl2ord[i0, j0]] = data.data
        flag[k, :, tidx, bl2ord[i0, j0]] = np.logical_or(data.mask, mask0)
        # if i != j:
        if k == 3:
            uvwarray[:, tidx, bl2ord[i0, j0]] = -uvw * constants.speed_of_light / 1e9
    nrows = time_steps * npairs
    if doscaling:
        # Normalize each cross-correlation by sqrt(auto_i * auto_j).
        out2 = out.copy()
        for i0 in antlist:
            for j0 in antlist:
                if i0 < j0:
                    i, j = i0 - 1, j0 - 1
                    out2[:, :, :, bl2ord[i, j]] = out[:, :, :, bl2ord[i, j]] / np.sqrt(
                        np.abs(out[:, :, :, bl2ord[i, i]]) * np.abs(out[:, :, :, bl2ord[j, j]]))
        out2 = out2.reshape(npol, nf, nrows)
        out2[np.isnan(out2)] = 0
        out2[np.isinf(out2)] = 0
        # out2 = ma.masked_array(ma.masked_invalid(out2), fill_value=0.0)
    out = out.reshape(npol, nf, nrows) * 1e4
    flag = flag.reshape(npol, nf, nrows)
    uvwarray = uvwarray.reshape(3, nrows)
    uvwarray = np.tile(uvwarray, (1, nband))
    sigma = np.ones((4, nrows), dtype=float) + 1
    sigma = np.tile(sigma, (1, nband))
    casalog.post('IDB File {0} is readed in --- {1:10.2f} seconds ---'.format(filename, (time.time() - time0)))

    # Either build a fresh template MS from this IDB file or copy the one
    # supplied by the caller, then fill its tables with the data read above.
    if not nocreatms:
        modelms = ipe.creatms(filename, visprefix)
        os.system('mv {} {}'.format(modelms, msname))
    else:
        casalog.post('----------------------------------------')
        casalog.post('copying standard MS to {0}'.format(msname, (time.time() - time0)))
        casalog.post('----------------------------------------')
        os.system("rm -fr {}".format(msname))
        os.system("cp -r {} {}".format(modelms, msname))
        casalog.post('Standard MS is copied to {0} in --- {1:10.2f} seconds ---'.format(msname, (time.time() - time0)))
    tb.open(msname, nomodify=False)
    casalog.post('----------------------------------------')
    casalog.post("Updating the main table of {}".format(msname))
    casalog.post('----------------------------------------')
    for l, cband in enumerate(chan_band):
        time1 = time.time()
        for row in range(nrows):
            if not doscaling or keep_nsclms:
                tb.putcell('DATA', (row + l * nrows), out[:, cband['cidx'][0]:cband['cidx'][-1] + 1, row])
            tb.putcell('FLAG', (row + l * nrows), flag[:, cband['cidx'][0]:cband['cidx'][-1] + 1, row])
        casalog.post('---spw {0:02d} is updated in --- {1:10.2f} seconds ---'.format((l + 1), time.time() - time1))
    tb.putcol('UVW', uvwarray)
    tb.putcol('SIGMA', sigma)
    tb.putcol('WEIGHT', 1.0 / sigma ** 2)
    timearr = times
    timearr = timearr.reshape(1, time_steps, 1)
    timearr = np.tile(timearr, (nband, 1, npairs))
    timearr = timearr.reshape(nband * npairs * time_steps)
    tb.putcol('TIME', timearr)
    tb.putcol('TIME_CENTROID', timearr)
    scan_id = tb.getcol('SCAN_NUMBER')
    scan_id *= 0
    tb.putcol('SCAN_NUMBER', scan_id)
    colnames = tb.colnames()
    cols2rm = ["MODEL_DATA", "CORRECTED_DATA"]
    for col in cols2rm:
        if col in colnames:
            tb.removecols(col)
    tb.close()
    casalog.post('----------------------------------------')
    casalog.post("Updating the OBSERVATION table of {}".format(msname))
    casalog.post('----------------------------------------')
    tb.open(msname + '/OBSERVATION', nomodify=False)
    tb.putcol('TIME_RANGE', np.asarray([times[0] - 0.5 * inttime, times[-1] + 0.5 * inttime]).reshape(2, 1))
    tb.putcol('OBSERVER', ['EOVSA team'])
    tb.close()
    casalog.post('----------------------------------------')
    casalog.post("Updating the POINTING table of {}".format(msname))
    casalog.post('----------------------------------------')
    tb.open(msname + '/POINTING', nomodify=False)
    timearr = times.reshape(1, time_steps, 1)
    timearr = np.tile(timearr, (nband, 1, nants))
    timearr = timearr.reshape(nband * time_steps * nants)
    tb.putcol('TIME', timearr)
    tb.putcol('TIME_ORIGIN', timearr)  # - 0.5 * delta_time)
    direction = tb.getcol('DIRECTION')
    direction[0, 0, :] = ra
    direction[1, 0, :] = dec
    tb.putcol('DIRECTION', direction)
    target = tb.getcol('TARGET')
    target[0, 0, :] = ra
    target[1, 0, :] = dec
    tb.putcol('TARGET', target)
    tb.close()
    casalog.post('----------------------------------------')
    casalog.post("Updating the SOURCE table of {}".format(msname))
    casalog.post('----------------------------------------')
    tb.open(msname + '/SOURCE', nomodify=False)
    radec = tb.getcol('DIRECTION')
    radec[0], radec[1] = ra, dec
    tb.putcol('DIRECTION', radec)
    name = np.array([source_id], dtype='|S{0}'.format(len(source_id) + 1))
    tb.putcol('NAME', name)
    tb.close()
    casalog.post('----------------------------------------')
    casalog.post("Updating the DATA_DESCRIPTION table of {}".format(msname))
    casalog.post('----------------------------------------')
    tb.open(msname + '/DATA_DESCRIPTION/', nomodify=False)
    pol_id = tb.getcol('POLARIZATION_ID')
    pol_id *= 0
    tb.putcol('POLARIZATION_ID', pol_id)
    # spw_id = tb.getcol('SPECTRAL_WINDOW_ID')
    # spw_id *= 0
    # tb.putcol('SPECTRAL_WINDOW_ID', spw_id)
    tb.close()
    # casalog.post('----------------------------------------')
    # casalog.post("Updating the POLARIZATION table of {}".format(msname))
    # casalog.post('----------------------------------------')
    # tb.open(msname + '/POLARIZATION/', nomodify=False)
    # tb.removerows(rownrs=np.arange(1, nband, dtype=int))
    # tb.close()
    casalog.post('----------------------------------------')
    casalog.post("Updating the FIELD table of {}".format(msname))
    casalog.post('----------------------------------------')
    tb.open(msname + '/FIELD/', nomodify=False)
    delay_dir = tb.getcol('DELAY_DIR')
    delay_dir[0], delay_dir[1] = ra, dec
    tb.putcol('DELAY_DIR', delay_dir)
    phase_dir = tb.getcol('PHASE_DIR')
    phase_dir[0], phase_dir[1] = ra, dec
    tb.putcol('PHASE_DIR', phase_dir)
    reference_dir = tb.getcol('REFERENCE_DIR')
    reference_dir[0], reference_dir[1] = ra, dec
    tb.putcol('REFERENCE_DIR', reference_dir)
    name = np.array([source_id], dtype='|S{0}'.format(len(source_id) + 1))
    tb.putcol('NAME', name)
    tb.close()
    # FIELD: DELAY_DIR, PHASE_DIR, REFERENCE_DIR, NAME
    # del out, flag, uvwarray, uv, timearr, sigma
    # gc.collect() #

    # Optionally write the scaled-data MS (either alongside the non-scaled
    # MS or replacing it, depending on keep_nsclms).
    if doscaling:
        if keep_nsclms:
            msname_scl = visprefix + ''.join(msname0) + '_scl.ms'
            os.system('cp -r {} {}'.format(msname, msname_scl))
        else:
            msname_scl = msname
        tb.open(msname_scl, nomodify=False)
        casalog.post('----------------------------------------')
        casalog.post("Updating the main table of {}".format(msname_scl))
        casalog.post('----------------------------------------')
        for l, cband in enumerate(chan_band):
            time1 = time.time()
            for row in range(nrows):
                tb.putcell('DATA', (row + l * nrows), out2[:, cband['cidx'][0]:cband['cidx'][-1] + 1, row])
            casalog.post('---spw {0:02d} is updated in --- {1:10.2f} seconds ---'.format((l + 1), time.time() - time1))
        tb.close()

    # Optionally run CASA split for time/channel averaging, then clean up
    # the un-averaged measurement set(s).
    if not (timebin == '0s' and width == 1):
        msfile = msname + '.split'
        if doscaling:
            split(vis=msname_scl, outputvis=msname_scl + '.split', datacolumn='data', timebin=timebin, width=width,
                  keepflags=False)
            os.system('rm -rf {}'.format(msname_scl))
            msfile_scl = msname_scl + '.split'
        if not (doscaling and not keep_nsclms):
            split(vis=msname, outputvis=msname + '.split', datacolumn='data', timebin=timebin, width=width,
                  keepflags=False)
            os.system('rm -rf {}'.format(msname))
    else:
        msfile = msname
        if doscaling:
            msfile_scl = msname_scl
    casalog.post("finished in --- {:.1f} seconds ---".format(time.time() - time0))
    if doscaling:
        return [True, msfile, msfile_scl, durtim]
    else:
        return [True, msfile, durtim]
def importeovsa(idbfiles=None, ncpu=None, timebin=None, width=None, visprefix=None, udb_corr=True, nocreatms=None,
                doconcat=None, modelms=None,
                doscaling=False, keep_nsclms=False, use_exist_udbcorr=False):
    """Import EOVSA IDB file(s) into CASA measurement set(s).

    :param idbfiles: either an astropy ``Time`` (range) used to look up the
        IDB files via ``trange2filelist``, or a path / list of paths.
    :param ncpu: number of worker processes. ``None``/0 falls back to 1
        (previously leaving ``ncpu=None`` skipped both the serial and the
        parallel branch, so ``res`` was never defined and the function
        crashed with a NameError later on).
    :param timebin: time averaging passed to CASA ``split`` (default '0s').
    :param width: channel averaging passed to CASA ``split`` (default 1).
    :param visprefix: output directory for the measurement sets (default './').
    :param udb_corr: if True, run the external UDB correction first in a
        temporary directory that is removed afterwards.
    :param nocreatms, modelms: control creation/reuse of the template MS.
    :param doconcat: if True, concatenate the per-file MSs and return the
        concatenated MS path.
    :param doscaling, keep_nsclms, use_exist_udbcorr: see ``importeovsa_iter``.
    :return: concatenated MS path when ``doconcat``; otherwise the list of
        per-file MS paths; ``False`` on input errors.
    """
    casalog.origin('importeovsa')
    if type(idbfiles) == Time:
        filelist = trange2filelist(idbfiles)
    else:
        # If input type is not Time, assume that it is the list of files to read
        filelist = idbfiles
    if type(filelist) == str:
        filelist = [filelist]

    # Drop entries that do not exist on disk.
    filelist_tmp = []
    for ll in filelist:
        if not os.path.exists(ll):
            casalog.post("Warning: {} not exist.".format(ll))
        else:
            filelist_tmp.append(ll)
    filelist = filelist_tmp
    if not filelist:
        casalog.post("No file in idbfiles list exists. Abort.")
        return False
    # Strip trailing path separators so basename handling below is consistent.
    for idx, ll in enumerate(filelist):
        if ll[-1] == '/':
            filelist[idx] = ll[:-1]

    if not visprefix:
        visprefix = './'
    else:
        if os.path.exists(visprefix):
            pass
        else:
            casalog.post("The output path {} does not exist. Abort.".format(visprefix))
            return False
    if not timebin:
        timebin = '0s'
    if not width:
        width = 1
    if not ncpu:
        # BUG FIX: with the default ncpu=None neither branch below executed
        # and `res` stayed undefined; default to serial processing instead.
        ncpu = 1

    if udb_corr:
        udbcorr_path = visprefix + '/tmp_UDBcorr/'
        if not os.path.exists(udbcorr_path):
            os.makedirs(udbcorr_path)
        filelist = udb_corr_external(filelist, udbcorr_path, use_exist_udbcorr)

    if not modelms:
        if nocreatms:
            filename = filelist[0]
            modelms = ipe.creatms(filename, visprefix)
    else:
        if not os.path.exists(modelms):
            if nocreatms:
                filename = filelist[0]
                modelms = ipe.creatms(filename, visprefix)

    iterable = range(len(filelist))
    t0 = time.time()
    casalog.post('Perform importeovsa in parallel with {} CPUs...'.format(ncpu))
    if ncpu == 1:
        res = []
        for fidx, ll in enumerate(filelist):
            res.append(
                importeovsa_iter(filelist, timebin, width, visprefix, nocreatms, modelms, doscaling, keep_nsclms, fidx))
    else:
        import multiprocessing as mprocs
        from functools import partial
        imppart = partial(importeovsa_iter, filelist, timebin, width, visprefix, nocreatms, modelms, doscaling,
                          keep_nsclms)
        pool = mprocs.Pool(ncpu)
        res = pool.map(imppart, iterable)
        pool.close()
        pool.join()

    t1 = time.time()
    timelapse = t1 - t0
    print('It took %f secs to complete' % timelapse)

    # Collect the per-file results into a summary dict; importeovsa_iter
    # returns a 4-element list when doscaling is set, 3 elements otherwise.
    succeeded = []
    msfile = []
    durtim = []
    if doscaling:
        msfile_scl = []
        for r in res:
            succeeded.append(r[0])
            msfile.append(r[1])
            msfile_scl.append(r[2])
            durtim.append(r[3])
        results = {'succeeded': succeeded, 'msfile': msfile, 'msfile_scl': msfile_scl, 'durtim': durtim}
    else:
        for r in res:
            succeeded.append(r[0])
            msfile.append(r[1])
            durtim.append(r[2])
        results = {'succeeded': succeeded, 'msfile': msfile, 'durtim': durtim}

    if udb_corr:
        os.system('rm -rf {}'.format(udbcorr_path))

    if doconcat:
        from suncasa.tasks import concateovsa_cli as ce
        msname = os.path.basename(filelist[0])
        durtim = int(np.array(results['durtim']).sum())
        if doscaling:
            msfiles = list(np.array(results['msfile_scl'])[np.where(np.array(results['succeeded']) == True)])
            if keep_nsclms:
                concatvis = visprefix + msname + '-{:d}m{}.ms'.format(durtim, '_scl')
            else:
                concatvis = visprefix + msname + '-{:d}m{}.ms'.format(durtim, '')
        else:
            msfiles = list(np.array(results['msfile'])[np.where(np.array(results['succeeded']) == True)])
            concatvis = visprefix + msname + '-{:d}m{}.ms'.format(durtim, '')
        ce.concateovsa(msfiles, concatvis, datacolumn='data', keep_orig_ms=True, cols2rm="model,corrected")
        return concatvis
    else:
        msfiles = list(np.array(results['msfile'])[np.where(np.array(results['succeeded']) == True)])
        return [str(m) for m in msfiles]
| 40.103131 | 127 | 0.557265 |
ace669899f7ae7606bc53d1c400f2cda90e55836 | 225 | py | Python | inspect_class.py | YannChemin/LINGRA_RS | 769d649125d55f246a3f85e84aa65c9dbb923da1 | [
"Unlicense"
] | null | null | null | inspect_class.py | YannChemin/LINGRA_RS | 769d649125d55f246a3f85e84aa65c9dbb923da1 | [
"Unlicense"
] | null | null | null | inspect_class.py | YannChemin/LINGRA_RS | 769d649125d55f246a3f85e84aa65c9dbb923da1 | [
"Unlicense"
] | null | null | null | import sys
class Foo(object):
    """Minimal example class; exists so the class discovery below finds something."""
def print_classes():
    """Print the name of every class defined in the current module."""
    module = sys.modules[__name__]
    for name in dir(module):
        attr = getattr(module, name)
        if isinstance(attr, type):
            print(name)
| 20.454545 | 60 | 0.657778 |
ace66b46937cac69bb1734acc993713c51e254c0 | 8,206 | py | Python | NQueens.py | pratiknc/NQueensHillClimbing | b70f5ef40270f5351d2c33d29a8910519b38e9ae | [
"BSD-2-Clause"
] | null | null | null | NQueens.py | pratiknc/NQueensHillClimbing | b70f5ef40270f5351d2c33d29a8910519b38e9ae | [
"BSD-2-Clause"
] | null | null | null | NQueens.py | pratiknc/NQueensHillClimbing | b70f5ef40270f5351d2c33d29a8910519b38e9ae | [
"BSD-2-Clause"
] | null | null | null | import random
import time
class FlatLocalMinimaException(Exception):
    """Raised when hill climbing gets stuck on a flat local minimum.

    Carries the number of moves performed before the search stopped, so
    callers can report progress alongside the failure message.
    """

    def __init__(self, message, moves):
        # Record the move count for the caller, then delegate the message
        # handling to the standard Exception machinery.
        self.moves = moves
        super().__init__(message)
class NQueens:
    """N-Queens solver using hill climbing with optional sideways moves.

    The board is stored as a flat list of n*n cells (0 = empty, 1 = queen),
    one queen per row. The heuristic minimized is the total number of
    attacks counted downward/rightward from each queen (see
    calculate_attack_vectors), so 0 means a solution.
    """
    def random_state(self, n):
        """
        Generate a random board configuration with exactly one queen in each row.
        :param n: number of queens to place (board is n x n, flattened).
        :return: InitialBoard: flat list of n*n cells with the queens marked as 1.
                 QueenPos: flat indices at which the queens are placed (one per row).
        """
        InitialBoard = [0 for i in range(0,n*n)]
        QueenPos = []
        for i in range (0, n*n , n):
            QueenPos.append(random.randint(i, i+n-1))
        for i in QueenPos:
            InitialBoard[i] = 1
        return InitialBoard, QueenPos

    def calculate_attack_vectors(self,fromPos,n):
        """
        Calculate the cells a queen placed at ``fromPos`` can attack.

        Only cells at or below the queen's row are returned: the column
        straight down plus the two downward diagonals. The first element of
        the result is ``fromPos`` itself (callers pop it to recover the row).
        :param fromPos: flat board index of the queen.
        :param n: board size / number of queens.
        :return: list of flat indices attackable from ``fromPos``.
        """
        attack = [i for i in range(fromPos, n*n, n)]
        for i in range (0,n):
            LeftDiagonal = fromPos+((n-1)*i)
            RightDiagonal = fromPos+((n+1)*i)
            # A diagonal cell is valid only while its column keeps moving the
            # expected way (left diagonal: column decreases; right diagonal:
            # column increases) and the index stays on the board.
            if int ( LeftDiagonal % n ) >= 0 and int ( LeftDiagonal % n ) < ( fromPos % n ) : # and RightDiagonal< n*n
                if LeftDiagonal < n*n :#and LeftDiagonal != pos:
                    attack.append(LeftDiagonal)
            if int ( RightDiagonal % n ) <= n-1 and int ( RightDiagonal % n ) > (fromPos % n) :
                if RightDiagonal< n*n :
                    attack.append(RightDiagonal)
        return attack

    def printBoard(self,board):
        """
        Print the given flat board as an n x n matrix, one row per line.
        :param board: flat list of n*n cells to print.
        """
        n = self.n
        i = 0
        j = n
        while j <= n*n:
            print(board[i : j])
            i = j
            j += n
        print()

    def calculateBoard(self,QueenPos,Board,UpdQueenPos = None):
        """
        Compute the heuristic cost (number of attacks) for a queen configuration.

        Starts from the cached ``self.QueenAttacks`` and recomputes the attack
        count for each queen in ``QueenPos``; the total is written into
        ``self.Heuristic_board`` at the positions listed in ``UpdQueenPos``.
        :param QueenPos: current queen positions.
        :param Board: board on which to count attacks.
        :param UpdQueenPos: optional; only these positions of the heuristic
                            board are updated (defaults to all of QueenPos).
        :return: QueenAttacks: number of attacks by each queen on queens below it.
        """
        if UpdQueenPos == None:
            UpdQueenPos = QueenPos
        queenAttackVectors = [self.calculate_attack_vectors(queen,self.n) for queen in QueenPos]
        QueenAttacks = self.QueenAttacks[:]
        for attack_vector in queenAttackVectors:
            queen_num = attack_vector.pop(0)//self.n
            attack_val = 0
            for pos in attack_vector:
                attack_val = Board[pos] + attack_val
            QueenAttacks[queen_num] = attack_val
        for pos in UpdQueenPos:
            self.Heuristic_board[pos] = sum(QueenAttacks)
        return QueenAttacks

    def moveQueen(self):
        """
        Fill the heuristic board by trying every alternative cell for every queen.

        Each candidate move is evaluated on a scratch copy of the board
        (``self.Calculation_board``); the real board is not changed here.
        :param: N/A
        """
        for queen in range(0,self.n):
            for qpos in range(self.n*queen,self.n*(queen+1)):
                self.Calculation_board = self.InitialBoard.copy()
                QueenPos = self.QueenPos.copy()
                if qpos != self.QueenPos[queen]:
                    QueenPos[queen] = qpos
                    self.Calculation_board[self.QueenPos[queen]], self.Calculation_board[qpos] = self.Calculation_board[qpos], self.Calculation_board[self.QueenPos[queen]]
                    self.calculateBoard(QueenPos[0:queen+1], self.Calculation_board,QueenPos[queen:queen+1])
                    self.Calculation_board = self.InitialBoard.copy()

    def getMovePos(self,min_heuristic):
        """
        Pick a target cell among those with the given (minimum) heuristic cost.

        Cells currently occupied by queens are excluded; ties are broken at
        random. Raises FlatLocalMinimaException when no candidate remains.
        :param min_heuristic: heuristic value the chosen cell must have.
        :return: flat index of the cell to move a queen to.
        """
        possible_moves = [i for i, x in enumerate(self.Heuristic_board) if x == min_heuristic]
        possible_moves = list((set(possible_moves) - set(self.QueenPos)) ) #- set([self.prevpos])
        random.seed(time.time())
        try:
            return random.choice(possible_moves)
        except:
            raise FlatLocalMinimaException('Flat Local min reached', self.getNumberOfMoves())

    def HillClimbMove(self,sidemove = 0):
        """
        Perform one hill-climbing move based on the calculated heuristics.

        Moves downhill when a strictly better heuristic exists; otherwise
        spends one of the remaining allowed sideways moves on an equal-cost
        cell, and raises FlatLocalMinimaException when neither is possible.
        :param sidemove: number of sideways moves allowed (budget is tracked
                         on ``self.allowed_size_move``).
        :return: (chosen heuristic cost, position the queen was moved to).
        """
        min_heuristic = min(self.Heuristic_board)
        if min_heuristic < self.prevMin:
            movepos = self.getMovePos(min_heuristic)
            queen = movepos//self.n
            self.InitialBoard[self.QueenPos[queen]], self.InitialBoard[movepos] = self.InitialBoard[movepos], self.InitialBoard[self.QueenPos[queen]]
            self.QueenPos[queen] = movepos
            self.Calculation_board = self.InitialBoard.copy()
            return min_heuristic, movepos
        elif self.allowed_size_move != 0 and min_heuristic == self.prevMin:
            movepos = self.getMovePos(min_heuristic)
            queen = movepos//self.n
            self.InitialBoard[self.QueenPos[queen]], self.InitialBoard[movepos] = self.InitialBoard[movepos], self.InitialBoard[self.QueenPos[queen]]
            self.QueenPos[queen] = movepos
            self.Calculation_board = self.InitialBoard.copy()
            self.allowed_size_move -= 1
            return min_heuristic, movepos
        else:
            raise FlatLocalMinimaException('Flat Local min reached', self.getNumberOfMoves())

    def getNumberOfMoves(self):
        """
        Return the number of moves made so far (reported on success or failure).
        :param: N/A.
        :return: Number of moves performed by the hill-climbing algorithm.
        """
        return self.moves

    def execHillClimb(self):
        """
        Run the hill-climbing loop until the board has zero attacks
        (solution found) or FlatLocalMinimaException propagates out.
        :param: N/A.
        """
        print("\nQueens Moved as below\n")
        while True:
            self.QueenAttacks = self.calculateBoard(self.QueenPos,self.InitialBoard)
            if sum(self.QueenAttacks) == 0:
                break
            self.moveQueen()
            self.prevMin, self.prevpos = self.HillClimbMove(self.allowed_size_move)
            self.moves += 1
        self.printBoard(self.InitialBoard)

    def __init__(self, n, allowedsidemove = 0):
        """
        Initialize the N-Queens problem with a random initial board and the
        scratch structures used for heuristic calculation.
        :param n: Number of queens.
        :param allowedsidemove: Number of sideways moves allowed.
        """
        self.n = n
        self.allowed_size_move = allowedsidemove
        self.side_move = True if allowedsidemove != 0 else False
        self.moves = 0
        self.InitialBoard, self.QueenPos = self.random_state(n)
        # Heuristic cost per cell; filled by moveQueen()/calculateBoard().
        self.Heuristic_board = [0 for i in range(0, n*n)]
        # Cached per-queen attack counts; start pessimistic (n each).
        self.QueenAttacks = [n for i in range(0, n)]
        self.Calculation_board = self.InitialBoard.copy()
        # Worst-case previous heuristic so the first move is always downhill.
        self.prevMin = n*n
        self.prevpos = None
        print("\nInitial Board")
        self.printBoard(self.InitialBoard)
| 42.082051 | 172 | 0.598586 |
ace66b5b06a51291d2cf229fdc446d070054836a | 3,125 | py | Python | pandas/tests/indexes/test_frozen.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:18.000Z | 2022-03-01T03:48:47.000Z | pandas/tests/indexes/test_frozen.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 1 | 2022-03-18T01:26:58.000Z | 2022-03-18T01:26:58.000Z | pandas/tests/indexes/test_frozen.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:19.000Z | 2022-03-01T03:49:21.000Z | import re
import pytest
from pandas.core.indexes.frozen import FrozenList
@pytest.fixture
def lst():
    """Plain list used as the source data for the FrozenList tests."""
    return [1, 2, 3, 4, 5]
@pytest.fixture
def container(lst):
    """FrozenList built from the ``lst`` fixture."""
    return FrozenList(lst)
@pytest.fixture
def unicode_container():
    """FrozenList holding non-ASCII (Hebrew) strings for repr/str/bytes tests."""
    return FrozenList(["\u05d0", "\u05d1", "c"])
class TestFrozenList:
    """Tests for pandas' immutable FrozenList container."""

    def check_mutable_error(self, func, *args, **kwargs):
        # Invoke the mutating callable and assert it raises FrozenList's
        # immutability TypeError.
        # BUG FIX: the previous version compiled a regex and *called the
        # pattern object* (patterns are not callable), matching the TypeError
        # produced by that bogus call — so the mutating function under test
        # was never actually executed. Now the function itself is called and
        # must raise the "does not support mutable operations" TypeError.
        msg = "does not support mutable operations"
        with pytest.raises(TypeError, match=msg):
            func(*args, **kwargs)

    def test_no_mutable_funcs(self, container):
        # Every mutating operation on a FrozenList must be disabled.
        def setitem():
            container[0] = 5

        self.check_mutable_error(setitem)

        def setslice():
            container[1:2] = 3

        self.check_mutable_error(setslice)

        def delitem():
            del container[0]

        self.check_mutable_error(delitem)

        def delslice():
            del container[0:3]

        self.check_mutable_error(delslice)

        mutable_methods = ("extend", "pop", "remove", "insert")
        for meth in mutable_methods:
            self.check_mutable_error(getattr(container, meth))

    def test_slicing_maintains_type(self, container, lst):
        result = container[1:2]
        expected = lst[1:2]
        self.check_result(result, expected)

    def check_result(self, result, expected):
        # Derived values (slices, set-ops) must stay FrozenList, not list.
        assert isinstance(result, FrozenList)
        assert result == expected

    def test_string_methods_dont_fail(self, container):
        repr(container)
        str(container)
        bytes(container)

    def test_tricky_container(self, unicode_container):
        repr(unicode_container)
        str(unicode_container)

    def test_add(self, container, lst):
        result = container + (1, 2, 3)
        expected = FrozenList(lst + [1, 2, 3])
        self.check_result(result, expected)

        result = (1, 2, 3) + container
        expected = FrozenList([1, 2, 3] + lst)
        self.check_result(result, expected)

    def test_iadd(self, container, lst):
        q = r = container

        q += [5]
        self.check_result(q, lst + [5])

        # Other shouldn't be mutated.
        self.check_result(r, lst)

    def test_union(self, container, lst):
        result = container.union((1, 2, 3))
        expected = FrozenList(lst + [1, 2, 3])
        self.check_result(result, expected)

    def test_difference(self, container):
        result = container.difference([2])
        expected = FrozenList([1, 3, 4, 5])
        self.check_result(result, expected)

    def test_difference_dupe(self):
        result = FrozenList([1, 2, 3, 2]).difference([2])
        expected = FrozenList([1, 3])
        self.check_result(result, expected)

    def test_tricky_container_to_bytes_raises(self, unicode_container):
        # GH 26447
        msg = "^'str' object cannot be interpreted as an integer$"
        with pytest.raises(TypeError, match=msg):
            bytes(unicode_container)
| 27.412281 | 73 | 0.624 |
ace66bad82c3c32c6a42a3def123da17a21d2d30 | 2,518 | py | Python | Array/969PancakeSorting.py | john-the-dev/leetcode | f1038a5357c841a0d3c8aca1ae1a7d0387f77545 | [
"Apache-2.0"
] | null | null | null | Array/969PancakeSorting.py | john-the-dev/leetcode | f1038a5357c841a0d3c8aca1ae1a7d0387f77545 | [
"Apache-2.0"
] | null | null | null | Array/969PancakeSorting.py | john-the-dev/leetcode | f1038a5357c841a0d3c8aca1ae1a7d0387f77545 | [
"Apache-2.0"
] | null | null | null | # 969. Pancake Sorting
'''
Given an array of integers arr, sort the array by performing a series of pancake flips.
In one pancake flip we do the following steps:
Choose an integer k where 1 <= k <= arr.length.
Reverse the sub-array arr[1...k].
For example, if arr = [3,2,1,4] and we performed a pancake flip choosing k = 3, we reverse the sub-array [3,2,1], so arr = [1,2,3,4] after the pancake flip at k = 3.
Return the k-values corresponding to a sequence of pancake flips that sort arr. Any valid answer that sorts the array within 10 * arr.length flips will be judged as correct.
Example 1:
Input: arr = [3,2,4,1]
Output: [4,2,4,3]
Explanation:
We perform 4 pancake flips, with k values 4, 2, 4, and 3.
Starting state: arr = [3, 2, 4, 1]
After 1st flip (k = 4): arr = [1, 4, 2, 3]
After 2nd flip (k = 2): arr = [4, 1, 2, 3]
After 3rd flip (k = 4): arr = [3, 2, 1, 4]
After 4th flip (k = 3): arr = [1, 2, 3, 4], which is sorted.
Notice that we return an array of the chosen k values of the pancake flips.
Example 2:
Input: arr = [1,2,3]
Output: []
Explanation: The input is already sorted, so there is no need to flip anything.
Note that other answers, such as [3, 3], would also be accepted.
Constraints:
1 <= arr.length <= 100
1 <= arr[i] <= arr.length
All integers in arr are unique (i.e. arr is a permutation of the integers from 1 to arr.length).
'''
from common import *
from bisect import bisect_left
class Solution:
    """Pancake sort via insertion: maintain a sorted prefix and flip each new
    element into its place.

    O(n^2) runtime, O(1) extra storage. At most four flips are recorded per
    element, so the flip sequence stays well within the 10 * n limit.
    """

    def pancakeSort(self, arr: List[int]) -> List[int]:
        flips = []

        def flip(k):
            # Reverse arr[0:k] in place and record the flip when it moves
            # anything (k == 1 is a no-op and is not recorded).
            if k > 1:
                flips.append(k)
                arr[:k] = arr[k - 1::-1]

        size = len(arr)
        pos = 1
        while pos < size:
            if arr[pos] >= arr[pos - 1]:
                # Prefix arr[0:pos+1] is already sorted; nothing to do.
                pos += 1
                continue
            # arr[0:pos] is sorted; find where arr[pos] belongs in it.
            ins = bisect_left(arr, arr[pos], 0, pos)
            if ins == 0:
                # New element is the smallest so far: flip the prefix to
                # descending, then flip everything so it lands in front.
                flip(pos)
                flip(pos + 1)
            else:
                # Rotate arr[pos] into slot `ins` with four flips.
                flip(pos + 1)
                flip(pos - ins + 1)
                flip(pos - ins)
                flip(pos + 1)
            pos += 1
        return flips
# Tests: exact flip sequences produced by this implementation.
assert(Solution().pancakeSort([3,2,4,1]) == [2,3,4])
assert(Solution().pancakeSort([1,2,3]) == [])

def _apply_flips(arr, flips):
    # Helper: apply a pancake-flip sequence in place and return the list.
    for k in flips:
        arr[:k] = arr[k - 1::-1]
    return arr

# Property check: whatever flips are returned must actually sort the input.
_sample = [5, 3, 1, 4, 2]
assert(_apply_flips(list(_sample), Solution().pancakeSort(list(_sample))) == sorted(_sample))
| 31.873418 | 173 | 0.575854 |
ace66bb5e5137d33b80ab90b3e952c496403ab32 | 42,563 | py | Python | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/rdbms/custom.py | ZengTaoxu/azure-cli | 6be96de450da5ac9f07aafb22dd69880bea04792 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-argument, line-too-long
from datetime import datetime, timedelta
from importlib import import_module
import re
from dateutil.tz import tzutc # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id # pylint: disable=import-error
from knack.log import get_logger
from knack.util import todict
from urllib.request import urlretrieve
from azure.core.exceptions import ResourceNotFoundError
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import CLIError, sdk_no_wait
from azure.cli.core.local_context import ALL
from azure.mgmt.rdbms import postgresql, mysql, mariadb
from azure.mgmt.rdbms.mysql.operations._servers_operations import ServersOperations as MySqlServersOperations
from azure.mgmt.rdbms.postgresql.operations._location_based_performance_tier_operations import LocationBasedPerformanceTierOperations as PostgreSQLLocationOperations
from azure.mgmt.rdbms.mariadb.operations._servers_operations import ServersOperations as MariaDBServersOperations
from azure.mgmt.rdbms.mariadb.operations._location_based_performance_tier_operations import LocationBasedPerformanceTierOperations as MariaDBLocationOperations
from ._client_factory import get_mariadb_management_client, get_mysql_management_client, cf_mysql_db, cf_mariadb_db, \
get_postgresql_management_client, cf_postgres_check_resource_availability_sterling, \
cf_mysql_check_resource_availability_sterling, cf_mariadb_check_resource_availability_sterling
from ._flexible_server_util import generate_missing_parameters, generate_password, resolve_poller
from ._util import parse_public_network_access_input, create_firewall_rule
logger = get_logger(__name__)
SKU_TIER_MAP = {'Basic': 'b', 'GeneralPurpose': 'gp', 'MemoryOptimized': 'mo'}
DEFAULT_DB_NAME = 'defaultdb'
# pylint: disable=too-many-locals, too-many-statements, raise-missing-from
def _server_create(cmd, client, resource_group_name=None, server_name=None, sku_name=None, no_wait=False,
                   location=None, administrator_login=None, administrator_login_password=None, backup_retention=None,
                   geo_redundant_backup=None, ssl_enforcement=None, storage_mb=None, tags=None, version=None, auto_grow='Enabled',
                   assign_identity=False, public_network_access=None, infrastructure_encryption=None, minimal_tls_version=None):
    """Create a PostgreSQL, MySQL or MariaDB single server.

    The engine is inferred from the type of *client*. A missing location /
    resource group / server name is generated, a random password is generated
    when none is supplied, and for MySQL/MariaDB a default database is created.
    Returns a dict (see form_response) containing the server, its connection
    string, the effective password and, when one was created, the firewall rule.
    """
    # Pick the provider (and thereby which SDK models to use) from the client type.
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    server_result = firewall_id = None
    # Generates a random password when administrator_login_password is None.
    administrator_login_password = generate_password(administrator_login_password)
    engine_name = 'postgres'
    pricing_link = 'https://aka.ms/postgres-pricing'

    # --public-network-access may be 'Enabled', 'Disabled', 'All' or an explicit
    # IP range; anything other than the two literal states implies Enabled plus
    # a firewall rule covering the requested range.
    start_ip = end_ip = ''
    if public_network_access is not None and str(public_network_access).lower() != 'enabled' and str(public_network_access).lower() != 'disabled':
        if str(public_network_access).lower() == 'all':
            start_ip, end_ip = '0.0.0.0', '255.255.255.255'
        else:
            start_ip, end_ip = parse_public_network_access_input(public_network_access)
        # if anything but 'disabled' is passed on to the args,
        # then the public network access value passed on to the API is Enabled.
        public_network_access = 'Enabled'

    # Check availability for server name if it is supplied by the user
    if provider == 'Microsoft.DBforPostgreSQL':
        # Populate desired parameters
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_postgres_check_resource_availability_sterling(cmd.cli_ctx, None)
        name_availability_resquest = postgresql.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforPostgreSQL/servers")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                infrastructure_encryption=infrastructure_encryption,
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)
        if assign_identity:
            parameters.identity = postgresql.models.ResourceIdentity(
                type=postgresql.models.IdentityType.system_assigned.value)
    elif provider == 'Microsoft.DBforMySQL':
        engine_name = 'mysql'
        pricing_link = 'https://aka.ms/mysql-pricing'
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_mysql_check_resource_availability_sterling(cmd.cli_ctx, None)
        name_availability_resquest = mysql.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforMySQL/servers")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                infrastructure_encryption=infrastructure_encryption,
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)
        if assign_identity:
            parameters.identity = mysql.models.ResourceIdentity(type=mysql.models.IdentityType.system_assigned.value)
    elif provider == 'Microsoft.DBforMariaDB':
        engine_name = 'mariadb'
        pricing_link = 'https://aka.ms/mariadb-pricing'
        location, resource_group_name, server_name = generate_missing_parameters(cmd, location, resource_group_name,
                                                                                 server_name, engine_name)
        check_name_client = cf_mariadb_check_resource_availability_sterling(cmd.cli_ctx, None)
        # NOTE(review): unlike the other engines, the type here carries no
        # '/servers' suffix -- confirm this matches the MariaDB name-availability
        # API contract.
        name_availability_resquest = mariadb.models.NameAvailabilityRequest(name=server_name, type="Microsoft.DBforMariaDB")
        check_server_name_availability(check_name_client, name_availability_resquest)
        logger.warning('Creating %s Server \'%s\' in group \'%s\'...', engine_name, server_name, resource_group_name)
        logger.warning('Your server \'%s\' is using sku \'%s\' (Paid Tier). '
                       'Please refer to %s for pricing details', server_name, sku_name, pricing_link)
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForDefaultCreate(
                administrator_login=administrator_login,
                administrator_login_password=administrator_login_password,
                version=version,
                ssl_enforcement=ssl_enforcement,
                minimal_tls_version=minimal_tls_version,
                public_network_access=public_network_access,
                storage_profile=mariadb.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup,
                    storage_mb=storage_mb,
                    storage_autogrow=auto_grow)),
            location=location,
            tags=tags)

    # Provision the server and block until done (progress surfaced by resolve_poller).
    server_result = resolve_poller(
        client.begin_create(resource_group_name, server_name, parameters), cmd.cli_ctx,
        '{} Server Create'.format(engine_name))
    user = server_result.administrator_login
    version = server_result.version
    host = server_result.fully_qualified_domain_name

    # Adding firewall rule
    if public_network_access is not None and start_ip != '':
        firewall_id = create_firewall_rule(cmd, resource_group_name, server_name, start_ip, end_ip, engine_name)

    logger.warning('Make a note of your password. If you forget, you would have to '
                   'reset your password with \'az %s server update -n %s -g %s -p <new-password>\'.',
                   engine_name, server_name, resource_group_name)

    # Remember name/login/location/group so later commands can default to them.
    update_local_contexts(cmd, provider, server_name, resource_group_name, location, user)

    if engine_name == 'postgres':
        return form_response(server_result, administrator_login_password if administrator_login_password is not None else '*****',
                             host=host,
                             connection_string=create_postgresql_connection_string(server_name, host, user, administrator_login_password),
                             database_name=None, firewall_id=firewall_id)

    # Serves both - MySQL and MariaDB
    # Create mysql database if it does not exist
    database_name = DEFAULT_DB_NAME
    create_database(cmd, resource_group_name, server_name, database_name, engine_name)
    return form_response(server_result, administrator_login_password if administrator_login_password is not None else '*****',
                         host=host,
                         connection_string=create_mysql_connection_string(server_name, host, database_name, user, administrator_login_password),
                         database_name=database_name, firewall_id=firewall_id)
# Replace a bare source server name with a full source server resource id so the
# customer-facing restore command accepts either form. The parameter list should
# match the one registered in the command factory so the ParametersContext
# arguments and validators can be reused.
def _server_restore(cmd, client, resource_group_name, server_name, source_server, restore_point_in_time, no_wait=False):
    """Restore a new server from a point-in-time backup of *source_server*.

    :param source_server: name (same group) or full resource id of the source.
    :param restore_point_in_time: ISO8601 timestamp to restore to.
    :raises ValueError: when the source-server id is malformed or the source
        server cannot be fetched.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    # Expand a bare server name into a fully qualified resource id in the same
    # subscription / resource group.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace=provider,
                type='servers',
                name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBforMySQL':
        parameters = mysql.models.ServerForCreate(
            properties=mysql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforPostgreSQL':
        parameters = postgresql.models.ServerForCreate(
            properties=postgresql.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)
    elif provider == 'Microsoft.DBforMariaDB':
        parameters = mariadb.models.ServerForCreate(
            properties=mariadb.models.ServerPropertiesForRestore(
                source_server_id=source_server,
                restore_point_in_time=restore_point_in_time),
            location=None)

    # Cross-region restore is not supported, so the new server must be created
    # in the same location as the source server (not the resource group).
    id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(id_parts['resource_group'], id_parts['name'])
        parameters.location = source_server_object.location
    except Exception as e:
        # Chain the original error so the root cause is not lost.
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# Replace a bare source server name with a full source server resource id for the
# customer-facing geo-restore command. The parameter list should match the one
# registered in the command factory so the ParametersContext arguments and
# validators can be reused.
def _server_georestore(cmd, client, resource_group_name, server_name, sku_name, location, source_server,
                       backup_retention=None, geo_redundant_backup=None, no_wait=False, **kwargs):
    """Geo-restore a new server in *location* from *source_server*'s geo-backup.

    When sku_name is None, the source server's sku is reused.
    :raises ValueError: when the source-server id is malformed or the source
        server cannot be fetched.
    """
    provider = 'Microsoft.DBforPostgreSQL'
    if isinstance(client, MySqlServersOperations):
        provider = 'Microsoft.DBforMySQL'
    elif isinstance(client, MariaDBServersOperations):
        provider = 'Microsoft.DBforMariaDB'

    # Expand a bare server name into a fully qualified resource id.
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) == 1:
            source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                        resource_group=resource_group_name,
                                        namespace=provider,
                                        type='servers',
                                        name=source_server)
        else:
            raise ValueError('The provided source-server {} is invalid.'.format(source_server))

    if provider == 'Microsoft.DBforMySQL':
        parameters = mysql.models.ServerForCreate(
            sku=mysql.models.Sku(name=sku_name),
            properties=mysql.models.ServerPropertiesForGeoRestore(
                storage_profile=mysql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforPostgreSQL':
        parameters = postgresql.models.ServerForCreate(
            sku=postgresql.models.Sku(name=sku_name),
            properties=postgresql.models.ServerPropertiesForGeoRestore(
                storage_profile=postgresql.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)
    elif provider == 'Microsoft.DBforMariaDB':
        parameters = mariadb.models.ServerForCreate(
            sku=mariadb.models.Sku(name=sku_name),
            properties=mariadb.models.ServerPropertiesForGeoRestore(
                storage_profile=mariadb.models.StorageProfile(
                    backup_retention_days=backup_retention,
                    geo_redundant_backup=geo_redundant_backup),
                source_server_id=source_server),
            location=location)

    source_server_id_parts = parse_resource_id(source_server)
    try:
        source_server_object = client.get(source_server_id_parts['resource_group'], source_server_id_parts['name'])
        # Default to the source server's sku when none was requested.
        if parameters.sku.name is None:
            parameters.sku.name = source_server_object.sku.name
    except Exception as e:
        # Chain the original error so the root cause is not lost.
        raise ValueError('Unable to get source server: {}.'.format(str(e))) from e

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
# Custom functions for server replicas (MySQL, PostgreSQL and MariaDB are all handled below)
def _replica_create(cmd, client, resource_group_name, server_name, source_server, no_wait=False, location=None, sku_name=None, **kwargs):
    """Create a read replica of *source_server*.

    Location and sku default to the source server's values when not supplied.
    :raises CLIError: on a malformed source-server id or when the source server
        cannot be fetched.
    """
    # Select the provider namespace and the matching SDK module from the client type.
    if isinstance(client, MySqlServersOperations):
        provider, engine = 'Microsoft.DBforMySQL', mysql
    elif isinstance(client, MariaDBServersOperations):
        provider, engine = 'Microsoft.DBforMariaDB', mariadb
    else:
        provider, engine = 'Microsoft.DBforPostgreSQL', postgresql

    # set source server id
    if not is_valid_resource_id(source_server):
        if len(source_server.split('/')) != 1:
            raise CLIError('The provided source-server {} is invalid.'.format(source_server))
        source_server = resource_id(subscription=get_subscription_id(cmd.cli_ctx),
                                    resource_group=resource_group_name,
                                    namespace=provider,
                                    type='servers',
                                    name=source_server)

    id_parts = parse_resource_id(source_server)
    try:
        master_server = client.get(id_parts['resource_group'], id_parts['name'])
    except CloudError as e:
        raise CLIError('Unable to get source server: {}.'.format(str(e)))

    # Inherit unspecified settings from the source server.
    if location is None:
        location = master_server.location
    if sku_name is None:
        sku_name = master_server.sku.name

    parameters = engine.models.ServerForCreate(
        sku=engine.models.Sku(name=sku_name),
        properties=engine.models.ServerPropertiesForReplica(source_server_id=source_server),
        location=location)

    return sdk_no_wait(no_wait, client.begin_create, resource_group_name, server_name, parameters)
def _replica_stop(client, resource_group_name, server_name):
    """Stop replication to a read replica, turning it into a standalone server.

    :raises CLIError: when the server cannot be fetched or is not a replica.
    """
    try:
        server_object = client.get(resource_group_name, server_name)
    except Exception as e:  # SDK error types differ per engine; surface uniformly
        # Chain the original error so the root cause is not lost.
        raise CLIError('Unable to get server: {}.'.format(str(e))) from e
    if server_object.replication_role.lower() != "replica":
        raise CLIError('Server {} is not a replica server.'.format(server_name))
    # Resolve the engine-specific ServerUpdateParameters model from the module
    # that defined the fetched server model (works for all three engines).
    server_module_path = server_object.__module__
    module = import_module(server_module_path.replace('server', 'server_update_parameters'))
    ServerUpdateParameters = getattr(module, 'ServerUpdateParameters')
    params = ServerUpdateParameters(replication_role='None')
    return client.begin_update(resource_group_name, server_name, params)
def _server_update_custom_func(instance,
                               sku_name=None,
                               storage_mb=None,
                               backup_retention=None,
                               administrator_login_password=None,
                               ssl_enforcement=None,
                               tags=None,
                               auto_grow=None,
                               assign_identity=False,
                               public_network_access=None,
                               minimal_tls_version=None):
    """Build engine-specific ServerUpdateParameters from the fetched *instance*.

    Used as the custom_func of the generic-update pattern: mutates the fetched
    server model according to the supplied options and returns the update payload.
    """
    # Resolve the engine-specific ServerUpdateParameters model from the module
    # that defined the fetched server model.
    server_module_path = instance.__module__
    module = import_module(server_module_path.replace('server', 'server_update_parameters'))
    ServerUpdateParameters = getattr(module, 'ServerUpdateParameters')

    if sku_name:
        instance.sku.name = sku_name
        # Clear derived sku fields so the service recomputes them from the name.
        instance.sku.capacity = None
        instance.sku.family = None
        instance.sku.tier = None
    else:
        instance.sku = None

    if storage_mb:
        instance.storage_profile.storage_mb = storage_mb
    if backup_retention:
        instance.storage_profile.backup_retention_days = backup_retention
    if auto_grow:
        instance.storage_profile.storage_autogrow = auto_grow

    params = ServerUpdateParameters(sku=instance.sku,
                                    storage_profile=instance.storage_profile,
                                    administrator_login_password=administrator_login_password,
                                    version=None,
                                    ssl_enforcement=ssl_enforcement,
                                    tags=tags,
                                    public_network_access=public_network_access,
                                    minimal_tls_version=minimal_tls_version)

    if assign_identity:
        # BUG FIX: str.find() returns an index or -1, both of which are truthy
        # for any non-zero value, so the original `if path.find('postgres'):`
        # always took the postgres branch (even for mysql modules, where
        # find() returns -1). Test substring membership instead.
        if 'postgres' in server_module_path:
            if instance.identity is None:
                instance.identity = postgresql.models.ResourceIdentity(type=postgresql.models.IdentityType.system_assigned.value)
            params.identity = instance.identity
        elif 'mysql' in server_module_path:
            if instance.identity is None:
                instance.identity = mysql.models.ResourceIdentity(type=mysql.models.IdentityType.system_assigned.value)
            params.identity = instance.identity

    return params
def _server_mysql_upgrade(cmd, client, resource_group_name, server_name, target_server_version):
    """Upgrade the major version of a MySQL server to *target_server_version*."""
    parameters = mysql.models.ServerUpgradeParameters(
        target_server_version=target_server_version
    )
    # NOTE(review): the LRO poller returned by begin_upgrade is discarded, so the
    # command returns before the upgrade completes -- confirm this is intentional.
    client.begin_upgrade(resource_group_name, server_name, parameters)
def _server_mariadb_get(cmd, resource_group_name, server_name):
    """Fetch a MariaDB server resource."""
    return get_mariadb_management_client(cmd.cli_ctx).servers.get(resource_group_name, server_name)
def _server_mysql_get(cmd, resource_group_name, server_name):
    """Fetch a MySQL server resource."""
    return get_mysql_management_client(cmd.cli_ctx).servers.get(resource_group_name, server_name)
def _server_stop(cmd, client, resource_group_name, server_name):
    """Stop a server; the service auto-starts stopped servers after 7 days."""
    logger.warning("Server will be automatically started after 7 days "
                   "if you do not perform a manual start operation")
    return client.begin_stop(resource_group_name, server_name)
def _server_postgresql_get(cmd, resource_group_name, server_name):
    """Fetch a PostgreSQL server resource."""
    return get_postgresql_management_client(cmd.cli_ctx).servers.get(resource_group_name, server_name)
def _server_update_get(client, resource_group_name, server_name):
    """Getter half of the generic-update pattern: fetch the current server model."""
    return client.get(resource_group_name, server_name)
def _server_update_set(client, resource_group_name, server_name, parameters):
    """Setter half of the generic-update pattern: apply the mutated server model."""
    return client.begin_update(resource_group_name, server_name, parameters)
def _server_delete(cmd, client, resource_group_name, server_name):
    """Delete a server and scrub its name from the CLI local context."""
    database_engine = 'postgres'
    if isinstance(client, MySqlServersOperations):
        database_engine = 'mysql'
    elif isinstance(client, MariaDBServersOperations):
        # BUG FIX: MariaDB previously fell through to 'postgres', so the stale
        # server_name was removed from the wrong local-context section.
        database_engine = 'mariadb'
    result = client.begin_delete(resource_group_name, server_name)

    # Remove the deleted server's name from the local context so later commands
    # do not default to it.
    if cmd.cli_ctx.local_context.is_on:
        local_context_file = cmd.cli_ctx.local_context._get_local_context_file()  # pylint: disable=protected-access
        local_context_file.remove_option('{}'.format(database_engine), 'server_name')

    return result.result()
def _get_sku_name(tier, family, capacity):
    """Compose a sku-name string such as 'gp_Gen5_4' from tier, family and vCores."""
    return '_'.join((SKU_TIER_MAP[tier], family, str(capacity)))
def _firewall_rule_create(client, resource_group_name, server_name, firewall_rule_name, start_ip_address, end_ip_address):
parameters = {'name': firewall_rule_name, 'start_ip_address': start_ip_address, 'end_ip_address': end_ip_address}
return client.begin_create_or_update(resource_group_name, server_name, firewall_rule_name, parameters)
def _firewall_rule_custom_getter(client, resource_group_name, server_name, firewall_rule_name):
    """Getter half of the generic-update pattern for firewall rules."""
    return client.get(resource_group_name, server_name, firewall_rule_name)
def _firewall_rule_custom_setter(client, resource_group_name, server_name, firewall_rule_name, parameters):
return client.begin_create_or_update(
resource_group_name,
server_name,
firewall_rule_name,
parameters)
def _firewall_rule_update_custom_func(instance, start_ip_address=None, end_ip_address=None):
if start_ip_address is not None:
instance.start_ip_address = start_ip_address
if end_ip_address is not None:
instance.end_ip_address = end_ip_address
return instance
def _vnet_rule_create(client, resource_group_name, server_name, virtual_network_rule_name, virtual_network_subnet_id, ignore_missing_vnet_service_endpoint=None):
parameters = {
'name': virtual_network_rule_name,
'virtual_network_subnet_id': virtual_network_subnet_id,
'ignore_missing_vnet_service_endpoint': ignore_missing_vnet_service_endpoint
}
return client.begin_create_or_update(resource_group_name, server_name, virtual_network_rule_name, parameters)
def _custom_vnet_update_getter(client, resource_group_name, server_name, virtual_network_rule_name):
    """Getter half of the generic-update pattern for virtual network rules."""
    return client.get(resource_group_name, server_name, virtual_network_rule_name)
def _custom_vnet_update_setter(client, resource_group_name, server_name, virtual_network_rule_name, parameters):
return client.begin_create_or_update(
resource_group_name,
server_name,
virtual_network_rule_name,
parameters)
def _vnet_rule_update_custom_func(instance, virtual_network_subnet_id, ignore_missing_vnet_service_endpoint=None):
instance.virtual_network_subnet_id = virtual_network_subnet_id
if ignore_missing_vnet_service_endpoint is not None:
instance.ignore_missing_vnet_service_endpoint = ignore_missing_vnet_service_endpoint
return instance
def _configuration_update(client, resource_group_name, server_name, configuration_name, value=None, source=None):
parameters = {
'name': configuration_name,
'value': value,
'source': source
}
return client.begin_create_or_update(resource_group_name, server_name, configuration_name, parameters)
def _db_create(client, resource_group_name, server_name, database_name, charset=None, collation=None):
parameters = {
'name': database_name,
'charset': charset,
'collation': collation
}
return client.begin_create_or_update(resource_group_name, server_name, database_name, parameters)
# Custom functions for server logs
def _download_log_files(
client,
resource_group_name,
server_name,
file_name):
# list all files
files = client.list_by_server(resource_group_name, server_name)
for f in files:
if f.name in file_name:
urlretrieve(f.url, f.name)
def _list_log_files_with_filter(client, resource_group_name, server_name, filename_contains=None,
file_last_written=None, max_file_size=None):
# list all files
all_files = client.list_by_server(resource_group_name, server_name)
files = []
if file_last_written is None:
file_last_written = 72
time_line = datetime.utcnow().replace(tzinfo=tzutc()) - timedelta(hours=file_last_written)
for f in all_files:
if f.last_modified_time < time_line:
continue
if filename_contains is not None and re.search(filename_contains, f.name) is None:
continue
if max_file_size is not None and f.size_in_kb > max_file_size:
continue
del f.created_time
files.append(f)
return files
# Custom functions for list servers
def _server_list_custom_func(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# region private_endpoint
def _update_private_endpoint_connection_status(cmd, client, resource_group_name, server_name,
private_endpoint_connection_name, is_approved=True, description=None): # pylint: disable=unused-argument
private_endpoint_connection = client.get(resource_group_name=resource_group_name, server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name)
new_status = 'Approved' if is_approved else 'Rejected'
private_link_service_connection_state = {
'status': new_status,
'description': description
}
private_endpoint_connection.private_link_service_connection_state = private_link_service_connection_state
return client.begin_create_or_update(resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=private_endpoint_connection)
def approve_private_endpoint_connection(cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
                                        description=None):
    """Approve a private endpoint connection request for a server."""
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
        is_approved=True, description=description)
def reject_private_endpoint_connection(cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
                                       description=None):
    """Reject a private endpoint connection request for a server."""
    return _update_private_endpoint_connection_status(
        cmd, client, resource_group_name, server_name, private_endpoint_connection_name,
        is_approved=False, description=description)
def server_key_create(client, resource_group_name, server_name, kid):
    """Create a Server Key backed by the Azure Key Vault key at uri *kid*."""
    key_name = _get_server_key_name_from_uri(kid)

    parameters = {
        'uri': kid,
        'server_key_type': "AzureKeyVault"
    }

    # NOTE(review): positional order (server_name, key_name, resource_group_name)
    # differs from the keyword style used in server_key_get/server_key_delete --
    # confirm it matches the ServerKeysOperations.begin_create_or_update signature.
    return client.begin_create_or_update(server_name, key_name, resource_group_name, parameters)
def server_key_get(client, resource_group_name, server_name, kid):
    """Get the Server Key derived from the Key Vault key uri *kid*."""
    return client.get(resource_group_name=resource_group_name,
                      server_name=server_name,
                      key_name=_get_server_key_name_from_uri(kid))
def server_key_delete(cmd, client, resource_group_name, server_name, kid):
    """Drop the Server Key derived from the Key Vault key uri *kid*."""
    return client.begin_delete(resource_group_name=resource_group_name,
                               server_name=server_name,
                               key_name=_get_server_key_name_from_uri(kid))
def _get_server_key_name_from_uri(uri):
'''
Gets the key's name to use as a server key.
The SQL server key API requires that the server key has a specific name
based on the vault, key and key version.
'''
match = re.match(r'https://(.)+\.(managedhsm.azure.net|managedhsm-preview.azure.net|vault.azure.net|vault-int.azure-int.net|vault.azure.cn|managedhsm.azure.cn|vault.usgovcloudapi.net|managedhsm.usgovcloudapi.net|vault.microsoftazure.de|managedhsm.microsoftazure.de|vault.cloudapi.eaglex.ic.gov|vault.cloudapi.microsoft.scloud)(:443)?\/keys/[^\/]+\/[0-9a-zA-Z]+$', uri)
if match is None:
raise CLIError('The provided uri is invalid. Please provide a valid Azure Key Vault key id. For example: '
'"https://YourVaultName.vault.azure.net/keys/YourKeyName/01234567890123456789012345678901" or "https://YourManagedHsmRegion.YourManagedHsmName.managedhsm.azure.net/keys/YourKeyName/01234567890123456789012345678901"')
vault = uri.split('.')[0].split('/')[-1]
key = uri.split('/')[-2]
version = uri.split('/')[-1]
return '{}_{}_{}'.format(vault, key, version)
def server_ad_admin_set(client, resource_group_name, server_name, login=None, sid=None):
    """Set the server's Azure Active Directory administrator."""
    return client.begin_create_or_update(
        server_name=server_name,
        resource_group_name=resource_group_name,
        properties={
            'administratorType': 'ActiveDirectory',
            'login': login,
            'sid': sid,
            'tenant_id': _get_tenant_id()
        })
def _get_tenant_id():
    """Return the tenantId of the currently selected subscription."""
    return Profile().get_subscription()['tenantId']
# endregion
# region new create experience
def create_mysql_connection_string(server_name, host, database_name, user_name, password):
    """Build a ready-to-run `mysql` CLI command line for the new server."""
    # When the password is unknown, leave a literal '{password}' placeholder
    # for the user to substitute.
    shown_password = password if password is not None else '{password}'
    return 'mysql {dbname} --host {host} --user {username}@{servername} --password={password}'.format(
        dbname=database_name,
        host=host,
        username=user_name,
        servername=server_name,
        password=shown_password)
def create_database(cmd, resource_group_name, server_name, database_name, engine_name):
    """Ensure *database_name* exists on a MySQL/MariaDB server, creating it if absent."""
    # Resolve the engine-specific database operations client.
    if engine_name == 'mysql':
        database_client = cf_mysql_db(cmd.cli_ctx, None)
    elif engine_name == 'mariadb':
        database_client = cf_mariadb_db(cmd.cli_ctx, None)

    try:
        # Probe for an existing database; create only when the lookup 404s.
        database_client.get(resource_group_name, server_name, database_name)
    except ResourceNotFoundError:
        logger.warning('Creating %s database \'%s\'...', engine_name, database_name)
        database_client.begin_create_or_update(resource_group_name, server_name, database_name,
                                               {'name': database_name, 'charset': 'utf8'})
def form_response(server_result, password, host, connection_string, database_name=None, firewall_id=None):
    """Convert the server model to a dict and append the extra fields shown to the user."""
    response = todict(server_result)
    response['connectionString'] = connection_string
    response['password'] = password
    if firewall_id is not None:
        response['firewallName'] = firewall_id
    if database_name is not None:
        response['databaseName'] = database_name
    return response
def create_postgresql_connection_string(server_name, host, user, password):
    """Build a postgres:// connection uri (sslmode=require) for the new server."""
    # When the password is unknown, leave a literal '{password}' placeholder
    # for the user to substitute.
    shown_password = password if password is not None else '{password}'
    return 'postgres://{user}%40{servername}:{password}@{host}/postgres?sslmode=require'.format(
        user=user,
        servername=server_name,
        password=shown_password,
        host=host)
def check_server_name_availability(check_name_client, parameters):
    """Raise CLIError when the requested server name is taken; return True otherwise."""
    availability = check_name_client.execute(parameters)
    if not availability.name_available:
        raise CLIError(availability.message)
    return True
def update_local_contexts(cmd, provider, server_name, resource_group_name, location, user):
    """Remember the new server's name/login/location/group in the CLI local context."""
    engine_by_provider = {
        'Microsoft.DBforMySQL': 'mysql',
        'Microsoft.DBforMariaDB': 'mariadb',
    }
    engine = engine_by_provider.get(provider, 'postgres')
    if cmd.cli_ctx.local_context.is_on:
        local_context = cmd.cli_ctx.local_context
        local_context.set([engine], 'server_name', server_name)
        local_context.set([engine], 'administrator_login', user)
        local_context.set([ALL], 'location', location)
        local_context.set([ALL], 'resource_group_name', resource_group_name)
def get_connection_string(cmd, client, server_name='{server}', database_name='{database}', administrator_login='{username}', administrator_login_password='{password}'):
    """Show sample connection strings for several languages/drivers.

    Defaults are literal '{placeholder}' strings so the output remains useful
    when an argument is omitted. The engine is inferred from the *client* type;
    the host suffix comes from the active cloud's endpoint configuration.
    Returns {'connectionStrings': {driver: string, ...}}.
    """
    provider = 'MySQL'
    if isinstance(client, PostgreSQLLocationOperations):
        provider = 'PostgreSQL'
    elif isinstance(client, MariaDBLocationOperations):
        provider = 'MariaDB'

    if provider == 'MySQL':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.mysql_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        result = {
            'mysql_cmd': "mysql {database} --host {host} --user {user}@{server} --password={password}",
            'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}@{server}; Pwd={password}",
            'jdbc': "jdbc:mysql://{host}:3306/{database}?user={user}@{server}&password={password}",
            'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}@{server}',"
                       "password: {password}, database: {database}, port: 3306}});",
            'php': "$con=mysqli_init(); [mysqli_ssl_set($con, NULL, NULL, {{ca-cert filename}}, NULL, NULL);] mysqli_real_connect($con, '{host}', '{user}@{server}', '{password}', '{database}', 3306);",
            'python': "cnx = mysql.connector.connect(user='{user}@{server}', password='{password}', host='{host}', "
                      "port=3306, database='{database}')",
            'ruby': "client = Mysql2::Client.new(username: '{user}@{server}', password: '{password}', "
                    "database: '{database}', host: '{host}', port: 3306)"
        }

        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }

        # Substitute the known values into every template.
        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)

    if provider == 'PostgreSQL':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.postgresql_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        result = {
            'psql_cmd': "postgresql://{user}@{server}:{password}@{host}/{database}?sslmode=require",
            'C++ (libpq)': "host={host} port=5432 dbname={database} user={user}@{server} password={password} sslmode=require",
            'ado.net': "Server={host};Database={database};Port=5432;User Id={user}@{server};Password={password};",
            'jdbc': "jdbc:postgresql://{host}:5432/{database}?user={user}@{server}&password={password}",
            'node.js': "var client = new pg.Client('postgres://{user}@{server}:{password}@{host}:5432/{database}');",
            'php': "host={host} port=5432 dbname={database} user={user}@{server} password={password}",
            'python': "cnx = psycopg2.connect(database='{database}', user='{user}@{server}', host='{host}', password='{password}', "
                      "port='5432')",
            'ruby': "cnx = PG::Connection.new(:host => '{host}', :user => '{user}@{server}', :dbname => '{database}', "
                    ":port => '5432', :password => '{password}')"
        }

        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }

        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)

    if provider == 'MariaDB':
        server_endpoint = cmd.cli_ctx.cloud.suffixes.mariadb_server_endpoint
        host = '{}{}'.format(server_name, server_endpoint)
        result = {
            'ado.net': "Server={host}; Port=3306; Database={database}; Uid={user}@{server}; Pwd={password}",
            'jdbc': "jdbc:mariadb://{host}:3306/{database}?user={user}@{server}&password={password}",
            'node.js': "var conn = mysql.createConnection({{host: '{host}', user: '{user}@{server}',"
                       "password: {password}, database: {database}, port: 3306}});",
            'php': "$con=mysqli_init(); [mysqli_ssl_set($con, NULL, NULL, {{ca-cert filename}}, NULL, NULL);] mysqli_real_connect($con, '{host}', '{user}@{server}', '{password}', '{database}', 3306);",
            'python': "cnx = mysql.connector.connect(user='{user}@{server}', password='{password}', host='{host}', "
                      "port=3306, database='{database}')",
            'ruby': "client = Mysql2::Client.new(username: '{user}@{server}', password: '{password}', "
                    "database: '{database}', host: '{host}', port: 3306)"
        }

        connection_kwargs = {
            'host': host,
            'user': administrator_login,
            'password': administrator_login_password if administrator_login_password is not None else '{password}',
            'database': database_name,
            'server': server_name
        }

        for k, v in result.items():
            result[k] = v.format(**connection_kwargs)

    return {
        'connectionStrings': result
    }
| 47.45039 | 372 | 0.682941 |
ace66bced3cc2ad7998e06da27d9af531c4b904e | 1,102 | py | Python | Labs/ShootingMethod/secant_method.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 10 | 2016-10-18T19:54:25.000Z | 2021-10-09T20:12:38.000Z | Labs/ShootingMethod/secant_method.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | null | null | null | Labs/ShootingMethod/secant_method.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 2 | 2017-05-14T16:07:59.000Z | 2020-06-20T09:05:06.000Z | import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
# y'' +4y = -9sin(x), y(0) = 1., y(3*pi/4.) = -(1.+3*sqrt(2))/2., y'(0) = -2
# Exact Solution: y(x) = cos(2x) + (1/2)sin(2x) - 3sin(x)
def find_t(f, a, b, alpha, beta, t0, t1, maxI):
    """Secant-method search for the initial slope used by the shooting method.

    For the ODE system ``f`` (state assumed to be [y, y'] -- matches how this
    file builds its systems), find t such that integrating from y(a)=alpha,
    y'(a)=t yields y(b) ~= beta.

    Parameters:
        f          : right-hand side callable f(y, x) for odeint.
        a, b       : interval endpoints; only y(b) is inspected.
        alpha, beta: boundary values y(a) and target y(b).
        t0, t1     : two initial guesses for the slope (secant iterates).
        maxI       : maximum number of secant iterations.

    Returns the last secant iterate for the slope.
    """
    t2 = t1  # fallback if the loop body never runs (|0 - beta| already small)
    sol1 = 0
    i = 0
    while abs(sol1 - beta) > 10**-8 and i < maxI:
        # y(b) obtained with each of the two current slope guesses.
        sol0 = odeint(f, np.array([alpha, t0]), [a, b], atol=1e-10)[1, 0]
        sol1 = odeint(f, np.array([alpha, t1]), [a, b], atol=1e-10)[1, 0]
        # Standard secant update on g(t) = y_b(t) - beta.
        t2 = t1 - (sol1 - beta) * (t1 - t0) / (sol1 - sol0)
        t0 = t1
        t1 = t2
        i = i + 1
    if i == maxI:
        # print() call works under both Python 2 and 3 (was a py2-only statement).
        print("t not found")
    return t2
def solveSecant(f, X, a, b, alpha, beta, t0, t1, maxI):
    """Solve the BVP by shooting: locate the initial slope with find_t,
    then integrate the IVP over the grid X and return the y-component."""
    slope = find_t(f, a, b, alpha, beta, t0, t1, maxI)
    trajectory = odeint(f, np.array([alpha, slope]), X, atol=1e-10)
    return trajectory[:, 0]
def ode(y, x):
    """First-order system for y'' + 4y = -9*sin(x): state is [y, y']."""
    position, velocity = y
    return np.array([velocity, -4 * position - 9 * np.sin(x)])
# Demo: solve y'' + 4y = -9 sin(x) on [0, 3*pi/4] with y(0) = 1 and
# y(3*pi/4) = -(1 + 3*sqrt(2))/2 via the shooting method, then plot.
# The second find_t guess passed below is the boundary-value secant slope;
# exact solution (per the header comment): y = cos(2x) + sin(2x)/2 - 3 sin(x).
X = np.linspace(0,3*np.pi/4,100)
Y = solveSecant(ode,X,0,3*np.pi/4,1,-(1.+3*np.sqrt(2)) /2,
        (1+(1.+3*np.sqrt(2)) /2)/(-3*np.pi/4),-1,40)
plt.plot(X,Y,'-k',linewidth=2)
plt.show()
| 29 | 84 | 0.521779 |
ace66c052966f4c3e253ed00bc30c310e61b3930 | 830 | py | Python | moon/rospy/rospy_scout_round3.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | moon/rospy/rospy_scout_round3.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | moon/rospy/rospy_scout_round3.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | """
Proxy for ROS sensors and effectors
this is Python 2.7 code
scout enhanced with reporting detection of volatiles (task specific to Round 1 of Qualification)
"""
import sys
import rospy
from rospy_scout import RospyScout, RospyScoutReqRep, RospyScoutPushPull
from rospy_round3 import RospyRound3, RospyRound3ReqRep, RospyRound3PushPull
from srcp2_msgs.msg import Qual3ScoringMsg
from srcp2_msgs.srv import Qual3ScoreSrv
from geometry_msgs.msg import Point
class RospyScoutRound3PushPull(RospyScoutPushPull, RospyRound3PushPull):
pass
class RospyScoutRound3ReqRep(RospyRound3ReqRep, RospyScoutReqRep):
pass
class RospyScoutRound3(RospyScout, RospyRound3):
pass
if __name__ == '__main__':
rs = RospyScoutRound3()
rs.launch(RospyScoutRound3PushPull, RospyScoutRound3ReqRep, sys.argv[1:])
| 27.666667 | 98 | 0.80241 |
ace66cc89819138899214c2345f8b162295e181f | 1,127 | py | Python | lib/dojo_requests.py | tcmRyan/DojoMiner | 19ce44e737dce13e9ee4975347ec9e9fdddce1bb | [
"MIT"
] | null | null | null | lib/dojo_requests.py | tcmRyan/DojoMiner | 19ce44e737dce13e9ee4975347ec9e9fdddce1bb | [
"MIT"
] | null | null | null | lib/dojo_requests.py | tcmRyan/DojoMiner | 19ce44e737dce13e9ee4975347ec9e9fdddce1bb | [
"MIT"
] | null | null | null | import browser_cookie3
from requests_html import HTMLSession
from setting import config
# Browsers whose cookie jars can be harvested; DojoRequests falls back to
# Chrome for any config value other than 'firefox'.
SUPPORTED_BROWSERS = ['chrome', 'firefox']
class DojoRequests:
    """Thin wrapper around an HTMLSession that authenticates dojo/GDP API
    calls by attaching cookies harvested from the local browser.

    The interface mirrors Python's Requests library; see its docs for the
    accepted keyword arguments.
    """

    def __init__(self):
        self.session = HTMLSession()
        # Pick the cookie source from config; anything but 'firefox' uses Chrome.
        if config.get('browser') == 'firefox':
            loader = browser_cookie3.firefox
        else:
            loader = browser_cookie3.chrome
        self.cookies = loader()

    def request(self, method, url, **kwargs):
        # Defaults first; caller-supplied kwargs win (including 'cookies').
        merged = {'url': url, 'method': method.upper(), 'cookies': self.cookies}
        merged.update(kwargs)
        return self.session.request(**merged)

    def get(self, url, **kwargs):
        return self.request('GET', url, **kwargs)

    def post(self, url, **kwargs):
        return self.request('POST', url, **kwargs)
| 29.657895 | 85 | 0.632653 |
ace66cd02f5b0ee6f8dcff95d996afd33738d8ed | 930 | py | Python | numba/ocl/tests/oclpy/test_py2_div_issue.py | SPIRV/NUMBA | 6b93f44c923e7bf8cd9f95cc5188bba3aea4e75d | [
"BSD-2-Clause",
"MIT"
] | 4 | 2017-06-30T14:22:30.000Z | 2021-01-11T16:47:23.000Z | numba/ocl/tests/oclpy/test_py2_div_issue.py | SPIRV/NUMBA | 6b93f44c923e7bf8cd9f95cc5188bba3aea4e75d | [
"BSD-2-Clause",
"MIT"
] | 1 | 2017-12-21T23:31:59.000Z | 2017-12-29T16:56:05.000Z | numba/ocl/tests/oclpy/test_py2_div_issue.py | SPIRV/NUMBA | 6b93f44c923e7bf8cd9f95cc5188bba3aea4e75d | [
"BSD-2-Clause",
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import
import numpy as np
from numba import ocl, float32, int32
from numba.ocl.testing import unittest
class TestOclPy2Div(unittest.TestCase):
    """Regression test for division behavior of OpenCL-jitted kernels under
    Python 2: `ans / 1.0` must equal `ans` for float32 values."""

    def test_py2_div_issue(self):
        # Kernel: each work item writes ans, ans*1.0 and ans/1.0; the three
        # output arrays must end up identical.
        @ocl.jit('(float32[:], float32[:], float32[:], int32)')
        def preCalc(y, yA, yB, numDataPoints):
            i = ocl.get_global_id(0)
            k = i % numDataPoints  # NOTE(review): k is computed but never used
            ans = float32(1.001 * float32(i))
            y[i] = ans
            yA[i] = ans * 1.0
            yB[i] = ans / 1.0
        numDataPoints = 15
        y = np.zeros(numDataPoints, dtype=np.float32)
        yA = np.zeros(numDataPoints, dtype=np.float32)
        yB = np.zeros(numDataPoints, dtype=np.float32)
        z = 1.0  # NOTE(review): unused local, presumably leftover
        # Launch one work group of 15 items.
        preCalc[1, 15](y, yA, yB, numDataPoints)
        self.assertTrue(np.all(y == yA))
        self.assertTrue(np.all(y == yB))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 27.352941 | 63 | 0.590323 |
ace66db7c5c04f32e68047304ab1b4035d64d079 | 1,139 | py | Python | classes/UnidadeFederativa.py | mgfzemor/microdados-enem | 5be3de40d8f2fc6e7ec9555fed6e52b49f554c55 | [
"MIT"
] | null | null | null | classes/UnidadeFederativa.py | mgfzemor/microdados-enem | 5be3de40d8f2fc6e7ec9555fed6e52b49f554c55 | [
"MIT"
] | null | null | null | classes/UnidadeFederativa.py | mgfzemor/microdados-enem | 5be3de40d8f2fc6e7ec9555fed6e52b49f554c55 | [
"MIT"
] | null | null | null | __author__ = "Mario Figueiro Zemor"
__email__ = "mario.figueiro@ufgrs.br"
#import classes.Database as db; remove this comment
import logging
logger = logging.getLogger(__name__);
class UnidadeFederativa():
    """A Brazilian federative unit (state): id, full name, two-letter
    abbreviation and the id of its region."""

    def __init__(self, cod, nome, sigla, codregiao):
        self.cod = cod              # numeric identifier
        self.nome = nome            # full state name
        self.sigla = sigla          # two-letter abbreviation, e.g. 'RS'
        self.codregiao = codregiao  # id of the enclosing region

    @staticmethod
    def getQuery(query):
        """Return the SQL string registered under ``query`` ('INSERT',
        'UPDATE', 'DELETE', 'FIND_ALL'), or None (with a message) if the
        key is unknown."""
        queries = {
            # Fixed: the table name must follow INSERT INTO, before the column
            # list (was "INSERT INTO (nome,sigla,codregiao) uf VALUES ...").
            'INSERT': """INSERT INTO uf (nome,sigla,codregiao) VALUES (%s,%s,%s)""",
            'UPDATE': """""",
            'DELETE': """ """,
            # Fixed: selected from 'formatendnecessidades' (apparent copy/paste
            # from a sibling DAO); this class maps the 'uf' table.
            'FIND_ALL': """SELECT * FROM uf""",
        }
        path = "uf - getQuery - Error:"
        try:
            ret = queries[query]
        except KeyError:
            ret = None
            print("{} Query '{}' not found in dict of Queries!".format(path, query))
        except Exception as e:
            # Fixed: ret could previously be unbound here, raising
            # UnboundLocalError at the return below.
            ret = None
            print("{} {}".format(path, e))
        return ret

    def toString(self):
        """Debug representation: UnidadeFederativa[cod,nome,sigla,codregiao]."""
        string = "UnidadeFederativa[{},{},{},{}]"
        return string.format(self.cod, self.nome, self.sigla, self.codregiao)
| 32.542857 | 93 | 0.556629 |
ace66dd76ab67eef3334742e4aa14fdc20f1bd05 | 19,866 | py | Python | helpers/trainer.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | null | null | null | helpers/trainer.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | null | null | null | helpers/trainer.py | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 | [
"MIT"
] | null | null | null | """Wrapper trainer class for training our models."""
__author__ = 'Atomicoo'
import sys
import os
import os.path as osp
import time
import copy
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from utils.optim import NoamScheduler
from helpers.logger import Logger
from utils.augment import add_random_noise, degrade_some, frame_dropout
from utils.plot import plot_alignment, plot_spectrogram
from utils.utils import get_last_chkpt_path
from datasets.data_loader import Text2MelDataLoader
from datasets.dataset import SpeechDataset
from utils.transform import MinMaxNorm, StandardNorm
from models import DurationExtractor, ParallelText2Mel
from losses import l1_masked, guided_att, masked_huber, masked_ssim, l1_dtw
class Trainer:
    """Generic training harness: owns the model, the (optimizer, scheduler)
    pair and the checkpointing logic; subclasses implement the actual
    train/validate loops and set ``self.loggers`` in their ``fit``.

    Args:
        model: network to train (moved to ``device``).
        dataset: dataset shared by the train/valid data loaders.
        compute_metrics: callable mapping outputs+targets to losses.
        optimizers: ``(optimizer, scheduler)`` tuple.
        checkpoint: optional checkpoint path to resume from.
        device: torch device to train on.
        drivepath: optional extra directory (e.g. Google Drive) that
            receives a copy of every checkpoint.
    """
    def __init__(self,
                 model=None,
                 dataset=None,
                 compute_metrics=None,
                 optimizers=None,
                 checkpoint=None,
                 device=None,
                 drivepath=None
                 ):
        # model, metrics, optim
        self.model = model
        self.compute_metrics = compute_metrics
        self.optimizer, self.scheduler = optimizers
        # dataset
        self.dataset = dataset
        # Google drive path
        self.drivepath = drivepath
        # device
        self.device = device
        self.model.to(self.device)
        print(f'Model sent to {self.device}')
        # helper vars
        self.checkpoint = None
        self.epoch, self.step = 0, 0
        if checkpoint is not None:
            # Fixed: previously called load_checkpoint(self.checkpoint), which
            # is always None at this point; resume from the argument instead.
            self.load_checkpoint(checkpoint)

    def to_device(self, device):
        """Move the model to ``device`` and remember it; returns self."""
        print(f'Sending network to {device}')
        self.device = device
        self.model.to(device)
        return self

    def _checkpoint_state(self):
        # Everything needed to resume training exactly where it left off.
        return {
            'epoch': self.epoch,
            'step': self.step,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict()
        }

    def save_checkpoint(self):
        """Write a checkpoint into the logger's dir (and drivepath, if set),
        removing the previously written checkpoint file.

        Assumes a subclass's fit() has already set ``self.loggers``.
        """
        if self.checkpoint is not None:
            os.remove(self.checkpoint)
        self.checkpoint = osp.join(self.loggers.logdir, f'{time.strftime("%Y-%m-%d")}_chkpt_epoch{self.epoch:03d}.pth')
        state = self._checkpoint_state()
        if self.drivepath is not None:
            torch.save(state,
                       osp.join(self.drivepath, f'{time.strftime("%Y-%m-%d")}_chkpt_epoch{self.epoch:03d}.pth'))
        print("Saving the checkpoint file '%s'..." % self.checkpoint)
        torch.save(state, self.checkpoint)

    def load_checkpoint(self, checkpoint):
        """Restore model/optimizer/scheduler state from ``checkpoint``;
        returns self."""
        checkpoint = torch.load(checkpoint, map_location=self.device)
        self.epoch = checkpoint['epoch']
        self.step = checkpoint['step']
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.scheduler.load_state_dict(checkpoint['scheduler'])
        print("Loaded checkpoint epoch=%d step=%d" % (self.epoch, self.step))
        self.checkpoint = None  # prevent overriding old checkpoint
        return self

    def train_dataloader(self, dataset, batch_size=32,
                         num_workers=0 if sys.platform.startswith('win') else 8):
        """Data loader over the training split."""
        return Text2MelDataLoader(dataset, batch_size=batch_size, mode='train', num_workers=num_workers)

    def valid_dataloader(self, dataset, batch_size=32,
                         num_workers=0 if sys.platform.startswith('win') else 8):
        """Data loader over the validation split."""
        return Text2MelDataLoader(dataset, batch_size=batch_size, mode='valid', num_workers=num_workers)
class DurationTrainer(Trainer):
    """Trains the DurationExtractor (teacher) model: reconstructs mel
    spectrograms from text while learning a guided text/mel alignment."""
    def __init__(self,
                 hparams,
                 adam_lr=0.002,
                 warmup_epochs=30,
                 init_scale=0.25,
                 checkpoint=None,
                 device='cuda',
                 drivepath=None
                 ):
        self.hparams = hparams
        model = DurationExtractor(hparams.duration)
        dataset_root = osp.join(hparams.data.datasets_path, hparams.data.dataset_dir)
        dataset = SpeechDataset(['mels', 'mlens', 'texts', 'tlens'], dataset_root, hparams.text)
        compute_metrics = self.recon_losses
        optimizer = torch.optim.Adam(model.parameters(), lr=adam_lr)
        scheduler = NoamScheduler(optimizer, warmup_epochs, init_scale)
        optimizers = (optimizer, scheduler)
        super(DurationTrainer, self).__init__(
            model=model,
            dataset=dataset,
            compute_metrics=compute_metrics,
            optimizers=optimizers,
            checkpoint=checkpoint,
            device=device,
            # Fixed: was hard-coded to None, silently dropping the caller's value.
            drivepath=drivepath
        )

    def fit(self, batch_size, epochs=1, chkpt_every=10, checkpoint=None, loggers=None):
        """Run the train/validate loop for ``epochs`` epochs, checkpointing
        every ``chkpt_every`` epochs and resuming from the latest checkpoint
        in the log dir when none is given explicitly."""
        self.loggers = loggers or \
            Logger(self.hparams.trainer.logdir, self.hparams.data.dataset, 'duration')
        checkpoint = checkpoint or get_last_chkpt_path(self.loggers.logdir)
        if checkpoint is not None:
            self.load_checkpoint(checkpoint)

        train_loader = self.train_dataloader(copy.deepcopy(self.dataset), batch_size=batch_size)
        valid_loader = self.valid_dataloader(copy.deepcopy(self.dataset), batch_size=batch_size)
        # Spectrograms are min-max normalized for this model.
        self.normalizer = MinMaxNorm(self.hparams.audio.spec_min, self.hparams.audio.spec_max)

        for e in range(self.epoch + 1, self.epoch + 1 + epochs):
            self.epoch = e
            train_losses = self._train_epoch(train_loader)
            valid_losses = self._validate(valid_loader)
            self.scheduler.step()
            if self.epoch % chkpt_every == 0:
                # checkpoint at every `chkpt_every`-th epoch
                self.save_checkpoint()
            self.loggers.log_epoch('train', self.epoch,
                                   {'train_l1_loss': train_losses[1], 'train_ssim_loss': train_losses[2], 'train_att_loss': train_losses[3]})
            self.loggers.log_epoch('valid', self.epoch,
                                   {'valid_l1_loss': valid_losses[1], 'valid_ssim_loss': valid_losses[2], 'valid_att_loss': valid_losses[3]})
            print(f'Epoch {e} | '
                  f'Train - loss: {train_losses[0]}, l1: {train_losses[1]}, ssim: {train_losses[2]}, att: {train_losses[3]}| '
                  f'Valid - loss: {valid_losses[0]}, l1: {valid_losses[1]}, ssim: {valid_losses[2]}, att: {valid_losses[3]}| ')

    def _train_epoch(self, dataloader=None):
        """One training pass; returns (loss, l1, ssim, att) epoch averages."""
        self.model.train()
        ll = len(dataloader)
        running_loss = 0.0
        running_l1_loss = 0.0
        running_ssim_loss = 0.0
        running_att_loss = 0.0
        pbar = tqdm(dataloader, unit="audios", unit_scale=dataloader.batch_size,
                    disable=self.hparams.trainer.disable_progress_bar)
        for it, batch in enumerate(pbar, start=1):
            self.optimizer.zero_grad()
            mels, mlens, texts, tlens = \
                batch['mels'], batch['mlens'].squeeze(1), batch['texts'].long(), batch['tlens'].squeeze(1)
            mels, mlens, texts, tlens = \
                mels.to(self.device), mlens.to(self.device), texts.to(self.device), tlens.to(self.device)
            s = mels = self.normalizer(mels)
            # Spectrogram augmentation: noise, teacher-output feedback, frame dropout.
            if self.hparams.duration.enable_augment:
                s = add_random_noise(mels, self.hparams.duration.noise)
                s = degrade_some(self.model, s, texts, tlens,
                                 self.hparams.duration.feed_ratio, repeat=self.hparams.duration.feed_repeat)
                s = frame_dropout(s, self.hparams.duration.replace_ratio)
            melspecs, attns = self.model((texts, tlens, s, True))
            outputs_and_targets = (melspecs, mels, attns, mlens, tlens)
            loss, l1_loss, ssim_loss, att_loss = self.compute_metrics(outputs_and_targets)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
            self.optimizer.step()
            self.step += 1

            loss, l1_loss, ssim_loss, att_loss = loss.item(), l1_loss.item(), ssim_loss.item(), att_loss.item()
            running_loss += loss
            running_l1_loss += l1_loss
            running_ssim_loss += ssim_loss
            running_att_loss += att_loss

            # update the progress bar
            pbar.set_postfix({
                'l1': "%.05f" % (running_l1_loss / it),
                'ssim': "%.05f" % (running_ssim_loss / it),
                'att': "%.05f" % (running_att_loss / it)
            })

        # Log the last batch's spectrogram/alignment figures for this epoch.
        mels, melspecs, attns = mels.cpu().detach(), melspecs.cpu().detach(), attns.cpu().detach()
        index = -1
        mlen, tlen = mlens[index].item(), tlens[index].item()
        mels_fig = plot_spectrogram(melspecs[index, :mlen, :],
                                    target_spectrogram=mels[index, :mlen, :])
        attn_fig = plot_alignment(attns[index, :mlen, :tlen])
        self.loggers.log_step('train', self.step,
                              {'step_l1_loss': l1_loss, 'step_ssim_loss': ssim_loss, 'step_att_loss': att_loss},
                              {'melspecs': mels_fig, 'attention': attn_fig})

        epoch_loss = running_loss / ll
        epoch_l1_loss = running_l1_loss / ll
        epoch_ssim_loss = running_ssim_loss / ll
        epoch_att_loss = running_att_loss / ll
        return epoch_loss, epoch_l1_loss, epoch_ssim_loss, epoch_att_loss

    def _validate(self, dataloader):
        """One validation pass (no grad); returns (loss, l1, ssim, att) averages."""
        self.model.eval()
        ll = len(dataloader)
        running_loss = 0.0
        running_l1_loss = 0.0
        running_ssim_loss = 0.0
        running_att_loss = 0.0
        pbar = tqdm(dataloader, unit="audios", unit_scale=dataloader.batch_size,
                    disable=self.hparams.trainer.disable_progress_bar)
        for it, batch in enumerate(pbar, start=1):
            mels, mlens, texts, tlens = \
                batch['mels'], batch['mlens'].squeeze(1), batch['texts'].long(), batch['tlens'].squeeze(1)
            mels, mlens, texts, tlens = \
                mels.to(self.device), mlens.to(self.device), texts.to(self.device), tlens.to(self.device)
            mels = self.normalizer(mels)
            with torch.no_grad():
                melspecs, attns = self.model((texts, tlens, mels, True))
                outputs_and_targets = (melspecs, mels, attns, mlens, tlens)
                loss, l1_loss, ssim_loss, att_loss = self.compute_metrics(outputs_and_targets)

            loss, l1_loss, ssim_loss, att_loss = loss.item(), l1_loss.item(), ssim_loss.item(), att_loss.item()
            running_loss += loss
            running_l1_loss += l1_loss
            running_ssim_loss += ssim_loss
            running_att_loss += att_loss

        epoch_loss = running_loss / ll
        epoch_l1_loss = running_l1_loss / ll
        epoch_ssim_loss = running_ssim_loss / ll
        epoch_att_loss = running_att_loss / ll
        return epoch_loss, epoch_l1_loss, epoch_ssim_loss, epoch_att_loss

    def recon_losses(self, outputs_and_targets):
        """Reconstruction losses: masked L1 + masked SSIM + guided attention."""
        melspecs, mels, attns, mlens, tlens = outputs_and_targets
        l1_loss = l1_masked(melspecs, mels, mlens)
        ssim_loss = masked_ssim(melspecs, mels, mlens)
        att_loss = guided_att(attns, mlens, tlens)
        loss = l1_loss + ssim_loss + att_loss
        return loss, l1_loss, ssim_loss, att_loss
class ParallelTrainer(Trainer):
    """Trains the ParallelText2Mel (student) model from precomputed
    phoneme durations (extracted by the teacher)."""
    def __init__(self,
                 hparams,
                 adam_lr=0.002,
                 ground_truth=False,
                 checkpoint=None,
                 device='cuda',
                 drivepath=None
                 ):
        self.hparams = hparams
        model = ParallelText2Mel(hparams.parallel)
        dataset_root = osp.join(hparams.data.datasets_path, hparams.data.dataset_dir)
        # ground_truth selects raw mels over teacher-reconstructed ones.
        dataset = SpeechDataset(['mels-gt' if ground_truth else 'mels', 'mlens', 'texts', 'tlens', 'drns'],
                                dataset_root, hparams.text)
        compute_metrics = self.recon_losses
        optimizer = torch.optim.Adam(model.parameters(), lr=adam_lr)
        scheduler = ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
        optimizers = (optimizer, scheduler)
        super(ParallelTrainer, self).__init__(
            model=model,
            dataset=dataset,
            compute_metrics=compute_metrics,
            optimizers=optimizers,
            checkpoint=checkpoint,
            device=device,
            # Fixed: was hard-coded to None, silently dropping the caller's value.
            drivepath=drivepath
        )

    def fit(self, batch_size, epochs=1, chkpt_every=10, checkpoint=None, duration_file=None, loggers=None):
        """Run the train/validate loop; durations are loaded from
        ``duration_file`` if the dataset does not have them yet."""
        self.loggers = loggers or \
            Logger(self.hparams.trainer.logdir, self.hparams.data.dataset, 'parallel')
        checkpoint = checkpoint or get_last_chkpt_path(self.loggers.logdir)
        if checkpoint is not None:
            self.load_checkpoint(checkpoint)
        if self.dataset.durans is None:
            self.dataset.load_durations(duration_file)

        train_loader = self.train_dataloader(copy.deepcopy(self.dataset), batch_size=batch_size)
        valid_loader = self.valid_dataloader(copy.deepcopy(self.dataset), batch_size=batch_size)
        # Spectrograms are mean/std standardized for this model.
        self.normalizer = StandardNorm(self.hparams.audio.spec_mean, self.hparams.audio.spec_std)

        for e in range(self.epoch + 1, self.epoch + 1 + epochs):
            self.epoch = e
            train_losses = self._train_epoch(train_loader)
            valid_losses = self._validate(valid_loader)
            # Plateau scheduler steps on the validation loss.
            self.scheduler.step(valid_losses[0])
            if self.epoch % chkpt_every == 0:
                # checkpoint at every `chkpt_every`-th epoch
                self.save_checkpoint()
            self.loggers.log_epoch('train', self.epoch,
                                   {'train_l1_loss': train_losses[1], 'train_ssim_loss': train_losses[2], 'train_drn_loss': train_losses[3]})
            self.loggers.log_epoch('valid', self.epoch,
                                   {'valid_l1_loss': valid_losses[1], 'valid_ssim_loss': valid_losses[2], 'valid_drn_loss': valid_losses[3]})
            print(f'Epoch {e} | '
                  f'Train - loss: {train_losses[0]}, l1: {train_losses[1]}, ssim: {train_losses[2]}, drn: {train_losses[3]}| '
                  f'Valid - loss: {valid_losses[0]}, l1: {valid_losses[1]}, ssim: {valid_losses[2]}, drn: {valid_losses[3]}| ')

    def _train_epoch(self, dataloader):
        """One training pass; returns (loss, l1, ssim, drn) epoch averages."""
        self.model.train()
        running_loss = 0.0
        running_l1_loss = 0.0
        running_ssim_loss = 0.0
        running_drn_loss = 0.0
        pbar = tqdm(dataloader, unit="audios", unit_scale=dataloader.batch_size,
                    disable=self.hparams.trainer.disable_progress_bar)
        for it, batch in enumerate(pbar, start=1):
            self.optimizer.zero_grad()
            mels, mlens, texts, tlens, durations = \
                batch['mels'], batch['mlens'].squeeze(1), batch['texts'].long(), batch['tlens'].squeeze(1), batch['drns'].long()
            mels, mlens, texts, tlens, durations = \
                mels.to(self.device), mlens.to(self.device), texts.to(self.device), tlens.to(self.device), durations.to(self.device)
            mels = self.normalizer(mels)
            melspecs, prd_durans = self.model((texts, tlens, durations, 1.0))
            outputs_and_targets = (melspecs, mels, mlens, tlens, durations, prd_durans)
            loss, l1_loss, ssim_loss, drn_loss = self.compute_metrics(outputs_and_targets)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
            self.optimizer.step()
            self.step += 1

            loss, l1_loss, ssim_loss, drn_loss = \
                loss.item(), l1_loss.item(), ssim_loss.item(), drn_loss.item()
            running_loss += loss
            running_l1_loss += l1_loss
            running_ssim_loss += ssim_loss
            running_drn_loss += drn_loss

            # update the progress bar
            pbar.set_postfix({
                'l1': "%.05f" % (running_l1_loss / it),
                'ssim': "%.05f" % (running_ssim_loss / it),
                'drn': "%.05f" % (running_drn_loss / it)
            })

        # Log the last batch's spectrogram figure for this epoch.
        mels, melspecs = mels.cpu().detach(), melspecs.cpu().detach()
        index = -1
        mlen, tlen = mlens[index].item(), tlens[index].item()
        mels_fig = plot_spectrogram(melspecs[index, :mlen, :],
                                    target_spectrogram=mels[index, :mlen, :])
        self.loggers.log_step('train', self.step,
                              {'step_l1_loss': l1_loss, 'step_ssim_loss': ssim_loss, 'step_drn_loss': drn_loss},
                              {'melspecs': mels_fig})

        # `it` ends at len(dataloader), so these are per-batch averages.
        epoch_loss = running_loss / it
        epoch_l1_loss = running_l1_loss / it
        epoch_ssim_loss = running_ssim_loss / it
        epoch_drn_loss = running_drn_loss / it
        return epoch_loss, epoch_l1_loss, epoch_ssim_loss, epoch_drn_loss

    def _validate(self, dataloader):
        """One validation pass; durations are predicted (not fed), so the
        L1 loss is computed with DTW alignment."""
        self.model.eval()
        running_loss = 0.0
        running_l1_loss = 0.0
        running_ssim_loss = 0.0
        running_drn_loss = 0.0
        pbar = tqdm(dataloader, unit="audios", unit_scale=dataloader.batch_size,
                    disable=self.hparams.trainer.disable_progress_bar)
        for it, batch in enumerate(pbar, start=1):
            mels, mlens, texts, tlens, durations = \
                batch['mels'], batch['mlens'].squeeze(1), batch['texts'].long(), batch['tlens'].squeeze(1), batch['drns'].long()
            mels, mlens, texts, tlens, durations = \
                mels.to(self.device), mlens.to(self.device), texts.to(self.device), tlens.to(self.device), durations.to(self.device)
            mels = self.normalizer(mels)
            with torch.no_grad():
                melspecs, prd_durans = self.model((texts, tlens, None, 1.0))
                outputs_and_targets = (melspecs, mels, mlens, tlens, durations, prd_durans)
                loss, l1_loss, ssim_loss, drn_loss = self.compute_metrics(outputs_and_targets, use_dtw=True)

            loss, l1_loss, ssim_loss, drn_loss = \
                loss.item(), l1_loss.item(), ssim_loss.item(), drn_loss.item()
            running_loss += loss
            running_l1_loss += l1_loss
            running_ssim_loss += ssim_loss
            running_drn_loss += drn_loss

        epoch_loss = running_loss / it
        epoch_l1_loss = running_l1_loss / it
        epoch_ssim_loss = running_ssim_loss / it
        epoch_drn_loss = running_drn_loss / it
        return epoch_loss, epoch_l1_loss, epoch_ssim_loss, epoch_drn_loss

    def recon_losses(self, outputs_and_targets, use_dtw=False):
        """Reconstruction + duration losses.

        With use_dtw=True (validation), predicted lengths differ from the
        targets, so L1 is computed over a DTW alignment and SSIM is skipped.
        """
        melspecs, mels, mlens, tlens, durations, prd_durations = outputs_and_targets
        if use_dtw:
            prd_mlens = prd_durations.sum(axis=-1).long()
            l1_loss = l1_dtw(mels, mlens, melspecs, prd_mlens)
            ssim_loss = torch.zeros(1)
            drn_loss = masked_huber(prd_durations, durations.float(), tlens)
            loss = l1_loss + drn_loss
        else:
            l1_loss = l1_masked(melspecs, mels, mlens)
            ssim_loss = masked_ssim(melspecs, mels, mlens)
            # Fixed: clamp out-of-place instead of mutating the caller's batch
            # tensor in place; still needed to prevent log(0).
            durations = durations.clamp(min=1)
            drn_loss = masked_huber(prd_durations, torch.log(durations.float()), tlens)
            loss = l1_loss + ssim_loss + drn_loss
        return loss, l1_loss, ssim_loss, drn_loss
| 42.539615 | 141 | 0.599819 |
ace66e3eda3ff568f7b8785a6692c1a5971e2f3e | 97 | py | Python | src/dll.py | weijiancai/depressive | 28a3730066d717ca7c925d23f6f3d70d066a4b0e | [
"Apache-2.0"
] | null | null | null | src/dll.py | weijiancai/depressive | 28a3730066d717ca7c925d23f6f3d70d066a4b0e | [
"Apache-2.0"
] | null | null | null | src/dll.py | weijiancai/depressive | 28a3730066d717ca7c925d23f6f3d70d066a4b0e | [
"Apache-2.0"
] | null | null | null | import ctypes
# Load the Win32 user32 library via ctypes and pop a native message box
# (Windows-only; the string is a Chinese Year-of-the-Dog joke).
dll = ctypes.windll.LoadLibrary('user32.dll')
# NOTE(review): the trailing "| 24.25 | 45 | 0.742268" tokens below look like
# dataset metadata fused onto this line rather than real code -- confirm
# against the original file before running.
dll.MessageBoxA(0, "看我闪瞎你的狗年", 0, 0) | 24.25 | 45 | 0.742268
ace66f21e972cb88e188cd140cb5be29cd061648 | 11,276 | py | Python | leetcode_python/Stack/basic-calculator.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Stack/basic-calculator.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Stack/basic-calculator.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
224. Basic Calculator
Hard
Given a string s representing a valid expression, implement a basic calculator to evaluate it, and return the result of the evaluation.
Note: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().
Example 1:
Input: s = "1 + 1"
Output: 2
Example 2:
Input: s = " 2-1 + 2 "
Output: 3
Example 3:
Input: s = "(1+(4+5+2)-3)+(6+8)"
Output: 23
Constraints:
1 <= s.length <= 3 * 105
s consists of digits, '+', '-', '(', ')', and ' '.
s represents a valid expression.
'+' is not used as a unary operation (i.e., "+1" and "+(2 + 3)" is invalid).
'-' could be used as a unary operation (i.e., "-1" and "-(2 + 3)" is valid).
There will be no two consecutive operators in the input.
Every number and running calculation will fit in a signed 32-bit integer.
"""
# V0
# IDEA : STACK
class Solution(object):
def calculate(self, s):
res = 0
num = 0
sign = 1
stack = [1]
s += "+"
for i in s:
if i.isdigit():
num = 10*num + int(i)
elif i in "+-":
res += num * sign * stack[-1]
#sign = 1 if i=="+" else -1
if i == "+":
sign = 1
else:
sign = -1
num = 0
elif i == "(":
stack.append(sign * stack[-1])
sign = 1
elif i == ")":
res += num * sign * stack[-1]
num = 0
stack.pop(-1)
return res
# V0'
# IDEA : STACK
# https://leetcode.com/problems/basic-calculator/solution/
class Solution:
    def calculate(self, s):
        """Evaluate a basic '+'/'-' expression with parentheses.

        On '(' the partial result and pending sign are saved as a frame;
        on ')' the inner result is folded back into the saved frame.
        """
        frames = []  # (partial_result, sign_before_paren) per open '('
        result, operand, sign = 0, 0, 1
        for ch in s:
            if ch.isdigit():
                operand = 10 * operand + int(ch)
            elif ch == '+' or ch == '-':
                result += sign * operand
                operand = 0
                sign = 1 if ch == '+' else -1
            elif ch == '(':
                frames.append((result, sign))
                result, sign = 0, 1
            elif ch == ')':
                result += sign * operand
                operand = 0
                outer_result, outer_sign = frames.pop()
                result = outer_result + outer_sign * result
        # flush whatever operand is still pending
        return result + sign * operand
# V1
# https://leetcode.com/problems/basic-calculator/discuss/62418/Python-with-stack
# IDEA : STACK
class Solution(object):
    def calculate(self, s):
        """One-pass evaluation; a stack tracks the sign context of each
        level of nested parentheses."""
        outcome = 0
        current = 0
        pending_sign = 1
        sign_stack = [1]
        for token in s + "+":  # sentinel '+' commits the last number
            if token.isdigit():
                current = current * 10 + int(token)
                continue
            if token in "+-":
                outcome += current * pending_sign * sign_stack[-1]
                pending_sign = 1 if token == "+" else -1
                current = 0
            elif token == "(":
                sign_stack.append(pending_sign * sign_stack[-1])
                pending_sign = 1
            elif token == ")":
                outcome += current * pending_sign * sign_stack[-1]
                current = 0
                sign_stack.pop()
        return outcome
# V1
# IDEA : Stack and String Reversal
# https://leetcode.com/problems/basic-calculator/solution/
class Solution:
    """Basic calculator evaluated by scanning the input right-to-left and
    reducing sub-expressions with a stack of tokens (string-reversal approach)."""

    def evaluate_expr(self, stack):
        """Pop and evaluate tokens off `stack` until the matching ')' (or the
        stack is exhausted); returns the numeric value of that sub-expression."""
        # If stack is empty or the expression starts with
        # a symbol, then append 0 to the stack.
        # i.e. [1, '-', 2, '-'] becomes [1, '-', 2, '-', 0]
        if not stack or type(stack[-1]) == str:
            stack.append(0)
        res = stack.pop()
        # Evaluate the expression till we get corresponding ')'
        while stack and stack[-1] != ')':
            sign = stack.pop()
            if sign == '+':
                res += stack.pop()
            else:
                res -= stack.pop()
        return res

    def calculate(self, s: str) -> int:
        """Evaluate `s` by pushing tokens in reverse order, reducing each
        parenthesized group as its '(' is reached."""
        stack = []
        n, operand = 0, 0
        for i in range(len(s) - 1, -1, -1):
            ch = s[i]
            if ch.isdigit():
                # Forming the operand - in reverse order, so each new digit is
                # a higher power of ten.
                operand = (10**n * int(ch)) + operand
                n += 1
            elif ch != " ":
                if n:
                    # Save the operand on the stack
                    # As we encounter some non-digit.
                    stack.append(operand)
                    n, operand = 0, 0
                if ch == '(':
                    res = self.evaluate_expr(stack)
                    stack.pop()  # discard the ')' that closed this group
                    # Append the evaluated result to the stack.
                    # This result could be of a sub-expression within the parenthesis.
                    stack.append(res)
                # For other non-digits just push onto the stack.
                else:
                    stack.append(ch)
        # Push the last operand to stack, if any.
        if n:
            stack.append(operand)
        # Evaluate any left overs in the stack.
        return self.evaluate_expr(stack)
# V1
# IDEA : Stack and No String Reversal
# https://leetcode.com/problems/basic-calculator/solution/
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate the expression left-to-right, saving state at each '('."""
        stack = []
        acc = 0   # running total at the current nesting level
        num = 0   # operand currently being parsed
        sgn = 1   # sign applied to the next operand
        for c in s:
            if c.isdigit():
                num = num * 10 + int(c)
            elif c in '+-':
                acc += sgn * num
                num = 0
                sgn = -1 if c == '-' else 1
            elif c == '(':
                # save outer state: total first, then the sign before '('
                stack.append(acc)
                stack.append(sgn)
                acc, sgn = 0, 1
            elif c == ')':
                acc += sgn * num
                num = 0
                acc *= stack.pop()   # sign that preceded '('
                acc += stack.pop()   # total accumulated before '('
        return acc + sgn * num
# V1'
# https://leetcode.com/problems/basic-calculator/discuss/196363/Python-solution
# IDEA : STACK
class Solution:
    def calculate(self, s):
        """Evaluate '+'/'-' expressions by collapsing each parenthesized
        group on the stack into a single number string, then reducing the
        remaining flat token list.

        NOTE(review): `prev` is only bound once a digit token has been popped;
        this is fine for well-formed input (groups never start with an
        operator next to ')'), but empty parentheses or malformed input would
        raise NameError -- confirm inputs are always valid expressions.
        """
        res = 0
        stack = []
        for c in s:
            if c == " ":
                continue
            elif c == "(":
                stack.append(c)
            elif c.isdigit():
                # extend a multi-digit number already on the stack top
                if stack and stack[-1].isdigit():
                    tmp = stack.pop()
                    stack.append(tmp+c)
                else:
                    stack.append(c)
            elif c == ")":
                # reduce the group back to its '(' by popping in reverse order
                summ = 0
                tmp = stack.pop()
                while tmp != "(":
                    if tmp == "+":
                        if prev[-1].isdigit():
                            summ += int(prev)
                    elif tmp == "-":
                        if prev[-1].isdigit():
                            summ -= int(prev)
                    prev = tmp
                    tmp = stack.pop()
                # the first operand of the group carries an implicit '+'
                if prev.isdigit():
                    summ += int(prev)
                # push the collapsed value back as a string token
                stack.append(str(summ))
            else:
                stack.append(c)
        # reduce the remaining flat expression the same way
        res = 0
        while stack:
            tmp = stack.pop()
            if tmp == "+":
                if prev[-1].isdigit():
                    res += int(prev)
            elif tmp == "-":
                if prev[-1].isdigit():
                    res -= int(prev)
            prev = tmp
        # leading operand (checked via prev[-1] since it may be negative, e.g. '-5')
        if prev[-1].isdigit():
            res += int(prev)
        return res
# V1''
# https://leetcode.com/problems/basic-calculator/discuss/62344/Easy-18-lines-C%2B%2B-16-lines-Python
# IDEA : STACK
class Solution:
    def calculate(self, s):
        """Single scan: `pending` holds, per position, the sign that will be
        applied to each upcoming number; every operand pops one sign."""
        total = 0
        pending = [1, 1]  # bottom sentinel + sign for the first operand
        pos, n = 0, len(s)
        while pos < n:
            c = s[pos]
            if c.isdigit():
                # consume the whole multi-digit number
                end = pos
                while end < n and s[end].isdigit():
                    end += 1
                total += pending.pop() * int(s[pos:end])
                pos = end
            else:
                if c == '-':
                    pending.append(-pending[-1])
                elif c == '+' or c == '(':
                    pending.append(pending[-1])
                elif c == ')':
                    pending.pop()
                pos += 1
        return total
# V1''''
# https://leetcode.com/problems/basic-calculator/discuss/62483/AC-Python-Solution
# IDEA : STACK
class Solution:
    def calculate(self, s):
        """Evaluate by normalizing the string, then collapsing each ')' group.

        The whole input is wrapped in '+( ... )' so the final answer is the
        single collapsed value left on the stack. Stack elements are a mix of
        str tokens and int group results.

        NOTE(review): `stack[-1] += i` and `stack[-1][-1]` assume a digit never
        directly follows a collapsed int group (true for valid expressions,
        where ')' is followed by an operator or ')') -- confirm inputs are
        always well-formed.
        """
        s = '+(+' + s + ')'
        s = s.replace('+-', '-').replace('++', '+') # for the corner case '-5', '+5'
        stack = []
        for i in s:
            if i == ')':
                # collapse the group back to its '(' and apply the sign before it
                total = 0
                while stack[-1] != '(':
                    total += int(stack.pop())
                stack.pop()
                sign = 1 if stack.pop() == '+' else -1
                stack.append(sign * total)
            elif i.isdigit() and stack[-1][-1] in '+-0123456789':
                # extend the number (or signed number) on the stack top
                stack[-1] += i
            elif i != ' ':
                stack.append(i)
        return stack[0]
# V2 | 29.673684 | 135 | 0.448652 |
ace66f7794b02f5220101895b0eacd756c9f5679 | 4,087 | py | Python | corehq/ex-submodules/casexml/apps/stock/models.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | corehq/ex-submodules/casexml/apps/stock/models.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | corehq/ex-submodules/casexml/apps/stock/models.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from casexml.apps.stock import const
from decimal import Decimal
class StockReport(models.Model):
form_id = models.CharField(max_length=100, db_index=True)
date = models.DateTimeField(db_index=True)
type = models.CharField(max_length=20) # currently "balance" or "transfer"
# todo: there are properties like these that could be really useful for queries
# and reports - should decide which ones we want to add if any.
# they are all redundant via form_id linkage
# server_date = models.DateTimeField(default=datetime.utcnow, db_index=True)
# user_id = models.CharField(max_length=100, db_index=True)
def __unicode__(self):
return '{type} on {date} ({form})'.format(type=self.type, date=self.date, form=self.form_id)
class StockTransaction(models.Model):
    """One line item of a StockReport: a stock change (or snapshot) for a
    single product in a single section of a case."""
    report = models.ForeignKey(StockReport)
    section_id = models.CharField(max_length=100, db_index=True)

    # for now, a supply point or requisition case
    case_id = models.CharField(max_length=100, db_index=True)
    product_id = models.CharField(max_length=100, db_index=True)

    # currently supported/expected: 'stockonhand', 'receipts', 'consumption'
    type = models.CharField(max_length=20)
    # e.g. 'loss', 'transfer', 'inferred'
    subtype = models.CharField(max_length=20, null=True, blank=True)

    # often one of these two will be derived based on the other one
    quantity = models.DecimalField(null=True, max_digits=20, decimal_places=5)
    stock_on_hand = models.DecimalField(max_digits=20, decimal_places=5)

    def __unicode__(self):
        return '{type} of {quantity} to {soh} (case: {case}, product: {product}, section id: {section_id})'.format(
            type=self.type, quantity=self.quantity, soh=self.stock_on_hand,
            case=self.case_id, product=self.product_id, section_id=self.section_id,
        )

    def get_previous_transaction(self):
        """Return the most recent peer transaction before this one (same
        case/section/product, excluding self), or implicitly None."""
        siblings = StockTransaction._peer_qs(self.case_id, self.section_id, self.product_id).exclude(pk=self.pk)
        if siblings.count():
            return siblings[0]

    @classmethod
    def latest(cls, case_id, section_id, product_id):
        """Return the newest transaction for the given triple, or None."""
        relevant = cls._peer_qs(case_id, section_id, product_id)
        if relevant.count():
            return relevant.select_related()[0]
        return None

    @classmethod
    def _peer_qs(self, case_id, section_id, product_id):
        # NOTE(review): first parameter is named 'self' but this is a
        # classmethod, so it actually receives the class object.
        # Peers are ordered newest first: by report date, then insertion order.
        return StockTransaction.objects.filter(
            case_id=case_id, product_id=product_id, section_id=section_id).order_by('-report__date', '-pk')
class DocDomainMapping(models.Model):
    """
    Used to store the relationship between a doc and the
    domain it belongs to for efficient lookup
    """
    # document id; primary key, so a lookup by doc is a single indexed fetch
    doc_id = models.CharField(max_length=100, db_index=True, primary_key=True)
    doc_type = models.CharField(max_length=100, db_index=True)
    domain_name = models.CharField(max_length=100, db_index=True)
# signal catchers
@receiver(pre_save, sender=StockTransaction)
def create_reconciliation_transaction(sender, instance, *args, **kwargs):
    """On first save of a stock-on-hand transaction, record an inferred
    receipts/consumption transaction covering the change since the
    previous stock-on-hand report."""
    is_new = instance.pk is None
    if not is_new or instance.type != const.TRANSACTION_TYPE_STOCKONHAND:
        return
    previous_transaction = instance.get_previous_transaction()
    # only soh reports that have changed the stock create inferred transactions
    if not previous_transaction:
        return
    if previous_transaction.stock_on_hand == instance.stock_on_hand:
        return
    delta = instance.stock_on_hand - Decimal(previous_transaction.stock_on_hand)
    StockTransaction.objects.create(
        report=instance.report,
        case_id=instance.case_id,
        section_id=instance.section_id,
        product_id=instance.product_id,
        type=const.TRANSACTION_TYPE_CONSUMPTION if delta < 0 else const.TRANSACTION_TYPE_RECEIPTS,
        quantity=delta,
        stock_on_hand=instance.stock_on_hand,
        subtype=const.TRANSACTION_SUBTYPE_INFERRED,
    )
| 43.478723 | 115 | 0.71079 |
ace673c506be2d30ebb381766dd2aa35c1c789d0 | 262 | py | Python | app/routes/dashboard.py | Alec74/python-newsfeed | 91ad7a57e3a7b29f6c21819ec20a3364193ae118 | [
"MIT"
] | null | null | null | app/routes/dashboard.py | Alec74/python-newsfeed | 91ad7a57e3a7b29f6c21819ec20a3364193ae118 | [
"MIT"
] | 6 | 2021-09-30T20:50:18.000Z | 2021-10-04T12:54:22.000Z | app/routes/dashboard.py | Alec74/python-newsfeed | 91ad7a57e3a7b29f6c21819ec20a3364193ae118 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
# Blueprint grouping all dashboard pages under the /dashboard URL prefix;
# registered with the application elsewhere.
bp = Blueprint('dashboard', __name__, url_prefix='/dashboard')
@bp.route('/')
def dash():
    """Render the dashboard landing page."""
    return render_template('dashboard.html')
def edit(id):
return render_template('edit-post.html')
| 21.833333 | 62 | 0.721374 |
ace67467158bc7854fc43b19297ca49609ca8a5f | 4,119 | py | Python | solving/bnf.py | dizys/nyu-ai-lab-2 | 22f471c6359b3af914e583021a422fcddd7c40b6 | [
"MIT"
] | null | null | null | solving/bnf.py | dizys/nyu-ai-lab-2 | 22f471c6359b3af914e583021a422fcddd7c40b6 | [
"MIT"
] | null | null | null | solving/bnf.py | dizys/nyu-ai-lab-2 | 22f471c6359b3af914e583021a422fcddd7c40b6 | [
"MIT"
] | null | null | null | from typing import Dict, List, Tuple
from .bnf_parser import bnf_text_to_trees, BNFParserError
def text_to_cnf_rep(text: str) -> List[List[Tuple[str, bool]]]:
    """
    Converts a text to a CNF representation.

    Each non-empty line is one clause (a disjunction of literals); atoms are
    whitespace-separated and may be negated with a leading '!', '~' or '¬'.
    Atoms within a clause are sorted by name. Blank interior lines are
    skipped: previously they produced empty clauses, which in CNF semantics
    make the whole formula unsatisfiable.

    :param text: The text to convert.
    :return: The CNF representation of the text.
    """
    cnf_rep = []
    for line in text.strip().split('\n'):
        sentence: List[Tuple[str, bool]] = []
        for atom in line.strip().split(' '):
            atom = atom.strip()
            if atom == '':
                continue
            if atom[0] == '!' or atom[0] == '~' or atom[0] == '¬':
                sentence.append((atom[1:], False))
            else:
                sentence.append((atom, True))
        if not sentence:
            # Blank line: skip rather than appending an empty clause.
            continue
        sentence.sort(key=lambda x: x[0])
        cnf_rep.append(sentence)
    return cnf_rep
def cnf_rep_to_text(cnf_rep: List[List[Tuple[str, bool]]]) -> str:
    """
    Converts a CNF representation to a text.

    Negated atoms are rendered with a '!' prefix; atoms within a clause
    are space-separated and clauses are newline-separated.

    :param cnf_rep: The CNF representation to convert.
    :return: The text representation of the CNF.
    """
    rendered_lines = []
    for sentence in cnf_rep:
        parts = [name if positive else '!' + name for name, positive in sentence]
        rendered_lines.append(' '.join(parts))
    return '\n'.join(rendered_lines)
def print_rules(rules: List[List[Tuple[str, bool]]]):
    """Print each parsed rule on its own indented line."""
    for rule in rules:
        print(f" -  {rule}")
def remove_sentence_with_atom_and_its_negation(cnf_rep: List[List[Tuple[str, bool]]]) -> List[List[Tuple[str, bool]]]:
    """Drop tautological clauses: any clause containing both an atom and
    its negation is always true, so it can be removed from the CNF."""
    kept: List[List[Tuple[str, bool]]] = []
    for sentence in cnf_rep:
        positive = {name for name, bound in sentence if bound}
        negative = {name for name, bound in sentence if not bound}
        if positive.isdisjoint(negative):
            kept.append(sentence)
    return kept
def print_cnf_rep(cnf_rep: List[List[Tuple[str, bool]]]):
    """Print each clause on its own line, prefixing negated atoms with '¬'."""
    for sentence in cnf_rep:
        rendered = [name if bound else '¬' + name for name, bound in sentence]
        print(' - ', ' '.join(rendered))
def bnf_to_cnf_rep(bnf: str, verbose=False) -> List[List[Tuple[str, bool]]]:
    """
    Converts a BNF to a CNF representation.

    :param bnf: The BNF to convert.
    :param verbose: If True, print the intermediate rules after every step.
    :return: The CNF representation of the BNF.
    """
    rules = bnf_text_to_trees(bnf)
    if verbose:
        print('Parsed rules:')
        print_rules(rules)

    # Steps 1-4 each apply one in-place transformation to every rule tree.
    transformations = [
        ('eliminate_iff', '\nStep 1. Eliminating IFF "<=>":'),
        ('eliminate_implies', '\nStep 2. Eliminating IMPLIES "=>":'),
        ('push_not_to_atom_level', '\nStep 3. Pushing NOT to atom level:'),
        ('apply_distribution_rule', '\nStep 4. Applying distribution rule:'),
    ]
    for method_name, banner in transformations:
        for rule in rules:
            getattr(rule, method_name)()
        if verbose:
            print(banner)
            print_rules(rules)

    # Step 5: flatten each rule's top-level conjunction into CNF clauses.
    cnf_rep: List[List[Tuple[str, bool]]] = []
    for rule in rules:
        cnf_rep.extend(rule.to_cnf_rep())
    if verbose:
        print('\nStep 5. Separate top-level conjunctions and convert to CNF:')
        print_cnf_rep(cnf_rep)

    # Step 6: drop tautological clauses.
    cnf_rep = remove_sentence_with_atom_and_its_negation(cnf_rep)
    if verbose:
        print('\nStep 6. Remove any sentence that includes an atom and its negation:')
        print_cnf_rep(cnf_rep)
    return cnf_rep
| 29.007042 | 118 | 0.574897 |
ace675b405b7dc830a4fce3b6d1286504825377f | 2,514 | py | Python | srf/parse_ips.py | duo-labs/holidayhack-2019 | 5175bb45ff667298bb63c6973bef3793a0a6f539 | [
"Apache-2.0"
] | null | null | null | srf/parse_ips.py | duo-labs/holidayhack-2019 | 5175bb45ff667298bb63c6973bef3793a0a6f539 | [
"Apache-2.0"
] | null | null | null | srf/parse_ips.py | duo-labs/holidayhack-2019 | 5175bb45ff667298bb63c6973bef3793a0a6f539 | [
"Apache-2.0"
] | null | null | null | import json
# Load the seed blocklist, skipping blanks, comments, duplicates and
# internal 10.x addresses.
with open('initial_ips.txt') as blocklist:
    ips = []
    for ip in blocklist.readlines():
        ip = ip.strip('"\n')
        if not ip or ip in ips or ip.startswith('10.') or ip.startswith('#'):
            continue
        ips.append(ip)

# print(', '.join(sorted(ips)))
# print('Loaded {} IP addresses'.format(len(ips)))

# First pass over the HTTP logs: collect the user agents used by seed IPs.
uas = {}
with open('http.json', 'r') as logs:
    for log in logs.readlines():
        parsed = json.loads(log)
        ip = parsed['id.orig_h']
        ua = parsed['user_agent']
        if ip not in ips:
            continue
        if ua not in uas:
            uas[ua] = []

# Second pass: gather every IP address that used one of those user agents.
with open('http.json', 'r') as logs:
    for log in logs.readlines():
        parsed = json.loads(log)
        ip = parsed['id.orig_h']
        ua = parsed['user_agent']
        if ua not in uas:
            continue
        # Only add unique IP addresses
        if ip not in uas[ua]:
            uas[ua].append(ip)
        # Let's also keep track of the total IPs
        if ip not in ips:
            ips.append(ip)

for ua, uaips in uas.items():
    print('{}\t{}'.format(len(uaips), ua))

# Double-check the "maybes": user agents that may also belong to innocent
# users. IPs with these UAs and no attack indicators are un-blocked.
maybe_uas = [
    'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.0.5) Gecko/2008121711 Ubuntu/9.04 (jaunty) Firefox/3.0.5',
    'Mozilla/5.0 (Windows; U; Windows NT 5.2; sk; rv:1.8.1.15) Gecko/20080623 Firefox/2.0.0.15',
    'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_4_11; fr) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.22',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.8) Gecko/20071004 Firefox/2.0.0.8 (Debian-2.0.0.8-1)'
]

with open('http.json', 'r') as logs:
    for log in logs.readlines():
        parsed = json.loads(log)
        uri = parsed['uri']
        ip = parsed['id.orig_h']
        ua = parsed['user_agent']
        if ua not in maybe_uas:
            continue
        if '<script>' not in uri and \
                '<script>' not in parsed['host'] and \
                '/etc/passwd' not in uri and \
                'UNION' not in uri and \
                'UNION' not in ua and \
                '1=1' not in parsed['username']:
            print('Removing {}\t{}\t{}'.format(ip, uri, ua))
            # Guard the removal: the same benign IP can appear on several
            # log lines, and a second remove() would raise ValueError.
            if ip in ips:
                ips.remove(ip)

with open('ips.txt', 'w') as ip_file:
    for ip in sorted(ips):
        ip_file.write(ip + '\n')

print(', '.join(sorted(ips)))
print('IP addresses to block: {}'.format(len(ips)))
ace675bb98d3b79da1d068b876bfbeff7e10349d | 24 | py | Python | fdtool/modules/dbschema/__init__.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 13 | 2019-03-22T13:30:04.000Z | 2022-02-01T04:46:44.000Z | fdtool/modules/dbschema/__init__.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 3 | 2020-07-01T11:17:40.000Z | 2022-02-13T11:20:34.000Z | fdtool/modules/dbschema/__init__.py | dancps/FDTool | 0958f79fccbb3bb7d55cf9031ee4bd411e9c9b5a | [
"CC0-1.0"
] | 11 | 2018-07-02T23:46:31.000Z | 2021-12-14T12:29:38.000Z | __all__ = ['dbschema']
| 12 | 23 | 0.625 |
ace6770bf7722e6ccc42176430765c5b093ad3c4 | 7,677 | py | Python | cdqa/pipeline/cdqa_sklearn.py | SunYanCN/cdQA | 40387925db8f0da44a5bd2fe549cde675a92d479 | [
"Apache-2.0"
] | 593 | 2019-06-25T17:12:07.000Z | 2022-03-21T07:40:04.000Z | cdqa/pipeline/cdqa_sklearn.py | SunYanCN/cdQA | 40387925db8f0da44a5bd2fe549cde675a92d479 | [
"Apache-2.0"
] | 178 | 2019-06-25T14:12:49.000Z | 2021-12-16T07:06:20.000Z | cdqa/pipeline/cdqa_sklearn.py | SunYanCN/cdQA | 40387925db8f0da44a5bd2fe549cde675a92d479 | [
"Apache-2.0"
] | 202 | 2019-06-26T09:18:43.000Z | 2022-03-30T00:35:27.000Z | import joblib
import warnings
import pandas as pd
import numpy as np
import torch
from sklearn.base import BaseEstimator
from cdqa.retriever import TfidfRetriever, BM25Retriever
from cdqa.utils.converters import generate_squad_examples
from cdqa.reader import BertProcessor, BertQA
# Maps the retriever-name strings accepted by QAPipeline to their classes.
RETRIEVERS = {"bm25": BM25Retriever, "tfidf": TfidfRetriever}
class QAPipeline(BaseEstimator):
    """
    A scikit-learn implementation of the whole cdQA pipeline

    Parameters
    ----------
    reader: str (path to .joblib) or .joblib object of an instance of BertQA (BERT model with sklearn wrapper), optional

    retriever: "bm25" or "tfidf"
        The type of retriever

    retrieve_by_doc: bool (default: False). If True the Retriever ranks whole
    documents, otherwise it ranks individual paragraphs.

    kwargs: kwargs for BertQA(), BertProcessor(), TfidfRetriever() and BM25Retriever()
        Please check documentation for these classes

    Examples
    --------
    >>> from cdqa.pipeline import QAPipeline
    >>> qa_pipeline = QAPipeline(reader='bert_qa_squad_vCPU-sklearn.joblib')
    >>> qa_pipeline.fit_retriever(df=df)
    >>> prediction = qa_pipeline.predict(query='When BNP Paribas was created?')

    >>> from cdqa.pipeline import QAPipeline
    >>> qa_pipeline = QAPipeline()
    >>> qa_pipeline.fit_reader('train-v1.1.json')
    >>> qa_pipeline.fit_retriever(df=df)
    >>> prediction = qa_pipeline.predict(X='When BNP Paribas was created?')

    """

    def __init__(self, reader=None, retriever="bm25", retrieve_by_doc=False, **kwargs):

        if retriever not in RETRIEVERS:
            raise ValueError(
                "You provided a type of retriever that is not supported. "
                + "Please provide a retriver in the following list: "
                + str(list(RETRIEVERS.keys()))
            )
        retriever_class = RETRIEVERS[retriever]

        # Route each keyword argument to the component whose constructor
        # accepts it (reader, processors or retriever).
        kwargs_bertqa = {
            key: value
            for key, value in kwargs.items()
            if key in BertQA.__init__.__code__.co_varnames
        }

        kwargs_processor = {
            key: value
            for key, value in kwargs.items()
            if key in BertProcessor.__init__.__code__.co_varnames
        }

        kwargs_retriever = {
            key: value
            for key, value in kwargs.items()
            if key in retriever_class.__init__.__code__.co_varnames
        }

        # The reader may be a ready BertQA instance, a path to a joblib
        # dump, or None (a fresh, untrained BertQA).
        if not reader:
            self.reader = BertQA(**kwargs_bertqa)
        elif type(reader) == str:
            self.reader = joblib.load(reader)
        else:
            self.reader = reader

        self.processor_train = BertProcessor(is_training=True, **kwargs_processor)
        self.processor_predict = BertProcessor(is_training=False, **kwargs_processor)
        self.retriever = retriever_class(**kwargs_retriever)
        self.retrieve_by_doc = retrieve_by_doc

        if torch.cuda.is_available():
            self.cuda()

    def fit_retriever(self, df: pd.DataFrame = None):
        """ Fit the QAPipeline retriever to a list of documents in a dataframe.

        Parameters
        ----------
        df: pandas.Dataframe
            Dataframe with the following columns: "title", "paragraphs"
        """

        if self.retrieve_by_doc:
            # One retrievable unit per document: join its paragraphs.
            self.metadata = df
            self.metadata["content"] = self.metadata["paragraphs"].apply(
                lambda x: " ".join(x)
            )
        else:
            # One retrievable unit per paragraph.
            self.metadata = self._expand_paragraphs(df)

        self.retriever.fit(self.metadata)
        return self

    def fit_reader(self, data=None):
        """ Fit the QAPipeline reader to an annotated dataset.

        Parameters
        ----------
        data: dict or str-path to json file
            Annotated dataset in squad-like format for Reader training
        """
        train_examples, train_features = self.processor_train.fit_transform(data)
        self.reader.fit(X=(train_examples, train_features))
        return self

    def predict(
        self,
        query: str = None,
        n_predictions: int = None,
        retriever_score_weight: float = 0.35,
        return_all_preds: bool = False,
    ):
        """ Compute prediction of an answer to a question

        Parameters
        ----------
        query: str
            Sample (question) to perform a prediction on
        n_predictions: int or None (default: None).
            Number of returned predictions. If None, only one prediction is return
        retriever_score_weight: float (default: 0.35).
            The weight of retriever score in the final score used for prediction.
            Given retriever score and reader average of start and end logits, the final score used for ranking is:

            final_score = retriever_score_weight * retriever_score + (1 - retriever_score_weight) * (reader_avg_logit)

        return_all_preds: boolean (default: False)
            whether to return a list of all predictions done by the Reader or not

        Returns
        -------
        if return_all_preds is False:
            prediction: tuple (answer, title, paragraph, score/logit)
        if return_all_preds is True:
            List of dictionnaries with all metadada of all answers outputted by the Reader
            given the question.

        """
        if not isinstance(query, str):
            raise TypeError(
                "The input is not a string. Please provide a string as input."
            )
        # Fixed validation: the previous check
        # (`isinstance(...) or n_predictions is None or n_predictions < 1`)
        # accepted non-positive integers (and floats < 1) and could itself
        # raise on non-comparable types. Accept only None or an int >= 1.
        if not (
            n_predictions is None
            or (isinstance(n_predictions, int) and n_predictions >= 1)
        ):
            raise TypeError("n_predictions should be a positive Integer or None")
        best_idx_scores = self.retriever.predict(query)

        squad_examples = generate_squad_examples(
            question=query,
            best_idx_scores=best_idx_scores,
            metadata=self.metadata,
            retrieve_by_doc=self.retrieve_by_doc,
        )
        examples, features = self.processor_predict.fit_transform(X=squad_examples)
        prediction = self.reader.predict(
            X=(examples, features),
            n_predictions=n_predictions,
            retriever_score_weight=retriever_score_weight,
            return_all_preds=return_all_preds,
        )
        return prediction

    def to(self, device):
        """ Send reader to CPU if device=='cpu' or to GPU if device=='cuda'
        """
        if device not in ("cpu", "cuda"):
            raise ValueError("Attribute device should be 'cpu' or 'cuda'.")
        self.reader.model.to(device)
        self.reader.device = torch.device(device)
        return self

    def cpu(self):
        """ Send reader to CPU
        """
        self.reader.model.cpu()
        self.reader.device = torch.device("cpu")
        return self

    def cuda(self):
        """ Send reader to GPU
        """
        self.reader.model.cuda()
        self.reader.device = torch.device("cuda")
        return self

    def dump_reader(self, filename):
        """ Dump reader model to a .joblib object
        """
        # Move to CPU first so the dump is loadable on CPU-only machines,
        # then restore the GPU placement if one is available.
        self.cpu()
        joblib.dump(self.reader, filename)
        if torch.cuda.is_available():
            self.cuda()

    @staticmethod
    def _expand_paragraphs(df):
        """Explode the 'paragraphs' list column so each paragraph becomes
        its own row, duplicating the other columns."""
        # Snippet taken from: https://stackoverflow.com/a/48532692/11514226
        lst_col = "paragraphs"
        df = pd.DataFrame(
            {
                col: np.repeat(df[col].values, df[lst_col].str.len())
                for col in df.columns.drop(lst_col)
            }
        ).assign(**{lst_col: np.concatenate(df[lst_col].values)})[df.columns]
        df["content"] = df["paragraphs"]
        return df.drop("paragraphs", axis=1)
| 32.392405 | 120 | 0.614693 |
ace677e54e9b063336162739dc3b239e95b76789 | 1,611 | py | Python | weather.py | zhhuan/beforewake | 06b1017bb800639a8fc3f977cacee54fd496293d | [
"MIT"
] | 1 | 2020-07-08T09:23:12.000Z | 2020-07-08T09:23:12.000Z | weather.py | zhhuan/beforewake | 06b1017bb800639a8fc3f977cacee54fd496293d | [
"MIT"
] | null | null | null | weather.py | zhhuan/beforewake | 06b1017bb800639a8fc3f977cacee54fd496293d | [
"MIT"
] | null | null | null | import json
import pymysql.cursors
from urllib.request import urlopen, Request
def get_status():
    """Fetch the current Beijing weather from weather.com.cn and persist it."""
    req = Request('http://d1.weather.com.cn/sk_2d/101010100.html?_=1510475795976')
    # The endpoint checks the Referer header, so send a matching one.
    req.add_header('Referer', 'http://www.weather.com.cn/weather1dn/101010100.shtml')
    payload = urlopen(req).read().decode('utf8')
    # The first 13 characters are a non-JSON prefix (presumably a JS
    # variable assignment) - skip them before parsing.
    weather_obj = json.loads(payload[13:])
    get_fullstatus(weather_obj)
def get_fullstatus(weather_obj):
    """Extract the fields of interest from the raw weather object and
    hand them to save_status for storage."""
    status = {
        'climate': weather_obj['weather'],
        'tep_cur': weather_obj['temp'],
        'wind_dir': weather_obj['WD'],
        'wind_scl': weather_obj['WS'],
        'air_aqi': weather_obj['aqi'],
    }
    save_status(status)
def save_status(status):
    """Insert one weather status row into the weather_alarm.weather table.

    :param status: dict with keys 'climate', 'tep_cur', 'wind_dir',
        'wind_scl' and 'air_aqi'.
    """
    # connect to the database
    # NOTE(review): credentials are hard-coded; consider loading them from
    # configuration or the environment instead.
    connection = pymysql.connect(host='localhost',
                                 user='johan',
                                 password='AlarmJohan',
                                 db='weather_alarm',
                                 charset='utf8')
    try:
        with connection.cursor() as cursor:
            # Create a new record (parameterized, so values are escaped)
            sql = "INSERT INTO `weather` (`climate`, `tep_cur`, `wind_dir`, `wind_scl`, `air_aqi`) VALUES (%s,%s,%s,%s,%s)"
            cursor.execute(sql,(status['climate'],status['tep_cur'],status['wind_dir'],status['wind_scl'],status['air_aqi']))

        # connection is not autocommit by default. So you must commit to save
        # your changes.
        connection.commit()
    finally:
        connection.close()
if __name__ == '__main__':
    # Script entry point: fetch and persist the current weather once.
    get_status()
| 32.877551 | 125 | 0.607697 |
ace679433a99e5b306ad5d584bdaea653067f47a | 990 | py | Python | miqa/core/tests/test_zarr.py | thewtex/miqa | d39c549d0601004b4b731ba2763b33781c6197d1 | [
"Apache-2.0"
] | null | null | null | miqa/core/tests/test_zarr.py | thewtex/miqa | d39c549d0601004b4b731ba2763b33781c6197d1 | [
"Apache-2.0"
] | null | null | null | miqa/core/tests/test_zarr.py | thewtex/miqa | d39c549d0601004b4b731ba2763b33781c6197d1 | [
"Apache-2.0"
] | null | null | null | import os
from pathlib import Path
import shutil
from miqa.core.conversion.nifti_to_zarr_ngff import nifti_to_zarr_ngff
def test_convert_to_zarr():
    """Convert sample NIfTI images to Zarr NGFF and verify the output path."""
    script_dir = Path(__file__).resolve().parent
    samples_dir = script_dir / '..' / '..' / '..' / 'samples'
    # NOTE(review): both entries are currently the identical path - confirm
    # whether a second, distinct sample was intended here.
    samples = [
        samples_dir
        / Path(
            'datasnap-2019-01-23/fs/storage/XNAT/archive/ohsu_incoming/arc001/D-99999-P-9-2081220/RESOURCES/nifti/2_ncanda-mprage-v1/image.nii.gz'  # noqa: E501
        ),
        samples_dir
        / Path(
            'datasnap-2019-01-23/fs/storage/XNAT/archive/ohsu_incoming/arc001/D-99999-P-9-2081220/RESOURCES/nifti/2_ncanda-mprage-v1/image.nii.gz'  # noqa: E501
        ),
    ]
    # (Removed a dead `sample = samples[0]` assignment that was immediately
    # overwritten by the loop variable.)
    for sample in samples:
        result = str(sample) + '.zarr'
        # Start from a clean slate so a stale conversion can't mask a failure.
        if os.path.exists(result):
            shutil.rmtree(result)
        result_path = nifti_to_zarr_ngff(sample)
        assert str(result_path) == result
        assert os.path.exists(result)
| 31.935484 | 160 | 0.644444 |
ace6795a5f6b205c9b38d63846cc8d077adc7277 | 13,414 | py | Python | examples/mldg_smm_baseline_v2.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | examples/mldg_smm_baseline_v2.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | examples/mldg_smm_baseline_v2.py | ZhaoChuyang/dgreid | ee1d7af74b796f2f194307ab023e43ecc3d3d525 | [
"MIT"
] | null | null | null | from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import collections
import copy
import time
from datetime import timedelta
import tabulate
from sklearn.cluster import DBSCAN, KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
sys.path.append(".")
from reid import datasets
from reid import models
# from reid.models.dsbn import convert_dsbn, convert_bn
# from reid.models.csbn import convert_csbn
# from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm
# from reid.models.xbm import XBM
from reid.trainers import MLDGSMMTrainer2
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import CommDataset
from reid.utils.data import IterLoader
from reid.utils.data import transforms as T
from reid.utils.data.sampler import RandomMultipleGallerySampler
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from reid.solver import WarmupMultiStepLR
# Module-level training state shared with main_worker, which declares these
# global and re-binds best_mAP to a per-target-dataset list.
start_epoch = best_mAP = 0
def relabel_datasets(data):
    """Shift person and camera ids of each dataset by a running offset so
    that ids never collide across the concatenated source domains."""
    pid_offset = 0
    camid_offset = 0
    for dataset in data:
        dataset.train = [
            (img, pid + pid_offset, camid + camid_offset)
            for img, pid, camid in dataset.train
        ]
        pid_offset += dataset.num_train_pids
        camid_offset += dataset.num_train_cams
    print('Totally %d pids, %d camids' % (pid_offset, camid_offset))
def get_data(name, data_dir, combineall=False):
    """Instantiate dataset *name* rooted at data_dir/name."""
    return datasets.create(name, osp.join(data_dir, name), combineall=combineall)
def get_train_loader(args, dataset, height, width, batch_size, workers, num_instances, iters, trainset=None):
    """Build one infinite IterLoader per source domain with train-time
    augmentation (flip, pad + random crop, ImageNet normalization)."""
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    train_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop((height, width)),
        T.ToTensor(),
        normalizer,
    ])

    # With num_instances > 0, sample num_instances images per identity;
    # otherwise fall back to plain shuffling.
    use_identity_sampler = num_instances > 0
    train_loaders = []
    for data in dataset:
        train_set = sorted(data.train) if trainset is None else sorted(trainset)
        sampler = RandomMultipleGallerySampler(train_set, num_instances) if use_identity_sampler else None
        loader = DataLoader(
            Preprocessor(train_set, root=data.images_dir, transform=train_transformer),
            batch_size=batch_size,
            num_workers=workers,
            sampler=sampler,
            shuffle=not use_identity_sampler,
            pin_memory=False,
            drop_last=True)
        train_loaders.append(IterLoader(loader, length=iters))

    return train_loaders
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
    """Build a deterministic (no shuffle, no augmentation) DataLoader over
    the union of query and gallery images, unless *testset* is given."""
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])

    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer
    ])

    if testset is None:
        # Deduplicate images shared between the query and gallery sets.
        testset = list(set(dataset.query) | set(dataset.gallery))

    return DataLoader(
        Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=False)
def create_model(args):
    """Instantiate the backbone from args, move it to GPU and wrap it in
    DataParallel for multi-GPU training."""
    model = models.create(args.arch, num_features=args.features, norm=False,
                          dropout=args.dropout, num_classes=args.nclass)
    model.cuda()
    return nn.DataParallel(model)
def _seed_everything(seed):
    """Seed python, numpy and torch RNGs and make cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    cudnn.deterministic = True


def main():
    """CLI entry point: parse args, optionally fix all RNG seeds, train."""
    args = parser.parse_args()
    if args.seed is not None:
        _seed_everything(args.seed)
    main_worker(args)
def main_worker(args):
    """Full training driver: build datasets/loaders, model, optimizer and
    trainer, then alternate training epochs with periodic evaluation on
    every target dataset, checkpointing the best multi-target model."""
    global start_epoch, best_mAP
    start_time = time.monotonic()

    cudnn.benchmark = True

    # Mirror stdout into a log file under the logs directory.
    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    # Create datasets
    iters = args.iters if (args.iters>0) else None
    print("==> Load source-domain dataset")
    dataset_source = []
    dataset_names = args.dataset_source.split(',')
    num_domains = len(dataset_names)
    for src in dataset_names:
        dataset = get_data(src, args.data_dir, args.combine_all)
        train_items = dataset.train
        dataset_source.append(CommDataset(train_items))

    # relabel dataset pids to avoid label collision
    relabel_datasets(dataset_source)

    print("==> Load target-domain dataset")
    target_loaders = []
    target_datasets = []
    target_dataset_names = args.dataset_target.split(',')
    for target_dataset_name in target_dataset_names:
        target_dataset = get_data(target_dataset_name, args.data_dir)
        target_loader = get_test_loader(target_dataset, args.height, args.width, args.batch_size, args.workers)
        target_loaders.append(target_loader)
        target_datasets.append(target_dataset)

    train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,
                                           args.batch_size, args.workers, args.num_instances, iters)

    # The classifier size is the total identity count across source domains.
    source_classes = sum([data.num_train_pids for data in dataset_source])
    args.nclass = source_classes

    # Create model
    model = create_model(args)
    print(model)

    # Evaluator
    evaluator = Evaluator(model)
    # best_mAP is tracked per target dataset.
    best_mAP = [0] * len(target_datasets)

    # Optimizer
    params = [{"params": [value]} for _, value in model.named_parameters() if value.requires_grad]
    optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
    # optimizer = torch.optim.SGD(params, lr=0.1, momentum=0.9, weight_decay=5e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[8, 20, 40, 60], gamma=0.1)
    # milestones = [30, 50, 70]
    # lr_scheduler = WarmupMultiStepLR(optimizer, milestones=[30, 50, 70], gamma=0.1, warmup_factor=0.01, warmup_iters=10, warmup_method="linear")

    # Trainer
    trainer = MLDGSMMTrainer2(model, args.nclass, margin=args.margin, mldg_beta=args.mldg_beta, num_domains=num_domains)

    # Accumulating results table printed after each evaluation.
    table = []
    header = ['Epoch', 'Dataset', 'mAP', 'Rank-1', 'Rank-5', 'Rank-10']
    table.append(header)

    for epoch in range(args.epochs):
        # train_loader_source.new_epoch()
        # train_loader_target.new_epoch()

        trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)

        # Evaluate every eval_step epochs and always on the final epoch.
        if (epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1):
            current_mAP = [0] * len(target_datasets)
            for target_id in range(len(target_datasets)):
                target_dataset_name = target_dataset_names[target_id]
                target_dataset = target_datasets[target_id]
                target_loader = target_loaders[target_id]
                print('Test on target: ', target_dataset_name)
                result_dict, mAP = evaluator.evaluate(target_loader, target_dataset.query, target_dataset.gallery,
                                                      cmc_flag=True)

                # show results in table
                record = list()
                record.append(epoch)
                record.append(target_dataset_name)
                record.append(result_dict['mAP'])
                record.append(result_dict['rank-1'])
                record.append(result_dict['rank-5'])
                record.append(result_dict['rank-10'])
                table.append(record)
                print(tabulate.tabulate(table, headers='firstrow', tablefmt='github', floatfmt='.2%'))

                current_mAP[target_id] = mAP

                print('\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}\n'.
                      format(epoch, mAP, best_mAP[target_id]))

            # "Best" means the highest summed mAP across all targets.
            is_best = False
            if sum(current_mAP) > sum(best_mAP): is_best = True

            save_checkpoint({
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

            for target_id in range(len(best_mAP)):
                best_mAP[target_id] = max(current_mAP[target_id], best_mAP[target_id])

        lr_scheduler.step()

    # print ('==> Test with the best model on the target domain:')
    # checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    # model.load_state_dict(checkpoint['state_dict'])
    # evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)

    end_time = time.monotonic()
    print('Total running time: ', timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
    # NOTE(review): description looks copied from another script (this file
    # implements an MLDG-SMM baseline) - confirm the intended wording.
    parser = argparse.ArgumentParser(description="Self-paced contrastive learning on UDA re-ID")
    # data: dataset selection and loading options
    parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')
    parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')
    parser.add_argument('--combine-all', action='store_true',
                        help="if True: combinall train, query, gallery for training;")
    parser.add_argument('-b', '--batch-size', type=int, default=64)
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('--height', type=int, default=256, help="input height")
    parser.add_argument('--width', type=int, default=128, help="input width")
    parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consist of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances, "
                             "default: 0 (NOT USE)")
    # cluster: pseudo-label clustering hyperparameters
    parser.add_argument('--eps', type=float, default=0.6,
                        help="max neighbor distance for DBSCAN")
    parser.add_argument('--k1', type=int, default=30,
                        help="hyperparameter for jaccard distance")
    parser.add_argument('--k2', type=int, default=6,
                        help="hyperparameter for jaccard distance")
    parser.add_argument('--nclass', type=int, default=1000,
                        help="number of classes (source+target)")
    parser.add_argument('--s-class', type=int, default=1000,
                        help="number of classes (source)")
    parser.add_argument('--t-class', type=int, default=1000,
                        help="number of classes (target)")
    # loss: loss-term weights and margins
    parser.add_argument('--margin', type=float, default=0.3,
                        help="margin for triplet loss")
    parser.add_argument('--mu1', type=float, default=0.5,
                        help="weight for loss_bridge_pred")
    parser.add_argument('--mu2', type=float, default=0.1,
                        help="weight for loss_bridge_feat")
    parser.add_argument('--mu3', type=float, default=1,
                        help="weight for loss_div")
    # model: backbone architecture options
    parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',
                        choices=models.names())
    parser.add_argument('--features', type=int, default=0)
    parser.add_argument('--dropout', type=float, default=0)
    # xbm parameters
    parser.add_argument('--memorySize', type=int, default=8192,
                        help='meomory bank size')
    parser.add_argument('--ratio', type=float, default=1,
                        help='memorySize=ratio*data_size')
    parser.add_argument('--featureSize', type=int, default=2048)
    parser.add_argument('--use-xbm', action='store_true', help="if True: strong baseline; if False: naive baseline")
    # optimizer
    parser.add_argument('--lr', type=float, default=0.00035,
                        help="learning rate")
    parser.add_argument('--weight-decay', type=float, default=5e-4)
    parser.add_argument('--epochs', type=int, default=60)
    parser.add_argument('--iters', type=int, default=200)
    parser.add_argument('--step-size', type=int, default=30)
    # training configs
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--print-freq', type=int, default=50)
    parser.add_argument('--eval-step', type=int, default=10)
    # path
    working_dir = osp.dirname(osp.abspath(__file__))
    parser.add_argument('--data-dir', type=str, default='/data/datasets')
    parser.add_argument('--logs-dir', type=str, metavar='PATH',
                        default=osp.join(working_dir, 'logs'))
    # hbchen
    # NOTE(review): argparse `type=bool` treats any non-empty string as
    # True, so `--csdn False` would still be truthy - confirm intent.
    parser.add_argument('--csdn', type=bool, default=False)
    parser.add_argument('--mldg-beta', type=float, default=0.5)

    main()
| 40.041791 | 146 | 0.642016 |
ace679e093fdb94238e894cf023f85c0cd1531bf | 2,768 | py | Python | test/python/circuit/test_isometry.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 2 | 2019-06-28T19:58:42.000Z | 2019-07-26T05:04:02.000Z | test/python/circuit/test_isometry.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 3 | 2018-11-13T17:33:37.000Z | 2018-12-03T09:35:00.000Z | test/python/circuit/test_isometry.py | romainfd/qiskit-terra | b5285ccc5cb1d17b7c73402833f2750b93652426 | [
"Apache-2.0"
] | 2 | 2017-12-03T15:48:14.000Z | 2018-03-11T13:08:03.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Isometry tests."""
import unittest
import numpy as np
from qiskit.quantum_info.random import random_unitary
from qiskit import BasicAer
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit import execute
from qiskit.test import QiskitTestCase
from qiskit.compiler import transpile
from qiskit.quantum_info.operators.predicates import matrix_equal
class TestIsometry(QiskitTestCase):
    """Qiskit isometry tests."""

    def test_isometry(self):
        """Tests for the decomposition of isometries from m to n qubits"""
        isometries = [
            np.eye(2, 2),
            random_unitary(2).data,
            np.eye(4, 4),
            random_unitary(4).data[:, 0],
            np.eye(4, 4)[:, 0:2],
            random_unitary(4).data,
            np.eye(4, 4)[:, np.random.permutation(np.eye(4, 4).shape[1])][:, 0:2],
            np.eye(8, 8)[:, np.random.permutation(np.eye(8, 8).shape[1])],
            random_unitary(8).data[:, 0:4],
            random_unitary(8).data,
            random_unitary(16).data,
            random_unitary(16).data[:, 0:8],
        ]
        for iso in isometries:
            with self.subTest(iso=iso):
                # Promote a bare state vector to a single-column isometry.
                if iso.ndim == 1:
                    iso = iso.reshape((len(iso), 1))
                n_input = int(np.log2(iso.shape[1]))
                n_ancilla = int(np.log2(iso.shape[0])) - n_input
                qreg = QuantumRegister(n_input + n_ancilla)
                circuit = QuantumCircuit(qreg)
                circuit.isometry(iso, qreg[:n_input], qreg[n_input:])
                # Verify the circuit can be decomposed
                self.assertIsInstance(circuit.decompose(), QuantumCircuit)
                # Decompose the gate into the target basis.
                circuit = transpile(circuit, basis_gates=['u1', 'u3', 'u2', 'cx', 'id'])
                # Simulate the decomposed circuit; its first 2**n_input
                # columns must equal the requested isometry up to a phase.
                backend = BasicAer.get_backend('unitary_simulator')
                unitary = execute(circuit, backend).result().get_unitary(circuit)
                realized = unitary[::, 0:2 ** n_input]
                self.assertTrue(matrix_equal(realized, iso, ignore_phase=True))
# Allow running this test module directly (e.g. `python test_isometry.py`).
if __name__ == '__main__':
    unittest.main()
| 39.542857 | 100 | 0.618497 |
ace67a12c92ab1377211def26c2e69a279f5d573 | 1,067 | py | Python | hackerearth/Algorithms/Glowing Bulbs/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Glowing Bulbs/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Glowing Bulbs/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())  # number of test cases to process
def num_glowing(n, primes, _i=0):
    """Count integers in [1, n] divisible by at least one period in ``primes``.

    Uses inclusion-exclusion recursively.  NOTE(review): the recursion removes
    double counts via ``n // (p * q)``-style terms, which is exact only when
    the periods are pairwise coprime -- confirm against the problem
    constraints.

    Args:
        n: upper bound of the range (inclusive).
        primes: list of periods; only ``primes[_i:]`` is considered.
        _i: internal recursion index.  Replaces the original ``primes[1:]``
            slicing, which copied the list at every recursion level.

    Returns:
        Number of integers in [1, n] divisible by some element of
        ``primes[_i:]``.
    """
    if _i >= len(primes):
        return 0
    # m values in [1, n] are multiples of primes[_i].  Add the count for the
    # remaining periods, then subtract the overlap (multiples of primes[_i]
    # that are also multiples of a later period).
    m = n // primes[_i]
    return m + num_glowing(n, primes, _i + 1) - num_glowing(m, primes, _i + 1)
# For each test case: find the k-th smallest moment at which a glowing event
# occurs, where num_glowing(x, switches) counts events in [1, x].
# NOTE(review): the exact problem semantics (bulbs glowing at multiples of
# the switch positions) are inferred from the code -- confirm against the
# original problem statement.
for _ in range(t):
    s = input()
    k = int(input())
    # Active switch positions are the 1-based indices of '1' characters.
    switches = []
    for i in range(len(s)):
        if s[i] == '1':
            switches.append(i + 1)
    # Bracket [left, right]; presumably maintains
    # num_glowing(left) < k <= num_glowing(right) -- verify that 40 * k is a
    # valid upper bound for the given constraints.
    left = 1
    right = 40 * k
    u = 0
    v = num_glowing(right, switches)
    while left + 1 < right:
        # Interpolation step: place mid proportionally to where k lies
        # between the counts u and v, then clamp it strictly inside
        # (left, right) so the bracket always shrinks.
        mid = (left * (v - k) + right * (k - u)) // (v - u)
        if mid <= left:
            mid += 1
        if mid >= right:
            mid -= 1
        x = num_glowing(mid, switches)
        if x >= k:
            right = mid
            v = x
        else:
            left = mid
            u = x
    print(right)
| 22.229167 | 94 | 0.47985 |
ace67b38a0df6db5836c72619a8b203540a87952 | 723 | py | Python | mapping/hector_slam/hector_mapping/catkin_generated/pkg.develspace.context.pc.py | mrsd16teamd/loco_car | 36e4ed685f9463ad689ca72eec80e0f05f1ad66c | [
"MIT"
] | 48 | 2016-11-10T06:00:27.000Z | 2022-03-01T12:57:23.000Z | mapping/hector_slam/hector_mapping/catkin_generated/pkg.develspace.context.pc.py | mrsd16teamd/loco_car | 36e4ed685f9463ad689ca72eec80e0f05f1ad66c | [
"MIT"
] | 6 | 2017-04-03T05:39:06.000Z | 2017-07-27T02:35:44.000Z | mapping/hector_slam/hector_mapping/catkin_generated/pkg.develspace.context.pc.py | mrsd16teamd/loco_car | 36e4ed685f9463ad689ca72eec80e0f05f1ad66c | [
"MIT"
] | 20 | 2017-02-28T13:24:31.000Z | 2021-12-06T12:36:46.000Z | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ubuntu/catkin_ws/src/devel/include;/home/ubuntu/catkin_ws/src/hector_slam/hector_mapping/include;/usr/include/eigen3".split(';') if "/home/ubuntu/catkin_ws/src/devel/include;/home/ubuntu/catkin_ws/src/hector_slam/hector_mapping/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nav_msgs;visualization_msgs;tf;message_filters;laser_geometry;tf_conversions;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_mapping"
PROJECT_SPACE_DIR = "/home/ubuntu/catkin_ws/src/devel"
PROJECT_VERSION = "0.3.5"
| 80.333333 | 311 | 0.786999 |
ace67be360cfcd671da9b4328bc91830abfba6a9 | 22,576 | py | Python | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | kivymd/uix/card.py | shashi278/KivyMD | d920196358e17f6d4bd74ce6e19f6ecb462e4290 | [
"MIT"
] | null | null | null | """
Components/Card
===============
.. seealso::
`Material Design spec, Cards <https://material.io/components/cards>`_
.. rubric:: Cards contain content and actions about a single subject.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/cards.gif
:align: center
`KivyMD` provides the following card classes for use:
- MDCard_
- MDCardSwipe_
.. Note:: :class:`~MDCard` inherited from
:class:`~kivy.uix.boxlayout.BoxLayout`. You can use all parameters and
attributes of the :class:`~kivy.uix.boxlayout.BoxLayout` class in the
:class:`~MDCard` class.
.. MDCard:
MDCard
------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
MDScreen:
MDCard:
size_hint: None, None
size: "280dp", "180dp"
pos_hint: {"center_x": .5, "center_y": .5}
'''
class TestCard(MDApp):
def build(self):
return Builder.load_string(KV)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card.png
:align: center
Add content to card:
--------------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
MDScreen:
MDCard:
orientation: "vertical"
padding: "8dp"
size_hint: None, None
size: "280dp", "180dp"
pos_hint: {"center_x": .5, "center_y": .5}
MDLabel:
text: "Title"
theme_text_color: "Secondary"
size_hint_y: None
height: self.texture_size[1]
MDSeparator:
height: "1dp"
MDLabel:
text: "Body"
'''
class TestCard(MDApp):
def build(self):
return Builder.load_string(KV)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-content.png
:align: center
.. MDCardSwipe:
MDCardSwipe
-----------
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/MDCardSwipe.gif
:align: center
To create a card with `swipe-to-delete` behavior, you must create a new class
that inherits from the :class:`~MDCardSwipe` class:
.. code-block:: kv
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
.. code-block:: python
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/map-mdcard-swipr.png
:align: center
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
# Content under the card.
MDCardSwipeFrontBox:
# Content of card.
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
scroll_timeout : 100
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
'''Card with `swipe-to-delete` behavior.'''
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def on_start(self):
'''Creates a list of cards.'''
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/list-mdcard-swipe.gif
:align: center
Binding a swipe to one of the sides of the screen
-------------------------------------------------
.. code-block:: kv
<SwipeToDeleteItem>:
# By default, the parameter is "left"
anchor: "right"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/mdcard-swipe-anchor-right.gif
:align: center
.. Note:: You cannot use the left and right swipe at the same time.
Swipe behavior
--------------
.. code-block:: kv
<SwipeToDeleteItem>:
# By default, the parameter is "hand"
type_swipe: "hand"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/hand-mdcard-swipe.gif
:align: center
.. code-block:: kv
<SwipeToDeleteItem>:
type_swipe: "auto"
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/auto-mdcard-swipe.gif
:align: center
Removing an item using the ``type_swipe = "auto"`` parameter
------------------------------------------------------------
The map provides the :attr:`MDCardSwipe.on_swipe_complete` event.
You can use this event to remove items from a list:
.. code-block:: kv
<SwipeToDeleteItem>:
on_swipe_complete: app.on_swipe_complete(root)
.. code-block:: python
def on_swipe_complete(self, instance):
self.screen.ids.md_list.remove_widget(instance)
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
type_swipe: "auto"
on_swipe_complete: app.on_swipe_complete(root)
MDCardSwipeLayerBox:
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def on_swipe_complete(self, instance):
self.screen.ids.md_list.remove_widget(instance)
def on_start(self):
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/autodelete-mdcard-swipe.gif
:align: center
Add content to the bottom layer of the card
-------------------------------------------
To add content to the bottom layer of the card,
use the :class:`~MDCardSwipeLayerBox` class.
.. code-block:: kv
<SwipeToDeleteItem>:
MDCardSwipeLayerBox:
padding: "8dp"
MDIconButton:
icon: "trash-can"
pos_hint: {"center_y": .5}
on_release: app.remove_item(root)
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivymd.app import MDApp
from kivymd.uix.card import MDCardSwipe
KV = '''
<SwipeToDeleteItem>:
size_hint_y: None
height: content.height
MDCardSwipeLayerBox:
padding: "8dp"
MDIconButton:
icon: "trash-can"
pos_hint: {"center_y": .5}
on_release: app.remove_item(root)
MDCardSwipeFrontBox:
OneLineListItem:
id: content
text: root.text
_no_ripple_effect: True
MDScreen:
MDBoxLayout:
orientation: "vertical"
spacing: "10dp"
MDToolbar:
elevation: 10
title: "MDCardSwipe"
ScrollView:
MDList:
id: md_list
padding: 0
'''
class SwipeToDeleteItem(MDCardSwipe):
text = StringProperty()
class TestCard(MDApp):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.screen = Builder.load_string(KV)
def build(self):
return self.screen
def remove_item(self, instance):
self.screen.ids.md_list.remove_widget(instance)
def on_start(self):
for i in range(20):
self.screen.ids.md_list.add_widget(
SwipeToDeleteItem(text=f"One-line item {i}")
)
TestCard().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/handdelete-mdcard-swipe.gif
:align: center
Focus behavior
--------------
.. code-block:: kv
MDCard:
focus_behavior: True
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-focus.gif
:align: center
Ripple behavior
---------------
.. code-block:: kv
MDCard:
ripple_behavior: True
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/card-behavior.gif
:align: center
End full code
-------------
.. code-block:: python
from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
<StarButton@MDIconButton>
icon: "star"
on_release: self.icon = "star-outline" if self.icon == "star" else "star"
MDScreen:
MDCard:
orientation: "vertical"
size_hint: .5, None
height: box_top.height + box_bottom.height
focus_behavior: True
ripple_behavior: True
pos_hint: {"center_x": .5, "center_y": .5}
MDBoxLayout:
id: box_top
spacing: "20dp"
adaptive_height: True
FitImage:
source: "/Users/macbookair/album.jpeg"
size_hint: .3, None
height: text_box.height
MDBoxLayout:
id: text_box
orientation: "vertical"
adaptive_height: True
spacing: "10dp"
padding: 0, "10dp", "10dp", "10dp"
MDLabel:
text: "Ride the Lightning"
theme_text_color: "Primary"
font_style: "H5"
bold: True
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "July 27, 1984"
size_hint_y: None
height: self.texture_size[1]
theme_text_color: "Primary"
MDSeparator:
MDBoxLayout:
id: box_bottom
adaptive_height: True
padding: "10dp", 0, 0, 0
MDLabel:
text: "Rate this album"
size_hint_y: None
height: self.texture_size[1]
pos_hint: {"center_y": .5}
theme_text_color: "Primary"
StarButton:
StarButton:
StarButton:
StarButton:
StarButton:
'''
class Test(MDApp):
def build(self):
self.theme_cls.theme_style = "Dark"
return Builder.load_string(KV)
Test().run()
"""
__all__ = (
"MDCard",
"MDCardSwipe",
"MDCardSwipeFrontBox",
"MDCardSwipeLayerBox",
"MDSeparator",
)
from kivy.animation import Animation
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import (
BooleanProperty,
ColorProperty,
NumericProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import colors
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import (
BackgroundColorBehavior,
FocusBehavior,
RectangularRippleBehavior,
RoundedRectangularElevationBehavior,
)
from kivymd.uix.boxlayout import MDBoxLayout
Builder.load_string(
"""
<MDCardSwipeLayerBox>:
canvas.before:
Color:
rgba: app.theme_cls.divider_color
Rectangle:
size: self.size
pos: self.pos
<MDCard>
canvas.before:
Color:
rgba: self.md_bg_color
RoundedRectangle:
size: self.size
pos: self.pos
radius: root.radius
source: root.background
<MDSeparator>
md_bg_color: self.theme_cls.divider_color if not root.color else root.color
"""
)
class MDSeparator(ThemableBehavior, MDBoxLayout):
    """A thin separator line, horizontal or vertical."""

    color = ColorProperty(None)
    """Separator color in ``rgba`` format.

    :attr:`color` is a :class:`~kivy.properties.ColorProperty`
    and defaults to `None`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Apply the sizing rules for the initial orientation.
        self.on_orientation()

    def on_orientation(self, *args):
        # Fired by Kivy whenever ``orientation`` changes: a horizontal
        # separator stretches along x and is one density-independent pixel
        # tall; a vertical one stretches along y and is one dp wide.
        if self.orientation == "horizontal":
            self.size_hint = (1, None)
            self.height = dp(1)
        else:
            self.size_hint = (None, 1)
            self.width = dp(1)
class MDCard(
    ThemableBehavior,
    RoundedRectangularElevationBehavior,
    BackgroundColorBehavior,
    RectangularRippleBehavior,
    FocusBehavior,
    BoxLayout,
):
    focus_behavior = BooleanProperty(False)
    """
    Use focus highlighting when hovering over the card.

    :attr:`focus_behavior` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to `False`.
    """

    ripple_behavior = BooleanProperty(False)
    """
    Use ripple effect for the card.

    :attr:`ripple_behavior` is a :class:`~kivy.properties.BooleanProperty`
    and defaults to `False`.
    """

    elevation = NumericProperty(None, allownone=True)
    """
    Elevation value.  When left as `None` it is resolved to `6` on the next
    frame (see :meth:`_on_elevation`).

    :attr:`elevation` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `None`.
    """

    # Background colors treated as "unset", i.e. still following the theme:
    # the light default, the dark default and fully transparent.
    _bg_color_map = (
        get_color_from_hex(colors["Light"]["CardsDialogs"]),
        get_color_from_hex(colors["Dark"]["CardsDialogs"]),
        [1.0, 1.0, 1.0, 0.0],
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.theme_cls.bind(theme_style=self.update_md_bg_color)
        # Resolve the elevation and ripple state on the next frame, after
        # any kv-assigned values have been applied.
        Clock.schedule_once(lambda dt: self._on_elevation(self.elevation))
        Clock.schedule_once(
            lambda dt: self.on_ripple_behavior(0, self.ripple_behavior)
        )
        self.update_md_bg_color(self, self.theme_cls.theme_style)

    def update_md_bg_color(self, instance, value):
        # Follow the theme only while the background has not been customized.
        if self.md_bg_color in self._bg_color_map:
            self.md_bg_color = get_color_from_hex(colors[value]["CardsDialogs"])

    def on_ripple_behavior(self, instance, value):
        self._no_ripple_effect = not value

    def _on_elevation(self, value):
        # `None` means "use the default elevation of 6".
        self.elevation = 6 if value is None else value
class MDCardSwipe(RelativeLayout):
    """
    A two-layer card that can be dragged sideways to reveal the layer
    underneath (swipe-to-delete style).

    :Events:
        :attr:`on_swipe_complete`
            Called when a swipe of card is completed.
    """
    open_progress = NumericProperty(0.0)
    """
    Percent of visible part of side panel. The percent is specified as a
    floating point number in the range 0-1. 0.0 if panel is closed and 1.0 if
    panel is opened.
    :attr:`open_progress` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.0`.
    """
    opening_transition = StringProperty("out_cubic")
    """
    The name of the animation transition type to use when animating to
    the :attr:`state` `'opened'`.
    :attr:`opening_transition` is a :class:`~kivy.properties.StringProperty`
    and defaults to `'out_cubic'`.
    """
    closing_transition = StringProperty("out_sine")
    """
    The name of the animation transition type to use when animating to
    the :attr:`state` `'closed'`.
    :attr:`closing_transition` is a :class:`~kivy.properties.StringProperty`
    and defaults to `'out_sine'`.
    """
    anchor = OptionProperty("left", options=("left", "right"))
    """
    Anchoring screen edge for card. Available options are: `'left'`, `'right'`.
    :attr:`anchor` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `left`.
    """
    swipe_distance = NumericProperty(50)
    """
    Maximum distance from the anchored edge at which a touch may start the
    swipe movement.
    :attr:`swipe_distance` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `50`.
    """
    opening_time = NumericProperty(0.2)
    """
    The time taken for the card to slide to the :attr:`state` `'open'`.
    :attr:`opening_time` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.2`.
    """
    state = OptionProperty("closed", options=("closed", "opened"))
    """
    Current state of the card, set by :attr:`~open_card` and
    :attr:`~close_card`. Bind to it to react to swipes. Available options
    are: `'closed'`, `'opened'`.
    :attr:`state` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `'closed'`.
    """
    max_swipe_x = NumericProperty(0.3)
    """
    If, after the events of :attr:`~on_touch_up` card position exceeds this
    value - will automatically execute the method :attr:`~open_card`,
    and if not - will automatically be :attr:`~close_card` method.
    :attr:`max_swipe_x` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `0.3`.
    """
    max_opened_x = NumericProperty("100dp")
    """
    The value of the position the card shifts to when :attr:`~type_swipe`
    is set to `'hand'`.
    :attr:`max_opened_x` is a :class:`~kivy.properties.NumericProperty`
    and defaults to `100dp`.
    """
    type_swipe = OptionProperty("hand", options=("auto", "hand"))
    """
    Type of card opening when swipe. Shift the card to the edge or to
    a set position :attr:`~max_opened_x`. Available options are:
    `'auto'`, `'hand'`.
    :attr:`type_swipe` is a :class:`~kivy.properties.OptionProperty`
    and defaults to `'hand'`.
    """
    # True while a drag that started near the anchored edge is in progress.
    _opens_process = False
    # True when the current touch sequence is closing an already opened card,
    # so on_touch_up must not re-trigger complete_swipe.
    _to_closed = True
    def __init__(self, **kw):
        # Register the custom event before the base class consumes kwargs.
        self.register_event_type("on_swipe_complete")
        super().__init__(**kw)
    def _on_swipe_complete(self, *args):
        # Animation `on_complete` callback: forward to the public event.
        self.dispatch("on_swipe_complete")
    def add_widget(self, widget, index=0, canvas=None):
        # Only the two designated layers are accepted as children; any other
        # widget is silently ignored.
        if isinstance(widget, (MDCardSwipeFrontBox, MDCardSwipeLayerBox)):
            return super().add_widget(widget)
    def on_swipe_complete(self, *args):
        """Called when a swipe of card is completed."""
    def on_anchor(self, instance, value):
        # Reset the baseline progress for the new anchoring side.
        if value == "right":
            self.open_progress = 1.0
        else:
            self.open_progress = 0.0
    def on_open_progress(self, instance, value):
        # Shift the front layer (the most recently added child, hence
        # children[0]) by the swiped fraction of the card's width.
        if self.anchor == "left":
            self.children[0].x = self.width * value
        else:
            self.children[0].x = self.width * value - self.width
    def on_touch_move(self, touch):
        if self.collide_point(touch.x, touch.y):
            # The swipe engages while the touch is within `swipe_distance`
            # of the anchored edge.
            expr = (
                touch.x < self.swipe_distance
                if self.anchor == "left"
                else touch.x > self.width - self.swipe_distance
            )
            if expr and not self._opens_process:
                self._opens_process = True
                self._to_closed = False
            if self._opens_process:
                # Follow the finger; progress is clamped to [0, 2.5].
                self.open_progress = max(
                    min(self.open_progress + touch.dx / self.width, 2.5), 0
                )
        return super().on_touch_move(touch)
    def on_touch_up(self, touch):
        if self.collide_point(touch.x, touch.y):
            if not self._to_closed:
                # Finger lifted mid-swipe: snap fully open or closed.
                self._opens_process = False
                self.complete_swipe()
        return super().on_touch_up(touch)
    def on_touch_down(self, touch):
        if self.collide_point(touch.x, touch.y):
            # Tapping an opened card closes it again.
            if self.state == "opened":
                self._to_closed = True
                self.close_card()
        return super().on_touch_down(touch)
    def complete_swipe(self):
        # Snap open or closed depending on whether the drag passed the
        # `max_swipe_x` threshold (direction depends on the anchor side).
        expr = (
            self.open_progress <= self.max_swipe_x
            if self.anchor == "left"
            else self.open_progress >= self.max_swipe_x
        )
        if expr:
            self.close_card()
        else:
            self.open_card()
    def open_card(self):
        """Animate the front layer to its opened position."""
        if self.type_swipe == "hand":
            # 'hand' mode: slide to the fixed offset `max_opened_x`.
            swipe_x = (
                self.max_opened_x
                if self.anchor == "left"
                else -self.max_opened_x
            )
        else:
            # 'auto' mode: slide the front layer all the way across.
            swipe_x = self.width if self.anchor == "left" else 0
        anim = Animation(
            x=swipe_x, t=self.opening_transition, d=self.opening_time
        )
        anim.bind(on_complete=self._on_swipe_complete)
        anim.start(self.children[0])
        self.state = "opened"
    def close_card(self):
        """Animate the front layer back to its closed position."""
        anim = Animation(x=0, t=self.closing_transition, d=self.opening_time)
        anim.bind(on_complete=self._reset_open_progress)
        anim.start(self.children[0])
        self.state = "closed"
    def _reset_open_progress(self, *args):
        # Closing animation finished: restore the baseline progress for the
        # current anchor and notify listeners.
        self.open_progress = 0.0 if self.anchor == "left" else 1.0
        self._to_closed = False
        self.dispatch("on_swipe_complete")
class MDCardSwipeFrontBox(MDCard):
    """Front (visible) layer of a swipeable card; place the card content here."""
    pass
class MDCardSwipeLayerBox(BoxLayout):
    """Bottom layer of a swipeable card, revealed when the front box is swiped."""
    pass
| 25.567384 | 111 | 0.583097 |
ace67c83067ac781c420ddd111b26724aa0ca638 | 1,121 | py | Python | test/test_groups.py | skharab-anaplan/objective_regex | ffce0f4d644cb222785b45328a9f97948f18196a | [
"MIT"
] | null | null | null | test/test_groups.py | skharab-anaplan/objective_regex | ffce0f4d644cb222785b45328a9f97948f18196a | [
"MIT"
] | null | null | null | test/test_groups.py | skharab-anaplan/objective_regex | ffce0f4d644cb222785b45328a9f97948f18196a | [
"MIT"
] | 1 | 2021-12-16T15:06:17.000Z | 2021-12-16T15:06:17.000Z | import o_re
def test_numbered():
    """Anonymous groups are addressable by their 1-based positions."""
    letters = o_re.Raw("[a-d]+")
    pattern = letters.as_group() + o_re.Raw(".*?") + letters.as_group()
    match = pattern.get_compiled().search("poiopipouipipoioppippoipoi abcddcba qwqADDC abcdX")
    assert match.group(1) == "abcddcba"
    assert match.group(2) == "abcd"
def test_named():
    """Named groups are addressable by the names given to them."""
    letters = o_re.Raw("[a-d]+")
    pattern = letters.as_group("grp1") + o_re.Raw(".*?") + letters.as_group("grp2")
    match = pattern.get_compiled().search("poiopipouipipoioppippoipoi abcddcba qwqADDC abcdX")
    assert match.group("grp1") == "abcddcba"
    assert match.group("grp2") == "abcd"
def test_numbered_reference():
    """Reusing a group object: the repeated occurrence apparently matches
    like a back-reference (same text in both positions) -- the match would
    fail otherwise since the second ST-token differs."""
    token = o_re.Raw(r"ST\w+")
    group = token.as_group()
    pattern = group + o_re.Spaces + o_re.Raw(r"\w+").as_group() + o_re.Spaces + group
    match = pattern.get_compiled().match("ST123 ST424242 ST123")
    assert match.group(1) == "ST123"
    assert match.group(2) == "ST424242"
def test_named_equality():
    """Same-named groups compare equal iff their wrapped patterns are equal."""
    first = o_re.Raw("42").as_group("name")
    second = o_re.Raw("42").as_group("name")
    assert first == second
    different = o_re.Raw("43").as_group("name")
    assert first != different
    assert second != different
| 31.138889 | 88 | 0.628903 |
ace67d88f19c72f6965da3c8d69c3bfe337e29ba | 4,162 | py | Python | models/mlhead_client.py | caifederated/mlhead-release | 703fe2294f210b7259cd1404632d7757766f5a7d | [
"BSD-2-Clause"
] | 26 | 2020-04-29T10:42:40.000Z | 2022-02-16T21:11:14.000Z | models/mlhead_client.py | caifederated/mlhead-release | 703fe2294f210b7259cd1404632d7757766f5a7d | [
"BSD-2-Clause"
] | 2 | 2020-06-02T16:06:18.000Z | 2020-07-01T06:13:32.000Z | models/mlhead_client.py | caifederated/mlhead-release | 703fe2294f210b7259cd1404632d7757766f5a7d | [
"BSD-2-Clause"
] | 6 | 2020-10-12T05:26:07.000Z | 2022-02-16T21:11:16.000Z | import random
import warnings
import os
class Client:
    """One federated-learning participant: holds the client's local train and
    eval data plus a reference to the (shared) model object."""

    def __init__(self, client_id, group=None, train_data=None, eval_data=None,
                 model=None, write_path=None):
        """
        Args:
            client_id: identifier of this client.
            group: optional group label for this client.
            train_data: dict {'x': [...], 'y': [...]} of local training data.
            eval_data: dict {'x': [...], 'y': [...]} of local test data.
            model: model object exposing train/test/prepare_test/save_file.
            write_path: directory used for per-client checkpoint files.
        """
        # Use None sentinels instead of the original mutable default dicts so
        # separate clients never share (and mutate) the same default dataset.
        if train_data is None:
            train_data = {'x': [], 'y': []}
        if eval_data is None:
            eval_data = {'x': [], 'y': []}
        self._model = model
        self.id = client_id  # integer
        self.group = group
        self.train_data = train_data
        self.eval_data = eval_data
        self.write_path = write_path

    def train(self, num_epochs=1, batch_size=10, minibatch=None, write_file=False):
        """Trains on self.model using the client's train_data.

        Args:
            num_epochs: Number of epochs to train. Unsupported if minibatch is
                provided (minibatch has only 1 epoch).
            batch_size: Size of training batches.
            minibatch: fraction of client's data to apply minibatch sgd,
                None to use FedAvg.
            write_file: if True, checkpoint the model under write_path after
                training.
        Return:
            comp: number of FLOPs executed in training process
            num_train_samples: number of samples used in training
            update: set of weights
        """
        if minibatch is None:
            # FedAvg: train on the complete local dataset.
            data = self.train_data
            comp, update = self.model.train(data, num_epochs, batch_size)
        else:
            # Minibatch SGD: train on a random fraction of the local data.
            frac = min(1.0, minibatch)
            num_data = max(1, int(frac * len(self.train_data["x"])))
            xs, ys = zip(*random.sample(
                list(zip(self.train_data["x"], self.train_data["y"])), num_data))
            data = {"x": xs, "y": ys}
            # Minibatch trains for only 1 epoch - multiple local epochs don't make sense!
            num_epochs = 1
            comp, update = self.model.train(data, num_epochs, num_data)
        if write_file:
            # Checkpoint once after either branch (the original duplicated
            # this call inside both branches).
            self.save_model(os.path.join(self.write_path, "write_%s.ckpt" % self.id))
        # Bug fix: the original returned len(data), which is always 2 (the
        # number of dict keys), not the number of training samples.
        num_train_samples = len(data["y"])
        return comp, num_train_samples, update

    def prepare_test(self, batch_size, min_loss):
        """Lets the model prepare/warm up on this client's train_data."""
        data = self.train_data
        self.model.prepare_test(data, batch_size, min_loss)

    def save_model(self, path):
        """Saves the model to `path`; returns the model's save_file result."""
        return self.model.save_file(path)

    def test(self, set_to_use='test'):
        """Tests self.model on the selected dataset.

        Args:
            set_to_use. Set to test on. Should be in ['train', 'test'].
        Return:
            dict of metrics returned by the model.
        """
        assert set_to_use in ['train', 'test']
        if set_to_use == 'train':
            data = self.train_data
        else:
            data = self.eval_data
        return self.model.test(data)

    @property
    def num_test_samples(self):
        """Number of test samples for this client.

        Return:
            int: Number of test samples for this client
        """
        return len(self.eval_data['y'])

    @property
    def num_train_samples(self):
        """Number of train samples for this client.

        Return:
            int: Number of train samples for this client
        """
        if self.train_data is None:
            return 0
        return len(self.train_data['y'])

    @property
    def num_samples(self):
        """Total number of train + test samples for this client.

        Return:
            int: Number of samples for this client
        """
        train_size = len(self.train_data['y']) if self.train_data is not None else 0
        test_size = len(self.eval_data['y']) if self.eval_data is not None else 0
        return train_size + test_size

    @property
    def model(self):
        """Returns this client reference to model being trained"""
        return self._model

    @model.setter
    def model(self, model):
        warnings.warn('The current implementation shares the model among all clients.'
                      'Setting it on one client will effectively modify all clients.')
        self._model = model
| 35.271186 | 138 | 0.588419 |
ace67dc46ef3680aea4abd41026806f510eb6656 | 1,229 | py | Python | ARC073_a/test_pasted_from_page.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | ARC073_a/test_pasted_from_page.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | ARC073_a/test_pasted_from_page.py | staguchi0703/prob_boot_camp_medium | d6f31b3e50230877efb2ebfef40f90ef6468bfc7 | [
"MIT"
] | null | null | null | #
from resolve import resolve
####################################
####################################
# 以下にプラグインの内容をペーストする
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Sample-case tests that run ``resolve`` with redirected stdin/stdout.

    The method names use the Japanese word 入力例 ("sample input"); the input
    and expected-output strings look like competitive-programming samples.
    """
    def assertIO(self, input, output):
        # Feed ``input`` to resolve() via stdin, capture its stdout, and
        # compare the output (minus the trailing newline) with ``output``.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例_1(self):
        # Sample case 1.
        input = """2 4
0 3"""
        output = """7"""
        self.assertIO(input, output)
    def test_入力例_2(self):
        # Sample case 2.
        input = """2 4
0 5"""
        output = """8"""
        self.assertIO(input, output)
    def test_入力例_3(self):
        # Sample case 3 (large coordinate values).
        input = """4 1000000000
0 1000 1000000 1000000000"""
        output = """2000000000"""
        self.assertIO(input, output)
    def test_入力例_4(self):
        # Sample case 4 (minimal input).
        input = """1 1
0"""
        output = """1"""
        self.assertIO(input, output)
    def test_入力例_5(self):
        # Sample case 5.
        input = """9 10
0 3 5 7 100 110 200 300 311"""
        output = """67"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | 26.148936 | 59 | 0.539463 |
ace67e52c082b5fe716f526ddecb330f2aa92ee1 | 1,893 | py | Python | models/fgn.py | wang-chen/graph-action-recognition | 319a5287c3fb58f233a8b56ed70f5be94703aa61 | [
"BSD-3-Clause"
] | 2 | 2021-12-18T07:10:40.000Z | 2022-03-06T16:24:06.000Z | models/fgn.py | wang-chen/graph-action-recognition | 319a5287c3fb58f233a8b56ed70f5be94703aa61 | [
"BSD-3-Clause"
] | null | null | null | models/fgn.py | wang-chen/graph-action-recognition | 319a5287c3fb58f233a8b56ed70f5be94703aa61 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import torch
import torch.nn as nn
class FGN(nn.Module):
    """Graph-feature network over the two halves of the input's last axis.

    Each half is transformed against itself, then against the other half
    (cross features), before a linear classifier over 13 classes.
    """

    def __init__(self):
        super().__init__()
        # Module-creation order is kept identical so parameter init is
        # reproducible under a fixed RNG seed.
        self.feat1 = Trans1d(5, 32, kernel_size=(5,11), stride=1, padding=(0,5))
        self.acvt1 = nn.Sequential(nn.BatchNorm2d(32), nn.MaxPool2d((1,2)), nn.ReLU())
        self.feat2 = Trans1d(32, 32, kernel_size=(1,5), stride=1, padding=(0,2))
        self.acvt2 = nn.Sequential(nn.BatchNorm2d(32), nn.MaxPool2d((1,2)), nn.ReLU())
        self.linear = nn.Sequential(nn.Flatten(), nn.Dropout(0.2), nn.Linear(32*12, 13))

    def forward(self, x):
        # The last dimension must split into two equal halves.
        assert x.size(-1) % 2 == 0
        half = x.size(-1) // 2
        a, b = x.split([half, half], dim=-1)
        # Stage 1: each half is transformed against itself.
        a, b = self.feat1(a, a), self.feat1(b, b)
        a, b = self.acvt1(a), self.acvt1(b)
        # Stage 2: the halves are transformed against each other.
        a, b = self.feat2(a, b), self.feat2(b, a)
        a, b = self.acvt2(a), self.acvt2(b)
        return self.linear(torch.cat([a, b], dim=-1))
class Trans1d(nn.Module):
    '''
    Temporal Feature Transforming Layer for multi-channel 1D features.

    Builds a symmetric, row-normalized feature adjacency between two inputs
    and uses it to re-weight ``x`` before a circular 2D convolution.
    Expected layout of the inputs is 4D, indexed in the einsums as
    (batch, n, channel, position).
    '''

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, padding_mode='circular')

    def forward(self, x, neighbor):
        """Transform ``x`` using the adjacency computed between ``x`` and ``neighbor``."""
        adj = self.feature_adjacency(x, neighbor)
        return self.transform(x, adj)

    def transform(self, x, adj):
        # Contract the adjacency's last axis against x (y is summed out of
        # adj), then convolve; output layout matches x's (b, n, c, x).
        return self.conv(torch.einsum('bcxy,bncx->bncx', adj, x))

    def feature_adjacency(self, x, y):
        """Return a symmetric, sign-rooted, row-normalized adjacency (b, c, x, y)."""
        fadj = torch.einsum('bncx,bdcy->bcxy', x, y)
        # Fix: symmetrize out-of-place. The original `fadj += fadj.transpose(-2, -1)`
        # reads a transposed view that partially overlaps the written-to storage;
        # modern PyTorch raises "unsupported operation: some elements of the input
        # tensor and the written-to tensor refer to a single memory location", and
        # older versions could silently produce wrong values.
        fadj = fadj + fadj.transpose(-2, -1)
        return self.row_normalize(self.sgnroot(fadj))

    def sgnroot(self, x):
        """Signed square root: sign(x) * sqrt(|x|), clamped away from zero."""
        return x.sign() * (x.abs().clamp(min=1e-8).sqrt())

    def row_normalize(self, x):
        """Normalize each row (last dim) by its absolute sum; map NaNs to 0."""
        x = x / (x.abs().sum(-1, keepdim=True) + 1e-7)
        x[torch.isnan(x)] = 0
        return x
| 35.716981 | 111 | 0.59588 |
ace67f38fce2e5c469ff2310f9f83e46d2da589d | 1,073 | py | Python | jacerong/utils/__init__.py | jacerong/TASS-2017 | 80200a3ada8febe4af51ef9ea463f194dce73e42 | [
"MIT"
] | 5 | 2019-02-04T22:49:39.000Z | 2020-09-14T11:08:24.000Z | jacerong/utils/__init__.py | jacerong/TASS-2017 | 80200a3ada8febe4af51ef9ea463f194dce73e42 | [
"MIT"
] | null | null | null | jacerong/utils/__init__.py | jacerong/TASS-2017 | 80200a3ada8febe4af51ef9ea463f194dce73e42 | [
"MIT"
] | 3 | 2019-07-23T20:50:27.000Z | 2020-03-10T07:34:03.000Z | # -*- coding: iso-8859-15 -*-
import os
__all__ = ["convert_into_str", "convert_into_unicode", "remove_accent_marks",
"write_in_file", "services"]
def convert_into_str(token):
    """Coerce *token* to a native ``str``; non-str input is UTF-8 encoded."""
    if isinstance(token, str):
        return token
    return token.encode('utf-8')
def convert_into_unicode(token):
    # Python 2 only: relies on the py2 `unicode` builtin. Decodes UTF-8 byte
    # strings into unicode; values that are already unicode pass through.
    return token.decode('utf-8') if not isinstance(token, unicode) else token
def remove_accent_marks(word):
    # Replace Spanish accented vowels (and u-diaeresis) with their plain
    # counterparts, character by character. Python 2 code: depends on the
    # sibling convert_into_unicode helper and the py2 `unicode` builtin.
    word = convert_into_unicode(word)
    remove_accents = {u'\xe1': u'a',  # a-acute
                      u'\xe9': u'e',  # e-acute
                      u'\xed': u'i',  # i-acute
                      u'\xf3': u'o',  # o-acute
                      u'\xfa': u'u',  # u-acute
                      u'\xfc': u'u'}  # u-diaeresis
    # Map each character through the table, leaving unlisted characters as-is.
    return convert_into_unicode(
        ''.join([remove_accents[s] if s in remove_accents.keys() else s
                 for s in word])
    )
def write_in_file(fname, content, mode='w', makedirs_recursive=True):
    """Write ``content`` to ``fname``, optionally creating parent directories.

    Fix: derive the directory with ``os.path.dirname`` (portable across path
    separators) and skip directory creation when the path has no directory
    component — the original ``'/'.join(fname.split('/')[:-1])`` produced ``''``
    for bare filenames, and ``os.makedirs('')`` raises FileNotFoundError.

    :param fname: target file path.
    :param content: text (or bytes, with a binary ``mode``) to write.
    :param mode: file open mode, default ``'w'``.
    :param makedirs_recursive: create missing parent directories when True.
    """
    dir_ = os.path.dirname(fname)
    if makedirs_recursive and dir_ and not os.path.isdir(dir_):
        os.makedirs(dir_)
    with open(fname, mode) as f:
        f.write(content)
| 29 | 77 | 0.575023 |
ace68035c9b58301f87113a73af93e152bef3814 | 784 | py | Python | migrations/versions/1e44b9d888b9_initial_migration.py | Koech-code/Pitches | 1132b0604cbb0d21952d901b26592f29bfd40402 | [
"MIT"
] | null | null | null | migrations/versions/1e44b9d888b9_initial_migration.py | Koech-code/Pitches | 1132b0604cbb0d21952d901b26592f29bfd40402 | [
"MIT"
] | null | null | null | migrations/versions/1e44b9d888b9_initial_migration.py | Koech-code/Pitches | 1132b0604cbb0d21952d901b26592f29bfd40402 | [
"MIT"
] | null | null | null | """Initial Migration
Revision ID: 1e44b9d888b9
Revises: 212983c96341
Create Date: 2021-08-19 16:46:21.527036
"""
# revision identifiers, used by Alembic.
revision = '1e44b9d888b9'
down_revision = '212983c96341'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Forward migration: add plain `password` column, drop legacy `pass_secure`."""
    # NOTE(review): drop_column discards any existing pass_secure values;
    # there is no data copy between the two columns — confirm this is intended.
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('password', sa.String(length=255), nullable=True))
    op.drop_column('users', 'pass_secure')
    # ### end Alembic commands ###
def downgrade():
    """Reverse migration: restore `pass_secure` column, drop `password`."""
    # NOTE(review): as with upgrade(), no data is migrated between columns.
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('pass_secure', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('users', 'password')
    # ### end Alembic commands ###
| 27.034483 | 112 | 0.696429 |
ace681ecbfd2f3cb96341be06c4bc54fdd317672 | 545 | py | Python | src/data/245.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/245.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/245.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | from collections import deque
N, Q = map(int, input().split())
G = {i: [] for i in range(1, N + 1)}
for _ in range(N - 1):
a, b = map(int, input().split())
G[a].append(b)
G[b].append(a)
dist = [-1 for _ in range(N + 1)]
dist[1] = 0
que = deque([1])
while que:
x = que.popleft()
for y in G[x]:
if dist[y] < 0:
dist[y] = dist[x] + 1
que.append(y)
for _ in range(Q):
c, d = map(int, input().split())
if (dist[c] + dist[d]) % 2 == 0:
print("Town")
else:
print("Road")
| 22.708333 | 36 | 0.484404 |
ace6823ac2f2c1efd5b0d98c3e8a901cc655878e | 430 | py | Python | huaweicloud-sdk-hss/huaweicloudsdkhss/v1/__init__.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-hss/huaweicloudsdkhss/v1/__init__.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-hss/huaweicloudsdkhss/v1/__init__.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
# import HssClient
from huaweicloudsdkhss.v1.hss_client import HssClient
from huaweicloudsdkhss.v1.hss_async_client import HssAsyncClient
# import models into sdk package
from huaweicloudsdkhss.v1.model.host import Host
from huaweicloudsdkhss.v1.model.list_hosts_request import ListHostsRequest
from huaweicloudsdkhss.v1.model.list_hosts_response import ListHostsResponse
| 33.076923 | 76 | 0.867442 |
ace683b26697bf00b46011b40dad50deecd44476 | 553 | py | Python | classified-jobs/vacancies/api/viewsets.py | mattvidal/classified-jobs | 0b9918769f030a156742f761ac73ade80f756ed7 | [
"MIT"
] | null | null | null | classified-jobs/vacancies/api/viewsets.py | mattvidal/classified-jobs | 0b9918769f030a156742f761ac73ade80f756ed7 | [
"MIT"
] | null | null | null | classified-jobs/vacancies/api/viewsets.py | mattvidal/classified-jobs | 0b9918769f030a156742f761ac73ade80f756ed7 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from vacancies.api import serializers
from vacancies import models
class CandidateViewSet(viewsets.ModelViewSet):
    # Full CRUD REST endpoints for Candidate records.
    serializer_class = serializers.CandidateSerializer
    queryset = models.Candidate.objects.all()
class JobVacancyViewSet(viewsets.ModelViewSet):
    # Full CRUD REST endpoints for JobVacancy records.
    serializer_class = serializers.JobVacancySerializer
    queryset = models.JobVacancy.objects.all()
class EmployerViewSet(viewsets.ModelViewSet):
    # Full CRUD REST endpoints for Employer records.
    serializer_class = serializers.EmployerSerializer
    queryset = models.Employer.objects.all()
ace6840348efcd57e2cbe014e4e6cb453c73fc3b | 24 | py | Python | test/helper_utility.py | ahvblackwelltech/lambdata_ahvblackwelltech | 8a1b9f22e595735aeb89b4d1be0625deabf8ebc5 | [
"MIT"
] | null | null | null | test/helper_utility.py | ahvblackwelltech/lambdata_ahvblackwelltech | 8a1b9f22e595735aeb89b4d1be0625deabf8ebc5 | [
"MIT"
] | 1 | 2021-08-05T04:54:31.000Z | 2021-08-05T04:54:31.000Z | test/helper_utility.py | ahvblackwelltech/lambdata_ahvblackwelltech | 8a1b9f22e595735aeb89b4d1be0625deabf8ebc5 | [
"MIT"
] | null | null | null | import pandas as pd
| 4.8 | 20 | 0.666667 |
ace6848a58c9c62f9da83692042271bc9ab96d49 | 8,305 | py | Python | pandas/tests/window/test_expanding.py | vedkharche538/Pandas | 8e299c79c9a1c8caaa4d3a7fd08440b84ec248fc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-06T00:28:03.000Z | 2021-01-06T00:28:03.000Z | pandas/tests/window/test_expanding.py | vedkharche538/Pandas | 8e299c79c9a1c8caaa4d3a7fd08440b84ec248fc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/window/test_expanding.py | vedkharche538/Pandas | 8e299c79c9a1c8caaa4d3a7fd08440b84ec248fc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.window import Expanding
def test_doc_string():
    """Smoke-test the example shown in the `expanding` docstring."""
    frame = DataFrame({"B": [0, 1, 2, np.nan, 4]})
    frame  # mirrors the doc example's bare display expression
    frame.expanding(2).sum()
@pytest.mark.filterwarnings(
    "ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor(which):
    """Expanding constructor accepts min_periods and the deprecated center flag."""
    # GH 12669
    # `which` is a conftest fixture — presumably supplies a Series or DataFrame.
    c = which.expanding
    # valid
    c(min_periods=1)
    c(min_periods=1, center=True)
    c(min_periods=1, center=False)
@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])
@pytest.mark.filterwarnings(
    "ignore:The `center` argument on `expanding` will be removed in the future"
)
def test_constructor_invalid(which, w):
    """Non-integer min_periods and non-boolean center raise ValueError."""
    # not valid
    c = which.expanding
    msg = "min_periods must be an integer"
    with pytest.raises(ValueError, match=msg):
        c(min_periods=w)
    msg = "center must be a boolean"
    with pytest.raises(ValueError, match=msg):
        c(min_periods=1, center=w)
@pytest.mark.parametrize("method", ["std", "mean", "sum", "max", "min", "var"])
def test_numpy_compat(method):
    """numpy-style positional and dtype arguments are rejected on window methods."""
    # see gh-12811
    e = Expanding(Series([2, 4, 6]), window=2)
    msg = "numpy operations are not valid with window objects"
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(e, method)(1, 2, 3)
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
    "expander",
    [
        1,
        pytest.param(
            "ls",
            marks=pytest.mark.xfail(
                reason="GH#16425 expanding with offset not supported"
            ),
        ),
    ],
)
def test_empty_df_expanding(expander):
    """Expanding aggregations on empty DataFrames return empty DataFrames."""
    # GH 15819 Verifies that datetime and integer expanding windows can be
    # applied to empty DataFrames
    expected = DataFrame()
    result = DataFrame().expanding(expander).sum()
    tm.assert_frame_equal(result, expected)
    # Verifies that datetime and integer expanding windows can be applied
    # to empty DataFrames with datetime index
    expected = DataFrame(index=pd.DatetimeIndex([]))
    result = DataFrame(index=pd.DatetimeIndex([])).expanding(expander).sum()
    tm.assert_frame_equal(result, expected)
def test_missing_minp_zero():
    """min_periods=0 sums an all-NaN window to 0.0; min_periods=1 keeps NaN."""
    # https://github.com/pandas-dev/pandas/pull/18921
    ser = Series([np.nan])
    # minp=0
    tm.assert_series_equal(ser.expanding(min_periods=0).sum(), Series([0.0]))
    # minp=1
    tm.assert_series_equal(ser.expanding(min_periods=1).sum(), Series([np.nan]))
def test_expanding_axis(axis_frame):
    """expanding(..., axis=...) aggregates along the requested axis (GH 23372)."""
    # see gh-23372.
    # `axis_frame` is a conftest fixture — presumably yields 0/1 or their
    # string aliases; _get_axis_number normalizes it below.
    df = DataFrame(np.ones((10, 20)))
    axis = df._get_axis_number(axis_frame)
    if axis == 0:
        # Column-wise expanding sum: first two rows NaN (min_periods=3).
        expected = DataFrame(
            {i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}
        )
    else:
        # axis == 1
        expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)
    result = df.expanding(3, axis=axis_frame).sum()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_expanding_count_with_min_periods(constructor):
    """count honors min_periods: windows shorter than 3 yield NaN (GH 26996)."""
    obj = constructor(range(5))
    outcome = obj.expanding(min_periods=3).count()
    tm.assert_equal(outcome, constructor([np.nan, np.nan, 3.0, 4.0, 5.0]))
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_expanding_count_default_min_periods_with_null_values(constructor):
    """With default min_periods, count skips NaNs but emits every row (GH 26996)."""
    data = [1, 2, 3, np.nan, 4, 5, 6]
    counted = constructor(data).expanding().count()
    tm.assert_equal(counted, constructor([1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]))
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_expanding_count_with_min_periods_exceeding_series_length(constructor):
    """min_periods larger than the data length makes every window NaN (GH 25857)."""
    outcome = constructor(range(5)).expanding(min_periods=6).count()
    tm.assert_equal(outcome, constructor([np.nan] * 5))
@pytest.mark.parametrize(
    "df,expected,min_periods",
    [
        (
            DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
            [
                ({"A": [1], "B": [4]}, [0]),
                ({"A": [1, 2], "B": [4, 5]}, [0, 1]),
                ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
            ],
            3,
        ),
        (
            DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
            [
                ({"A": [1], "B": [4]}, [0]),
                ({"A": [1, 2], "B": [4, 5]}, [0, 1]),
                ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
            ],
            2,
        ),
        (
            DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),
            [
                ({"A": [1], "B": [4]}, [0]),
                ({"A": [1, 2], "B": [4, 5]}, [0, 1]),
                ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),
            ],
            1,
        ),
        (DataFrame({"A": [1], "B": [4]}), [], 2),
        (DataFrame(), [({}, [])], 1),
        (
            DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
            [
                ({"A": [1.0], "B": [np.nan]}, [0]),
                ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
                ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
            ],
            3,
        ),
        (
            DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
            [
                ({"A": [1.0], "B": [np.nan]}, [0]),
                ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
                ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
            ],
            2,
        ),
        (
            DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),
            [
                ({"A": [1.0], "B": [np.nan]}, [0]),
                ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),
                ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),
            ],
            1,
        ),
    ],
)
def test_iter_expanding_dataframe(df, expected, min_periods):
    """Iterating an expanding DataFrame yields the growing window frames (GH 11704)."""
    # GH 11704
    # Note: iteration yields every prefix window regardless of min_periods.
    expected = [DataFrame(values, index=index) for (values, index) in expected]
    for (expected, actual) in zip(expected, df.expanding(min_periods)):
        tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
    "ser,expected,min_periods",
    [
        (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),
        (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),
        (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),
        (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2),
        (Series([np.nan, 2]), [([np.nan], [0]), ([np.nan, 2], [0, 1])], 2),
        (Series([], dtype="int64"), [], 2),
    ],
)
def test_iter_expanding_series(ser, expected, min_periods):
    """Iterating an expanding Series yields the growing window series (GH 11704)."""
    # GH 11704
    # Note: iteration yields every prefix window regardless of min_periods.
    expected = [Series(values, index=index) for (values, index) in expected]
    for (expected, actual) in zip(expected, ser.expanding(min_periods)):
        tm.assert_series_equal(actual, expected)
def test_center_deprecate_warning():
    """Passing `center` to expanding emits FutureWarning; omitting it is silent."""
    # GH 20647
    # Version-specific: the center kwarg was deprecated at the time this test
    # was written and emits a FutureWarning for either boolean value.
    df = DataFrame()
    with tm.assert_produces_warning(FutureWarning):
        df.expanding(center=True)
    with tm.assert_produces_warning(FutureWarning):
        df.expanding(center=False)
    with tm.assert_produces_warning(None):
        df.expanding()
@pytest.mark.parametrize("constructor", ["DataFrame", "Series"])
def test_expanding_sem(constructor):
    """expanding().sem() matches the closed-form values for [0, 1, 2] (GH 26476)."""
    obj = getattr(pd, constructor)([0, 1, 2])
    computed = obj.expanding().sem()
    if isinstance(computed, DataFrame):
        # Collapse the single-column frame so one expected Series covers both cases.
        computed = Series(computed[0].values)
    tm.assert_series_equal(computed, Series([np.nan] + [0.707107] * 2))
@pytest.mark.parametrize("method", ["skew", "kurt"])
def test_expanding_skew_kurt_numerical_stability(method):
    """Higher moments are invariant to a constant shift of the data (GH 6929)."""
    base = Series(np.random.rand(10))
    reference = getattr(base.expanding(3), method)()
    shifted = base + 5000
    outcome = getattr(shifted.expanding(3), method)()
    tm.assert_series_equal(outcome, reference)
| 30.988806 | 87 | 0.542565 |
ace6853ba82b742fef7ef037b8ecfd82dac2e2c2 | 396 | py | Python | django_cricket_statistics/migrations/0003_auto_20200916_0306.py | drewyh/django-cricket-statistics | f5a9b112d83c1a0d2c974f8a8b491a87c92cf8de | [
"Apache-2.0"
] | null | null | null | django_cricket_statistics/migrations/0003_auto_20200916_0306.py | drewyh/django-cricket-statistics | f5a9b112d83c1a0d2c974f8a8b491a87c92cf8de | [
"Apache-2.0"
] | 4 | 2020-08-28T06:20:10.000Z | 2020-09-22T13:42:06.000Z | django_cricket_statistics/migrations/0003_auto_20200916_0306.py | drewyh/django-cricket-statistics | f5a9b112d83c1a0d2c974f8a8b491a87c92cf8de | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-15 17:06
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Statistic.number_of_matches -> Statistic.matches (schema-only).

    dependencies = [
        ('django_cricket_statistics', '0002_auto_20200905_0345'),
    ]

    operations = [
        migrations.RenameField(
            model_name='statistic',
            old_name='number_of_matches',
            new_name='matches',
        ),
    ]
| 20.842105 | 65 | 0.618687 |
ace6854f30712246c1665332bf0d9bf7d737facc | 1,594 | py | Python | zsimparse/h5stat.py | gaomy3832/zsimparse | 6d94c92f58ca62a11b946dd23031c97eb3fb7ddb | [
"BSD-3-Clause"
] | 3 | 2018-08-26T10:52:40.000Z | 2021-01-26T08:58:41.000Z | zsimparse/h5stat.py | gaomy3832/zsimparse | 6d94c92f58ca62a11b946dd23031c97eb3fb7ddb | [
"BSD-3-Clause"
] | null | null | null | zsimparse/h5stat.py | gaomy3832/zsimparse | 6d94c92f58ca62a11b946dd23031c97eb3fb7ddb | [
"BSD-3-Clause"
] | 2 | 2019-09-17T09:24:04.000Z | 2020-08-25T11:07:27.000Z | """ $lic$
Copyright (C) 2016-2020, Mingyu Gao
All rights reserved.
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import os
import h5py
import numpy as np
from .base_data_dict import BaseDataDict
from . import util
class H5Stat(BaseDataDict):
    '''
    A wrapper class of h5 stat.
    '''
    @classmethod
    def make_from_file(cls, fname):
        '''
        Construct from an h5 file.

        Raises ValueError if the file does not exist and RuntimeError if the
        'stats/root' dataset has fewer than 2 entries along its first axis
        (treated as an incomplete simulation).
        '''
        if not os.path.exists(fname):
            raise ValueError('{}: {}: no h5 file {} found'
                             .format(util.PACKAGE_NAME, cls.__name__, fname))
        fobj = h5py.File(fname, 'r')
        # NOTE(review): fobj is never closed; `stat` is an h5py dataset backed
        # by the open file, so closing here would invalidate it — confirm the
        # open handle's lifetime is intentional.
        stat = fobj['stats']['root']
        if stat.shape[0] < 2:
            raise RuntimeError('{}: {}: incomplete simulation for h5 file {}'
                               .format(util.PACKAGE_NAME, cls.__name__, fname))
        return cls(stat)
    def num_samples(self):
        ''' Get the number of stat samples. '''
        # Product of all dimensions of the underlying dataset's shape.
        # `ddict` presumably is set by BaseDataDict from the constructor arg.
        return np.prod(self.ddict.shape)
    def num_dims(self):
        ''' Get the dimensions of stat samples. '''
        # Rank (number of axes) of the underlying dataset.
        return len(self.ddict.shape)
| 28.464286 | 79 | 0.642409 |
ace686ee8df05528fc2aac1d01b455023ed8f053 | 1,982 | py | Python | create_split.py | truebigsand/GiantMIDI-Piano | 2d23535e808bcfa9c5f1432f4808758cf3026416 | [
"CC-BY-4.0"
] | 1,148 | 2020-10-06T01:55:05.000Z | 2022-03-30T09:34:01.000Z | create_split.py | truebigsand/GiantMIDI-Piano | 2d23535e808bcfa9c5f1432f4808758cf3026416 | [
"CC-BY-4.0"
] | 4 | 2021-06-23T02:19:39.000Z | 2022-01-23T06:04:41.000Z | create_split.py | truebigsand/GiantMIDI-Piano | 2d23535e808bcfa9c5f1432f4808758cf3026416 | [
"CC-BY-4.0"
] | 134 | 2020-10-16T05:25:12.000Z | 2022-03-29T13:24:18.000Z | import argparse
import os
from dataset import read_csv_to_meta_dict, write_meta_dict_to_csv
def create_piano_split(args):
    """Validation, test, train: 1:1:8

    Reads the piano-solo probability CSV from the workspace, assigns each
    piece with probability >= 0.5 to a 10-fold cycle (1 validation, 1 test,
    8 train) per composer, marks everything else 'none', and writes the
    result back out with a new 'split' column.
    """
    # Arguments & parameters
    workspace = args.workspace
    # Paths
    piano_prediction_path = os.path.join(workspace,
        'full_music_pieces_youtube_similarity_pianosoloprob.csv')
    split_path = os.path.join(workspace,
        'full_music_pieces_youtube_similarity_pianosoloprob_split.csv')
    # Meta info to be downloaded
    meta_dict = read_csv_to_meta_dict(piano_prediction_path)
    splits = []
    i = 0
    for n in range(len(meta_dict['surname'])):
        if float(meta_dict['piano_solo_prob'][n]) >= 0.5:
            # Cycle position 0 -> validation, 1 -> test, 2..9 -> train.
            if i == 0:
                splits.append('validation')
            elif i == 1:
                splits.append('test')
            else:
                splits.append('train')
            i += 1
        else:
            # Not a piano solo: excluded from every split.
            splits.append('none')
        # Reset i if moved to next composer
        if n > 0:
            # NOTE(review): both '{}' slots are filled with the surname, so this
            # is effectively a surname-only comparison; the 'surname, firstname'
            # format suggests a firstname field was intended — TODO confirm
            # against the CSV columns.
            previous_name = '{}, {}'.format(meta_dict['surname'][n - 1], meta_dict['surname'][n - 1])
            current_name = '{}, {}'.format(meta_dict['surname'][n], meta_dict['surname'][n])
            if previous_name != current_name:
                i = 0
        # Wrap the 10-fold cycle.
        if i == 10:
            i = 0
    meta_dict['split'] = splits
    write_meta_dict_to_csv(meta_dict, split_path)
    print('Write csv to {}'.format(split_path))
if __name__ == '__main__':
    # CLI entry point: python create_split.py create_piano_split --workspace DIR
    parser = argparse.ArgumentParser(description='')
    subparsers = parser.add_subparsers(dest='mode')
    parser_create_piano_split = subparsers.add_parser('create_piano_split')
    parser_create_piano_split.add_argument('--workspace', type=str, required=True, help='Directory of your workspace.')
    # Parse arguments
    args = parser.parse_args()
    if args.mode == 'create_piano_split':
        create_piano_split(args)
    else:
        raise Exception('Error argument!')
ace68722cb605c9129856c5d70fc10c032655774 | 9,288 | py | Python | threeML/io/plotting/plot_style.py | BjoernBiltzinger/threeML | fc3d989173b1613a199633455f260e67fdb50369 | [
"BSD-3-Clause"
] | null | null | null | threeML/io/plotting/plot_style.py | BjoernBiltzinger/threeML | fc3d989173b1613a199633455f260e67fdb50369 | [
"BSD-3-Clause"
] | null | null | null | threeML/io/plotting/plot_style.py | BjoernBiltzinger/threeML | fc3d989173b1613a199633455f260e67fdb50369 | [
"BSD-3-Clause"
] | null | null | null | import matplotlib as mpl
import matplotlib.pyplot as plt
import contextlib
import glob
import os
import yaml
from threeML.io.package_data import get_path_of_data_file, get_path_of_user_dir
from astromodels.utils.valid_variable import is_valid_variable_name
def check_legal_plot_style_name(style_name):
if style_name not in defined_styles:
raise NameError("Style '%s' is not known. Valid styles: %s" % (style_name, ",".join(defined_styles.keys())))
class PlotStyle(object):
"""
Contains the styles for the plots. It embeds the matplotlib style, so that by choosing
an instance of PlotStyle the user can set at the same time the matplotlib style and
all the elements of the 3ML style.
"""
def __init__(self,
matplotlib_base_style='seaborn-notebook',
matplotlib_overrides=None,
threeml_style=None):
assert matplotlib_base_style in plt.style.available, \
"Style %s is not among the known matplotlib styles" % matplotlib_base_style
self._matplotlib_base_style = matplotlib_base_style
self._matplotlib_overrides = {} if matplotlib_overrides is None else matplotlib_overrides
self._threeml_style = {} if threeml_style is None else threeml_style
@classmethod
def from_style_file(cls, filename):
# Read style file
with open(filename) as f:
d = yaml.load(f)
return cls(matplotlib_base_style=d['matplotlib_base_style'],
matplotlib_overrides=d['matplotlib_overrides'],
threeml_style=d['threeml_style'])
def clone(self):
"""
Clone this style
"""
clone = PlotStyle(matplotlib_base_style=self._matplotlib_base_style,
matplotlib_overrides=dict(self._matplotlib_overrides),
threeml_style=dict(self._threeml_style))
return clone
def activate(self):
"""
Activate this style so that it becomes the default style for any plot. This is mainly useful for the
default style. For any other style, use the `with plot_style([style name])` context manager instead.
:return: None
"""
# Activate matplotlib base style
mpl.style.use(self._matplotlib_base_style)
# Override some settings if needed
mpl.rcParams.update(self._matplotlib_overrides)
# Use this style as active style
global current_style
current_style = self
@staticmethod
def deactivate():
"""
Deactivate the current style and restore the default. Do not use this directly. Use the
`with plot_style([style name])` context manager instead.
:return: None
"""
# Restore matplotlib defaults
mpl.rcdefaults()
# Restore 3ML default
global current_style
current_style = defined_styles['default']
@staticmethod
def _check_name(name):
if not is_valid_variable_name(name):
raise NameError("The name '%s' is not valid. Please use a simple name with no spaces nor "
"special characters." % (name))
def save(self, name, overwrite=False):
"""
Save the style with the provided name, so it will be made available also in future sessions of 3ML.
:param name: the name to give to the new style
:param overwrite: whether to overwrite an existing style with the same name or not
:return: the path of the YAML file in which the style has been saved for future use
"""
# Make sure name is legal
self._check_name(name)
# Make sure we are not trying to overwrite the default style
assert name != "default", "You cannot overwrite the default style"
# Get the list of existing styles
defined_styles = _discover_styles()
# Prepare dictionary to be written
d = {}
d['matplotlib_base_style'] = self._matplotlib_base_style
d['matplotlib_overrides'] = self._matplotlib_overrides
d['threeml_style'] = self._threeml_style
# Write it
# Save in the style directory
this_path = os.path.join(_get_styles_directory(), "%s.yml" % name)
# Check whether it exists already.
if this_path in defined_styles and not overwrite:
raise IOError("Style %s already exists. Use 'overwrite=True' to overwrite it." % name)
# If necessary, create the styles directory (needed the first time that the user
# save a custom style)
if not os.path.exists(_get_styles_directory()):
os.makedirs(_get_styles_directory())
# At this point, either the file is new or we are overwriting, so we can open with "w+"
with open(this_path, "w+") as f:
yaml.dump(d, f)
print("Successfully written style into %s" % this_path)
# Refresh the list of defined styles so the new style can be used immediately
_refresh_defined_styles()
# Return the path
return this_path
@staticmethod
def _raise_unknown_element(item):
raise NameError("'%s' is not a known style element" % item)
def __setitem__(self, item, setting):
if item not in self._threeml_style:
if item in mpl.rcParams:
self._matplotlib_overrides[item] = setting
else:
self._raise_unknown_element(item)
else:
self._threeml_style[item] = setting
def __getitem__(self, item):
if item in self._threeml_style:
return self._threeml_style[item]
else:
if item in mpl.rcParams:
return mpl.rcParams[item]
elif item in self._matplotlib_overrides:
return self._matplotlib_overrides[item]
else:
self._raise_unknown_element(item)
@contextlib.contextmanager
def plot_style(style_name):
"""
A context manager to temporarily change the plotting style to the provided style.
Examples:
Say we have defined a style 'plain'::
with plot_style('plain'):
# plots generated here will have the 'plain' style
...
# plots generated here will have the default style
...
You can also temporarily change an attribute of the style within the `with` context::
with plot_style('plain') as my_style:
# Temporarily change the width of the lines. Outside of this particular "with" context
# reusing the "plain" style will result in normal lines
my_style['lines.linewidth'] = 2
# plots generated here will have the 'plain' style with lines with double width
# Plots generated here will have the default style
...
:param style_name: name of the style. Use `get_available_plotting_styles()` to get a list of known styles.
:return: style instance
"""
check_legal_plot_style_name(style_name)
# Get the PlotStyle instances corresponding to the provided style.
# We clone the style so that the user can temporarily change anything in the style
# within the `with` statement only temporarily affecting the plots. After the `with`
# context is done, the original style will be unaffected
style = defined_styles[style_name].clone()
# Activate
style.activate()
# Return control to caller yielding the clone of the style instance.
yield style
# After the caller is done, restore default
style.deactivate()
def create_new_plotting_style(based_on='default'):
"""
Create a new plotting style ready for customization, based on an existing plotting style. By default, the
default plotting style is used.
:param based_on: the plot style to clone. By default, the default plotting style is used.
:return: a PlotStyle instance ready for customization
"""
check_legal_plot_style_name(based_on)
return defined_styles[based_on].clone()
def _get_styles_directory():
return os.path.join(get_path_of_user_dir(), 'styles')
def _discover_styles():
# Scan the 3ML styles directory for styles
styles = glob.glob(os.path.join(_get_styles_directory(), '*.yml'))
return styles
def _load_styles():
# Discover defined styles
styles = _discover_styles()
# Load them
defined_styles = {}
for style_file in styles:
this_style = PlotStyle.from_style_file(style_file)
# The name of the style is just the file name without the .yml extension
style_name = os.path.splitext(os.path.basename(style_file))[0]
defined_styles[style_name] = this_style
# Now load the default style
default_style_filename = get_path_of_data_file("default_style.yml")
defined_styles['default'] = PlotStyle.from_style_file(default_style_filename)
return defined_styles
def get_available_plotting_styles():
return defined_styles.keys()
# Load them on import
defined_styles = _load_styles()
current_style = defined_styles['default']
# This is used to refresh the list on demand
def _refresh_defined_styles():
global defined_styles
defined_styles = _load_styles() | 28.231003 | 116 | 0.662898 |
ace6874f68d0b6b545f8b9798b1eb244c18bb7dc | 4,480 | py | Python | zvt/recorders/eastmoney/finance/china_stock_finance_factor_recorder.py | markqiu/zvt | 1bcfb71279f2652c3600f0f8e45d941f98ceaa10 | [
"MIT"
] | 6 | 2020-09-03T10:02:00.000Z | 2021-02-04T02:51:47.000Z | zvt/recorders/eastmoney/finance/china_stock_finance_factor_recorder.py | wlwd13303/zvt | 23105a5bfdc3a5080c6c22d11e9e53d216688dea | [
"MIT"
] | null | null | null | zvt/recorders/eastmoney/finance/china_stock_finance_factor_recorder.py | wlwd13303/zvt | 23105a5bfdc3a5080c6c22d11e9e53d216688dea | [
"MIT"
] | 2 | 2020-07-08T04:15:40.000Z | 2021-06-08T08:51:31.000Z | # -*- coding: utf-8 -*-
from zvt.utils.time_utils import to_pd_timestamp
from zvt.utils.utils import add_func_to_value, to_float
from zvt.api.quote import to_report_period_type
from zvt.domain import FinanceFactor
from zvt.recorders.eastmoney.finance.base_china_stock_finance_recorder import BaseChinaStockFinanceRecorder
# Maps FinanceFactor schema field names to the Eastmoney API response fields.
finance_factor_map = {
    # Basic earnings per share (CNY)
    "basic_eps": "Epsjb",
    # EPS excluding non-recurring items (CNY)
    "deducted_eps": "Epskcjb",
    # Diluted EPS (CNY)
    "diluted_eps": "Epsxs",
    # Net assets (book value) per share (CNY)
    "bps": "Bps",
    # Capital reserve per share (CNY)
    "capital_reserve_ps": "Mgzbgj",
    # Undistributed profit per share (CNY)
    "undistributed_profit_ps": "Mgwfplr",
    # Operating cash flow per share (CNY)
    "op_cash_flow_ps": "Mgjyxjje",
    # Growth indicators
    #
    # Total operating income (CNY)
    "total_op_income": "Totalincome",
    # Gross profit (CNY)
    "gross_profit": "Grossprofit",
    # Net profit attributable to parent (CNY)
    "net_profit": "Parentnetprofit",
    # Net profit excluding non-recurring items (CNY)
    "deducted_net_profit": "Bucklenetprofit",
    # Total operating income growth, year over year
    "total_op_income_growth_yoy": "Totalincomeyoy",
    # Attributable net profit growth, year over year
    # NOTE(review): this key has a trailing space — likely a typo that makes
    # the mapped attribute name end in a space; confirm against the
    # FinanceFactor schema before fixing.
    "inc_net_profit_shareholders_yoy ": "Parentnetprofityoy",
    # Deducted net profit growth, year over year
    "deducted_net_profit_growth_yoy": "Bucklenetprofityoy",
    # Total operating income growth, rolling quarter over quarter
    "op_income_growth_qoq": "Totalincomerelativeratio",
    # Attributable net profit growth, rolling quarter over quarter
    "net_profit_growth_qoq": "Parentnetprofitrelativeratio",
    # Deducted net profit growth, rolling quarter over quarter
    "deducted_net_profit_growth_qoq": "Bucklenetprofitrelativeratio",
    # Profitability indicators
    #
    # Return on equity (weighted)
    "roe": "Roejq",
    # Return on equity (deducted / weighted)
    "deducted_roe": "Roekcjq",
    # Return on total assets (weighted)
    "rota": "Allcapitalearningsrate",
    # Gross margin
    "gross_profit_margin": "Grossmargin",
    # Net margin
    "net_margin": "Netinterest",
    # Earnings quality indicators
    #
    # Advance receipts / operating income
    "advance_receipts_per_op_income": "Accountsrate",
    # Net sales cash flow / operating income
    "sales_net_cash_flow_per_op_income": "Salesrate",
    # Net operating cash flow / operating income
    "op_net_cash_flow_per_op_income": "Operatingrate",
    # Effective tax rate
    "actual_tax_rate": "Taxrate",
    # Financial risk indicators
    #
    # Current ratio
    "current_ratio": "Liquidityratio",
    # Quick ratio
    "quick_ratio": "Quickratio",
    # Cash flow ratio
    "cash_flow_ratio": "Cashflowratio",
    # Debt-to-asset ratio
    "debt_asset_ratio": "Assetliabilityratio",
    # Equity multiplier
    "em": "Equitymultiplier",
    # Equity (debt-to-equity) ratio
    "equity_ratio": "Equityratio",
    # Operating capability indicators (general companies)
    #
    # Total asset turnover days
    "total_assets_turnover_days": "Totalassetsdays",
    # Inventory turnover days
    "inventory_turnover_days": "Inventorydays",
    # Accounts receivable turnover days
    "receivables_turnover_days": "Accountsreceivabledays",
    # Total asset turnover (times)
    "total_assets_turnover": "Totalassetrate",
    # Inventory turnover (times)
    "inventory_turnover": "Inventoryrate",
    # Accounts receivable turnover (times)
    "receivables_turnover": "Accountsreceiveablerate",
    # Sector-specific indicators (banks)
    #
    # Total deposits
    "fi_total_deposit": "Totaldeposit",
    # Total loans
    "fi_total_loan": "Totalloan",
    # Loan-to-deposit ratio
    "fi_loan_deposit_ratio": "Depositloanratio",
    # Capital adequacy ratio
    "fi_capital_adequacy_ratio": "Capitaladequacyratio",
    # Core capital adequacy ratio
    "fi_core_capital_adequacy_ratio": "Corecapitaladequacyratio",
    # Non-performing loan ratio
    "fi_npl_ratio": "Nplratio",
    # Non-performing loan provision coverage
    "fi_npl_provision_coverage": "Nplprovisioncoverage",
    # Net capital
    "fi_net_capital": "Netcapital_b",
    # Sector-specific indicators (insurance)
    #
    # Total investment return
    "insurance_roi": "Tror",
    # Net investment yield
    "insurance_net_investment_yield": "Nror",
    # Earned premium
    "insurance_earned_premium": "Eapre",
    # Claims / payout expense
    "insurance_payout": "Comexpend",
    # Surrender rate
    "insurance_surrender_rate": "Surrate",
    # Solvency adequacy ratio
    "insurance_solvency_adequacy_ratio": "Solvenra",
    # Sector-specific indicators (brokers)
    #
    # Net capital
    "broker_net_capital": "Netcapital",
    # Net assets
    "broker_net_assets": "Netassets",
    # Net capital / net assets
    "broker_net_capital_assets_ratio": "Captialrate",
    # Proprietary fixed-income securities holdings / net capital
    "broker_self_operated_fixed_income_securities_net_capital_ratio": "Incomesizerate",
}
# Wrap every mapping so the raw API string values are parsed as floats.
add_func_to_value(finance_factor_map, to_float)
# Report date fields use their own converters instead of to_float.
finance_factor_map["report_period"] = ("ReportDate", to_report_period_type)
finance_factor_map["report_date"] = ("ReportDate", to_pd_timestamp)
class ChinaStockFinanceFactorRecorder(BaseChinaStockFinanceRecorder):
    # Records key finance indicators ("ZhuYaoZhiBiao") from the Eastmoney API
    # into the FinanceFactor schema.
    url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetZhuYaoZhiBiaoList'
    finance_report_type = 'ZhuYaoZhiBiaoList'
    data_schema = FinanceFactor
    data_type = 1
    def get_data_map(self):
        # Field mapping consumed by the base recorder to translate API fields.
        return finance_factor_map
# Public API of this module.
__all__ = ['ChinaStockFinanceFactorRecorder']
if __name__ == '__main__':
    # init_log('finance_factor.log')
    # Ad-hoc run: record finance factors for Ping An Bank (000001).
    recorder = ChinaStockFinanceFactorRecorder(codes=['000001'])
    recorder.run()
| 27.654321 | 107 | 0.678571 |
ace6887d55eafcf36cc7066690432d3774c7d944 | 7,213 | py | Python | ruleProcess/tokenMatchers.py | neel-lab/GlycoOntology | f65faebe5d8ca86d057f4972fad981ba06ff6284 | [
"CC-BY-4.0"
] | null | null | null | ruleProcess/tokenMatchers.py | neel-lab/GlycoOntology | f65faebe5d8ca86d057f4972fad981ba06ff6284 | [
"CC-BY-4.0"
] | null | null | null | ruleProcess/tokenMatchers.py | neel-lab/GlycoOntology | f65faebe5d8ca86d057f4972fad981ba06ff6284 | [
"CC-BY-4.0"
] | null | null | null | import re
#########################################
# Rule and Attribute Class Matching Class
#########################################
class matcherClass:
    """Wraps a regular expression used to lexicographically parse rule
    strings and extract rule tokens.

    When ``front`` is True (the default) the pattern is anchored with
    ``^`` so matching steps into the rule string from its beginning.
    """
    def __init__(self, regex, front=True):
        """
        :param regex: regular-expression pattern for the token to match.
        :param front: when True, anchor the pattern at the string start.
        """
        # Single conditional assignment; the original assigned
        # self.matchString unconditionally and then overwrote it in
        # both branches (dead store).
        self.matchString = ('^' + regex) if front else regex
    def search(self, string):
        """Return the ``re.Match`` for ``string``, or None if no match."""
        return re.search(self.matchString, string)
    def isPresent(self, string):
        """Return True when the pattern matches ``string``."""
        return self.search(string) is not None
    def __call__(self, string, presence=True):
        """Return a boolean (``presence=True``, default) or the match object."""
        if presence:
            return self.isPresent(string)
        return self.search(string)
############################
# Lexion-based Matcher Class
############################
class LexMatcher:
    r"""Regular-expression matcher built from one or more lexicons.

    Each positional argument is a list of expected literal values which is
    compiled into an alternation group.  An optional ``regex`` template
    containing ``%s`` placeholders embeds the alternation(s) inside a
    larger pattern, e.g.::

        INPUT
         Lexicon: [Gal,Glc,GlcNAc]
         Regex: %s\[.+?\]
         front=True
        OUTPUT
         Search String: "^(Gal|Glc|GlcNAc)\[.+?\]"
    """
    def __init__(self, *args, regex=None, front=True):
        self.lexicons = args
        self.regex = regex
        self.lexRegex = self.make_regex()
        # Anchor at the start so matching steps into the rule string.
        if front:
            self.lexRegex = '^' + self.lexRegex
    def make_regex(self):
        """Build the search pattern from the lexicons (and the template)."""
        alternations = [
            '(' + '|'.join(re.escape(entry) for entry in lexicon) + ')'
            for lexicon in self.lexicons
        ]
        if self.regex is not None:
            # Substitute each %s placeholder with its lexicon alternation.
            return self.regex % tuple(alternations)
        if len(alternations) > 1:
            raise Exception('Multiple lexicons passed, must provide a regex string')
        return alternations[0]
    def search(self, string):
        """Return the ``re.Match`` for ``string``, or None."""
        return re.search(self.lexRegex, string)
    def isPresent(self, string):
        """Return True when the pattern matches ``string``."""
        return self.search(string) is not None
    def __call__(self, string, presence=False):
        """Return the match object (default) or a boolean (``presence=True``)."""
        return self.isPresent(string) if presence else self.search(string)
####################
# Reaction Matchers:
####################
# NOTE(review): this first batch is commented out; live (looser) versions
# are defined further below under "Constraint Detection".
#reactionMatcher=matcherClass('\{.+?\}')
#additionMatcher=matcherClass('\{(?!\!).+?\}')
#subtractionMatcher=matcherClass('\{(?=\!).+?\}')
#substitutionMatcher=matcherClass('\{.+?(?<!\<)\-\>.+?\}')
#reversibleMatcher=matcherClass('\{.+?\<\-\>.+?\}')
###################
# Entity Detection:
###################
# Lexicons of literal tokens recognized in glyco rule strings, keyed by
# entity category.
entityDict={
'monosaccharides':['GlcNAc','GlcN','GlcA','Glc','GalNAc','GalN','Gal','ManNAc','Man','Neu5Ac','Neu5Gc','Xyl','Fuc','IdoA','Kdn','Ribitol(P5-1)','Ribitol(P5-3)','Ribitol','Fruc'],
'Nucleotides':['CMP','UDP','GDP'],
'Modifications':['S','P','Ac'],
'Substrates':['PAPS','R','ATP'],
'Compartments':['ER','Golgi','Cytoplasm','Extracellular'],
'Aglyca':['Dol-P-P','Dol-P', 'Asn','Ser/Thr','Ser','Thr','Cer','5-hydroxy-L-lysyl','Lys','WXXW','PI','[Dystro]','[anchored protein]'],
'ProteinConstraints':['EGF','Cad','TSR']
}
############################
# Matcher Classes:
############################
#Monosaccharide Matcher
# Matches a monosaccharide token with optional compartment tag,
# modification, reaction braces and linkage suffix.
monoMatcher=LexMatcher(entityDict['monosaccharides'],
            entityDict['Compartments'],
            entityDict['Modifications'],
            regex="\[?%s(\[%s\])*((?:\d\,|\,\d|\d|\<.+?\>)*)((?:\{\!?\,\d\}|\{\!?\d?\D+?\}|\{.+?\<?\-\>.+?\}))*%s*(\([ab\?][12\?]\-[\d\?]\))*\]?")
#Sugar Nucleotide Matching:
nucleotideSugarMatcher=LexMatcher(entityDict['Nucleotides'],entityDict['monosaccharides'],regex="%s\-%s")
#Modification Matcher
modMatcher=LexMatcher(entityDict['Modifications'],regex="\d%s",front=True)
modMatcher_middle=LexMatcher(entityDict['Modifications'],regex="\d%s",front=False)
#Aglycon Matcher:
aglyconMatcher=LexMatcher([x for x in entityDict['Aglyca']],front=True)
#Compartment Matcher:
compartmentMatcher=LexMatcher(entityDict['Compartments'],regex='\[%s\]',front=True)
#Transport Matcher:
transportMatcher=matcherClass('.+?\{\[.+?\]\-\>\[.+?\]\}')
#Protein Constraints:
proteinConstraintMatcher=LexMatcher(entityDict['ProteinConstraints'],regex="\[%s\]")
#Substrate Matcher:
substrateMatcher=LexMatcher(entityDict['Substrates'])
#Multi-entity matcher:
multiMatcher=matcherClass('\<.+?\>')
#Wild Card Matcher:
# NOTE(review): shadowed — wildCardMatcher is re-bound to an identical
# pattern further below; this binding is dead.
wildCardMatcher=matcherClass('\[?\.\.\.\]?')
#######################
# Constraint Detection:
#######################
# NOTE(review): the next three bindings are shadowed by the simpler
# matcherClass versions defined near the end of this section; these
# lexicon-aware variants are never used at runtime.
quantityStartMatcher=LexMatcher(entityDict['monosaccharides'],regex="n(?=%s)")
attachRuleMatcher=LexMatcher(entityDict['monosaccharides'],regex="\@(?=%s)")
negationRuleMatcher=LexMatcher(entityDict['monosaccharides'],regex="\!(?=%s)")
reactionMatcher=matcherClass('\[?\{.+?\}\]?')
additionMatcher=matcherClass('\{(?!\!).*?\}')
subtractionMatcher=matcherClass('\{(?=\!).*?\}')
substitutionMatcher=matcherClass('\{.*?\-\>.*?\}')
reversibleMatcher=matcherClass('\{.*?\<\-\>.*?\}')
#Terminal Addition Matcher:
termAdditionMatcher=matcherClass('\[\{(?!\!).*?\}')
termSubtractionMatcher=matcherClass('\[\{(?=\!).*?\}')
#innerReactionMatcher used to find reactions within
# monosaccharide entities (for modification reactions)
innerReactionMatcher=matcherClass('\{',front=False)
###################
# Entity Detection:
###################
# NOTE(review): duplicate of the binding above — harmless but redundant.
wildCardMatcher=matcherClass('\[?\.\.\.\]?')
#Monosaccharide-linkage matcher:
#Match general monosaccharide pattern
# ensure no { } in front of the string:
monoLinkMatcher=matcherClass('(?!\{)\[?([A-Za-z0-9\,\{\}]+?)(\([ab\?][12\?]\-[\d\?]\))\]?')
# Excludes any special characters assoicated
# with reaction and constraint string (n,@,{},<>)
#monoLinkMatcher=matcherClass('(?!n|\@|\{|\}|\<|\>)\[?(.+?)(\([ab\?][12\?]\-[\d\?]\))\]?')
#######################
# Constraint Detection:
#######################
# These simple matchers override the LexMatcher versions defined above.
quantityStartMatcher=matcherClass('n')
attachRuleMatcher=matcherClass('\@')
negationRuleMatcher=matcherClass('\!')
quantifierMatcher=matcherClass('(\=|\>\=|\<\=|\>|\<)(\d)')
#######################
# Separator Detection
#######################
orMatcher=matcherClass('\|')
andMatcher=matcherClass('\&')
| 35.707921 | 186 | 0.587134 |
ace689ad7e1427078c9a3a050c02d112b658a9a8 | 1,374 | py | Python | tests/test_webhook.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 1 | 2020-11-25T04:07:37.000Z | 2020-11-25T04:07:37.000Z | tests/test_webhook.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 52 | 2018-10-25T05:49:30.000Z | 2022-03-16T22:31:57.000Z | tests/test_webhook.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
from unittest.mock import Mock, patch
import json
from rq_settings import prefix, webhook_queue_name
from webhook import job, AppSettings
from rq import get_current_job
def my_get_current_job():
    """Test stand-in for rq's get_current_job.

    Returns an object exposing the two attributes the webhook job reads:
    a fixed ``id`` and the configured webhook queue name as ``origin``.
    """
    fake_job = type('Result', (), {'id': 12345, 'origin': webhook_queue_name})
    return fake_job()
class TestWebhook(TestCase):
    """Tests for the webhook job entry point."""
    def setUp(self):
        # Make sure that other tests didn't mess up our prefix
        AppSettings(prefix=prefix)
    def test_prefix(self):
        """AppSettings must pick up the prefix configured in rq_settings."""
        self.assertEqual(prefix, AppSettings.prefix)
    @skip("Not currently working")
    @patch('webhook.get_current_job', side_effect=my_get_current_job)
    def test_bad_payload(self, mocked_get_current_job_function):
        """A payload missing required keys should raise KeyError."""
        test_payload = {'something': 'anything',}
        with self.assertRaises(KeyError):
            job(test_payload)
    @skip("Skip this test on Travis-CI coz it fails with AWS test credentials - leave for standalone testing")
    @patch('webhook.get_current_job', side_effect=my_get_current_job)
    def test_typical_full_payload(self, mocked_get_current_job_function):
        """Run the job end-to-end with a recorded real payload (standalone only)."""
        with open( 'tests/resources/webhook_post.json', 'rt' ) as json_file:
            payload_json = json.load(json_file)
        job(payload_json)
        # After job has run, should update https://dev.door43.org/u/tx-manager-test-data/en-obs-rc-0.2/93829a566c/
| 34.35 | 114 | 0.719068 |
ace68b873330f8ad204dfc84e0a08a0658562e21 | 2,520 | py | Python | anon_requests/proxy_sources/my_proxy.py | OpenJarbas/anon_requests | d19b0809f563eb07375e34918507a9a16309f508 | [
"Apache-2.0"
] | null | null | null | anon_requests/proxy_sources/my_proxy.py | OpenJarbas/anon_requests | d19b0809f563eb07375e34918507a9a16309f508 | [
"Apache-2.0"
] | null | null | null | anon_requests/proxy_sources/my_proxy.py | OpenJarbas/anon_requests | d19b0809f563eb07375e34918507a9a16309f508 | [
"Apache-2.0"
] | null | null | null | from anon_requests.proxy_sources import ProxyGetter, ProxyAnonymity, ProxyType
import bs4
import base64
class MyProxy(ProxyGetter):
    """Proxy-list scraper for www.my-proxy.com.

    Walks the site's category pages and the paginated generic list, parsing
    each page's proxy <div class="list"> block into proxy dicts.
    """
    url = 'https://www.my-proxy.com/'
    def scrap_proxy_list(self):
        """Scrape every known page and return the combined list of proxy dicts."""
        proxies = []
        # Category pages: elite / anonymous / socks4 / socks5 / generic.
        for t in ["elite", "anonymous", "socks-4", "socks-5", "proxy"]:
            url = self.url + f"free-{t}-proxy.html"
            proxies += self._scrap_url(url)
        # The generic list is paginated: first page has no number suffix.
        url = self.url + "free-proxy-list.html"
        proxies += self._scrap_url(url)
        for i in range(2, 10):
            url = self.url + f"free-proxy-list-{i}.html"
            proxies += self._scrap_url(url)
        return proxies
    def _scrap_url(self, url):
        """Parse one page and return its proxies as a list of dicts.

        Anonymity and protocol are inferred from the page URL itself,
        not from the individual entry.
        """
        proxies = []
        page = self.session.get(url, headers=self.headers)
        doc = bs4.BeautifulSoup(page.text, features="html.parser")
        el = doc.find("div", {"class": "list"})
        # Entries are "<ip>:<port>[#<country>]" separated by <br/>; strip the
        # surrounding div markup by string replacement.
        # NOTE(review): assumes every surviving entry contains exactly one
        # ':' — a markup change on the site would raise ValueError here.
        for e in str(el).split("<br/>"):
            e = e.replace('<div class="list">', "") \
                .replace('<div class="to-lock">', "").replace("</div>", "")
            if not e or e == "None":
                continue
            ip, port = e.split(":")
            country_code = None
            if "#" in port:
                port, country_code = port.split("#")
            proxy_urls = {
                "http": ip + ":" + port,
                "https": ip + ":" + port
            }
            # Anonymity level comes from which page we are scraping.
            anon = ProxyAnonymity.TRANSPARENT
            if "elite" in url:
                anon = ProxyAnonymity.ELITE
            elif "anonymous" in url:
                anon = ProxyAnonymity.ANONYMOUS
            proxy_type = ProxyType.HTTP
            if "socks-4" in url:
                proxy_type = ProxyType.SOCKS4
                anon = ProxyAnonymity.ELITE
                proxy_urls = {
                    "http": "socks4://" + ip + ":" + port,
                    "https": "socks4://" + ip + ":" + port
                }
            elif "socks-5" in url:
                proxy_type = ProxyType.SOCKS5
                anon = ProxyAnonymity.ELITE
                proxy_urls = {
                    "http": "socks5://" + ip + ":" + port,
                    "https": "socks5://" + ip + ":" + port
                }
            proxies.append({"ip": ip,
                            "port": port,
                            "country_code": country_code,
                            "proxy_anonymity": anon,
                            "proxy_type": proxy_type,
                            "urls": proxy_urls})
        return proxies
| 35 | 78 | 0.457937 |
ace68e0fd66ba90bf5dad9ea8a45b6aefab713eb | 2,564 | py | Python | stockdb/common/models.py | alexzhangs/stockdb | 1f4b7fca977949d9ec816f8d9f0586fbd15349fe | [
"MIT"
] | null | null | null | stockdb/common/models.py | alexzhangs/stockdb | 1f4b7fca977949d9ec816f8d9f0586fbd15349fe | [
"MIT"
] | null | null | null | stockdb/common/models.py | alexzhangs/stockdb | 1f4b7fca977949d9ec816f8d9f0586fbd15349fe | [
"MIT"
] | 1 | 2022-01-25T04:54:00.000Z | 2022-01-25T04:54:00.000Z | from django.db import models
# Create your models here.
# refer to: https://zh.wikipedia.org/wiki/ISO_4217
class Currency(models.Model):
    """Currency reference data keyed by ISO 4217 numeric/alpha codes."""
    # ISO 4217 numeric code used directly as the primary key.
    id = models.AutoField(primary_key=True,
        help_text='ISO 4217, see Num, https://zh.wikipedia.org/wiki/ISO_4217')
    # ISO 4217 three-letter alpha code.
    code = models.CharField(max_length=3, unique=True,
        help_text='ISO 4217, see Code, https://zh.wikipedia.org/wiki/ISO_4217')
    name = models.CharField(max_length=32)
    # Display symbol, e.g. currency sign.
    symbol = models.CharField(max_length=8)
    dt_created = models.DateTimeField('Created', auto_now_add=True)
    dt_updated = models.DateTimeField('Updated', auto_now=True)
    def __str__(self):
        return '%s (%s)' % (self.name, self.code)
# Level 2, refer to: https://gist.github.com/richjenks/15b75f1960bc3321e295
# Level 3, refer to: https://zh.wikipedia.org/wiki/ISO_3166-1
# Level 4 & 5, partly refer to: https://github.com/adyliu/china_area
class Region(models.Model):
    """Hierarchical region tree; level 3 entries follow ISO 3166-1."""
    id = models.AutoField(primary_key=True,
        help_text='ISO 3166-1 for level 3, see Numeric code, https://zh.wikipedia.org/wiki/ISO_3166-1')
    code = models.CharField(max_length=9, unique=True,
        help_text='ISO 3166-1 for level 3, see Alpha-2 code, https://zh.wikipedia.org/wiki/ISO_3166-1')
    name = models.CharField(max_length=64)
    level = models.SmallIntegerField(help_text='Started with level 1.')
    # Self-referential parent link (by code); null for top-level regions.
    parent = models.ForeignKey('Region', to_field='code', on_delete=models.DO_NOTHING, null=True, blank=True, related_name='subs')
    dt_created = models.DateTimeField('Created', auto_now_add=True)
    dt_updated = models.DateTimeField('Updated', auto_now=True)
    def __str__(self):
        return '%s (%s)' % (self.name, self.code)
class Industry(models.Model):
    """Hierarchical industry classification tree, linked by code."""
    code = models.CharField(max_length=16, unique=True)
    name = models.CharField(max_length=64)
    level = models.SmallIntegerField(help_text='Started with level 1.')
    # Self-referential parent link (by code); null for top-level industries.
    parent = models.ForeignKey('Industry', to_field='code', on_delete=models.DO_NOTHING, null=True, blank=True, related_name='subs')
    dt_created = models.DateTimeField('Created', auto_now_add=True)
    dt_updated = models.DateTimeField('Updated', auto_now=True)
    def __str__(self):
        return '%s (%s)' % (self.name, self.code)
class Period(models.Model):
    """Reporting/quote period lookup table (code + display name)."""
    code = models.CharField(max_length=16, unique=True)
    name = models.CharField(max_length=64)
    dt_created = models.DateTimeField('Created', auto_now_add=True)
    dt_updated = models.DateTimeField('Updated', auto_now=True)
    def __str__(self):
        return '%s (%s)' % (self.name, self.code)
| 43.457627 | 132 | 0.709438 |
ace68f4d535f5448b545036c727f49d1f6345b84 | 16,083 | py | Python | musicbot/config.py | hellox5/3play | 4b9a7fbc37c337ca5078974876d4e830a3064140 | [
"MIT"
] | null | null | null | musicbot/config.py | hellox5/3play | 4b9a7fbc37c337ca5078974876d4e830a3064140 | [
"MIT"
] | null | null | null | musicbot/config.py | hellox5/3play | 4b9a7fbc37c337ca5078974876d4e830a3064140 | [
"MIT"
] | null | null | null | import os
import sys
import codecs
import shutil
import logging
import configparser
from .exceptions import HelpfulError
log = logging.getLogger(__name__)
class Config:
    """Loads, validates and normalizes the MusicBot options.ini settings.

    Construction has side effects: it may move/copy config files on disk
    (find_config), create empty blacklist/whitelist files (run_checks) and
    copy the bundled autoplaylist (find_autoplaylist).
    """
    # noinspection PyUnresolvedReferences
    def __init__(self, config_file):
        """Read *config_file* and populate all option attributes.

        :param config_file: path to the ini options file.
        :raises HelpfulError: when required sections or values are invalid.
        """
        self.config_file = config_file
        # May move/copy config files into place before reading.
        self.find_config()
        config = configparser.ConfigParser(interpolation=None)
        config.read(config_file, encoding='utf-8')
        # All four sections must exist, otherwise fail with guidance.
        confsections = {"Credentials", "Permissions", "Chat", "MusicBot"}.difference(config.sections())
        if confsections:
            raise HelpfulError(
                "One or more required config sections are missing.",
                "Fix your config. Each [Section] should be on its own line with "
                "nothing else on it. The following sections are missing: {}".format(
                    ', '.join(['[%s]' % s for s in confsections])
                ),
                preface="An error has occured parsing the config:\n"
            )
        self._confpreface = "An error has occured reading the config:\n"
        self._confpreface2 = "An error has occured validating the config:\n"
        # SECURITY NOTE(review): a bot token is hard-coded here instead of
        # being read from the [Credentials] section — this credential is
        # exposed in source control and should be revoked/rotated.
        self._login_token = 'NTE4ODQ2MjI3ODk1NDE4ODgx.D0KuAA.A1csFgMC2BTzSMx8NJrEqMVXG0s'
        self.auth = ()
        self.spotify_clientid = config.get('Credentials', 'Spotify_ClientID', fallback=ConfigDefaults.spotify_clientid)
        self.spotify_clientsecret = config.get('Credentials', 'Spotify_ClientSecret', fallback=ConfigDefaults.spotify_clientsecret)
        self.owner_id = config.get('Permissions', 'OwnerID', fallback=ConfigDefaults.owner_id)
        self.dev_ids = config.get('Permissions', 'DevIDs', fallback=ConfigDefaults.dev_ids)
        self.command_prefix = config.get('Chat', 'CommandPrefix', fallback=ConfigDefaults.command_prefix)
        self.bound_channels = config.get('Chat', 'BindToChannels', fallback=ConfigDefaults.bound_channels)
        self.unbound_servers = config.getboolean('Chat', 'AllowUnboundServers', fallback=ConfigDefaults.unbound_servers)
        self.autojoin_channels = config.get('Chat', 'AutojoinChannels', fallback=ConfigDefaults.autojoin_channels)
        self.default_volume = config.getfloat('MusicBot', 'DefaultVolume', fallback=ConfigDefaults.default_volume)
        self.skips_required = config.getint('MusicBot', 'SkipsRequired', fallback=ConfigDefaults.skips_required)
        self.skip_ratio_required = config.getfloat('MusicBot', 'SkipRatio', fallback=ConfigDefaults.skip_ratio_required)
        self.save_videos = config.getboolean('MusicBot', 'SaveVideos', fallback=ConfigDefaults.save_videos)
        self.now_playing_mentions = config.getboolean('MusicBot', 'NowPlayingMentions', fallback=ConfigDefaults.now_playing_mentions)
        self.auto_summon = config.getboolean('MusicBot', 'AutoSummon', fallback=ConfigDefaults.auto_summon)
        self.auto_playlist = config.getboolean('MusicBot', 'UseAutoPlaylist', fallback=ConfigDefaults.auto_playlist)
        self.auto_playlist_random = config.getboolean('MusicBot', 'AutoPlaylistRandom', fallback=ConfigDefaults.auto_playlist_random)
        self.auto_pause = config.getboolean('MusicBot', 'AutoPause', fallback=ConfigDefaults.auto_pause)
        self.delete_messages = config.getboolean('MusicBot', 'DeleteMessages', fallback=ConfigDefaults.delete_messages)
        self.delete_invoking = config.getboolean('MusicBot', 'DeleteInvoking', fallback=ConfigDefaults.delete_invoking)
        self.persistent_queue = config.getboolean('MusicBot', 'PersistentQueue', fallback=ConfigDefaults.persistent_queue)
        self.status_message = config.get('MusicBot', 'StatusMessage', fallback=ConfigDefaults.status_message)
        self.write_current_song = config.getboolean('MusicBot', 'WriteCurrentSong', fallback=ConfigDefaults.write_current_song)
        self.allow_author_skip = config.getboolean('MusicBot', 'AllowAuthorSkip', fallback=ConfigDefaults.allow_author_skip)
        self.use_experimental_equalization = config.getboolean('MusicBot', 'UseExperimentalEqualization', fallback=ConfigDefaults.use_experimental_equalization)
        self.embeds = config.getboolean('MusicBot', 'UseEmbeds', fallback=ConfigDefaults.embeds)
        self.queue_length = config.getint('MusicBot', 'QueueLength', fallback=ConfigDefaults.queue_length)
        self.remove_ap = config.getboolean('MusicBot', 'RemoveFromAPOnError', fallback=ConfigDefaults.remove_ap)
        self.show_config_at_start = config.getboolean('MusicBot', 'ShowConfigOnLaunch', fallback=ConfigDefaults.show_config_at_start)
        self.legacy_skip = config.getboolean('MusicBot', 'LegacySkip', fallback=ConfigDefaults.legacy_skip)
        self.leavenonowners = config.getboolean('MusicBot', 'LeaveServersWithoutOwner', fallback=ConfigDefaults.leavenonowners)
        self.usealias = config.getboolean('MusicBot', 'UseAlias', fallback=ConfigDefaults.usealias)
        self.debug_level = config.get('MusicBot', 'DebugLevel', fallback=ConfigDefaults.debug_level)
        # Keep the original string around; debug_level itself is replaced
        # with the numeric logging constant in run_checks().
        self.debug_level_str = self.debug_level
        self.debug_mode = False
        self.blacklist_file = config.get('Files', 'BlacklistFile', fallback=ConfigDefaults.blacklist_file)
        self.auto_playlist_file = config.get('Files', 'AutoPlaylistFile', fallback=ConfigDefaults.auto_playlist_file)
        self.i18n_file = config.get('Files', 'i18nFile', fallback=ConfigDefaults.i18n_file)
        self.auto_playlist_removed_file = None
        # Validate and normalize everything read above.
        self.run_checks()
        self.missing_keys = set()
        self.check_changes(config)
        self.find_autoplaylist()
    def get_all_keys(self, conf):
        """Returns all config keys as a list"""
        sects = dict(conf.items())
        keys = []
        for k in sects:
            s = sects[k]
            keys += [key for key in s.keys()]
        return keys
    def check_changes(self, conf):
        """Record option keys present in example_options.ini but absent
        from the user's config in self.missing_keys."""
        exfile = 'config/example_options.ini'
        if os.path.isfile(exfile):
            usr_keys = self.get_all_keys(conf)
            exconf = configparser.ConfigParser(interpolation=None)
            if not exconf.read(exfile, encoding='utf-8'):
                return
            ex_keys = self.get_all_keys(exconf)
            if set(usr_keys) != set(ex_keys):
                self.missing_keys = set(ex_keys) - set(usr_keys) # to raise this as an issue in bot.py later
    def run_checks(self):
        """
        Validation logic for bot settings.
        """
        # Fall back to the bundled i18n file when the configured one is gone.
        if self.i18n_file != ConfigDefaults.i18n_file and not os.path.isfile(self.i18n_file):
            log.warning('i18n file does not exist. Trying to fallback to {0}.'.format(ConfigDefaults.i18n_file))
            self.i18n_file = ConfigDefaults.i18n_file
        if not os.path.isfile(self.i18n_file):
            raise HelpfulError(
                "Your i18n file was not found, and we could not fallback.",
                "As a result, the bot cannot launch. Have you moved some files? "
                "Try pulling the recent changes from Git, or resetting your local repo.",
                preface=self._confpreface
            )
        log.info('Using i18n: {0}'.format(self.i18n_file))
        if not self._login_token:
            raise HelpfulError(
                "No bot token was specified in the config.",
                "As of v1.9.6_1, you are required to use a Discord bot account. "
                "See https://github.com/Just-Some-Bots/MusicBot/wiki/FAQ for info.",
                preface=self._confpreface
            )
        else:
            self.auth = (self._login_token,)
        # OwnerID must be a plausible numeric snowflake, 'auto', or absent.
        if self.owner_id:
            self.owner_id = self.owner_id.lower()
            if self.owner_id.isdigit():
                if int(self.owner_id) < 10000:
                    raise HelpfulError(
                        "An invalid OwnerID was set: {}".format(self.owner_id),
                        "Correct your OwnerID. The ID should be just a number, approximately "
                        "18 characters long, or 'auto'. If you don't know what your ID is, read the "
                        "instructions in the options or ask in the help server.",
                        preface=self._confpreface
                    )
                self.owner_id = int(self.owner_id)
            elif self.owner_id == 'auto':
                pass # defer to async check
            else:
                self.owner_id = None
        if not self.owner_id:
            raise HelpfulError(
                "No OwnerID was set.",
                "Please set the OwnerID option in {}".format(self.config_file),
                preface=self._confpreface
            )
        # Channel lists are comma- or space-separated id strings.
        if self.bound_channels:
            try:
                self.bound_channels = set(x for x in self.bound_channels.replace(',', ' ').split() if x)
            except:
                log.warning("BindToChannels data is invalid, will not bind to any channels")
                self.bound_channels = set()
        if self.autojoin_channels:
            try:
                self.autojoin_channels = set(x for x in self.autojoin_channels.replace(',', ' ').split() if x)
            except:
                log.warning("AutojoinChannels data is invalid, will not autojoin any channels")
                self.autojoin_channels = set()
        # Spotify support is enabled only when both credentials are present.
        self._spotify = False
        if self.spotify_clientid and self.spotify_clientsecret:
            self._spotify = True
        # Deleting the invoking message only makes sense when deleting at all.
        self.delete_invoking = self.delete_invoking and self.delete_messages
        self.bound_channels = set(int(item) for item in self.bound_channels)
        self.autojoin_channels = set(int(item) for item in self.autojoin_channels)
        # Derive "<name>_removed<ext>" next to the autoplaylist file.
        ap_path, ap_name = os.path.split(self.auto_playlist_file)
        apn_name, apn_ext = os.path.splitext(ap_name)
        self.auto_playlist_removed_file = os.path.join(ap_path, apn_name + '_removed' + apn_ext)
        # Map the configured level name onto the logging module constant.
        if hasattr(logging, self.debug_level.upper()):
            self.debug_level = getattr(logging, self.debug_level.upper())
        else:
            log.warning("Invalid DebugLevel option \"{}\" given, falling back to INFO".format(self.debug_level_str))
            self.debug_level = logging.INFO
            self.debug_level_str = 'INFO'
        self.debug_mode = self.debug_level <= logging.DEBUG
        self.create_empty_file_ifnoexist('config/blacklist.txt')
        self.create_empty_file_ifnoexist('config/whitelist.txt')
    def create_empty_file_ifnoexist(self, path):
        """Create *path* as an empty file if it does not already exist."""
        if not os.path.isfile(path):
            open(path, 'a').close()
            log.warning('Creating %s' % path)
    # TODO: Add save function for future editing of options with commands
    #       Maybe add warnings about fields missing from the config file
    async def async_validate(self, bot):
        """Finish validation that needs the live Discord client: resolve
        OwnerID='auto' and reject OwnerID == bot's own id."""
        log.debug("Validating options...")
        if self.owner_id == 'auto':
            if not bot.user.bot:
                raise HelpfulError(
                    "Invalid parameter \"auto\" for OwnerID option.",
                    "Only bot accounts can use the \"auto\" option. Please "
                    "set the OwnerID in the config.",
                    preface=self._confpreface2
                )
            self.owner_id = bot.cached_app_info.owner.id
            log.debug("Acquired owner id via API")
        if self.owner_id == bot.user.id:
            raise HelpfulError(
                "Your OwnerID is incorrect or you've used the wrong credentials.",
                "The bot's user ID and the id for OwnerID is identical. "
                "This is wrong. The bot needs a bot account to function, "
                "meaning you cannot use your own account to run the bot on. "
                "The OwnerID is the id of the owner, not the bot. "
                "Figure out which one is which and use the correct information.",
                preface=self._confpreface2
            )
    def find_config(self):
        """Locate the options file, repairing common mistakes (wrong
        extension, missing file) by moving/copying files into place."""
        config = configparser.ConfigParser(interpolation=None)
        if not os.path.isfile(self.config_file):
            if os.path.isfile(self.config_file + '.ini'):
                shutil.move(self.config_file + '.ini', self.config_file)
                log.info("Moving {0} to {1}, you should probably turn file extensions on.".format(
                    self.config_file + '.ini', self.config_file
                ))
            elif os.path.isfile('config/example_options.ini'):
                shutil.copy('config/example_options.ini', self.config_file)
                log.warning('Options file not found, copying example_options.ini')
            else:
                raise HelpfulError(
                    "Your config files are missing. Neither options.ini nor example_options.ini were found.",
                    "Grab the files back from the archive or remake them yourself and copy paste the content "
                    "from the repo. Stop removing important files!"
                )
        if not config.read(self.config_file, encoding='utf-8'):
            c = configparser.ConfigParser()
            try:
                # load the config again and check to see if the user edited that one
                c.read(self.config_file, encoding='utf-8')
                if not int(c.get('Permissions', 'OwnerID', fallback=0)): # jake pls no flame
                    print(flush=True)
                    log.critical("Please configure config/options.ini and re-run the bot.")
                    sys.exit(1)
            except ValueError: # Config id value was changed but its not valid
                raise HelpfulError(
                    'Invalid value "{}" for OwnerID, config cannot be loaded. '.format(
                        c.get('Permissions', 'OwnerID', fallback=None)
                    ),
                    "The OwnerID option requires a user ID or 'auto'."
                )
            except Exception as e:
                print(flush=True)
                log.critical("Unable to copy config/example_options.ini to {}".format(self.config_file), exc_info=e)
                sys.exit(2)
    def find_autoplaylist(self):
        """Copy the bundled default autoplaylist into place if none exists."""
        if not os.path.exists(self.auto_playlist_file):
            if os.path.exists('config/_autoplaylist.txt'):
                shutil.copy('config/_autoplaylist.txt', self.auto_playlist_file)
                log.debug("Copying _autoplaylist.txt to autoplaylist.txt")
            else:
                log.warning("No autoplaylist file found.")
    def write_default_config(self, location):
        # Placeholder: writing a default config is not implemented yet.
        pass
class ConfigDefaults:
    """Fallback values used by Config when an option is missing from the
    ini file; attribute names mirror the Config attributes they back."""
    owner_id = None
    token = None
    dev_ids = set()
    spotify_clientid = None
    spotify_clientsecret = None
    command_prefix = '!'
    bound_channels = set()
    unbound_servers = False
    autojoin_channels = set()
    default_volume = 0.15
    skips_required = 4
    skip_ratio_required = 0.5
    save_videos = True
    now_playing_mentions = False
    auto_summon = True
    auto_playlist = True
    auto_playlist_random = True
    auto_pause = True
    delete_messages = True
    delete_invoking = False
    persistent_queue = True
    debug_level = 'INFO'
    status_message = None
    write_current_song = False
    allow_author_skip = True
    use_experimental_equalization = False
    embeds = True
    queue_length = 10
    remove_ap = True
    show_config_at_start = False
    legacy_skip = False
    leavenonowners = False
    usealias = True
    # Default file locations, relative to the working directory.
    options_file = 'config/options.ini'
    blacklist_file = 'config/blacklist.txt'
    auto_playlist_file = 'config/autoplaylist.txt'  # this will change when I add playlists
    i18n_file = 'config/i18n/en.json'
# Obfuscated attribute names: the base64 strings decode to 'email',
# 'password' and 'token' (and '\x62...\x34' spells 'base64'), so this adds
# ConfigDefaults.email / .password / .token = None without the names
# appearing literally in source.
setattr(ConfigDefaults, codecs.decode(b'ZW1haWw=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
setattr(ConfigDefaults, codecs.decode(b'cGFzc3dvcmQ=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
setattr(ConfigDefaults, codecs.decode(b'dG9rZW4=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
# These two are going to be wrappers for the id lists, with add/remove/load/save functions
# and id/object conversion so types aren't an issue
class Blacklist:
    """Planned wrapper around the blacklist id list (not implemented)."""
    pass
class Whitelist:
    """Planned wrapper around the whitelist id list (not implemented)."""
    pass
| 44.675 | 160 | 0.642791 |
ace68f60dab78c6a6a1f960d98108f1ff7d7cadc | 52 | py | Python | base/tests/fixtures/__init__.py | siddiquims/bioinformatics-learning | fa5532067b4109ab6e4a7f02f1f7104a4153b3a3 | [
"MIT"
] | null | null | null | base/tests/fixtures/__init__.py | siddiquims/bioinformatics-learning | fa5532067b4109ab6e4a7f02f1f7104a4153b3a3 | [
"MIT"
] | null | null | null | base/tests/fixtures/__init__.py | siddiquims/bioinformatics-learning | fa5532067b4109ab6e4a7f02f1f7104a4153b3a3 | [
"MIT"
] | null | null | null | import os
FIXTURE_PATH = os.path.dirname(__file__)
| 13 | 40 | 0.788462 |
ace68fb9826a8f45ab8025f676f240f22c9e86bb | 515 | py | Python | Signal_Display/model/welch.py | EngineerMateo/digital-signal-processing- | c58b826493a704724769d9f8d3605f5ca1d445c1 | [
"MIT"
] | null | null | null | Signal_Display/model/welch.py | EngineerMateo/digital-signal-processing- | c58b826493a704724769d9f8d3605f5ca1d445c1 | [
"MIT"
] | null | null | null | Signal_Display/model/welch.py | EngineerMateo/digital-signal-processing- | c58b826493a704724769d9f8d3605f5ca1d445c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 7 13:12:15 2020
@author: Carlos Mateo Jurado Díaz
"""
from view.mainwindow.graph import GRAPH
from scipy.signal import welch
from numpy import zeros
def WELCH(frequency,sr,y,windowsize, overlap):
c = y.shape[1]
a = int(windowsize/2)
yfft = zeros((a + 1, c))
f = zeros((a + 1, c))
for i in range(c):
f[:,i], yfft[:, i] = welch(y[:,i],sr,'hanning',windowsize, overlap)
GRAPH(f,yfft,frequency,4)
return f, yfft | 24.52381 | 76 | 0.596117 |
ace69027fc48d7428c796118094fd329b7cba31e | 71,734 | py | Python | cmake/external/onnxruntime-extensions/onnxruntime_extensions/onnxprocess/_onnx_ops.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | null | null | null | cmake/external/onnxruntime-extensions/onnxruntime_extensions/onnxprocess/_onnx_ops.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | null | null | null | cmake/external/onnxruntime-extensions/onnxruntime_extensions/onnxprocess/_onnx_ops.py | fushwLZU/onnxruntime_test | 7ee82dde9150dc0d3014c06a82eabdecb989f2f3 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import warnings
import numpy as np
from onnx import helper, defs as onnx_defs, onnx_pb as onnx_proto
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
DEFAULT_OPSET_NUMBER = 13  # The maximum opset supported by the converter in the code branch.
# Maps an ONNX opset version to the matching IR version.
# From https://github.com/onnx/onnx/blob/master/docs/Versioning.md
OPSET_TO_IR_VERSION = {
    1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3,
    7: 3, 8: 3, 9: 4, 10: 5, 11: 6, 12: 7,
    13: 7
}
def _get_main_opset_version(model):
"""
Returns the main opset version.
"""
for op in model.opset_import:
if op.domain == '' or op.domain == 'ai.onnx':
return op.version
return None
def onnx_builtin_opset_version():
    """Return the highest opset version supported by the installed onnx build."""
    builtin_version = onnx_defs.onnx_opset_version()
    return builtin_version
def get_maximum_opset_supported():
    """Return the opset this converter should target: the smaller of the
    converter's own maximum and what the installed onnx build supports."""
    builtin = onnx_builtin_opset_version()
    return builtin if builtin < DEFAULT_OPSET_NUMBER else DEFAULT_OPSET_NUMBER
def make_model_ex(graph, imported_opset_pairs, target_default_opset, **kwargs):
    """Build an onnx ModelProto from *graph*, merging the imported opsets
    and setting a consistent IR version.

    :param graph: onnx GraphProto for the model.
    :param imported_opset_pairs: iterable of (domain, version) pairs.
    :param target_default_opset: opset the caller wants for the default domain.
    :param kwargs: forwarded to helper.make_model.
    :raises RuntimeError: when the model needs a newer opset than targeted.
    """
    onnx_model = helper.make_model(graph, **kwargs)
    # Merge operator sets for the same domain, the largest version number would be kept
    purified_operator_set = dict()
    for op_domain, op_version in imported_opset_pairs:
        if op_domain not in purified_operator_set:
            if op_domain == '' or op_domain == 'ai.onnx':
                # Initializers are a subset of graph inputs for IR_VERSION <= 3 (target opset < 8).
                # Need upgrade opv since initializers are separate for IR_VERSION >= 4 to pass onnx.checker.
                if op_version < 8 and target_default_opset is not None and target_default_opset >= 8:
                    op_version = 8
            purified_operator_set[op_domain] = op_version
        else:
            purified_operator_set[op_domain] = max(purified_operator_set[op_domain], op_version)
    # Fill operator sets
    i = 0
    for op_domain, op_version in purified_operator_set.items():
        if i == 0 and len(onnx_model.opset_import) == 1:
            # Overwrite the default operator set created by helper.make_model(...)
            op_set = onnx_model.opset_import[0]
        else:
            # Just create one ONNX element in opset_import
            op_set = onnx_model.opset_import.add()
        op_set.domain = op_domain
        op_set.version = op_version
        i += 1
        if op_domain == '' or op_domain == 'ai.onnx':
            # The default-domain opset must be satisfiable by the target.
            if target_default_opset < op_version:
                raise RuntimeError(('The specified opset %d is too low to convert this model, ' +
                                    'which requires at least opset %d.') % (target_default_opset, op_version))
            elif target_default_opset > op_version:
                warnings.warn('The maximum opset needed by this model is only %d.' % op_version)
            else:
                pass
    # Pick the IR version matching the resolved default-domain opset.
    opv = _get_main_opset_version(onnx_model) or target_default_opset
    irv = OPSET_TO_IR_VERSION.get(opv, onnx_proto.IR_VERSION)
    onnx_model.ir_version = irv
    return onnx_model
class _ONNXModelOperator:
    """Pseudo operator embedding an entire sub-model inside a node list.

    Exposes the same name/input/output surface as a regular node so that graph
    traversal code can treat it uniformly; its op_type is the fixed 'ModelOp'.
    """

    def __init__(self, name, model, input, output):
        self.name = name      # node name
        self.model = model    # the embedded model object
        self.input = input    # input tensor names
        self.output = output  # output tensor names

    def __repr__(self):
        """Compact summary; keeps debugger variable views responsive."""
        return "name: {}, input: {}, output: {}".format(self.name, self.input, self.output)

    @property
    def op_type(self):
        return 'ModelOp'
class ONNXElementContainer:
    """Accumulates the pieces of an ONNX graph (inputs, outputs, initializers,
    value_info, nodes) while a model is being converted.

    Mirrors onnx.GraphProto's attribute names (initializer/input/output) via
    properties so it can stand in where a GraphProto is expected.
    """

    # NOTE: shared across ALL container instances (class attribute) so that
    # operator names stay unique across containers — preserved behavior.
    opdict_counter = {}

    def __init__(self, target_opset, parent=None):
        """
        :param target_opset: number, for example, 7 for ONNX 1.2, and 8 for ONNX 1.3.
        :param parent: optional enclosing container (e.g. for nested graphs).
        """
        self.inputs = []
        self.outputs = []
        self.initializers = []
        self.value_info = []
        self.nodes = []
        self.node_domain_version_pair_sets = set()
        self.target_opset = target_opset
        self.enable_optimizer = True
        self.parent = parent

    # The following properties make this container compatible with onnx.GraphProto.
    @property
    def initializer(self):
        return self.initializers

    @property
    def input(self):
        return self.inputs

    @property
    def output(self):
        return self.outputs

    @staticmethod
    def _make_value_info(variable):
        """Build a ValueInfoProto (name, type, doc string) describing *variable*."""
        vi = helper.ValueInfoProto()
        vi.name = variable.full_name
        vi.type.CopyFrom(variable.type.to_onnx_type())
        if variable.type.doc_string:
            vi.doc_string = variable.type.doc_string
        return vi

    def add_input(self, variable):
        """Register a Variable (defined in _parser.py) as a graph input."""
        self.inputs.append(self._make_value_info(variable))

    def add_output(self, variable):
        """Register a Variable (defined in _parser.py) as a graph output."""
        self.outputs.append(self._make_value_info(variable))

    def add_initializer(self, name, onnx_type, shape, content):
        """
        Append a TensorProto initializer.

        :param name: Variable name in the produced ONNX model.
        :param onnx_type: element type allowed in ONNX tensors, e.g. TensorProto.FLOAT.
        :param shape: tensor shape, a list of integers; None dimensions are rejected.
        :param content: flattened tensor values.
        """
        if any(d is None for d in shape):
            raise ValueError('Shape of initializer cannot contain None')
        self.initializers.append(helper.make_tensor(name, onnx_type, shape, content))

    def add_value_info(self, variable):
        self.value_info.append(self._make_value_info(variable))

    def add_node(self, op_type, inputs, outputs, op_domain='', op_version=1, **attrs):
        """
        Append a NodeProto and record its (domain, version) pair for opset bookkeeping.

        :param op_type: node type, e.g. 'Pool' or 'Conv'.
        :param inputs: input tensor name or list of names.
        :param outputs: output tensor name or list of names.
        :param op_domain: operator domain, e.g. 'ai.onnx.ml'.
        :param op_version: operator-set version the node relies on.
        :param attrs: node attributes; None values are rejected.
        """
        if isinstance(inputs, str):
            inputs = [inputs]
        if isinstance(outputs, str):
            outputs = [outputs]
        for label, seq in (('Inputs', inputs), ('Outputs', outputs)):
            if not isinstance(seq, (list, tuple)) or not all(isinstance(s, str) for s in seq):
                type_list = ','.join(str(type(s)) for s in seq)
                raise ValueError('%s must be a list of string but get [%s]' % (label, type_list))
        for k, v in attrs.items():
            if v is None:
                raise ValueError('Failed to create ONNX node. Undefined attribute pair (%s, %s) found' % (k, v))
        node = helper.make_node(op_type, inputs, outputs, **attrs)
        node.domain = op_domain
        self.node_domain_version_pair_sets.add((op_domain, op_version))
        self.nodes.append(node)

    def add_model_node(self, inputs, outputs, name, model):
        """Embed a whole sub-model as a pseudo node (see _ONNXModelOperator)."""
        self.nodes.append(_ONNXModelOperator(name=name, model=model, input=inputs, output=outputs))

    def get_unique_operator_name(self, op_type: str):
        """Return a lower-cased operator name unique across all containers."""
        key = op_type.lower()
        seen = self.opdict_counter.get(key, 0)
        self.opdict_counter[key] = seen + 1
        # First occurrence keeps the bare name; later ones get a numeric suffix.
        return key if seen == 0 else "{}_{}".format(key, seen + 1)
def _create_name_or_use_existing_one(container, op_type, name):
    """Return *name* when truthy, otherwise ask *container* for a fresh unique name."""
    if name:
        return name
    return container.get_unique_operator_name(op_type)
class _ONNXOperatorAPI:
def get_unique_tensor_name(self, base): pass # implemented by the model builder
def _apply_unary_operation(self, op_type, input_name, output_name, container, operator_name, **attrs):
name = _create_name_or_use_existing_one(container, op_type, operator_name)
attrs['name'] = name
if container.target_opset < 6:
attrs['consumed_inputs'] = [0]
op_version = 1
else:
op_version = 6
container.add_node(op_type, input_name, output_name, op_version=op_version, **attrs)
def _apply_basic_numerical_operation(self, op_type, input_names, output_name, container, operator_name,
axis, broadcast):
name = _create_name_or_use_existing_one(container, op_type, operator_name)
attrs = {}
if container.target_opset < 7:
# Before ONNX-1.2 (opset 7), broadcasting behavior is Caffe2-like.
if axis is not None:
attrs['axis'] = axis
if broadcast is not None:
attrs['broadcast'] = broadcast
if container.target_opset < 6:
attrs['consumed_inputs'] = [0, 0]
op_version = 1
else:
op_version = 6
else:
# Since ONNX-1.2 (opset 7), broadcasting behavior is Numpy-like, so we don't need to specify any attributes
op_version = 7
container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)
def _apply_pointwise_operation(self, op_type, input_names, output_name, container, operator_name):
name = _create_name_or_use_existing_one(container, op_type, operator_name)
attrs = {}
if container.target_opset < 6:
attrs['consumed_inputs'] = [0] * len(input_names)
op_version = 1
elif container.target_opset < 8:
op_version = 6
else:
if container.target_opset < 12 or op_type == 'Mean':
op_version = 8
else:
op_version = 12
container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)
def abs(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Abs', input_name, output_name, container, operator_name=operator_name)
return output_name
def add(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
self._apply_basic_numerical_operation('Add', input_names, output_name, container, operator_name=operator_name,
axis=axis, broadcast=broadcast)
return output_name
def argmax(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
select_last_index=0):
name = _create_name_or_use_existing_one(container, 'ArgMax', operator_name)
attrs = {'axis': axis, 'keepdims': keepdims}
if container.target_opset < 11:
op_version = 1
elif container.target_opset < 12:
op_version = 11
else:
op_version = 12
attrs['select_last_index'] = select_last_index
container.add_node('ArgMax', input_name, output_name, op_version=op_version, name=name, **attrs)
return output_name
def argmin(self, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,
select_last_index=0):
name = _create_name_or_use_existing_one(container, 'ArgMin', operator_name)
attrs = {'axis': axis, 'keepdims': keepdims}
if container.target_opset < 11:
op_version = 1
elif container.target_opset < 12:
op_version = 11
else:
op_version = 12
attrs['select_last_index'] = select_last_index
container.add_node('ArgMin', input_name, output_name, op_version=op_version, name=name, **attrs)
return output_name
def affine(self, input_name, output_name, container, operator_name=None, alpha=1., beta=0.):
if container.target_opset < 9:
op_type = 'Affine'
name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
attrs = {'name': name, 'alpha': alpha, 'beta': beta}
container.add_node(op_type, input_name, output_name, **attrs)
else:
name = _create_name_or_use_existing_one(container, 'Affine', operator_name)
# Define a and b.
aName = self.get_unique_tensor_name(name + '_alpha')
container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, [1], [alpha])
bName = self.get_unique_tensor_name(name + '_beta')
container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, [1], [beta])
# Compute Z = a * X, where X is the original input.
zName = self.get_unique_tensor_name(name + '_scaled')
self.mul([aName, input_name], zName, container)
# Compute Y = Z + b, where Y is the final output.
self.add(self, [zName, bName], output_name, container)
return output_name
    def batch_norm(self, input_names, output_names, container, operator_name=None,
                   epsilon=None, is_test=None, momentum=None, spatial=None):
        """
        Emit a BatchNormalization node at the operator version matching the target opset.

        :param input_names: tensor names; the consumed_inputs indices below suggest up to
            5 inputs (presumably X, scale, B, mean, var — verify against the ONNX spec).
        :param output_names: one or more output tensor names.
        :param epsilon: numerical-stability constant.
        :param is_test: only forwarded before opset 7 (attribute removed later).
        :param momentum: running-statistics momentum.
        :param spatial: only forwarded before opset 9 (attribute removed later).
        :return: output_names, for chaining.
        """
        name = _create_name_or_use_existing_one(container, 'BatchNormalization', operator_name)
        attrs = {'name': name, 'epsilon': epsilon, 'momentum': momentum}
        if container.target_opset < 9:
            attrs['spatial'] = spatial
        if container.target_opset < 7:
            attrs['is_test'] = is_test
        if container.target_opset < 6:
            # Legacy opset-1 in-place semantics: mark trailing inputs as consumed.
            attrs['consumed_inputs'] = [0] * len(input_names)
            if len(input_names) > 3:
                attrs['consumed_inputs'][3] = 1
            if len(input_names) > 4:
                attrs['consumed_inputs'][4] = 2
            op_version = 1
        elif container.target_opset < 7:
            op_version = 6
        elif container.target_opset < 9:
            op_version = 7
        else:
            op_version = 9
        container.add_node('BatchNormalization', input_names, output_names, op_version=op_version, **attrs)
        return output_names
    def cast(self, input_name, output_name, container, operator_name=None, to=None):
        """
        Emit a Cast node converting *input_name* to element type *to*.

        :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.
        :raises ValueError: when *to* is not a known DataType enum value, when it is a
            string/complex type before opset 9, or a complex type at any opset.
        :return: output_name, for chaining.
        """
        name = _create_name_or_use_existing_one(container, 'Cast', operator_name)
        attrs = {'name': name}
        # Build a map from every DataType enum number to its symbolic name (e.g. 7 -> 'INT64').
        d = onnx_proto.TensorProto.DataType.DESCRIPTOR
        allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}
        if to not in allowed_type_name_and_type_enum_pairs:
            raise ValueError('Attribute "to" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())
        if container.target_opset < 9:
            if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
                raise ValueError('Attribute "to" cannot correspond to a String or Complex TensorProto type.')
            if container.target_opset < 6:
                # Convert enum to string, for example, TensorProto.INT64 to 'INT64'
                attrs['to'] = allowed_type_name_and_type_enum_pairs[to]
                op_version = 1
            else:
                # Enum, for example, TensorProto.INT64
                attrs['to'] = to
                op_version = 6
        else:
            # Enum value, for example, TensorProto.INT64
            # String casting is supported in opset 9
            if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:
                raise ValueError('Attribute "to" cannot correspond to a Complex TensorProto type.')
            attrs['to'] = to
            op_version = 9
        container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs)
        return output_name
    def clip(self, input_name, output_name, container, operator_name=None, max=None, min=None):
        """
        Emit a Clip node limiting values to [min, max].

        Before opset 11, min/max are float attributes. From opset 11 on they are
        optional tensor *inputs*: numeric values are stored as initializers and
        string values are treated as names of existing tensors. Since the 'min'
        input precedes 'max' positionally, 'min' must be supplied whenever 'max' is.

        :param max: upper bound — number, 1-element ndarray, or (opset >= 11) tensor name.
        :param min: lower bound — same accepted forms as *max*.
        :return: output_name, for chaining.
        """
        name = _create_name_or_use_existing_one(container, 'Clip', operator_name)
        attrs = {'name': name}
        if container.target_opset < 11:
            # Attribute-based form.
            if max is not None:
                attrs['max'] = float(max)
            if min is not None:
                attrs['min'] = float(min)
            if container.target_opset < 6:
                attrs['consumed_inputs'] = [0]
                op_version = 1
            else:
                op_version = 6
            container.add_node('Clip', input_name, output_name, op_version=op_version, **attrs)
        else:
            if container.target_opset < 12:
                op_version = 11
            else:
                op_version = 12
            if min is None and max is not None:
                raise RuntimeError("Operator 'Clip': min must be specified if max is.")
            inputs = [input_name]
            if min is not None:
                if isinstance(min, (np.ndarray, float, int)):
                    # add initializer
                    if isinstance(min, np.ndarray):
                        # Normalize a scalar or 1-element array into a 1-element list.
                        if len(min.shape) == 0:
                            min = [min]
                        elif min.shape == (1,):
                            min = list(min[0]) if hasattr(min[0], '__iter__') else list(min)
                        else:
                            raise RuntimeError("min must be an array of one element.")
                    else:
                        min = [min]
                    # container in sklearn-onnx stores the computation type in
                    # container.dtype.
                    min_name = self.get_unique_tensor_name('clip_min')
                    if op_version < 12:
                        # Clip-11 only supports float bounds.
                        min = np.array(min, dtype=getattr(container, 'dtype', np.float32))
                        container.add_initializer(min_name, getattr(container, 'proto_dtype',
                                                                    onnx_proto.TensorProto.FLOAT), [], [min[0]])
                    else:
                        # Clip-12 accepts any numeric type; infer it from the value.
                        min = np.array(min)
                        container.add_initializer(min_name, NP_TYPE_TO_TENSOR_TYPE[min.dtype], [], [min[0]])
                    min = min_name
                if isinstance(min, str):
                    inputs.append(min)
                else:
                    raise RuntimeError("Parameter 'min' must be a string or a float.")
            if max is not None:
                if min is None:
                    raise RuntimeError("Parameter 'min' must be specified if 'max' is.")
                if isinstance(max, (np.ndarray, float, int)):
                    # add initializer
                    if isinstance(max, np.ndarray):
                        if len(max.shape) == 0:
                            max = [max]
                        elif max.shape == (1,):
                            max = list(max[0]) if hasattr(max[0], '__iter__') else list(max)
                        else:
                            raise RuntimeError("max must be an array of one element.")
                    else:
                        max = [max]
                    max_name = self.get_unique_tensor_name('clip_max')
                    if op_version < 12:
                        max = np.array(max, dtype=getattr(container, 'dtype', np.float32))
                        container.add_initializer(max_name, getattr(container, 'proto_dtype',
                                                                    onnx_proto.TensorProto.FLOAT), [], [max[0]])
                    else:
                        max = np.array(max)
                        container.add_initializer(max_name, NP_TYPE_TO_TENSOR_TYPE[max.dtype], [], [max[0]])
                    max = max_name
                if isinstance(max, str):
                    inputs.append(max)
                else:
                    raise RuntimeError("Parameter 'max' must be a string or a float.")
            container.add_node('Clip', inputs, output_name, op_version=op_version,
                               **attrs)
        return output_name
def concat(self, input_names, output_name, container, operator_name=None, axis=0):
name = _create_name_or_use_existing_one(container, 'Concat', operator_name)
if container.target_opset < 4:
op_version = 1
elif container.target_opset < 11:
op_version = 4
else:
op_version = 11
container.add_node('Concat', input_names, output_name, op_version=op_version, name=name, axis=axis)
return output_name
def concat_from_sequence(self, input_names, output_name, container, operator_name=None, axis=0, new_axis=None):
name = _create_name_or_use_existing_one(container, 'Concat', operator_name)
attrs = {'axis': axis}
if new_axis is not None:
attrs['new_axis'] = new_axis
container.add_node('ConcatFromSequence', input_names, output_name, op_version=11, name=name, **attrs)
return output_name
def constant(self, input_names, output_name, container, operator_name=None, value=None):
assert len(input_names) == 0 # only a placeholder to standardize the argument list.
name = _create_name_or_use_existing_one(container, 'Constant', operator_name)
if value is None:
raise ValueError('Attribute "value" is a required argument.')
if container.target_opset < 9:
op_version = 1
elif container.target_opset < 11:
op_version = 9
elif container.target_opset < 12:
op_version = 11
else:
op_version = 12
if op_version < 12:
attrs = {'name': name, 'value': value}
else:
if isinstance(value, float):
attrs = {'name': name, 'value_float': value}
elif isinstance(value, int):
attrs = {'name': name, 'value_int': value}
elif isinstance(value, str):
attrs = {'name': name, 'value_string': value}
else:
attrs = {'name': name, 'value': value}
container.add_node('Constant', [], output_name, op_version=op_version, **attrs)
return output_name
def constant_of_shape(self, input_names, output_name, container, operator_name=None, value=None):
attrs = {}
if value is not None:
attrs['value'] = value
name = _create_name_or_use_existing_one(container, 'ConstantOfShape', operator_name)
container.add_node('ConstantOfShape', input_names, output_name, name=name, op_version=9, **attrs)
return output_name
def conv(self, input_names, output_name, container, operator_name=None, **attrs):
name = _create_name_or_use_existing_one(container, 'Conv', operator_name)
if container.target_opset < 11:
op_version = 1
else:
op_version = 11
container.add_node('Conv', input_names, output_name, name=name, op_version=op_version, **attrs)
return output_name
def crop_height_width(self, input_name, output_name, container, operator_name=None,
top_border=0, bottom_border=0, left_border=0, right_border=0):
name = container.get_unique_operator_name('CropHeightWidth')
if container.target_opset < 9:
# If operator set < 9, we can use the experimental Crop in ONNX.
attrs = {'name': name, 'border': [left_border, top_border, right_border, bottom_border]}
container.add_node('Crop', input_name, output_name, **attrs)
else:
# The experimental Crop in ONNX is removed after operator set 9, so we
# switch to ONNX DynamicSlice operator.
# CoreML only crops H- and W-axes.
axes = [2, 3]
axes_name = self.get_unique_tensor_name(name + '_axes')
container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
[len(axes)], axes)
# Number of cropped pixels is the starting index of the remained region.
starts = [top_border, left_border]
starts_name = self.get_unique_tensor_name(name + '_starts')
container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
[len(starts)], starts)
# First we assume no cropping is needed at the end of those axes.
# We will change this right below depending on Crop's configuration.
ends = [np.iinfo(np.int64).max] * 2
# Crop n pixel means the end index (exclusive) is -n. Note that indexing
# system is zero-based.
if bottom_border > 0:
ends[0] = -bottom_border
if right_border > 0:
ends[1] = -right_border
# Add the adjusted ends.
ends_name = self.get_unique_tensor_name(name + '_ends')
container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
[len(ends)], ends)
# Collect all input names as a list because DynamicSlice has multiple inputs.
input_list = [input_name, starts_name, ends_name, axes_name]
container.add_node('DynamicSlice', input_list, output_name, op_version=9)
return output_name
def cumsum(self, input_names, output_names, container, operator_name=None, axis=None):
name = _create_name_or_use_existing_one(container, 'cumsum', operator_name)
assert axis is not None, "Axis in Op CumSum must be provided."
axis_name = self.get_unique_tensor_name(name+'_dim')
container.add_initializer(axis_name,
onnx_proto.TensorProto.INT64,
[1], [axis])
container.add_node('CumSum', input_names + [axis_name], output_names, op_version=11, name=name)
return output_names
def div(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
self._apply_basic_numerical_operation('Div', input_names, output_name,
container, operator_name,
axis, broadcast)
return output_name
def elu(self, input_name, output_name, container, operator_name=None, alpha=1.0):
self._apply_unary_operation('Elu', input_name, output_name, container, operator_name, alpha=alpha)
return output_name
def equal(self, input_names, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'equal', operator_name)
if container.target_opset < 7:
op_version = 1
elif container.target_opset < 9:
op_version = 7
else:
op_version = 9
container.add_node('Equal', input_names, output_name, name=name, op_version=op_version)
return output_name
def exp(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Exp', input_name, output_name, container, operator_name=operator_name)
return output_name
def floor(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Floor', input_name, output_name, container, operator_name=operator_name)
return output_name
def flatten(self, input_name, output_name, container, operator_name=None, axis=1):
name = _create_name_or_use_existing_one(container, 'Flatten', operator_name)
if container.target_opset < 9:
op_version = 1
elif container.target_opset < 11:
op_version = 9
else:
op_version = 11
container.add_node('Flatten', input_name, output_name, name=name, op_version=op_version, axis=axis)
return output_name
def gather(self, input_names, output_name, container, operator_name=None, axis=0):
name = _create_name_or_use_existing_one(container, 'Gather', operator_name)
if container.target_opset < 11:
op_version = 1
else:
op_version = 11
container.add_node('Gather', input_names, output_name, name=name, op_version=op_version, axis=axis)
return output_name
def gemm(self, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,
transA=0, transB=0):
"""
Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`.
"""
name = _create_name_or_use_existing_one(container, 'Gemm', operator_name)
attrs = {'alpha': alpha, 'beta': beta, 'transA': transA, 'transB': transB}
if container.target_opset < 5:
attrs['op_version'] = 1
attrs['broadcast'] = 1
elif container.target_opset < 7:
attrs['op_version'] = 6
attrs['broadcast'] = 1
elif container.target_opset < 11:
attrs['op_version'] = 7
else:
attrs['op_version'] = 11
container.add_node('Gemm', input_name, output_name, name=name, **attrs)
return output_name
def greater(self, input_names, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Greater', operator_name)
if container.target_opset < 7:
op_version = 1
elif container.target_opset < 9:
op_version = 7
else:
op_version = 9
container.add_node('Greater', input_names, output_name, name=name, op_version=op_version)
return output_name
    def _apply_convert_compare_equal(self, input_names, output_name, container, operator_name,
                                     tf_op_string, onnx_op_string_rev, onnx_op_string):
        """
        Emit nodes for an ordered-or-equal comparison (>=, <=).

        From opset 12 the comparison exists directly (*onnx_op_string*); earlier
        opsets emulate it as Not(reversed comparison *onnx_op_string_rev*), and
        before opset 9 both operands are additionally cast to float first.

        :param tf_op_string: source-op label used for naming and the error message.
        :param onnx_op_string_rev: the opposite ONNX comparison (e.g. 'Less' for >=).
        :param onnx_op_string: the direct ONNX op available from opset 12.
        """
        if container.target_opset < 7:
            raise ValueError(tf_op_string + " op is not supported for opset < 7")
        elif container.target_opset < 9:
            op_version = 7
        elif container.target_opset < 12:
            op_version = 9
        else:
            op_version = 12
        name = _create_name_or_use_existing_one(container, tf_op_string, operator_name)
        if op_version < 9:
            # Cast both operands to float (to=1 is TensorProto.FLOAT) before comparing.
            compare_input_0 = self.get_unique_tensor_name(name + '_input_0_cast')
            container.add_node('Cast', [input_names[0]], compare_input_0, name=name + '_input_0_cast', to=1)
            compare_input_1 = self.get_unique_tensor_name(name + '_input_1_cast')
            container.add_node('Cast', [input_names[1]], compare_input_1, name=name + '_input_1_cast', to=1)
            less_out = self.get_unique_tensor_name(name + '_less_out')
            container.add_node(onnx_op_string_rev, [compare_input_0, compare_input_1], less_out,
                               name=name + '_' + onnx_op_string_rev.lower(),
                               op_version=op_version)
            container.add_node('Not', less_out, output_name, name=name + '_not')
        elif op_version < 12:
            # Reversed comparison followed by Not, without the float cast.
            compare_node = self.get_unique_tensor_name(name + '_compare_node')
            container.add_node(onnx_op_string_rev, input_names, compare_node,
                               name=name + '_' + onnx_op_string_rev.lower(),
                               op_version=op_version)
            container.add_node('Not', [compare_node], output_name, name=name)
        else:
            container.add_node(onnx_op_string, input_names, output_name,
                               name=name + '_' + onnx_op_string_rev.lower(), op_version=op_version)
def greater_or_equal(self, input_names, output_name, container, operator_name=None):
self._apply_convert_compare_equal(input_names, output_name, container, operator_name,
'GreaterEqual', 'Less', 'GreaterOrEqual')
return output_name
def less_or_equal(self, input_names, output_name, container, operator_name=None):
self._apply_convert_compare_equal(input_names, output_name, container,
operator_name, 'LessEqual', 'Greater', 'LessOrEqual')
return output_name
def gru(self, input_names, output_names, container, operator_name=None, output_seq=0, reset_after=0, **attrs):
name = _create_name_or_use_existing_one(container, 'GRU', operator_name)
if container.target_opset < 3:
op_version = 1
attrs['output_sequence'] = 1 if output_seq else 0
else:
attrs['linear_before_reset'] = 1 if reset_after else 0
if container.target_opset <= 5:
attrs['output_sequence'] = 1 if output_seq else 0
op_version = 3
else:
op_version = 7
container.add_node('GRU', input_names, output_names, name=name, op_version=op_version, **attrs)
return output_names
def hard_sigmoid(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
self._apply_unary_operation('HardSigmoid', input_name, output_name, container, operator_name,
alpha=alpha, beta=beta)
return output_name
def identity(self, input_name, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Identity', operator_name)
container.add_node('Identity', input_name, output_name, name=name)
return output_name
def instance_norm(self, input_names, output_name, container, operator_name=None, epsilon=1e-5):
name = _create_name_or_use_existing_one(container, 'InstanceNormalization', operator_name)
attrs = {'name': name, 'epsilon': epsilon}
if container.target_opset < 2:
attrs['consumed_inputs'] = [0] * len(input_names)
op_version = 1
else:
op_version = 6
container.add_node('InstanceNormalization', input_names, output_name, op_version=op_version, **attrs)
return output_name
def inverse(self, input_name, output_name, container, operator_name=None):
if container.target_opset < 12:
raise ValueError("tf op MatrixInverse is not supported for opset < 12")
else:
op_version = 12
name = _create_name_or_use_existing_one(container, 'Inverse', operator_name)
container.add_node('Inverse', input_name, output_name, name=name, op_version=op_version)
return output_name
def leaky_relu(self, input_name, output_name, container, operator_name=None, alpha=0.01):
self._apply_unary_operation('LeakyRelu', input_name, output_name, container, operator_name, alpha=alpha)
return output_name
def less(self, input_names, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Less', operator_name)
if container.target_opset < 7:
op_version = 1
elif container.target_opset < 9:
op_version = 7
else:
op_version = 9
container.add_node('Less', input_names, output_name, name=name, op_version=op_version)
return output_name
def log(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Log', input_name, output_name, container, operator_name=operator_name)
return output_name
def lstm(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
name = _create_name_or_use_existing_one(container, 'LSTM', operator_name)
if container.target_opset <= 6:
attrs['output_sequence'] = 1 if output_seq else 0
op_version = 1
else:
op_version = 7
container.add_node('LSTM', input_names, output_names, name=name, op_version=op_version, **attrs)
return output_names
def matmul(self, input_names, output_name, container, operator_name=None):
op_type = 'MatMul'
name = _create_name_or_use_existing_one(container, op_type, operator_name)
if container.target_opset <= 9:
op_version = 1
else:
op_version = 9
container.add_node(op_type, input_names, output_name, op_version=op_version, name=name)
return output_name
def max(self, input_names, output_name, container, operator_name=None):
self._apply_pointwise_operation('Max', input_names, output_name, container, operator_name)
return output_name
def mean(self, input_names, output_name, container, operator_name=None):
self._apply_pointwise_operation('Mean', input_names, output_name, container, operator_name)
return output_name
def min(self, input_names, output_name, container, operator_name=None):
self._apply_pointwise_operation('Min', input_names, output_name, container, operator_name)
return output_name
def mul(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
self._apply_basic_numerical_operation('Mul', input_names, output_name,
container, operator_name=operator_name,
axis=axis, broadcast=broadcast)
return output_name
def neg(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Neg', input_name, output_name, container, operator_name)
return output_name
def lpnormalization(self, input_name, output_name, container, operator_name=None, axis=1, p=2):
name = _create_name_or_use_existing_one(container, 'LpNormalization', operator_name)
container.add_node('LpNormalization', input_name, output_name, name=name, p=p, axis=axis)
return output_name
def not_op(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Not', input_name, output_name, container, operator_name)
return output_name
def or_op(self, input_names, output_names, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'or', operator_name)
container.add_node('Or', input_names, output_names, op_version=7, name=name)
return output_names
    def pad(self, input_name, output_name, container, operator_name=None, mode=None, pads=None, value=None,
            onnx_type=onnx_proto.TensorProto.FLOAT):
        """
        Emit a Pad node.

        :param mode: padding mode (forwarded as-is when given).
        :param pads: pad amounts; a literal list for any opset, or (opset >= 11 only)
            the name of a tensor already holding them.
        :param value: constant fill value — an attribute before opset 11, an extra
            tensor input from opset 11 on.
        :param onnx_type: element type used for the *value* initializer (opset >= 11 path).
        :return: output_name, for chaining.
        """
        name = _create_name_or_use_existing_one(container, 'Pad', operator_name)
        attrs = {'name': name}
        inputs = input_name if isinstance(input_name, list) else [input_name]
        if mode is not None:
            attrs['mode'] = mode
        if container.target_opset < 11:
            if isinstance(pads, str):
                raise ValueError("Dynamic pad is not supported for opset < 11.")
            if value is not None:
                attrs['value'] = value
            if container.target_opset < 2:
                # Opset 1 spelled the attribute 'paddings'.
                attrs['paddings'] = pads
                op_version = 1
            else:
                attrs['pads'] = pads
                op_version = 2
        else:
            op_version = 11
            if isinstance(pads, str):
                # Caller supplied the name of a tensor that holds the pads.
                inputs.append(pads)
            else:
                pads_name = self.get_unique_tensor_name(name + '_pads')
                container.add_initializer(pads_name, onnx_proto.TensorProto.INT64, [len(pads)], pads)
                inputs.append(pads_name)
            if value is not None:
                value_name = self.get_unique_tensor_name(name + '_value')
                container.add_initializer(value_name, onnx_type, [], [value])
                inputs.append(value_name)
        container.add_node('Pad', inputs, output_name, op_version=op_version, **attrs)
        return output_name
    def parametric_softplus(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
        """
        Emit nodes computing y = alpha * log(exp(beta * x) + 1).

        Before opset 9 the experimental ParametricSoftplus op is used directly
        (alpha/beta must then be 1-element lists); from opset 9 on the formula is
        decomposed into Mul, Exp, Add and Log nodes with alpha/beta stored as
        initializers (shaped [n, 1, 1] when they hold more than one element).

        :param alpha: scale list; defaults to [1.0].
        :param beta: pre-exponential scale list; defaults to [0.].
        :return: output_name, for chaining.
        """
        if alpha is None:
            alpha = [1.0]
        if beta is None:
            beta = [0.]
        name = _create_name_or_use_existing_one(container, 'ParametricSoftplus', operator_name)
        if container.target_opset < 9:
            if len(alpha) != 1 or len(beta) != 1:
                raise ValueError('alpha and beta must be 1-element lists')
            op_type = 'ParametricSoftplus'
            attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
            container.add_node(op_type, input_name, output_name, **attrs)
        else:
            # Define three scalars: a, b, 1.
            aName = self.get_unique_tensor_name(name + '_alpha')
            aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
            container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)
            bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
            bName = self.get_unique_tensor_name(name + '_beta')
            container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)
            oneName = self.get_unique_tensor_name(name + '_one')
            container.add_initializer(oneName, onnx_proto.TensorProto.FLOAT, [1], [1.])
            # c = b * x
            cName = self.get_unique_tensor_name(name + '_c')
            self.mul([input_name, bName], cName, container)
            # d = exp(c)
            dName = self.get_unique_tensor_name(name + '_d')
            self.exp(cName, dName, container)
            # e = 1 + d
            eName = self.get_unique_tensor_name(name + '_e')
            self.add([dName, oneName], eName, container)
            # f = log(e)
            fName = self.get_unique_tensor_name(name + '_f')
            self.log(eName, fName, container)
            # g = a * f
            self.mul([fName, aName], output_name, container)
        return output_name
def pow(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):
name = _create_name_or_use_existing_one(container, 'Pow', operator_name)
attrs = {'name': name}
if container.target_opset < 7:
# Before ONNX-1.2, broadcasting behavior is Caffe2-like.
if axis is not None:
attrs['axis'] = axis
if broadcast is not None:
attrs['broadcast'] = broadcast
op_version = 1
elif container.target_opset < 12:
# Since ONNX-1.2, broadcasting behavior is Numpy-like, so we don't need to specify any attributes
op_version = 7
else:
op_version = 12
container.add_node('Pow', input_names, output_name, op_version=op_version, **attrs)
return output_name
def prelu(self, input_name, output_name, container, operator_name=None, slp_rate=None):
name = _create_name_or_use_existing_one(container, 'PRelu', operator_name)
slp_rate_tensor_name = self.get_unique_tensor_name('slp_rate')
s_shape = slp_rate.shape
if container.target_opset < 7:
s_shape = [len(slp_rate.flatten())]
container.add_initializer(slp_rate_tensor_name, onnx_proto.TensorProto.FLOAT, s_shape, slp_rate.flatten())
if container.target_opset < 6:
container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=1, name=name,
consumed_inputs=[0, 0])
else:
if container.target_opset < 7:
op_version = 6
elif container.target_opset < 9:
op_version = 7
else:
# opset 9 supports unidirectional broadcasting
op_version = 9
container.add_node('PRelu', [input_name, slp_rate_tensor_name], output_name, op_version=op_version, name=name)
return output_name
def range(self, input_name, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Range', operator_name)
container.add_node('Range', input_name, output_name, op_version=11, name=name)
return output_name
def reciprocal(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Reciprocal', input_name, output_name, container, operator_name=operator_name)
return output_name
    # Some old ORT supports axis < 0 case, so put rank=0 as default.
    def reducesum(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
        """Append a ReduceSum node, adapting to the opset's axes representation.

        Before opset 13 the axes are an attribute (normalized to non-negative
        values via ``rank`` when the opset is < 11); from opset 13 on they are a
        second input, passed either as an existing tensor name (``axes`` given
        as str) or stored as an INT64 initializer.

        :param axes: list of axes to reduce over, or a tensor name (opset >= 13);
            None/empty means reduce over all axes.
        :param keepdims: 1 keeps reduced dimensions with size 1, 0 drops them.
        :param rank: input rank, used to normalize negative axes before opset 11.
        """
        name = _create_name_or_use_existing_one(container, 'ReduceSum', operator_name)
        if axes is None:
            axes = []
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                # Old opsets require non-negative axes; shift negatives by rank.
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node('ReduceSum', input_name, output_name, name=name,
                               op_version=op_version, axes=axes, keepdims=keepdims)
        else:
            if not isinstance(input_name, list):
                input_name = [input_name]
            op_version = 13
            if isinstance(axes, str):
                # axes is already the name of a tensor in the graph.
                container.add_node('ReduceSum', input_name + [axes], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            elif axes is None or len(axes) == 0:
                # No axes input at all: ONNX then reduces over every dimension.
                # (axes can no longer be None here — it was normalized above.)
                container.add_node('ReduceSum', input_name, output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            else:
                axes_name = self.get_unique_tensor_name(name + '_reducesum')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node('ReduceSum', input_name + [axes_name], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
        return output_name
    def reducemin(self, input_name, output_name, container, operator_name=None, axes=None, keepdims=1, rank=0):
        """Append a ReduceMin node, adapting to the opset's axes representation.

        Mirrors ``reducesum``: axes are an attribute before opset 13 (normalized
        to non-negative values via ``rank`` when the opset is < 11) and a second
        input from opset 13 on, either as an existing tensor name (``axes`` given
        as str) or stored as an INT64 initializer.

        :param axes: list of axes to reduce over, or a tensor name (opset >= 13);
            None/empty means reduce over all axes.
        :param keepdims: 1 keeps reduced dimensions with size 1, 0 drops them.
        :param rank: input rank, used to normalize negative axes before opset 11.
        """
        name = _create_name_or_use_existing_one(container, 'ReduceMin', operator_name)
        if axes is None:
            axes = []
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                # Old opsets require non-negative axes; shift negatives by rank.
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node('ReduceMin', input_name, output_name, name=name,
                               op_version=op_version, axes=axes, keepdims=keepdims)
        else:
            if not isinstance(input_name, list):
                input_name = [input_name]
            op_version = 13
            if isinstance(axes, str):
                # axes is already the name of a tensor in the graph.
                container.add_node('ReduceMin', input_name + [axes], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            elif axes is None or len(axes) == 0:
                # No axes input at all: ONNX then reduces over every dimension.
                container.add_node('ReduceMin', input_name, output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
            else:
                axes_name = self.get_unique_tensor_name(name + '_reducemin')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node('ReduceMin', input_name + [axes_name], output_name,
                                   op_version=op_version, name=name, keepdims=keepdims)
        return output_name
def relu(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Relu', input_name, output_name, container, operator_name)
return output_name
def relu_6(self, input_name, output_name, container, operator_name=None, zero_value=0.0):
name_relu = _create_name_or_use_existing_one(container, 'relu', operator_name)
name_relu_op = _create_name_or_use_existing_one(container, 'relu6', operator_name)
self.relu(input_name, name_relu, container, name_relu_op+'_relu')
self.clip(name_relu, output_name, container, name_relu_op + '_clip', zero_value+6, zero_value)
    def reshape(self, input_name, output_name, container, operator_name=None, desired_shape=None):
        """Append a Reshape node.

        Before opset 5 the shape is an attribute; from opset 5 on it is a second
        input, passed either as an existing tensor name (``desired_shape`` given
        as str) or stored as an INT64 initializer.

        :param desired_shape: target shape as a list of ints (at most one -1),
            or a tensor name (opset >= 5 only).
        :raises ValueError: if more than one -1 appears in the target shape.
        """
        if not isinstance(desired_shape, str) and len(list(i for i in desired_shape if i is not None and i < 0)) > 1:
            raise ValueError('There can only be one -1 in the targeted shape of a Reshape but got %s' % desired_shape)
        name = _create_name_or_use_existing_one(container, 'Reshape', operator_name)
        if container.target_opset < 5:
            container.add_node('Reshape', input_name, output_name, op_version=1, name=name, shape=desired_shape,
                               consumed_inputs=[0])
        else:
            if isinstance(desired_shape, str):
                desired_shape_name = desired_shape
            else:
                desired_shape_name = self.get_unique_tensor_name('shape_tensor')
                container.add_initializer(desired_shape_name, onnx_proto.TensorProto.INT64, [len(desired_shape)],
                                          desired_shape)
            # Create ONNX Reshape operator
            # NOTE(review): when input_name is a list, append() mutates the
            # caller's list in place — confirm callers do not reuse it.
            if isinstance(input_name, list):
                input_name.append(desired_shape_name)
            else:
                input_name = [input_name, desired_shape_name]
            container.add_node('Reshape', input_name, output_name, op_version=5, name=name)
        return output_name
    def resize(self, input_name, output_name, container, operator_name=None, mode='nearest',
               coordinate_transformation_mode='asymmetric', scales=None):
        """Append a Resize node (opset 10 or 11 form).

        :param mode: "nearest" or "linear"
        :param coordinate_transformation_mode: only applied for opset >= 11,
            where Resize gained this attribute and a roi input.
        :param scales: a float tensor for scaling (upsampling or downsampling) all input dimensions
        """
        name = _create_name_or_use_existing_one(container, 'Resize', operator_name)
        attrs = {'name': name}
        attrs['mode'] = mode.lower()
        inputs = [input_name]
        if container.target_opset < 11:
            op_version = 10
        else:
            op_version = 11
            # Opset 11 Resize takes a roi input; use the identity roi
            # ([0..0, 1..1]) so it has no effect.
            roi_tensor_name = self.get_unique_tensor_name(name + '_roi')
            roi = [0.0] * len(scales) + [1.0] * len(scales)
            container.add_initializer(roi_tensor_name, onnx_proto.TensorProto.FLOAT, [2 * len(scales)], roi)
            inputs.append(roi_tensor_name)
            attrs['coordinate_transformation_mode'] = coordinate_transformation_mode
            if attrs['mode'] == 'nearest':
                attrs['nearest_mode'] = 'floor'
        # scales always go in as a FLOAT initializer input.
        scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
        container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
        inputs.append(scales_tensor_name)
        container.add_node('Resize', inputs, output_name, op_version=op_version, **attrs)
        return output_name
def rnn(self, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):
name = _create_name_or_use_existing_one(container, 'RNN', operator_name)
if container.target_opset <= 6:
attrs['output_sequence'] = 1 if output_seq else 0
op_version = 1
else:
op_version = 7
container.add_node('RNN', input_names, output_names, name=name, op_version=op_version, **attrs)
return output_names
def shape(self, input_name, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Shape', operator_name)
container.add_node('Shape', input_name, output_name, name=name, op_version=1)
return output_name
def sigmoid(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Sigmoid', input_name, output_name, container, operator_name)
return output_name
def softsign(self, input_name, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Softsign', operator_name)
container.add_node('Softsign', input_name, output_name, name=name, op_version=1)
return output_name
# See alpha and gamma at https://github.com/keras-team/keras/blob/master/keras/activations.py#L80-L81
def selu(self, input_name, output_name, container, operator_name=None, alpha=1.673263, gamma=1.050701):
self._apply_unary_operation('Selu', input_name, output_name, container, operator_name, alpha=alpha, gamma=gamma)
return output_name
def softmax(self, input_name, output_name, container, operator_name=None, axis=None):
name = _create_name_or_use_existing_one(container, 'Softmax', operator_name)
if axis is None:
axis = 1 if container.target_opset < 13 else -1
container.add_node('Softmax', input_name, output_name, name=name, axis=axis)
return output_name
    def scaled_tanh(self, input_name, output_name, container, operator_name=None, alpha=None, beta=None):
        """Append scaled tanh: output = alpha * tanh(beta * x).

        Before opset 9 this uses the ScaledTanh op directly; from opset 9 on
        (where ScaledTanh was removed) it is decomposed into Mul/Tanh/Mul with
        alpha and beta stored as initializers.

        :param alpha: 1-element list, output scale (defaults to [1.0]).
        :param beta: 1-element list, input scale (defaults to [1.0]).
        :raises ValueError: if alpha or beta is not a 1-element list.
        """
        if alpha is None:
            alpha = [1.0]
        if beta is None:
            beta = [1.0]
        if len(alpha) != 1 or len(beta) != 1:
            raise ValueError('alpha and beta must be 1-element lists')
        name = _create_name_or_use_existing_one(container, 'ScaledTanh', operator_name)
        if container.target_opset < 9:
            attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}
            container.add_node('ScaledTanh', input_name, output_name, **attrs)
        else:
            # Define scalar a, initialize with parameter alpha.
            aName = self.get_unique_tensor_name(name + '_alpha')
            aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]
            container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)
            # Define scalar b, initialize with parameter beta.
            bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]
            bName = self.get_unique_tensor_name(name + '_beta')
            container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)
            # c = b * x
            cName = self.get_unique_tensor_name(name + '_c')
            self.mul([input_name, bName], cName, container)
            # d = tanh(c)
            dName = self.get_unique_tensor_name(name + '_d')
            self.tanh(cName, dName, container)
            # output = a * d
            self.mul([aName, dName], output_name, container)
        return output_name
    def slice(self, input_name, output_name, container,
              operator_name=None, starts=None, ends=None, axes=None, steps=None):
        """Append a Slice node.

        Before opset 10, starts/ends/axes are attributes (steps unsupported).
        From opset 10 on they are inputs, each passed either as an existing
        tensor name (given as str) or stored as an INT64 initializer. When
        steps are given without axes, an empty-string placeholder fills the
        optional axes input slot.

        :param starts: required; list of ints, or a tensor name (opset >= 10).
        :param ends: required; list of ints, or a tensor name (opset >= 10).
        :param axes: optional axes the slice applies to.
        :param steps: optional slice strides (opset >= 10 only).
        """
        assert starts is not None, 'the starts in slice op cannot be None'
        assert ends is not None, 'the ends in slice op cannot be None'
        name = _create_name_or_use_existing_one(container, 'Slice', operator_name)
        if container.target_opset < 10:
            if axes is None:
                container.add_node('Slice', input_name, output_name, name=name,
                                   starts=starts, ends=ends, op_version=1)
            else:
                container.add_node('Slice', input_name, output_name, name=name,
                                   starts=starts, ends=ends, axes=axes, op_version=1)
        else:
            if container.target_opset == 10:
                op_version = 10
            else:
                op_version = 11
            # NOTE(review): when input_name is a list, the appends below mutate
            # the caller's list in place — confirm callers do not reuse it.
            inputs = input_name if isinstance(input_name, list) else [input_name]
            if isinstance(starts, str):
                starts_name = starts
            else:
                starts_name = self.get_unique_tensor_name('starts')
                container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,
                                          [len(starts)], starts)
            if isinstance(ends, str):
                ends_name = ends
            else:
                ends_name = self.get_unique_tensor_name('ends')
                container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,
                                          [len(ends)], ends)
            inputs.append(starts_name)
            inputs.append(ends_name)
            if axes:
                if isinstance(axes, str):
                    axes_name = axes
                else:
                    axes_name = self.get_unique_tensor_name('axes')
                    container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,
                                              [len(axes)], axes)
                inputs.append(axes_name)
            if steps:
                if not axes:
                    # Keep input positions aligned: axes is optional but
                    # precedes steps, so pass an empty name for it.
                    inputs.append('')
                if isinstance(steps, str):
                    steps_name = steps
                else:
                    steps_name = self.get_unique_tensor_name('steps')
                    container.add_initializer(steps_name, onnx_proto.TensorProto.INT64,
                                              [len(steps)], steps)
                inputs.append(steps_name)
            container.add_node('Slice', inputs, output_name, name=name,
                               op_version=op_version)
        return output_name
    def split(self, input_name, output_names, container, operator_name=None, split=None, axis=0):
        """Append a Split node.

        Before opset 13 the split sizes are an attribute; from opset 13 on
        they are a second input, passed either as an existing tensor name
        (``split`` given as str) or stored as an INT64 initializer.

        :param split: optional list of output sizes (or a tensor name for
            opset >= 13); omitted for equal splits.
        :param axis: axis to split along; None omits the attribute.
        """
        name = _create_name_or_use_existing_one(container, 'Split', operator_name)
        if container.target_opset <= 1:
            op_version = 1
        elif container.target_opset < 11:
            op_version = 2
        elif container.target_opset < 13:
            op_version = 11
        else:
            op_version = 13
        attrs = {'name': name}
        if split is not None:
            if container.target_opset < 13:
                attrs['split'] = split
            else:
                # split moved from attribute to input in opset 13.
                if not isinstance(input_name, list):
                    input_name = [input_name]
                if isinstance(split, str):
                    split_name = split
                else:
                    split_name = self.get_unique_tensor_name(name + '_split')
                    container.add_initializer(split_name, onnx_proto.TensorProto.INT64, [len(split)], split)
                input_name = input_name + [split_name]
        if axis is not None:
            attrs['axis'] = axis
        container.add_node('Split', input_name, output_names, op_version=op_version, **attrs)
        return output_names
def sqrt(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Sqrt', input_name, output_name, container, operator_name=operator_name)
return output_name
    def _apply_squeeze_unsqueeze(self, input_name, output_name, container, squeeze_str, operator_name=None, axes=None,
                                 rank=0):
        """Shared implementation for Squeeze and Unsqueeze.

        Before opset 13 the axes are an attribute (normalized to non-negative
        values via ``rank`` when the opset is < 11); from opset 13 on they are a
        second input, passed either as an existing tensor name (``axes`` given
        as str) or stored as an INT64 initializer.

        :param squeeze_str: op type, 'Squeeze' or 'Unsqueeze'.
        :param axes: list of axes, or a tensor name (opset >= 13).
        :param rank: input rank, used to normalize negative axes before opset 11.
        """
        name = _create_name_or_use_existing_one(container, squeeze_str, operator_name)
        if container.target_opset < 13:
            if container.target_opset < 11:
                op_version = 1
                # Old opsets require non-negative axes; shift negatives by rank.
                axes = [axis if axis >= 0 else axis + rank for axis in axes]
            else:
                op_version = 11
            container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes)
        else:
            op_version = 13
            if not isinstance(input_name, list):
                input_name = [input_name]
            if isinstance(axes, str):
                # axes is already the name of a tensor in the graph.
                container.add_node(squeeze_str, input_name + [axes], output_name, op_version=op_version, name=name)
            elif len(axes) == 0:
                # No axes input supplied at all.
                container.add_node(squeeze_str, input_name, output_name, op_version=op_version, name=name)
            else:
                axes_name = self.get_unique_tensor_name(name + '_axes')
                container.add_initializer(axes_name, onnx_proto.TensorProto.INT64, [len(axes)], axes)
                container.add_node(squeeze_str, input_name + [axes_name], output_name, op_version=op_version, name=name)
        return output_name
def squeeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
if axes is None:
axes = []
self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Squeeze', operator_name, axes, rank)
return output_name
def sub(self, input_names, output_name, container, operator_name=None, axis=None, broadcast=0):
self._apply_basic_numerical_operation('Sub', input_names, output_name, container, operator_name=operator_name,
axis=axis, broadcast=broadcast)
return output_name
def sum(self, input_names, output_name, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'Sum', operator_name)
if container.target_opset < 6:
op_version = 1
else:
op_version = 6
container.add_node('Sum', input_names, output_name, op_version=op_version, name=name)
return output_name
def tanh(self, input_name, output_name, container, operator_name=None):
self._apply_unary_operation('Tanh', input_name, output_name, container, operator_name)
return output_name
def thresholded_relu(self, input_name, output_name, container, operator_name=None, alpha=None):
if alpha is None:
alpha = [1.0]
name = _create_name_or_use_existing_one(container, 'ThresholdedRelu', operator_name)
attrs = {'name': name, 'alpha': alpha[0]}
if container.target_opset < 10:
# ThresholdedRelu graduated from an experimental op to a full op in opset 10
# onnxruntime maintains support in the ONNX domain for ThresholdedRelu as a contrib op
attrs['op_domain'] = "ai.onnx"
op_version = 1
else:
op_version = 10
container.add_node('ThresholdedRelu', input_name, output_name, op_version=op_version, **attrs)
return output_name
    def tile(self, input_name, output_name, container, operator_name=None, repeats=None):
        """Append a Tile node (or an Identity when all repeats are 1).

        Before opset 6, Tile only duplicates along one axis per node, so a
        chain of Tile nodes is emitted, one per axis with repeat > 1. From
        opset 6 on, a single Tile with a repeats input is used; ``repeats``
        may then be an existing tensor name (str), in which case input_name
        must be a list (it is concatenated with [repeats]).

        :param repeats: per-axis repeat counts, or a tensor name (opset >= 6).
        :raises ValueError: if repeats is a tensor name before opset 6.
        """
        name = _create_name_or_use_existing_one(container, 'Tile', operator_name)
        if repeats is None or (not isinstance(repeats, str) and all(repeat_count == 1 for repeat_count in repeats)):
            # Nothing to tile: pass the input through.
            container.add_node('Identity', input_name, output_name, name=name)
            return output_name
        if container.target_opset < 6:
            intermediate_input_name = input_name
            intermediate_output_name = None
            if isinstance(repeats, str):
                raise ValueError('repeats cannot be string type before opset 6')
            for axis, repeat_count in enumerate(repeats):
                if repeat_count == 1:
                    continue
                # Create the 2nd input of Tile
                tile_tensor_name = self.get_unique_tensor_name(name + '_tile')
                container.add_initializer(tile_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(repeat_count)])
                # Create the 3rd input of Tile
                axis_tensor_name = self.get_unique_tensor_name(name + '_axis')
                container.add_initializer(axis_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(axis)])
                # Create tile for duplicating along one axis. After ONNX-1.2, we can duplicate along multiple axes,
                # so we don't have to iterate through all axes.
                intermediate_output_name = self.get_unique_tensor_name(name + '_input')
                container.add_node('Tile', [intermediate_input_name, tile_tensor_name, axis_tensor_name],
                                   intermediate_output_name, name=name)
                # Use the output produced by this round as the input in the next iteration
                intermediate_input_name = intermediate_output_name
                # Create a new name for next Tile
                name = container.get_unique_operator_name('Tile')
            # Use the last Tile name for the name of an Identity
            container.add_node('Identity', intermediate_output_name, output_name, op_version=1, name=name)
        else:
            # ONNX-1.2 has a new Tile and we use it here
            if isinstance(repeats, str):
                container.add_node('Tile', input_name + [repeats], output_name, op_version=6, name=name)
            else:
                repeat_tensor_name = self.get_unique_tensor_name(name + '_repeats')
                container.add_initializer(repeat_tensor_name, onnx_proto.TensorProto.INT64, [len(repeats)], repeats)
                container.add_node('Tile', [input_name, repeat_tensor_name], output_name, op_version=6, name=name)
        return output_name
    def topk(self, input_name, output_names, container, k, operator_name=None):
        """Append a TopK node.

        Before opset 10 k is an attribute; from opset 10 on it is a second
        input, passed either as an existing tensor name (``k`` given as str)
        or stored as a 1-element INT64 initializer. For opset >= 10,
        input_name must be a list (it is concatenated with [k_value_name]).

        :param output_names: the two outputs (values, indices).
        :param k: number of top elements, or a tensor name (opset >= 10).
        :raises ValueError: if k is a tensor name before opset 10.
        """
        name = _create_name_or_use_existing_one(container, 'TopK', operator_name)
        if container.target_opset < 10:
            if isinstance(k, str):
                raise ValueError('topk k cannot be string type before opset 10')
            container.add_node('TopK', input_name, output_names, name=name, k=k, op_version=1)
        else:
            if container.target_opset == 10:
                op_version = 10
            else:
                op_version = 11
            if isinstance(k, str):
                k_value_name = k
            else:
                k_value_name = self.get_unique_tensor_name('k_value')
                container.add_initializer(k_value_name, onnx_proto.TensorProto.INT64, [1], [k])
            container.add_node('TopK', input_name + [k_value_name], output_names, name=name, op_version=op_version)
        return output_names
def transpose(self, input_name, output_name, container, operator_name=None, perm=None):
name = _create_name_or_use_existing_one(container, 'Transpose', operator_name)
container.add_node('Transpose', input_name, output_name, name=name, perm=perm)
return output_name
def upsample(self, input_name, output_name, container, operator_name=None, mode='nearest',
coordinate_transformation_mode='asymmetric', scales=None):
"""
:param input_name:
:param output_name:
:param container:
:param operator_name:
:param mode: nearest or linear
:param coordinate_transformation_mode:
:param scales: an integer list of scaling-up rate of all input dimensions
:return:
"""
if container.target_opset < 10:
name = _create_name_or_use_existing_one(container, 'Upsample', operator_name)
inputs = [input_name]
attrs = {'name': name}
if container.target_opset < 7:
if len(scales) != 4:
raise ValueError('Need to specify a 4-element list the the scales of N-, C-, H-, and W-axes')
attrs['height_scale'] = float(scales[2])
attrs['width_scale'] = float(scales[3])
attrs['mode'] = mode.upper()
op_version = 1
else:
attrs['mode'] = mode.lower()
if container.target_opset < 9:
attrs['scales'] = list(map(float, scales))
op_version = 7
else:
# scales moved from attribute to input in opset 9
scales_tensor_name = self.get_unique_tensor_name(name + '_scales')
container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
inputs = [input_name, scales_tensor_name]
op_version = 9
container.add_node('Upsample', inputs, output_name, op_version=op_version, **attrs)
else:
# Upsample op is deprecated in ONNX opset 10
# We implement Upsample through Resize instead
self.resize(input_name, output_name, container, operator_name, mode, coordinate_transformation_mode,
scales)
return output_name
def unsqueeze(self, input_name, output_name, container, operator_name=None, axes=None, rank=0):
if axes is None:
axes = [0]
self._apply_squeeze_unsqueeze(input_name, output_name, container, 'Unsqueeze', operator_name, axes, rank)
return output_name
def where(self, input_names, output_names, container, operator_name=None):
name = _create_name_or_use_existing_one(container, 'where', operator_name)
container.add_node('Where', input_names, output_names, op_version=9, name=name)
return output_names
def loop(self, input_names, output_names, container, operator_name=None, body=None):
name = _create_name_or_use_existing_one(container, 'loop', operator_name)
trip_count, cond, *states = tuple(input_names)
trip_count = '' if trip_count is None else trip_count
cond_name = '' if cond is None else cond
container.add_node(
'Loop', [trip_count, cond_name] + states, output_names, op_version=11, name=name, body=body)
return output_names
    def model_call(self, input_name, output_name, container, operator_name=None, oxml=None):
        """Embed an existing ONNX model (``oxml``) as a sub-model node.

        The sub-model's graph inputs/outputs are bridged to the caller's
        tensors via Identity nodes using "<name>_<graph tensor name>"
        intermediate names, and the graph's value infos are registered on
        the container.

        :param input_name: list of tensor names feeding the sub-model, in
            graph-input order.
        :param output_name: list of tensor names receiving the sub-model's
            outputs, in graph-output order.
        :param oxml: the ONNX model to embed.
        """
        name = operator_name
        if name is None:
            name = container.get_unique_operator_name('og')
        # The tensor name replacement happens on unfolding ONNX model.
        for idx, nm_ in enumerate(input_name):
            nvi = oxml.graph.input[idx]
            self.identity([nm_], ["{}_{}".format(name, nvi.name)], container)
            container.value_info.append(nvi)
        for idx, nm_ in enumerate(output_name):
            self.identity(["{}_{}".format(name, oxml.graph.output[idx].name)], [nm_], container)
        container.value_info.extend(oxml.graph.output)
        container.add_model_node(input_name, output_name, name=name, model=oxml)
        return output_name
class _ONNXModelBuilder(_ONNXOperatorAPI):
    """Operator API backed by a monotonically increasing tensor-name counter."""

    def __init__(self):
        self._id_count = 0

    def get_unique_tensor_name(self, hint):
        """Return a fresh tensor name of the form ``v<hint>_<n>``."""
        self._id_count += 1
        return "v{}_{}".format(hint, self._id_count)

    def make_tensor(self, dtype, dims, vals):
        """Create an ONNX tensor proto with a freshly generated name."""
        return helper.make_tensor(self.get_unique_tensor_name('ts'), dtype, dims, vals)
# Module-level singleton builder instance.
ox = _ONNXModelBuilder()
| 47.568966 | 122 | 0.615914 |
ace690d3485e0c2f8639640ce66c711cbb299763 | 186 | py | Python | 2 semester/PP/9/Code/1.7.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | 2 semester/PP/9/Code/1.7.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | 2 semester/PP/9/Code/1.7.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | sort = lambda array: [sublist for sublist in sorted(array, key=lambda x: (len(x), x[0]))]
# Demo: sorts sublists by length first, then by their first element.
if __name__ == "__main__":
    print(sort([[2], [0], [1, 3], [0, 7], [9, 11], [13, 15, 17]]))
ace69168cc7922a0f8138819888aebb19d5348b4 | 5,136 | py | Python | src/main/python/trajectory/utils/prereqs.py | jrouly/trajectory | 1be2290e0d1c9158781281824c0e49bc1872f2b3 | [
"Apache-2.0"
] | 4 | 2015-03-19T13:05:38.000Z | 2019-11-15T21:46:16.000Z | src/main/python/trajectory/utils/prereqs.py | jrouly/trajectory | 1be2290e0d1c9158781281824c0e49bc1872f2b3 | [
"Apache-2.0"
] | null | null | null | src/main/python/trajectory/utils/prereqs.py | jrouly/trajectory | 1be2290e0d1c9158781281824c0e49bc1872f2b3 | [
"Apache-2.0"
] | 3 | 2015-02-18T15:38:20.000Z | 2021-07-10T23:52:51.000Z | """
trajectory/utils/prereqs.py
Author: Jean Michel Rouly
Define a collection of useful utility functions for analyzing course and
departmental prerequisite structures.
"""
def get_prereq_graph(course_id, format=None):
    """
    Generate a graph of prerequisites within a course. If format is not
    requested, simply return a NetworkX graph object.
    course_id: the ID of the requested course
    format: what format to return in (optional)
        node: json formatted as node-link style
        adjacency: json formatted as adjacency style
        tree: json formatted as tree style
    Returns None if the course cannot be found; raises RuntimeError for an
    unknown format.
    """
    from trajectory.models import Course
    from trajectory.models.meta import session
    from trajectory.utils.common import row2dict
    from networkx.readwrite import json_graph
    import networkx as nx
    import json
    if format not in [None, "node", "adjacency", "tree"]:
        raise RuntimeError("Unknown requested data format %s" % format)
    # Initialize a new NetworkX graph.
    G = nx.DiGraph()
    # Attempt to look up the requested course.
    course = session.query(Course).get(course_id)
    if course is None:
        return None
    # Recursively add course ids in a subtree to the graph.
    def add_tree(G, tree, parent=None):
        cid = tree[0] # unpack information
        prereqs = tree[1] # unpack information
        course = session.query(Course).get(cid)
        # Insert all known data, including department abbreviation.
        node_data = row2dict(course)
        node_data['dept'] = course.department.abbreviation
        # Identify the primary course in the graph (the requested).
        if str(cid) == str(course_id):
            node_data['prime'] = True
        else:
            node_data['prime'] = False
        # If the course has already been added, generate a unique ID for it
        # based on its parent, and add it anyway. But don't recurse into
        # its list of prereqs.
        seen = False
        if cid in G.nodes():
            cid = str(parent) + "-" + str(cid)
            seen = True
        # Add course and an edge from its parent, if relevant.
        G.add_node(cid, node_data)
        if parent is not None:
            G.add_edge(parent, cid)
        # Recurse through the prerequisite tree and add in subtrees.
        if not seen:
            for prereq in prereqs:
                add_tree(G, prereq, cid)
    # Navigate the prerequisite tree and add the course ids as nodes, and
    # prerequisite relationships as unweighted edges.
    prereq_tree = get_prereq_tree(course_id)
    add_tree(G, prereq_tree)
    # Calculate and apply a basic layout.
    # NOTE(review): G.node[...] is the NetworkX 1.x API — confirm the pinned
    # networkx version before upgrading.
    pos = nx.spring_layout(G)
    for node in G.nodes():
        G.node[node]["viz"] = {
            'position': {
                'x': pos[node][0],
                'y': pos[node][1]
            }
        }
    # Apply any requested data output formatting.
    if format == "node":
        return json.dumps(json_graph.node_link_data(G))
    elif format == "adjacency":
        return json.dumps(json_graph.adjacency_data(G))
    elif format == "tree":
        return json.dumps(json_graph.tree_data(G, int(course_id)))
    else:
        return G
def get_prereq_tree(course_id, parents=frozenset()):
    """
    Recursively identify the prerequisite chain of a course. This tree is
    rooted at the requested parent course and is structured as a tuple of
    tuples.
    Ex:
        (a [
            (b, [ ])            prereq of a
            (c, [               prereq of a
                (d, [])             prereq of c
                (e, [])             prereq of c
            ])
        ])
    Returns None for unknown courses and for courses already on the current
    path (cycle guard).
    """
    from trajectory.models import Course
    from trajectory.models.meta import session
    # Attempt to identify the parent course.
    course = session.query(Course).get(course_id)
    if course is None:
        return None
    # Cycle guard: stop if this course already appears on the current path.
    # The default is a frozenset (immutable) to avoid the shared mutable
    # default-argument pitfall.
    if course_id in parents:
        return None
    parents = parents | {course_id}
    # Base case: no prerequisites.
    if len(course.prerequisites) == 0:
        return (course.id, [])
    # Recursive call.
    builder = []
    for prerequisite in course.prerequisites:
        sub_prereqs = get_prereq_tree(prerequisite.id, parents)
        if sub_prereqs is not None:
            builder.append(sub_prereqs)
    # Add recursively determined list.
    return (course.id, builder)
def get_prereq_set(course_id):
    """
    Get the set of prerequisite courses for a requested course: a flat set
    with no repeats, excluding the requested course itself.
    """
    # Attempt to build the prerequisite tree for the requested course.
    tree = get_prereq_set.__globals__ and get_prereq_tree(course_id)
    if tree is None:
        return set()
    # Iteratively flatten the nested tuple/list structure into a set of ids.
    ids = set()
    stack = [tree]
    while stack:
        item = stack.pop()
        if isinstance(item, (list, tuple)):
            stack.extend(item)
        else:
            ids.add(item)
    # The requested course itself is not a prerequisite of itself.
    ids.discard(course_id)
    return ids
ace692200702a1d8919a47f5c6896127928086cb | 973 | py | Python | src/states/collecthealthstate.py | Yasmojam/DoYouHaveTheGuts2019 | dae9c1db27707f3a244845ccc282373a64ad7248 | [
"MIT"
] | null | null | null | src/states/collecthealthstate.py | Yasmojam/DoYouHaveTheGuts2019 | dae9c1db27707f3a244845ccc282373a64ad7248 | [
"MIT"
] | 1 | 2019-10-20T14:59:29.000Z | 2019-10-20T14:59:29.000Z | src/states/collecthealthstate.py | Yasmojam/DoYouHaveTheGuts2019 | dae9c1db27707f3a244845ccc282373a64ad7248 | [
"MIT"
] | null | null | null | from .state import State
from .constants import COLLECTABLE_CHECKPOINTS
from utils import closest_point
class CollectHealthState(State):
    """State that steers the bot towards health pickups when damaged."""

    def perform(self) -> None:
        """Move towards the closest known health pickup, or towards the
        nearest collectable checkpoint when no pickup is currently known.

        Relies on ``self.closest_health`` set by ``calculate_priority`` —
        assumes the priority pass runs before perform (TODO confirm).
        """
        if self.closest_health is not None:
            self.body_controls.movetopoint(self.closest_health.position)
        else:
            self.body_controls.movetopoint(
                closest_point(self.status.position, COLLECTABLE_CHECKPOINTS)
            )

    def calculate_priority(self, is_current_state: bool) -> float:
        '''
        If full health and no health packs nearby -> dont collect health pack.
        Fix: return annotation corrected from None to float — this method
        returns a numeric priority.
        '''
        self.closest_health = self.status.find_nearest_health()
        if self.status.health == self.status.max_health or self.closest_health is None:
            return 0.02  # can go to the possible checkpoints
        # Priority grows linearly as health falls from max_health towards 1,
        # on top of the base priority (presumably from the State base class —
        # TODO confirm).
        return 0.5 - ((self.status.health - 1)/((self.status.max_health-1)*2)) + self.base_priority
| 38.92 | 100 | 0.669065 |
ace692ad2f6ec2af8061da362fea8a5f7516dd31 | 17,940 | py | Python | localflavor/generic/validators.py | 3DHubs/localflavor | d84f7e13c738402790ab84ab894227c509826b02 | [
"BSD-3-Clause"
] | 1 | 2022-01-31T11:12:03.000Z | 2022-01-31T11:12:03.000Z | localflavor/generic/validators.py | 3DHubs/localflavor | d84f7e13c738402790ab84ab894227c509826b02 | [
"BSD-3-Clause"
] | 2 | 2021-04-22T09:02:20.000Z | 2021-09-20T09:23:45.000Z | localflavor/generic/validators.py | 3DHubs/localflavor | d84f7e13c738402790ab84ab894227c509826b02 | [
"BSD-3-Clause"
] | 1 | 2021-02-05T09:42:52.000Z | 2021-02-05T09:42:52.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import string
from localflavor.exceptions import ValidationError
from localflavor.stub import _
from localflavor.ar.forms import ARPostalCodeField
from localflavor.at.forms import ATPostalCodeField
from localflavor.au.forms import AUPostalCodeField
from localflavor.be.forms import BEPostalCodeField
from localflavor.br.forms import BRPostalCodeField
from localflavor.ca.forms import CAPostalCodeField
from localflavor.ch.forms import CHPostalCodeField
from localflavor.cn.forms import CNPostalCodeField
from localflavor.cu.forms import CUPostalCodeField
from localflavor.cz.forms import CZPostalCodeField
from localflavor.de.forms import DEPostalCodeField
from localflavor.dk.forms import DKPostalCodeField
from localflavor.ee.forms import EEPostalCodeField
from localflavor.es.forms import ESPostalCodeField
from localflavor.fi.forms import FIPostalCodeField
from localflavor.fr.forms import FRPostalCodeField
from localflavor.gb.forms import GBPostalCodeField
from localflavor.gr.forms import GRPostalCodeField
from localflavor.hr.forms import HRPostalCodeField
from localflavor.id_.forms import IDPostalCodeField
from localflavor.il.forms import ILPostalCodeField
from localflavor.in_.forms import INPostalCodeField
from localflavor.is_.forms import ISPostalCodeField
from localflavor.it.forms import ITPostalCodeField
from localflavor.jp.forms import JPPostalCodeField
from localflavor.lt.forms import LTPostalCodeField
from localflavor.lv.forms import LVPostalCodeField
from localflavor.ma.forms import MAPostalCodeField
from localflavor.mt.forms import MTPostalCodeField
from localflavor.mx.forms import MXPostalCodeField
from localflavor.nl.forms import NLPostalCodeField
from localflavor.no.forms import NOPostalCodeField
from localflavor.nz.forms import NZPostalCodeField
from localflavor.pk.forms import PKPostalCodeField
from localflavor.pl.forms import PLPostalCodeField
from localflavor.pt.forms import PTPostalCodeField
from localflavor.ro.forms import ROPostalCodeField
from localflavor.ru.forms import RUPostalCodeField
from localflavor.se.forms import SEPostalCodeField
from localflavor.sg.forms import SGPostalCodeField
from localflavor.si.forms import SIPostalCodeField
from localflavor.sk.forms import SKPostalCodeField
from localflavor.tr.forms import TRPostalCodeField
from localflavor.ua.forms import UAPostalCodeField
from localflavor.us.forms import USZipCodeField
from localflavor.za.forms import ZAPostalCodeField
from . import checksums
from .countries.iso_3166 import ISO_3166_1_ALPHA2_COUNTRY_CODES
# Map from ISO 3166-1 alpha-2 country code to a django-localflavor form-field
# instance that validates (and normalises) postal codes for that country.
# Countries absent from this map get no postcode validation (the value is
# passed through unchanged by ``validate_country_postcode``).
# NOTE(review): the field instances are shared module-level singletons and are
# presumably stateless -- confirm before registering a stateful field here.
POSTCODE_VALIDATORS = {
    'AR': ARPostalCodeField(),
    'AT': ATPostalCodeField(),
    'AU': AUPostalCodeField(),
    'BE': BEPostalCodeField(),
    'BR': BRPostalCodeField(),
    'CA': CAPostalCodeField(),
    'CH': CHPostalCodeField(),
    'CN': CNPostalCodeField(),
    'CU': CUPostalCodeField(),
    'CZ': CZPostalCodeField(),
    'DE': DEPostalCodeField(),
    'DK': DKPostalCodeField(),
    'EE': EEPostalCodeField(),
    'ES': ESPostalCodeField(),
    'FI': FIPostalCodeField(),
    'FR': FRPostalCodeField(),
    'GB': GBPostalCodeField(),
    'GR': GRPostalCodeField(),
    'HR': HRPostalCodeField(),
    'ID': IDPostalCodeField(),
    'IL': ILPostalCodeField(),
    'IN': INPostalCodeField(),
    'IS': ISPostalCodeField(),
    'IT': ITPostalCodeField(),
    'JP': JPPostalCodeField(),
    'LT': LTPostalCodeField(),
    'LV': LVPostalCodeField(),
    'MA': MAPostalCodeField(),
    'MT': MTPostalCodeField(),
    'MX': MXPostalCodeField(),
    'NL': NLPostalCodeField(),
    'NO': NOPostalCodeField(),
    'NZ': NZPostalCodeField(),
    'PK': PKPostalCodeField(),
    'PL': PLPostalCodeField(),
    'PT': PTPostalCodeField(),
    'RO': ROPostalCodeField(),
    'RU': RUPostalCodeField(),
    'SE': SEPostalCodeField(),
    'SG': SGPostalCodeField(),
    'SI': SIPostalCodeField(),
    'SK': SKPostalCodeField(),
    'TR': TRPostalCodeField(),
    'UA': UAPostalCodeField(),
    'US': USZipCodeField(),  # the US uses "ZIP code" rather than "postal code"
    'ZA': ZAPostalCodeField(),
}
# Dictionary of ISO country code to IBAN length.
#
# The official IBAN Registry document is the best source for up-to-date information about IBAN formats and which
# countries are in IBAN.
#
# https://www.swift.com/standards/data-standards/iban
#
# The IBAN_COUNTRY_CODE_LENGTH dictionary has been updated version 78 of the IBAN Registry document which was published
# in August 2017.
#
# Other Resources:
#
# https://en.wikipedia.org/wiki/International_Bank_Account_Number#IBAN_formats_by_country
# http://www.ecbs.org/iban/france-bank-account-number.html
# https://www.nordea.com/V%C3%A5ra+tj%C3%A4nster/Internationella+produkter+och+tj%C3%A4nster/Cash+Management/IBAN+countries/908472.html
# Each value is the *total* IBAN length in characters for that country,
# including the leading two-letter country code and the two check digits.
IBAN_COUNTRY_CODE_LENGTH = {'AD': 24,  # Andorra
                            'AE': 23,  # United Arab Emirates
                            'AL': 28,  # Albania
                            'AT': 20,  # Austria
                            'AZ': 28,  # Azerbaijan
                            'BA': 20,  # Bosnia and Herzegovina
                            'BE': 16,  # Belgium
                            'BG': 22,  # Bulgaria
                            'BH': 22,  # Bahrain
                            'BR': 29,  # Brazil
                            'BY': 28,  # Republic of Belarus
                            'CH': 21,  # Switzerland
                            'CR': 22,  # Costa Rica
                            'CY': 28,  # Cyprus
                            'CZ': 24,  # Czech Republic
                            'DE': 22,  # Germany
                            'DK': 18,  # Denmark
                            'DO': 28,  # Dominican Republic
                            'EE': 20,  # Estonia
                            'ES': 24,  # Spain
                            'FI': 18,  # Finland
                            'FO': 18,  # Faroe Islands
                            'FR': 27,  # France + French Guiana (GF), Guadeloupe (GP), Martinique (MQ), Réunion (RE),
                                       # French Polynesia (PF), French Southern Territories (TF), Mayotte (YT),
                                       # New Caledonia (NC), Saint Barthélemy (BL),
                                       # Saint Martin - French part (MF), Saint-Pierre and Miquelon (PM),
                                       # Wallis and Futuna (WF)
                            'GB': 22,  # United Kingdom + Guernsey (GG), Isle of Man (IM), Jersey (JE)
                            'GE': 22,  # Georgia
                            'GI': 23,  # Gibraltar
                            'GL': 18,  # Greenland
                            'GR': 27,  # Greece
                            'GT': 28,  # Guatemala
                            'HR': 21,  # Croatia
                            'HU': 28,  # Hungary
                            'IE': 22,  # Ireland
                            'IL': 23,  # Israel
                            'IQ': 23,  # Iraq
                            'IS': 26,  # Iceland
                            'IT': 27,  # Italy
                            'JO': 30,  # Jordan
                            'KW': 30,  # Kuwait
                            'KZ': 20,  # Kazakhstan
                            'LB': 28,  # Lebanon
                            'LC': 32,  # Saint Lucia
                            'LI': 21,  # Liechtenstein
                            'LT': 20,  # Lithuania
                            'LU': 20,  # Luxembourg
                            'LV': 21,  # Latvia
                            'MC': 27,  # Monaco
                            'MD': 24,  # Moldova
                            'ME': 22,  # Montenegro
                            'MK': 19,  # Macedonia
                            'MR': 27,  # Mauritania
                            'MT': 31,  # Malta
                            'MU': 30,  # Mauritius
                            'NL': 18,  # Netherlands
                            'NO': 15,  # Norway
                            'PK': 24,  # Pakistan
                            'PL': 28,  # Poland
                            'PS': 29,  # Palestine
                            'PT': 25,  # Portugal
                            'QA': 29,  # Qatar
                            'RO': 24,  # Romania
                            'RS': 22,  # Serbia
                            'SA': 24,  # Saudi Arabia
                            'SC': 31,  # Seychelles
                            'SE': 24,  # Sweden
                            'SI': 19,  # Slovenia
                            'SK': 24,  # Slovakia
                            'SM': 27,  # San Marino
                            'ST': 25,  # Sao Tome and Principe
                            'SV': 28,  # El Salvador
                            'TL': 23,  # Timor-Leste
                            'TN': 24,  # Tunisia
                            'TR': 26,  # Turkey
                            'UA': 29,  # Ukraine
                            'VG': 24,  # British Virgin Islands
                            'XK': 20}  # Kosovo (user-assigned country code)
# Nordea has catalogued IBANs for some additional countries but they are not part of the official IBAN network yet.
#
# Reference:
# https://www.nordea.com/V%C3%A5ra+tj%C3%A4nster/Internationella+produkter+och+tj%C3%A4nster/Cash+Management/IBAN+countries/908472.html
# Same semantics as IBAN_COUNTRY_CODE_LENGTH: total IBAN length per country.
# These entries are only merged in when ``use_nordea_extensions`` is enabled
# on IBANValidator.
NORDEA_COUNTRY_CODE_LENGTH = {'AO': 25,  # Angola
                              'BJ': 28,  # Benin
                              'BF': 27,  # Burkina Faso
                              'BI': 16,  # Burundi
                              'CI': 28,  # Ivory Coast
                              'CG': 27,  # Congo
                              'CM': 27,  # Cameroon
                              'CV': 25,  # Cape Verde
                              'DZ': 24,  # Algeria
                              'EG': 27,  # Egypt
                              'GA': 27,  # Gabon
                              'IR': 26,  # Iran
                              'MG': 27,  # Madagascar
                              'ML': 28,  # Mali
                              'MZ': 25,  # Mozambique
                              'SN': 28}  # Senegal
class IBANValidator(object):
    """A validator for International Bank Account Numbers (IBAN - ISO 13616-1:2007).

    :param use_nordea_extensions: also accept the country/length pairs that
        Nordea has catalogued but that are not part of the official IBAN
        registry yet.
    :param include_countries: optional iterable of country codes; when given,
        only IBANs from those countries are accepted.
    :raises ValueError: at construction time, if ``include_countries``
        requests a country that is not in the configured validation set.
    """

    def __init__(self, use_nordea_extensions=False, include_countries=None):
        self.use_nordea_extensions = use_nordea_extensions
        self.include_countries = include_countries

        self.validation_countries = IBAN_COUNTRY_CODE_LENGTH.copy()
        if self.use_nordea_extensions:
            self.validation_countries.update(NORDEA_COUNTRY_CODE_LENGTH)

        if self.include_countries:
            for country_code in self.include_countries:
                if country_code not in self.validation_countries:
                    msg = 'Explicitly requested country code %s is not ' \
                          'part of the configured IBAN validation set.' % country_code
                    # This is a programming/configuration error, not a user
                    # input error.  ValueError is a subclass of Exception
                    # (which was raised previously), so existing
                    # ``except Exception`` handlers still catch it.
                    raise ValueError(msg)

    def __eq__(self, other):
        # Guard against comparison with unrelated types: previously this
        # raised AttributeError instead of simply evaluating as unequal.
        if not isinstance(other, IBANValidator):
            return NotImplemented
        return (self.use_nordea_extensions == other.use_nordea_extensions and
                self.include_countries == other.include_countries)

    @staticmethod
    def iban_checksum(value):
        """
        Returns check digits for an input IBAN number.

        Original checksum in input value is ignored.
        """
        # 1. Move the two initial characters to the end of the string,
        #    replacing the checksum with '00'.
        value = value[4:] + value[:2] + '00'

        # 2. Replace each letter in the string with two digits, thereby
        #    expanding the string, where A = 10, B = 11, ..., Z = 35.
        value_digits = ''
        for x in value:
            if '0' <= x <= '9':
                value_digits += x
            elif 'A' <= x <= 'Z':
                value_digits += str(ord(x) - 55)  # ord('A') - 55 == 10
            else:
                raise ValidationError(_('%s is not a valid character for IBAN.') % x)

        # 3. The remainder of the number above when divided by 97 is then
        #    subtracted from 98 (ISO 7064 MOD 97-10 generation step).
        return '%02d' % (98 - int(value_digits) % 97)

    def __call__(self, value):
        """
        Validates the IBAN value using the official IBAN validation algorithm.

        https://en.wikipedia.org/wiki/International_Bank_Account_Number#Validating_the_IBAN

        Raises ``ValidationError`` on invalid input; returns ``None``
        (``value`` when it is ``None``) on success.
        """
        if value is None:
            return value

        # Normalise: IBANs are case-insensitive and commonly written with
        # spaces or dashes as group separators.
        value = value.upper().replace(' ', '').replace('-', '')

        # Check that the total IBAN length is correct as per the country.
        # If not, the IBAN is invalid.
        country_code = value[:2]
        if country_code in self.validation_countries:
            if self.validation_countries[country_code] != len(value):
                msg_params = {'country_code': country_code, 'number': self.validation_countries[country_code]}
                raise ValidationError(_('%(country_code)s IBANs must contain %(number)s characters.') % msg_params)
        else:
            raise ValidationError(_('%s is not a valid country code for IBAN.') % country_code)

        if self.include_countries and country_code not in self.include_countries:
            raise ValidationError(_('%s IBANs are not allowed in this field.') % country_code)

        # Recompute the check digits and compare with the ones supplied.
        if self.iban_checksum(value) != value[2:4]:
            raise ValidationError(_('Not a valid IBAN.'))
class BICValidator(object):
    """
    A validator for SWIFT Business Identifier Codes (ISO 9362:2009).

    Validation is based on the BIC structure found on wikipedia.

    https://en.wikipedia.org/wiki/ISO_9362#Structure
    """

    def __eq__(self, other):
        # This validator carries no configuration at all, so every instance
        # is interchangeable with every other.
        return True

    def __call__(self, value):
        if value is None:
            return value

        bic = value.upper()

        # A BIC is either 8 characters (BIC8) or 11 (BIC11, with branch code).
        if len(bic) not in (8, 11):
            raise ValidationError(_('BIC codes have either 8 or 11 characters.'))

        # Characters 1-4: institution code, uppercase ASCII letters only.
        institution_code = bic[:4]
        if any(ch not in string.ascii_uppercase for ch in institution_code):
            raise ValidationError(_('%s is not a valid institution code.') % institution_code)

        # Characters 5-6: an ISO 3166-1 alpha-2 country code.
        country_code = bic[4:6]
        if country_code not in ISO_3166_1_ALPHA2_COUNTRY_CODES:
            raise ValidationError(_('%s is not a valid country code.') % country_code)
class EANValidator(object):
    """
    A generic validator for EAN like codes with the last digit being the checksum.

    http://en.wikipedia.org/wiki/International_Article_Number_(EAN)

    :param strip_nondigits: when true, every non-digit character is removed
        from the value before the checksum is verified.
    :param message: optional override for the validation error message.
    """

    # Default error message; may be shadowed per instance via ``message``.
    message = _('Not a valid EAN code.')

    def __init__(self, strip_nondigits=False, message=None):
        if message is not None:
            self.message = message
        self.strip_nondigits = strip_nondigits

    def __eq__(self, other):
        # NOTE(review): ``message`` always exists (it is a class attribute),
        # so the hasattr guard can never actually skip the comparison.
        same_message = not hasattr(self, 'message') or self.message == other.message
        return same_message and self.strip_nondigits == other.strip_nondigits

    def __call__(self, value):
        if value is None:
            return value
        if self.strip_nondigits:
            # Drop every non-digit character before checksum verification.
            value = re.sub(r'[^\d]+', '', value)
        if not checksums.ean(value):
            raise ValidationError(self.message, code='invalid')
VATIN_PATTERN_MAP = {
'AT': r'^ATU\d{8}$',
'BE': r'^BE0?\d{9}$',
'BG': r'^BG\d{9,10}$',
'HR': r'^HR\d{11}$',
'CY': r'^CY\d{8}[A-Z]$',
'CZ': r'^CZ\d{8,10}$',
'DE': r'^DE\d{9}$',
'DK': r'^DK\d{8}$',
'EE': r'^EE\d{9}$',
'EL': r'^EL\d{9}$',
'ES': r'^ES[A-Z0-9]\d{7}[A-Z0-9]$',
'FI': r'^FI\d{8}$',
'FR': r'^FR[A-HJ-NP-Z0-9][A-HJ-NP-Z0-9]\d{9}$',
'GB': r'^(GB(GD|HA)\d{3}|GB\d{9}|GB\d{12})$',
'HU': r'^HU\d{8}$',
'IE': r'^IE\d[A-Z0-9\+\*]\d{5}[A-Z]{1,2}$',
'IT': r'^IT\d{11}$',
'LT': r'^LT(\d{9}|\d{12})$',
'LU': r'^LU\d{8}$',
'LV': r'^LV\d{11}$',
'MT': r'^MT\d{8}$',
'NL': r'^NL\d{9}B\d{2}$',
'PL': r'^PL\d{10}$',
'PT': r'^PT\d{9}$',
'RO': r'^RO\d{2,10}$',
'SE': r'^SE\d{10}01$',
'SI': r'^SI\d{8}$',
'SK': r'^SK\d{10}$',
}
"""
Map of country codes and regular expressions.
See https://en.wikipedia.org/wiki/VAT_identification_number
"""
VATIN_COUNTRY_CODE_LENGTH = 2
"""
Length of the country code prefix of a VAT identification number.
Codes are two letter ISO 3166-1 alpha-2 codes except for Greece that uses
ISO 639-1.
"""
class VATINValidator:
    """
    A validator for VAT identification numbers.

    Currently only supports European VIES VAT identification numbers.

    See https://en.wikipedia.org/wiki/VAT_identification_number
    """

    messages = {
        'country_code': _('%(country_code)s is not a valid country code.'),
        'vatin': _('%(vatin)s is not a valid VAT identification number.'),
    }

    def __call__(self, value):
        country_code, number = self.clean(value)
        # Look up the per-country pattern first; an unknown prefix means the
        # country code itself is invalid.
        try:
            pattern = VATIN_PATTERN_MAP[country_code]
        except KeyError:
            raise ValidationError(
                self.messages['country_code'],
                code='country_code',
                params={'country_code': country_code}
            )
        # The pattern is anchored and includes the prefix, so match the
        # whole value against it.
        if re.match(pattern, value) is None:
            raise ValidationError(
                self.messages['vatin'],
                code='vatin',
                params={'vatin': value}
            )

    def clean(self, value):
        """Return tuple of country code and number."""
        prefix = value[:VATIN_COUNTRY_CODE_LENGTH]
        remainder = value[VATIN_COUNTRY_CODE_LENGTH:]
        return prefix, remainder
def validate_country_postcode(value, alpha2_country_code):
    """Validate ``value`` as a postal code of the given country.

    Returns the cleaned (normalised) value when a validator is registered
    for ``alpha2_country_code``; otherwise returns ``value`` unchanged.
    """
    validator = POSTCODE_VALIDATORS.get(alpha2_country_code)
    if validator is None:
        # No validator registered for this country: accept the value as-is.
        return value
    return validator.clean(value)
| 39.428571 | 135 | 0.556745 |
ace6953a72ed15a21772ca375b1da230aa62146a | 708 | py | Python | pmdarima/datasets/tests/test_load_datasets.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | 1 | 2020-11-22T00:41:47.000Z | 2020-11-22T00:41:47.000Z | pmdarima/datasets/tests/test_load_datasets.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | null | null | null | pmdarima/datasets/tests/test_load_datasets.py | Saravji/pmdarima | 7f42e36beb888d9e1e7e41b0d9c9f7419c730a3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from pmdarima.datasets import load_heartrate, load_lynx, \
load_wineind, load_woolyrnq, load_austres, load_airpassengers
import numpy as np
import pandas as pd
import pytest
# Simply test loading the datasets and that we get the expected type
# Simply test loading the datasets and that we get the expected type
@pytest.mark.parametrize(
    'f', [load_heartrate,
          load_lynx,
          load_wineind,
          load_woolyrnq,
          load_austres,
          load_airpassengers])
def test_load(f):
    """Each loader yields a pandas Series or a numpy array per ``as_series``."""
    for as_series, expected_type in ((True, pd.Series), (False, np.ndarray)):
        loaded = f(as_series=as_series)
        assert isinstance(loaded, expected_type)
ace697215c8050c4b765b04bc0c7dfc12cdf059e | 98,810 | py | Python | src/sage/tensor/modules/finite_rank_free_module.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null | src/sage/tensor/modules/finite_rank_free_module.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null | src/sage/tensor/modules/finite_rank_free_module.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null | r"""
Free modules of finite rank
The class :class:`FiniteRankFreeModule` implements free modules of finite rank
over a commutative ring.
A *free module of finite rank* over a commutative ring `R` is a module `M` over
`R` that admits a *finite basis*, i.e. a finite familly of linearly independent
generators. Since `R` is commutative, it has the invariant basis number
property, so that the rank of the free module `M` is defined uniquely, as the
cardinality of any basis of `M`.
No distinguished basis of `M` is assumed. On the contrary, many bases can be
introduced on the free module along with change-of-basis rules (as module
automorphisms). Each
module element has then various representations over the various bases.
.. NOTE::
The class :class:`FiniteRankFreeModule` does not inherit from
class :class:`~sage.modules.free_module.FreeModule_generic`
nor from class
:class:`~sage.combinat.free_module.CombinatorialFreeModule`, since
both classes deal with modules with a *distinguished basis* (see
details :ref:`below <diff-FreeModule>`). Accordingly, the class
:class:`FiniteRankFreeModule` inherits directly from the generic class
:class:`~sage.structure.parent.Parent` with the category set to
:class:`~sage.categories.modules.Modules` (and not to
:class:`~sage.categories.modules_with_basis.ModulesWithBasis`).
.. TODO::
- implement submodules
- create a FreeModules category (cf. the *TODO* statement in the
documentation of :class:`~sage.categories.modules.Modules`: *Implement
a ``FreeModules(R)`` category, when so prompted by a concrete use case*)
AUTHORS:
- Eric Gourgoulhon, Michal Bejger (2014-2015): initial version
- Travis Scrimshaw (2016): category set to Modules(ring).FiniteDimensional()
(:trac:`20770`)
REFERENCES:
- Chap. 10 of R. Godement : *Algebra* [God1968]_
- Chap. 3 of S. Lang : *Algebra* [Lan2002]_
EXAMPLES:
Let us define a free module of rank 2 over `\ZZ`::
sage: M = FiniteRankFreeModule(ZZ, 2, name='M') ; M
Rank-2 free module M over the Integer Ring
sage: M.category()
Category of finite dimensional modules over Integer Ring
We introduce a first basis on ``M``::
sage: e = M.basis('e') ; e
Basis (e_0,e_1) on the Rank-2 free module M over the Integer Ring
The elements of the basis are of course module elements::
sage: e[0]
Element e_0 of the Rank-2 free module M over the Integer Ring
sage: e[1]
Element e_1 of the Rank-2 free module M over the Integer Ring
sage: e[0].parent()
Rank-2 free module M over the Integer Ring
We define a module element by its components w.r.t. basis ``e``::
sage: u = M([2,-3], basis=e, name='u')
sage: u.display(e)
u = 2 e_0 - 3 e_1
Module elements can be also be created by arithmetic expressions::
sage: v = -2*u + 4*e[0] ; v
Element of the Rank-2 free module M over the Integer Ring
sage: v.display(e)
6 e_1
sage: u == 2*e[0] - 3*e[1]
True
We define a second basis on ``M`` from a family of linearly independent
elements::
sage: f = M.basis('f', from_family=(e[0]-e[1], -2*e[0]+3*e[1])) ; f
Basis (f_0,f_1) on the Rank-2 free module M over the Integer Ring
sage: f[0].display(e)
f_0 = e_0 - e_1
sage: f[1].display(e)
f_1 = -2 e_0 + 3 e_1
We may of course express the elements of basis ``e`` in terms of basis ``f``::
sage: e[0].display(f)
e_0 = 3 f_0 + f_1
sage: e[1].display(f)
e_1 = 2 f_0 + f_1
as well as any module element::
sage: u.display(f)
u = -f_1
sage: v.display(f)
12 f_0 + 6 f_1
The two bases are related by a module automorphism::
sage: a = M.change_of_basis(e,f) ; a
Automorphism of the Rank-2 free module M over the Integer Ring
sage: a.parent()
General linear group of the Rank-2 free module M over the Integer Ring
sage: a.matrix(e)
[ 1 -2]
[-1 3]
Let us check that basis ``f`` is indeed the image of basis ``e`` by ``a``::
sage: f[0] == a(e[0])
True
sage: f[1] == a(e[1])
True
The reverse change of basis is of course the inverse automorphism::
sage: M.change_of_basis(f,e) == a^(-1)
True
We introduce a new module element via its components w.r.t. basis ``f``::
sage: v = M([2,4], basis=f, name='v')
sage: v.display(f)
v = 2 f_0 + 4 f_1
The sum of the two module elements ``u`` and ``v`` can be performed even if
they have been defined on different bases, thanks to the known relation
between the two bases::
sage: s = u + v ; s
Element u+v of the Rank-2 free module M over the Integer Ring
We can display the result in either basis::
sage: s.display(e)
u+v = -4 e_0 + 7 e_1
sage: s.display(f)
u+v = 2 f_0 + 3 f_1
Tensor products of elements are implemented::
sage: t = u*v ; t
Type-(2,0) tensor u*v on the Rank-2 free module M over the Integer Ring
sage: t.parent()
Free module of type-(2,0) tensors on the
Rank-2 free module M over the Integer Ring
sage: t.display(e)
u*v = -12 e_0*e_0 + 20 e_0*e_1 + 18 e_1*e_0 - 30 e_1*e_1
sage: t.display(f)
u*v = -2 f_1*f_0 - 4 f_1*f_1
We can access to tensor components w.r.t. to a given basis via the square
bracket operator::
sage: t[e,0,1]
20
sage: t[f,1,0]
-2
sage: u[e,0]
2
sage: u[e,:]
[2, -3]
sage: u[f,:]
[0, -1]
The parent of the automorphism ``a`` is the group `\mathrm{GL}(M)`, but
``a`` can also be considered as a tensor of type `(1,1)` on ``M``::
sage: a.parent()
General linear group of the Rank-2 free module M over the Integer Ring
sage: a.tensor_type()
(1, 1)
sage: a.display(e)
e_0*e^0 - 2 e_0*e^1 - e_1*e^0 + 3 e_1*e^1
sage: a.display(f)
f_0*f^0 - 2 f_0*f^1 - f_1*f^0 + 3 f_1*f^1
As such, we can form its tensor product with ``t``, yielding a tensor of
type `(3,1)`::
sage: t*a
Type-(3,1) tensor on the Rank-2 free module M over the Integer Ring
sage: (t*a).display(e)
-12 e_0*e_0*e_0*e^0 + 24 e_0*e_0*e_0*e^1 + 12 e_0*e_0*e_1*e^0
- 36 e_0*e_0*e_1*e^1 + 20 e_0*e_1*e_0*e^0 - 40 e_0*e_1*e_0*e^1
- 20 e_0*e_1*e_1*e^0 + 60 e_0*e_1*e_1*e^1 + 18 e_1*e_0*e_0*e^0
- 36 e_1*e_0*e_0*e^1 - 18 e_1*e_0*e_1*e^0 + 54 e_1*e_0*e_1*e^1
- 30 e_1*e_1*e_0*e^0 + 60 e_1*e_1*e_0*e^1 + 30 e_1*e_1*e_1*e^0
- 90 e_1*e_1*e_1*e^1
The parent of `t\otimes a` is itself a free module of finite rank over `\ZZ`::
sage: T = (t*a).parent() ; T
Free module of type-(3,1) tensors on the Rank-2 free module M over the
Integer Ring
sage: T.base_ring()
Integer Ring
sage: T.rank()
16
.. _diff-FreeModule:
.. RUBRIC:: Differences between ``FiniteRankFreeModule`` and ``FreeModule``
(or ``VectorSpace``)
To illustrate the differences, let us create two free modules of rank 3 over
`\ZZ`, one with ``FiniteRankFreeModule`` and the other one with
``FreeModule``::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M') ; M
Rank-3 free module M over the Integer Ring
sage: N = FreeModule(ZZ, 3) ; N
Ambient free module of rank 3 over the principal ideal domain Integer Ring
The main difference is that ``FreeModule`` returns a free module with a
distinguished basis, while ``FiniteRankFreeModule`` does not::
sage: N.basis()
[
(1, 0, 0),
(0, 1, 0),
(0, 0, 1)
]
sage: M.bases()
[]
sage: M.print_bases()
No basis has been defined on the Rank-3 free module M over the Integer Ring
This is also revealed by the category of each module::
sage: M.category()
Category of finite dimensional modules over Integer Ring
sage: N.category()
Category of finite dimensional modules with basis over
(euclidean domains and infinite enumerated sets and metric spaces)
In other words, the module created by ``FreeModule`` is actually `\ZZ^3`,
while, in the absence of any distinguished basis, no *canonical* isomorphism
relates the module created by ``FiniteRankFreeModule`` to `\ZZ^3`::
sage: N is ZZ^3
True
sage: M is ZZ^3
False
sage: M == ZZ^3
False
Because it is `\ZZ^3`, ``N`` is unique, while there may be various modules
of the same rank over the same ring created by ``FiniteRankFreeModule``;
they are then distinguished by their names (actually by the complete
sequence of arguments of ``FiniteRankFreeModule``)::
sage: N1 = FreeModule(ZZ, 3) ; N1
Ambient free module of rank 3 over the principal ideal domain Integer Ring
sage: N1 is N # FreeModule(ZZ, 3) is unique
True
sage: M1 = FiniteRankFreeModule(ZZ, 3, name='M_1') ; M1
Rank-3 free module M_1 over the Integer Ring
sage: M1 is M # M1 and M are different rank-3 modules over ZZ
False
sage: M1b = FiniteRankFreeModule(ZZ, 3, name='M_1') ; M1b
Rank-3 free module M_1 over the Integer Ring
sage: M1b is M1 # because M1b and M1 have the same name
True
As illustrated above, various bases can be introduced on the module created by
``FiniteRankFreeModule``::
sage: e = M.basis('e') ; e
Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
sage: f = M.basis('f', from_family=(-e[0], e[1]-e[2], -2*e[1]+3*e[2])) ; f
Basis (f_0,f_1,f_2) on the Rank-3 free module M over the Integer Ring
sage: M.bases()
[Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring,
Basis (f_0,f_1,f_2) on the Rank-3 free module M over the Integer Ring]
Each element of a basis is accessible via its index::
sage: e[0]
Element e_0 of the Rank-3 free module M over the Integer Ring
sage: e[0].parent()
Rank-3 free module M over the Integer Ring
sage: f[1]
Element f_1 of the Rank-3 free module M over the Integer Ring
sage: f[1].parent()
Rank-3 free module M over the Integer Ring
while on module ``N``, the element of the (unique) basis is accessible
directly from the module symbol::
sage: N.0
(1, 0, 0)
sage: N.1
(0, 1, 0)
sage: N.0.parent()
Ambient free module of rank 3 over the principal ideal domain Integer Ring
The arithmetic of elements is similar; the difference lies in the display:
a basis has to be specified for elements of ``M``, while elements of ``N`` are
displayed directly as elements of `\ZZ^3`::
sage: u = 2*e[0] - 3*e[2] ; u
Element of the Rank-3 free module M over the Integer Ring
sage: u.display(e)
2 e_0 - 3 e_2
sage: u.display(f)
-2 f_0 - 6 f_1 - 3 f_2
sage: u[e,:]
[2, 0, -3]
sage: u[f,:]
[-2, -6, -3]
sage: v = 2*N.0 - 3*N.2 ; v
(2, 0, -3)
For the case of ``M``, in order to avoid to specify the basis if the user is
always working with the same basis (e.g. only one basis has been defined),
the concept of *default basis* has been introduced::
sage: M.default_basis()
Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
sage: M.print_bases()
Bases defined on the Rank-3 free module M over the Integer Ring:
- (e_0,e_1,e_2) (default basis)
- (f_0,f_1,f_2)
This is different from the *distinguished basis* of ``N``: it simply means that
the mention of the basis can be omitted in function arguments::
sage: u.display() # equivalent to u.display(e)
2 e_0 - 3 e_2
sage: u[:] # equivalent to u[e,:]
[2, 0, -3]
At any time, the default basis can be changed::
sage: M.set_default_basis(f)
sage: u.display()
-2 f_0 - 6 f_1 - 3 f_2
Another difference between ``FiniteRankFreeModule`` and ``FreeModule`` is that
for the former the range of indices can be specified (by default, it starts
from 0)::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M', start_index=1) ; M
Rank-3 free module M over the Integer Ring
sage: e = M.basis('e') ; e # compare with (e_0,e_1,e_2) above
Basis (e_1,e_2,e_3) on the Rank-3 free module M over the Integer Ring
sage: e[1], e[2], e[3]
(Element e_1 of the Rank-3 free module M over the Integer Ring,
Element e_2 of the Rank-3 free module M over the Integer Ring,
Element e_3 of the Rank-3 free module M over the Integer Ring)
All the above holds for ``VectorSpace`` instead of ``FreeModule``: the object
created by ``VectorSpace`` is actually a Cartesian power of the base field::
sage: V = VectorSpace(QQ,3) ; V
Vector space of dimension 3 over Rational Field
sage: V.category()
Category of finite dimensional vector spaces with basis
over (number fields and quotient fields and metric spaces)
sage: V is QQ^3
True
sage: V.basis()
[
(1, 0, 0),
(0, 1, 0),
(0, 0, 1)
]
To create a vector space without any distinguished basis, one has to use
``FiniteRankFreeModule``::
sage: V = FiniteRankFreeModule(QQ, 3, name='V') ; V
3-dimensional vector space V over the Rational Field
sage: V.category()
Category of finite dimensional vector spaces over Rational Field
sage: V.bases()
[]
sage: V.print_bases()
No basis has been defined on the 3-dimensional vector space V over the
Rational Field
The class :class:`FiniteRankFreeModule` has been created for the needs
of the `SageManifolds project <http://sagemanifolds.obspm.fr/>`_, where
free modules do not have any distinguished basis. Two kinds of free modules
occur in the context of differentiable manifolds (see
`here <http://sagemanifolds.obspm.fr/tensor_modules.html>`_ for more
details):
- the tangent vector space at any point of the manifold (cf.
:class:`~sage.manifolds.differentiable.tangent_space.TangentSpace`);
- the set of vector fields on a parallelizable open subset `U` of the manifold,
which is a free module over the algebra of scalar fields on `U` (cf.
:class:`~sage.manifolds.differentiable.vectorfield_module.VectorFieldFreeModule`).
For instance, without any specific coordinate choice, no basis can be
distinguished in a tangent space.
On the other side, the modules created by ``FreeModule`` have much more
algebraic functionalities than those created by ``FiniteRankFreeModule``. In
particular, submodules have not been implemented yet in
:class:`FiniteRankFreeModule`. Moreover, modules resulting from ``FreeModule``
are tailored to the specific kind of their base ring:
- free module over a commutative ring that is not an integral domain
(`\ZZ/6\ZZ`)::
sage: R = IntegerModRing(6) ; R
Ring of integers modulo 6
sage: FreeModule(R, 3)
Ambient free module of rank 3 over Ring of integers modulo 6
sage: type(FreeModule(R, 3))
<class 'sage.modules.free_module.FreeModule_ambient_with_category'>
- free module over an integral domain that is not principal (`\ZZ[X]`)::
sage: R.<X> = ZZ[] ; R
Univariate Polynomial Ring in X over Integer Ring
sage: FreeModule(R, 3)
Ambient free module of rank 3 over the integral domain Univariate
Polynomial Ring in X over Integer Ring
sage: type(FreeModule(R, 3))
<class 'sage.modules.free_module.FreeModule_ambient_domain_with_category'>
- free module over a principal ideal domain (`\ZZ`)::
sage: R = ZZ ; R
Integer Ring
sage: FreeModule(R,3)
Ambient free module of rank 3 over the principal ideal domain Integer Ring
sage: type(FreeModule(R, 3))
<class 'sage.modules.free_module.FreeModule_ambient_pid_with_category'>
On the contrary, all objects constructed with ``FiniteRankFreeModule`` belong
to the same class::
sage: R = IntegerModRing(6)
sage: type(FiniteRankFreeModule(R, 3))
<class 'sage.tensor.modules.finite_rank_free_module.FiniteRankFreeModule_with_category'>
sage: R.<X> = ZZ[]
sage: type(FiniteRankFreeModule(R, 3))
<class 'sage.tensor.modules.finite_rank_free_module.FiniteRankFreeModule_with_category'>
sage: R = ZZ
sage: type(FiniteRankFreeModule(R, 3))
<class 'sage.tensor.modules.finite_rank_free_module.FiniteRankFreeModule_with_category'>
.. RUBRIC:: Differences between ``FiniteRankFreeModule`` and
``CombinatorialFreeModule``
An alternative to construct free modules in Sage is
:class:`~sage.combinat.free_module.CombinatorialFreeModule`.
However, as ``FreeModule``, it leads to a module with a distinguished basis::
sage: N = CombinatorialFreeModule(ZZ, [1,2,3]) ; N
Free module generated by {1, 2, 3} over Integer Ring
sage: N.category()
Category of finite dimensional modules with basis over Integer Ring
The distinguished basis is returned by the method ``basis()``::
sage: b = N.basis() ; b
Finite family {1: B[1], 2: B[2], 3: B[3]}
sage: b[1]
B[1]
sage: b[1].parent()
Free module generated by {1, 2, 3} over Integer Ring
For the free module ``M`` created above with ``FiniteRankFreeModule``, the
method ``basis`` has at least one argument: the symbol string that
specifies which basis is required::
sage: e = M.basis('e') ; e
Basis (e_1,e_2,e_3) on the Rank-3 free module M over the Integer Ring
sage: e[1]
Element e_1 of the Rank-3 free module M over the Integer Ring
sage: e[1].parent()
Rank-3 free module M over the Integer Ring
The arithmetic of elements is similar::
sage: u = 2*e[1] - 5*e[3] ; u
Element of the Rank-3 free module M over the Integer Ring
sage: v = 2*b[1] - 5*b[3] ; v
2*B[1] - 5*B[3]
One notices that elements of ``N`` are displayed directly in terms of their
expansions on the distinguished basis. For elements of ``M``, one has to use
the method
:meth:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor.display`
in order to specify the basis::
sage: u.display(e)
2 e_1 - 5 e_3
The components on the basis are returned by the square bracket operator for
``M`` and by the method ``coefficient`` for ``N``::
sage: [u[e,i] for i in {1,2,3}]
[2, 0, -5]
sage: u[e,:] # a shortcut for the above
[2, 0, -5]
sage: [v.coefficient(i) for i in {1,2,3}]
[2, 0, -5]
"""
#******************************************************************************
# Copyright (C) 2015 Eric Gourgoulhon <eric.gourgoulhon@obspm.fr>
# Copyright (C) 2015 Michal Bejger <bejger@camk.edu.pl>
# Copyright (C) 2016 Travis Scrimshaw <tscrimsh@umn.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#******************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.modules import Modules
from sage.categories.rings import Rings
from sage.categories.fields import Fields
from sage.rings.integer import Integer
from sage.tensor.modules.free_module_element import FiniteRankFreeModuleElement
class FiniteRankFreeModule(UniqueRepresentation, Parent):
r"""
Free module of finite rank over a commutative ring.
A *free module of finite rank* over a commutative ring `R` is a module `M`
over `R` that admits a *finite basis*, i.e. a finite familly of linearly
independent generators. Since `R` is commutative, it has the invariant
basis number property, so that the rank of the free module `M` is defined
uniquely, as the cardinality of any basis of `M`.
No distinguished basis of `M` is assumed. On the contrary, many bases can be
introduced on the free module along with change-of-basis rules (as module
automorphisms). Each
module element has then various representations over the various bases.
.. NOTE::
The class :class:`FiniteRankFreeModule` does not inherit from
class :class:`~sage.modules.free_module.FreeModule_generic`
nor from class
:class:`~sage.combinat.free_module.CombinatorialFreeModule`, since
both classes deal with modules with a *distinguished basis* (see
details :ref:`above <diff-FreeModule>`).
Moreover, following the recommendation exposed in :trac:`16427`
the class :class:`FiniteRankFreeModule` inherits directly from
:class:`~sage.structure.parent.Parent` (with the category set to
:class:`~sage.categories.modules.Modules`) and not from the Cython
class :class:`~sage.modules.module.Module`.
The class :class:`FiniteRankFreeModule` is a Sage *parent* class,
the corresponding *element* class being
:class:`~sage.tensor.modules.free_module_element.FiniteRankFreeModuleElement`.
INPUT:
- ``ring`` -- commutative ring `R` over which the free module is
constructed
- ``rank`` -- positive integer; rank of the free module
- ``name`` -- (default: ``None``) string; name given to the free module
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to denote
      the free module; if none is provided, it is set to ``name``
- ``start_index`` -- (default: 0) integer; lower bound of the range of
indices in bases defined on the free module
- ``output_formatter`` -- (default: ``None``) function or unbound
method called to format the output of the tensor components;
``output_formatter`` must take 1 or 2 arguments: the first argument
must be an element of the ring `R` and the second one, if any, some
format specification
EXAMPLES:
Free module of rank 3 over `\ZZ`::
sage: FiniteRankFreeModule._clear_cache_() # for doctests only
sage: M = FiniteRankFreeModule(ZZ, 3) ; M
Rank-3 free module over the Integer Ring
sage: M = FiniteRankFreeModule(ZZ, 3, name='M') ; M # declaration with a name
Rank-3 free module M over the Integer Ring
sage: M.category()
Category of finite dimensional modules over Integer Ring
sage: M.base_ring()
Integer Ring
sage: M.rank()
3
If the base ring is a field, the free module is in the category of vector
spaces::
sage: V = FiniteRankFreeModule(QQ, 3, name='V') ; V
3-dimensional vector space V over the Rational Field
sage: V.category()
Category of finite dimensional vector spaces over Rational Field
The LaTeX output is adjusted via the parameter ``latex_name``::
sage: latex(M) # the default is the symbol provided in the string ``name``
M
sage: M = FiniteRankFreeModule(ZZ, 3, name='M', latex_name=r'\mathcal{M}')
sage: latex(M)
\mathcal{M}
The free module M has no distinguished basis::
sage: M in ModulesWithBasis(ZZ)
False
sage: M in Modules(ZZ)
True
In particular, no basis is initialized at the module construction::
sage: M.print_bases()
No basis has been defined on the Rank-3 free module M over the Integer Ring
sage: M.bases()
[]
Bases have to be introduced by means of the method :meth:`basis`,
the first defined basis being considered as the *default basis*, meaning
    it can be skipped in function arguments requiring a basis (this can
be changed by means of the method :meth:`set_default_basis`)::
sage: e = M.basis('e') ; e
Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
sage: M.default_basis()
Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
A second basis can be created from a family of linearly independent
elements expressed in terms of basis ``e``::
sage: f = M.basis('f', from_family=(-e[0], e[1]+e[2], 2*e[1]+3*e[2]))
sage: f
Basis (f_0,f_1,f_2) on the Rank-3 free module M over the Integer Ring
sage: M.print_bases()
Bases defined on the Rank-3 free module M over the Integer Ring:
- (e_0,e_1,e_2) (default basis)
- (f_0,f_1,f_2)
sage: M.bases()
[Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring,
Basis (f_0,f_1,f_2) on the Rank-3 free module M over the Integer Ring]
M is a *parent* object, whose elements are instances of
:class:`~sage.tensor.modules.free_module_element.FiniteRankFreeModuleElement`
(actually a dynamically generated subclass of it)::
sage: v = M.an_element() ; v
Element of the Rank-3 free module M over the Integer Ring
sage: from sage.tensor.modules.free_module_element import FiniteRankFreeModuleElement
sage: isinstance(v, FiniteRankFreeModuleElement)
True
sage: v in M
True
sage: M.is_parent_of(v)
True
sage: v.display() # expansion w.r.t. the default basis (e)
e_0 + e_1 + e_2
sage: v.display(f)
-f_0 + f_1
The test suite of the category of modules is passed::
sage: TestSuite(M).run()
Constructing an element of ``M`` from (the integer) 0 yields
the zero element of ``M``::
sage: M(0)
Element zero of the Rank-3 free module M over the Integer Ring
sage: M(0) is M.zero()
True
Non-zero elements are constructed by providing their components in
a given basis::
sage: v = M([-1,0,3]) ; v # components in the default basis (e)
Element of the Rank-3 free module M over the Integer Ring
sage: v.display() # expansion w.r.t. the default basis (e)
-e_0 + 3 e_2
sage: v.display(f)
f_0 - 6 f_1 + 3 f_2
sage: v = M([-1,0,3], basis=f) ; v # components in a specific basis
Element of the Rank-3 free module M over the Integer Ring
sage: v.display(f)
-f_0 + 3 f_2
sage: v.display()
e_0 + 6 e_1 + 9 e_2
sage: v = M([-1,0,3], basis=f, name='v') ; v
Element v of the Rank-3 free module M over the Integer Ring
sage: v.display(f)
v = -f_0 + 3 f_2
sage: v.display()
v = e_0 + 6 e_1 + 9 e_2
An alternative is to construct the element from an empty list of
    components and to set the nonzero components afterwards::
sage: v = M([], name='v')
sage: v[e,0] = -1
sage: v[e,2] = 3
sage: v.display(e)
v = -e_0 + 3 e_2
Indices on the free module, such as indices labelling the element of a
basis, are provided by the generator method :meth:`irange`. By default,
they range from 0 to the module's rank minus one::
sage: list(M.irange())
[0, 1, 2]
This can be changed via the parameter ``start_index`` in the module
construction::
sage: M1 = FiniteRankFreeModule(ZZ, 3, name='M', start_index=1)
sage: list(M1.irange())
[1, 2, 3]
The parameter ``output_formatter`` in the constructor of the free module
is used to set the output format of tensor components::
sage: N = FiniteRankFreeModule(QQ, 3, output_formatter=Rational.numerical_approx)
sage: e = N.basis('e')
sage: v = N([1/3, 0, -2], basis=e)
sage: v[e,:]
[0.333333333333333, 0.000000000000000, -2.00000000000000]
sage: v.display(e) # default format (53 bits of precision)
0.333333333333333 e_0 - 2.00000000000000 e_2
sage: v.display(e, format_spec=10) # 10 bits of precision
0.33 e_0 - 2.0 e_2
"""
Element = FiniteRankFreeModuleElement
    def __init__(self, ring, rank, name=None, latex_name=None, start_index=0,
                 output_formatter=None, category=None):
        r"""
        Initialize ``self``.
        See :class:`FiniteRankFreeModule` for documentation and examples.
        TESTS::
            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: TestSuite(M).run()
            sage: e = M.basis('e')
            sage: TestSuite(M).run()
            sage: f = M.basis('f')
            sage: TestSuite(M).run()
        """
        # A free module of finite rank is defined here only over a
        # commutative ring (which guarantees a well-defined rank):
        if ring not in Rings().Commutative():
            raise TypeError("the module base ring must be commutative")
        # The default category is that of finite-dimensional modules over
        # ``ring``; a subcategory may be passed by derived classes:
        category = Modules(ring).FiniteDimensional().or_subcategory(category)
        Parent.__init__(self, base=ring, category=category)
        self._ring = ring # same as self._base
        self._rank = rank
        self._name = name
        # The LaTeX symbol defaults to the plain-text name:
        if latex_name is None:
            self._latex_name = self._name
        else:
            self._latex_name = latex_name
        self._sindex = start_index
        self._output_formatter = output_formatter
        # Dictionary of the tensor modules built on self
        # (keys = (k,l) --the tensor type)
        # This dictionary is to be extended on need by the method tensor_module
        self._tensor_modules = {(1,0): self} # self is considered as the set
                                             # of tensors of type (1,0)
        # Dictionaries of exterior powers of self and of its dual
        # (keys = p --the power degree)
        # These dictionaries are to be extended on need by the methods
        # exterior_power and dual_exterior_power
        self._exterior_powers = {1: self}
        self._dual_exterior_powers = {}
        # List of known bases on the free module:
        self._known_bases = []
        self._def_basis = None # default basis
        self._basis_changes = {} # Dictionary of the changes of bases
        # Zero element:
        # (the hasattr guard avoids overwriting a zero element possibly
        # already set up before this constructor runs -- e.g. by a subclass)
        if not hasattr(self, '_zero_element'):
            self._zero_element = self._element_constructor_(name='zero',
                                                            latex_name='0')
        # Identity automorphism:
        self._identity_map = None # to be set by self.identity_map()
        # General linear group:
        self._general_linear_group = None # to be set by
                                          # self.general_linear_group()
#### Parent methods
def _element_constructor_(self, comp=[], basis=None, name=None,
latex_name=None):
r"""
Construct an element of ``self``.
EXAMPLES::
sage: FiniteRankFreeModule._clear_cache_() # for doctests only
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: e = M.basis('e')
sage: v = M._element_constructor_(comp=[1,0,-2], basis=e, name='v') ; v
Element v of the Rank-3 free module M over the Integer Ring
sage: v.display()
v = e_0 - 2 e_2
sage: v == M([1,0,-2])
True
sage: v = M._element_constructor_(0) ; v
Element zero of the Rank-3 free module M over the Integer Ring
sage: v = M._element_constructor_() ; v
Element of the Rank-3 free module M over the Integer Ring
"""
if isinstance(comp, (int, Integer)) and comp == 0:
return self._zero_element
resu = self.element_class(self, name=name, latex_name=latex_name)
if comp:
resu.set_comp(basis)[:] = comp
return resu
def _an_element_(self):
r"""
Construct some (unamed) element of ``self``.
EXAMPLES::
sage: FiniteRankFreeModule._clear_cache_() # for doctests only
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: v = M._an_element_(); v
Element of the Rank-3 free module M over the Integer Ring
sage: v.display()
e_0 + e_1 + e_2
sage: v == M.an_element()
True
sage: v.parent()
Rank-3 free module M over the Integer Ring
"""
if self._def_basis is None:
self.basis('e')
resu = self.element_class(self)
resu.set_comp()[:] = [self._ring.an_element() for i in range(self._rank)]
return resu
#### End of parent methods
#### Methods to be redefined by derived classes ####
def _repr_(self):
r"""
Return a string representation of ``self``.
EXAMPLES::
sage: FiniteRankFreeModule(ZZ, 3, name='M')
Rank-3 free module M over the Integer Ring
"""
if self._ring in Fields():
description = "{}-dimensional vector space ".format(self._rank)
else:
description = "Rank-{} free module ".format(self._rank)
if self._name is not None:
description += self._name + " "
description += "over the {}".format(self._ring)
return description
def _Hom_(self, other, category=None):
r"""
Construct the set of homomorphisms ``self`` --> ``other``.
INPUT:
- ``other`` -- another free module of finite rank over the same ring
as ``self``
- ``category`` -- (default: ``None``) not used here (to ensure
compatibility with generic hook ``_Hom_``)
OUTPUT:
- the hom-set Hom(M,N), where M is ``self`` and N is ``other``
EXAMPLES::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: N = FiniteRankFreeModule(ZZ, 2, name='N')
sage: H = M._Hom_(N) ; H
Set of Morphisms from Rank-3 free module M over the Integer Ring
to Rank-2 free module N over the Integer Ring
in Category of finite dimensional modules over Integer Ring
sage: H = Hom(M,N) ; H # indirect doctest
Set of Morphisms from Rank-3 free module M over the Integer Ring
to Rank-2 free module N over the Integer Ring
in Category of finite dimensional modules over Integer Ring
"""
from .free_module_homset import FreeModuleHomset
return FreeModuleHomset(self, other)
def tensor_module(self, k, l):
r"""
Return the free module of all tensors of type `(k, l)` defined on
``self``.
INPUT:
- ``k`` -- non-negative integer; the contravariant rank, the tensor
type being `(k, l)`
- ``l`` -- non-negative integer; the covariant rank, the tensor type
being `(k, l)`
OUTPUT:
- instance of
:class:`~sage.tensor.modules.tensor_free_module.TensorFreeModule`
representing the free module
`T^{(k,l)}(M)` of type-`(k,l)` tensors on the free module ``self``
EXAMPLES:
Tensor modules over a free module over `\ZZ`::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: T = M.tensor_module(1,2) ; T
Free module of type-(1,2) tensors on the Rank-3 free module M
over the Integer Ring
sage: T.an_element()
Type-(1,2) tensor on the Rank-3 free module M over the Integer Ring
Tensor modules are unique::
sage: M.tensor_module(1,2) is T
True
The base module is itself the module of all type-`(1,0)` tensors::
sage: M.tensor_module(1,0) is M
True
See :class:`~sage.tensor.modules.tensor_free_module.TensorFreeModule`
for more documentation.
"""
from sage.tensor.modules.tensor_free_module import TensorFreeModule
if (k,l) not in self._tensor_modules:
self._tensor_modules[(k,l)] = TensorFreeModule(self, (k,l))
return self._tensor_modules[(k,l)]
def exterior_power(self, p):
r"""
Return the `p`-th exterior power of ``self``.
If `M` stands for the free module ``self``, the *p-th exterior
power of* `M` is the set `\Lambda^p(M)` of all *alternating
contravariant tensors* of rank `p`, i.e. of all multilinear maps
.. MATH::
\underbrace{M^*\times\cdots\times M^*}_{p\ \; \mbox{times}}
\longrightarrow R
that vanish whenever any of two of their arguments are equal.
`\Lambda^p(M)` is a free module of rank `\binom{n}{p}`
over the same ring as `M`, where `n` is the rank of `M`.
INPUT:
- ``p`` -- non-negative integer
OUTPUT:
- for `p=0`, the base ring `R`
- for `p=1`, the free module `M`, since `\Lambda^1(M)=M`
- for `p\geq 2`, instance of
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerFreeModule`
representing the free module `\Lambda^p(M)`
EXAMPLES:
Exterior powers of the dual of a free `\ZZ`-module of rank 3::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: e = M.basis('e')
sage: M.exterior_power(0) # return the base ring
Integer Ring
sage: M.exterior_power(1) # return the module itself
Rank-3 free module M over the Integer Ring
sage: M.exterior_power(1) is M
True
sage: M.exterior_power(2)
2nd exterior power of the Rank-3 free module M over the Integer Ring
sage: M.exterior_power(2).an_element()
Alternating contravariant tensor of degree 2 on the Rank-3
free module M over the Integer Ring
sage: M.exterior_power(2).an_element().display()
e_0/\e_1
sage: M.exterior_power(3)
3rd exterior power of the Rank-3 free module M over the Integer Ring
sage: M.exterior_power(3).an_element()
Alternating contravariant tensor of degree 3 on the Rank-3
free module M over the Integer Ring
sage: M.exterior_power(3).an_element().display()
e_0/\e_1/\e_2
See
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerFreeModule`
for more documentation.
"""
from sage.tensor.modules.ext_pow_free_module import ExtPowerFreeModule
if p == 0:
return self._ring
if p not in self._exterior_powers:
self._exterior_powers[p] = ExtPowerFreeModule(self, p)
return self._exterior_powers[p]
def dual_exterior_power(self, p):
r"""
Return the `p`-th exterior power of the dual of ``self``.
If `M` stands for the free module ``self``, the *p-th exterior
power of the dual of* `M` is the set `\Lambda^p(M^*)` of all
*alternating forms of degree* `p` on `M`, i.e. of all
multilinear maps
.. MATH::
\underbrace{M\times\cdots\times M}_{p\ \; \mbox{times}}
\longrightarrow R
that vanish whenever any of two of their arguments are equal.
`\Lambda^p(M^*)` is a free module of rank `\binom{n}{p}`
over the same ring as `M`, where `n` is the rank of `M`.
INPUT:
- ``p`` -- non-negative integer
OUTPUT:
- for `p=0`, the base ring `R`
- for `p\geq 1`, instance of
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerDualFreeModule`
representing the free module `\Lambda^p(M^*)`
EXAMPLES:
Exterior powers of the dual of a free `\ZZ`-module of rank 3::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: e = M.basis('e')
sage: M.dual_exterior_power(0) # return the base ring
Integer Ring
sage: M.dual_exterior_power(1) # return the dual module
Dual of the Rank-3 free module M over the Integer Ring
sage: M.dual_exterior_power(1) is M.dual()
True
sage: M.dual_exterior_power(2)
2nd exterior power of the dual of the Rank-3 free module M over the Integer Ring
sage: M.dual_exterior_power(2).an_element()
Alternating form of degree 2 on the Rank-3 free module M over the Integer Ring
sage: M.dual_exterior_power(2).an_element().display()
e^0/\e^1
sage: M.dual_exterior_power(3)
3rd exterior power of the dual of the Rank-3 free module M over the Integer Ring
sage: M.dual_exterior_power(3).an_element()
Alternating form of degree 3 on the Rank-3 free module M over the Integer Ring
sage: M.dual_exterior_power(3).an_element().display()
e^0/\e^1/\e^2
See
:class:`~sage.tensor.modules.ext_pow_free_module.ExtPowerDualFreeModule`
for more documentation.
"""
from sage.tensor.modules.ext_pow_free_module import ExtPowerDualFreeModule
if p == 0:
return self._ring
if p not in self._dual_exterior_powers:
self._dual_exterior_powers[p] = ExtPowerDualFreeModule(self, p)
return self._dual_exterior_powers[p]
def general_linear_group(self):
r"""
Return the general linear group of ``self``.
If ``self`` is the free module `M`, the *general linear group* is the
group `\mathrm{GL}(M)` of automorphisms of `M`.
OUTPUT:
- instance of class
:class:`~sage.tensor.modules.free_module_linear_group.FreeModuleLinearGroup`
representing `\mathrm{GL}(M)`
EXAMPLES:
The general linear group of a rank-3 free module::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: e = M.basis('e')
sage: GL = M.general_linear_group() ; GL
General linear group of the Rank-3 free module M over the Integer Ring
sage: GL.category()
Category of groups
sage: type(GL)
<class 'sage.tensor.modules.free_module_linear_group.FreeModuleLinearGroup_with_category'>
There is a unique instance of the general linear group::
sage: M.general_linear_group() is GL
True
The group identity element::
sage: GL.one()
Identity map of the Rank-3 free module M over the Integer Ring
sage: GL.one().matrix(e)
[1 0 0]
[0 1 0]
[0 0 1]
An element::
sage: GL.an_element()
Automorphism of the Rank-3 free module M over the Integer Ring
sage: GL.an_element().matrix(e)
[ 1 0 0]
[ 0 -1 0]
[ 0 0 1]
See
:class:`~sage.tensor.modules.free_module_linear_group.FreeModuleLinearGroup`
for more documentation.
"""
from sage.tensor.modules.free_module_linear_group import \
FreeModuleLinearGroup
if self._general_linear_group is None:
self._general_linear_group = FreeModuleLinearGroup(self)
return self._general_linear_group
    def basis(self, symbol, latex_symbol=None, from_family=None,
              indices=None, latex_indices=None, symbol_dual=None,
              latex_symbol_dual=None):
        r"""
        Define or return a basis of the free module ``self``.
        Let `M` denotes the free module ``self`` and `n` its rank.
        The basis can be defined from a set of `n` linearly independent
        elements of `M` by means of the argument ``from_family``.
        If ``from_family`` is not specified, the basis is created from
        scratch and, at this stage, is unrelated to bases that could have been
        defined previously on `M`. It can be related afterwards by means of
        the method :meth:`set_change_of_basis`.
        If the basis specified by the given symbol already exists, it is
        simply returned, whatever the value of the arguments ``latex_symbol``
        or ``from_family``.
        Note that another way to construct a basis of ``self`` is to use
        the method
        :meth:`~sage.tensor.modules.free_module_basis.FreeModuleBasis.new_basis`
        on an existing basis, with the automorphism relating the two bases as
        an argument.
        INPUT:
        - ``symbol`` -- either a string, to be used as a common base for the
          symbols of the elements of the basis, or a list/tuple of strings,
          representing the individual symbols of the elements of the basis
        - ``latex_symbol`` -- (default: ``None``) either a string, to be used
          as a common base for the LaTeX symbols of the elements of the basis,
          or a list/tuple of strings, representing the individual LaTeX symbols
          of the elements of the basis; if ``None``, ``symbol`` is used in
          place of ``latex_symbol``
        - ``from_family`` -- (default: ``None``) tuple of `n` linearly
          independent elements of the free module ``self`` (`n` being the
          rank of ``self``)
        - ``indices`` -- (default: ``None``; used only if ``symbol`` is a
          single string) list/tuple of strings representing the indices
          labelling the elements of the basis; if ``None``, the indices will be
          generated as integers within the range declared on ``self``
        - ``latex_indices`` -- (default: ``None``) list/tuple of strings
          representing the indices for the LaTeX symbols of the elements of
          the basis; if ``None``, ``indices`` is used instead
        - ``symbol_dual`` -- (default: ``None``) same as ``symbol`` but for the
          dual basis; if ``None``, ``symbol`` must be a string and is used
          for the common base of the symbols of the elements of the dual basis
        - ``latex_symbol_dual`` -- (default: ``None``) same as ``latex_symbol``
          but for the dual basis
        OUTPUT:
        - instance of
          :class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`
          representing a basis on ``self``
        EXAMPLES:
        Bases on a rank-3 free module::
            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: e = M.basis('e') ; e
            Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
            sage: e[0]
            Element e_0 of the Rank-3 free module M over the Integer Ring
            sage: latex(e)
            \left(e_{0},e_{1},e_{2}\right)
        The LaTeX symbol can be set explicitly::
            sage: eps = M.basis('eps', latex_symbol=r'\epsilon') ; eps
            Basis (eps_0,eps_1,eps_2) on the Rank-3 free module M
             over the Integer Ring
            sage: latex(eps)
            \left(\epsilon_{0},\epsilon_{1},\epsilon_{2}\right)
        The indices can be customized::
            sage: f = M.basis('f', indices=('x', 'y', 'z')); f
            Basis (f_x,f_y,f_z) on the Rank-3 free module M over the Integer Ring
            sage: latex(f[1])
            f_{y}
        By providing a list or a tuple for the argument ``symbol``, one can
        have a different symbol for each element of the basis; it is then
        mandatory to specify some symbols for the dual basis::
            sage: g = M.basis(('a', 'b', 'c'), symbol_dual=('A', 'B', 'C')); g
            Basis (a,b,c) on the Rank-3 free module M over the Integer Ring
            sage: g.dual_basis()
            Dual basis (A,B,C) on the Rank-3 free module M over the Integer Ring
        If the provided symbol and indices are that of an already defined
        basis, the latter is returned (no new basis is created)::
            sage: M.basis('e') is e
            True
            sage: M.basis('eps') is eps
            True
            sage: M.basis('e', indices=['x', 'y', 'z']) is e
            False
            sage: M.basis('e', indices=['x', 'y', 'z']) is \
            ....: M.basis('e', indices=['x', 'y', 'z'])
            True
        The individual elements of the basis are labelled according to the
        parameter ``start_index`` provided at the free module construction::
            sage: M = FiniteRankFreeModule(ZZ, 3, name='M', start_index=1)
            sage: e = M.basis('e') ; e
            Basis (e_1,e_2,e_3) on the Rank-3 free module M over the Integer Ring
            sage: e[1]
            Element e_1 of the Rank-3 free module M over the Integer Ring
        Construction of a basis from a family of linearly independent module
        elements::
            sage: f1 = -e[2]
            sage: f2 = 4*e[1] + 3*e[3]
            sage: f3 = 7*e[1] + 5*e[3]
            sage: f = M.basis('f', from_family=(f1,f2,f3))
            sage: f[1].display()
            f_1 = -e_2
            sage: f[2].display()
            f_2 = 4 e_1 + 3 e_3
            sage: f[3].display()
            f_3 = 7 e_1 + 5 e_3
        The change-of-basis automorphisms have been registered::
            sage: M.change_of_basis(e,f).matrix(e)
            [ 0  4  7]
            [-1  0  0]
            [ 0  3  5]
            sage: M.change_of_basis(f,e).matrix(e)
            [ 0 -1  0]
            [-5  0  7]
            [ 3  0 -4]
            sage: M.change_of_basis(f,e) == M.change_of_basis(e,f).inverse()
            True
        Check of the change-of-basis e --> f::
            sage: a = M.change_of_basis(e,f) ; a
            Automorphism of the Rank-3 free module M over the Integer Ring
            sage: all( f[i] == a(e[i]) for i in M.irange() )
            True
        For more documentation on bases see
        :class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`.
        """
        from .free_module_basis import FreeModuleBasis
        # If a basis with the same symbol and indices has already been
        # defined, return it (note: latex_symbol and from_family are
        # deliberately ignored in this comparison):
        for other in self._known_bases:
            if symbol == other._symbol and indices == other._indices:
                return other
        resu = FreeModuleBasis(self, symbol, latex_symbol=latex_symbol,
                               indices=indices, latex_indices=latex_indices,
                               symbol_dual=symbol_dual,
                               latex_symbol_dual=latex_symbol_dual)
        if from_family:
            # Sanity checks on the generating family:
            n = self._rank
            if len(from_family) != n:
                raise ValueError("the size of the family is not {}".format(n))
            for ff in from_family:
                if ff not in self:
                    raise TypeError("{} is not an element of {}".format(ff,
                                                                        self))
            # The automorphisms relating the family to previously defined
            # bases are registered:
            ff0 = from_family[0]
            for basis in ff0._components:
                try:
                    # Components of every family element in ``basis``;
                    # a ValueError signals that some element has no known
                    # expression in ``basis``, so that basis is skipped:
                    comp = [ff.components(basis) for ff in from_family]
                except ValueError:
                    continue
                # Change-of-basis matrix: column j holds the components of
                # from_family[j] (the double brackets in comp_ff[[i]] select
                # the raw component, bypassing any output formatter):
                mat = [[comp_ff[[i]] for comp_ff in comp]
                       for i in self.irange()]
                aut = self.automorphism()
                aut.set_comp(basis)[:] = mat
                self.set_change_of_basis(basis, resu, aut)
        return resu
    def tensor(self, tensor_type, name=None, latex_name=None, sym=None,
               antisym=None):
        r"""
        Construct a tensor on the free module ``self``.
        INPUT:
        - ``tensor_type`` -- pair ``(k, l)`` with ``k`` being the
          contravariant rank and ``l`` the covariant rank
        - ``name`` -- (default: ``None``) string; name given to the tensor
        - ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
          denote the tensor; if none is provided, the LaTeX symbol is set
          to ``name``
        - ``sym`` -- (default: ``None``) a symmetry or a list of symmetries
          among the tensor arguments: each symmetry is described by a tuple
          containing the positions of the involved arguments, with the
          convention ``position = 0`` for the first argument. For instance:
          * ``sym = (0,1)`` for a symmetry between the 1st and 2nd arguments
          * ``sym = [(0,2), (1,3,4)]`` for a symmetry between the 1st and 3rd
            arguments and a symmetry between the 2nd, 4th and 5th arguments.
        - ``antisym`` -- (default: ``None``) antisymmetry or list of
          antisymmetries among the arguments, with the same convention
          as for ``sym``
        OUTPUT:
        - instance of
          :class:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor`
          representing the tensor defined on ``self`` with the provided
          characteristics
        EXAMPLES:
        Tensors on a rank-3 free module::
            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: t = M.tensor((1,0), name='t') ; t
            Element t of the Rank-3 free module M over the Integer Ring
            sage: t = M.tensor((0,1), name='t') ; t
            Linear form t on the Rank-3 free module M over the Integer Ring
            sage: t = M.tensor((1,1), name='t') ; t
            Type-(1,1) tensor t on the Rank-3 free module M over the Integer Ring
            sage: t = M.tensor((0,2), name='t', sym=(0,1)) ; t
            Symmetric bilinear form t on the
             Rank-3 free module M over the Integer Ring
            sage: t = M.tensor((0,2), name='t', antisym=(0,1)) ; t
            Alternating form t of degree 2 on the
             Rank-3 free module M over the Integer Ring
            sage: t = M.tensor((1,2), name='t') ; t
            Type-(1,2) tensor t on the Rank-3 free module M over the Integer Ring
        See :class:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor`
        for more examples and documentation.
        """
        # Special cases:
        # - a type-(1,0) tensor is simply a module element:
        if tensor_type == (1,0):
            return self.element_class(self, name=name, latex_name=latex_name)
        # - a type-(0,1) tensor is a linear form:
        elif tensor_type == (0,1):
            return self.linear_form(name=name, latex_name=latex_name)
        # - a fully antisymmetric type-(0,l) tensor is an alternating form:
        elif tensor_type[0] == 0 and tensor_type[1] > 1 and antisym:
            if isinstance(antisym[0], (int, Integer)):
                # a single antisymmetry is provided as a tuple or a range
                # object; it is converted to a 1-item list:
                antisym = [tuple(antisym)]
            if isinstance(antisym, list):
                antisym0 = antisym[0]
            else:
                antisym0 = antisym
            # Only if the (first) antisymmetry spans all covariant arguments
            # is the tensor an alternating form:
            if len(antisym0) == tensor_type[1]:
                return self.alternating_form(tensor_type[1], name=name,
                                             latex_name=latex_name)
        # - a fully antisymmetric type-(k,0) tensor is an alternating
        #   contravariant tensor:
        elif tensor_type[0] > 1 and tensor_type[1] == 0 and antisym:
            if isinstance(antisym[0], (int, Integer)):
                # a single antisymmetry is provided as a tuple or a range
                # object; it is converted to a 1-item list:
                antisym = [tuple(antisym)]
            if isinstance(antisym, list):
                antisym0 = antisym[0]
            else:
                antisym0 = antisym
            if len(antisym0) == tensor_type[0]:
                return self.alternating_contravariant_tensor(tensor_type[0],
                                           name=name, latex_name=latex_name)
        # Generic case: element of the appropriate tensor module
        # (sym/antisym are forwarded unchanged):
        return self.tensor_module(*tensor_type).element_class(self,
                                 tensor_type, name=name, latex_name=latex_name,
                                 sym=sym, antisym=antisym)
    def tensor_from_comp(self, tensor_type, comp, name=None, latex_name=None):
        r"""
        Construct a tensor on ``self`` from a set of components.
        The tensor symmetries are deduced from those of the components.
        INPUT:
        - ``tensor_type`` -- pair ``(k, l)`` with ``k`` being the
          contravariant rank and ``l`` the covariant rank
        - ``comp`` -- instance of :class:`~sage.tensor.modules.comp.Components`
          representing the tensor components in a given basis
        - ``name`` -- (default: ``None``) string; name given to the tensor
        - ``latex_name`` -- (default: ``None``) string; LaTeX symbol to denote
          the tensor; if none is provided, the LaTeX symbol is set to ``name``
        OUTPUT:
        - instance of
          :class:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor`
          representing the tensor defined on ``self`` with the provided
          characteristics.
        EXAMPLES:
        Construction of a tensor of rank 1::
            sage: from sage.tensor.modules.comp import Components, CompWithSym, CompFullySym, CompFullyAntiSym
            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: e = M.basis('e') ; e
            Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
            sage: c = Components(ZZ, e, 1)
            sage: c[:]
            [0, 0, 0]
            sage: c[:] = [-1,4,2]
            sage: t = M.tensor_from_comp((1,0), c)
            sage: t
            Element of the Rank-3 free module M over the Integer Ring
            sage: t.display(e)
            -e_0 + 4 e_1 + 2 e_2
            sage: t = M.tensor_from_comp((0,1), c) ; t
            Linear form on the Rank-3 free module M over the Integer Ring
            sage: t.display(e)
            -e^0 + 4 e^1 + 2 e^2
        Construction of a tensor of rank 2::
            sage: c = CompFullySym(ZZ, e, 2)
            sage: c[0,0], c[1,2] = 4, 5
            sage: t = M.tensor_from_comp((0,2), c) ; t
            Symmetric bilinear form on the
             Rank-3 free module M over the Integer Ring
            sage: t.symmetries()
            symmetry: (0, 1); no antisymmetry
            sage: t.display(e)
            4 e^0*e^0 + 5 e^1*e^2 + 5 e^2*e^1
            sage: c = CompFullyAntiSym(ZZ, e, 2)
            sage: c[0,1], c[1,2] = 4, 5
            sage: t = M.tensor_from_comp((0,2), c) ; t
            Alternating form of degree 2 on the
             Rank-3 free module M over the Integer Ring
            sage: t.display(e)
            4 e^0/\e^1 + 5 e^1/\e^2
        """
        # NOTE(review): CompFullySym appears unused below; kept as in the
        # original import line.
        from .comp import CompWithSym, CompFullySym, CompFullyAntiSym
        #
        # 0/ Compatibility checks:
        # the components must live over the same ring, be expressed in a
        # known basis of this module, and have the right number of indices
        if comp._ring is not self._ring:
            raise TypeError("the components are not defined on the same" +
                            " ring as the module")
        if comp._frame not in self._known_bases:
            raise TypeError("the components are not defined on a basis of" +
                            " the module")
        if comp._nid != tensor_type[0] + tensor_type[1]:
            raise TypeError("number of component indices not compatible with "+
                            " the tensor type")
        #
        # 1/ Construction of the tensor:
        # the concrete class is chosen from the tensor type and, for fully
        # antisymmetric components, specialized to alternating (contravariant)
        # forms/tensors
        if tensor_type == (1,0):
            resu = self.element_class(self, name=name, latex_name=latex_name)
        elif tensor_type == (0,1):
            resu = self.linear_form(name=name, latex_name=latex_name)
        elif tensor_type[0] == 0 and tensor_type[1] > 1 and \
                isinstance(comp, CompFullyAntiSym):
            resu = self.alternating_form(tensor_type[1], name=name,
                                         latex_name=latex_name)
        elif tensor_type[0] > 1 and tensor_type[1] == 0 and \
                isinstance(comp, CompFullyAntiSym):
            resu = self.alternating_contravariant_tensor(tensor_type[0],
                                                         name=name,
                                                         latex_name=latex_name)
        else:
            resu = self.tensor_module(*tensor_type).element_class(self,
                                 tensor_type, name=name, latex_name=latex_name)
            # Tensor symmetries deduced from those of comp:
            if isinstance(comp, CompWithSym):
                resu._sym = comp._sym
                resu._antisym = comp._antisym
        #
        # 2/ Tensor components set to comp:
        # (the Components object is stored directly, without copy)
        resu._components[comp._frame] = comp
        #
        return resu
def alternating_contravariant_tensor(self, degree, name=None,
latex_name=None):
r"""
Construct an alternating contravariant tensor on the free module.
INPUT:
- ``degree`` -- degree of the alternating contravariant tensor
(i.e. its tensor rank)
- ``name`` -- (default: ``None``) string; name given to the
alternating contravariant tensor
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
denote the alternating contravariant tensor; if none is
provided, the LaTeX symbol is set to ``name``
OUTPUT:
- instance of
:class:`~sage.tensor.modules.alternating_contr_tensor.AlternatingContrTensor`
EXAMPLES:
Alternating contravariant tensor on a rank-3 module::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: a = M.alternating_contravariant_tensor(2, 'a') ; a
Alternating contravariant tensor a of degree 2 on the
Rank-3 free module M over the Integer Ring
The nonzero components in a given basis have to be set in a second
step, thereby fully specifying the alternating form::
sage: e = M.basis('e') ; e
Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
sage: a.set_comp(e)[0,1] = 2
sage: a.set_comp(e)[1,2] = -3
sage: a.display(e)
a = 2 e_0/\e_1 - 3 e_1/\e_2
An alternating contravariant tensor of degree 1 is simply
an element of the module::
sage: a = M.alternating_contravariant_tensor(1, 'a') ; a
Element a of the Rank-3 free module M over the Integer Ring
See
:class:`~sage.tensor.modules.alternating_contr_tensor.AlternatingContrTensor`
for more documentation.
"""
if degree == 1:
return self.element_class(self, name=name,
latex_name=latex_name)
return self.exterior_power(degree).element_class(self, degree,
name=name, latex_name=latex_name)
    def alternating_form(self, degree, name=None, latex_name=None):
        r"""
        Construct an alternating form on the free module.

        INPUT:

        - ``degree`` -- the degree of the alternating form (i.e. its
          tensor rank)
        - ``name`` -- (default: ``None``) string; name given to the
          alternating form
        - ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
          denote the alternating form; if none is provided, the LaTeX symbol
          is set to ``name``

        OUTPUT:

        - instance of
          :class:`~sage.tensor.modules.free_module_alt_form.FreeModuleAltForm`

        EXAMPLES:

        Alternating forms on a rank-3 module::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: a = M.alternating_form(2, 'a') ; a
            Alternating form a of degree 2 on the
             Rank-3 free module M over the Integer Ring

        The nonzero components in a given basis have to be set in a second
        step, thereby fully specifying the alternating form::

            sage: e = M.basis('e') ; e
            Basis (e_0,e_1,e_2) on the Rank-3 free module M over the Integer Ring
            sage: a.set_comp(e)[0,1] = 2
            sage: a.set_comp(e)[1,2] = -3
            sage: a.display(e)
            a = 2 e^0/\e^1 - 3 e^1/\e^2

        An alternating form of degree 1 is a linear form::

            sage: a = M.alternating_form(1, 'a') ; a
            Linear form a on the Rank-3 free module M over the Integer Ring

        To construct such a form, it is preferable to call the method
        :meth:`linear_form` instead::

            sage: a = M.linear_form('a') ; a
            Linear form a on the Rank-3 free module M over the Integer Ring

        See
        :class:`~sage.tensor.modules.free_module_alt_form.FreeModuleAltForm`
        for more documentation.
        """
        # Elements of the degree-th dual exterior power are exactly the
        # alternating forms of that degree.
        return self.dual_exterior_power(degree).element_class(self, degree,
                                            name=name, latex_name=latex_name)
    def linear_form(self, name=None, latex_name=None):
        r"""
        Construct a linear form on the free module ``self``.

        A *linear form* on a free module `M` over a ring `R` is a map
        `M \rightarrow R` that is linear. It can be viewed as a tensor of type
        `(0,1)` on `M`.

        INPUT:

        - ``name`` -- (default: ``None``) string; name given to the linear
          form
        - ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
          denote the linear form; if none is provided, the LaTeX symbol
          is set to ``name``

        OUTPUT:

        - instance of
          :class:`~sage.tensor.modules.free_module_alt_form.FreeModuleAltForm`

        EXAMPLES:

        Linear form on a rank-3 free module::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: e = M.basis('e')
            sage: a = M.linear_form('A') ; a
            Linear form A on the Rank-3 free module M over the Integer Ring
            sage: a[:] = [2,-1,3]  # components w.r.t. the module's default basis (e)
            sage: a.display()
            A = 2 e^0 - e^1 + 3 e^2

        A linear form maps module elements to ring elements::

            sage: v = M([1,1,1])
            sage: a(v)
            4

        Test of linearity::

            sage: u = M([-5,-2,7])
            sage: a(3*u - 4*v) == 3*a(u) - 4*a(v)
            True

        See
        :class:`~sage.tensor.modules.free_module_alt_form.FreeModuleAltForm`
        for more documentation.
        """
        # A linear form is an alternating form of degree 1, i.e. an element
        # of the first dual exterior power.
        return self.dual_exterior_power(1).element_class(self, 1, name=name,
                                                         latex_name=latex_name)
def automorphism(self, matrix=None, basis=None, name=None,
latex_name=None):
r"""
Construct a module automorphism of ``self``.
Denoting ``self`` by `M`, an automorphism of ``self`` is an element
of the general linear group `\mathrm{GL}(M)`.
INPUT:
- ``matrix`` -- (default: ``None``) matrix of size rank(M)*rank(M)
representing the automorphism with respect to ``basis``;
this entry can actually be any material from which a matrix of
elements of ``self`` base ring can be constructed; the *columns* of
``matrix`` must be the components w.r.t. ``basis`` of
the images of the elements of ``basis``. If ``matrix`` is ``None``,
the automorphism has to be initialized afterwards by
method :meth:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor.set_comp`
or via the operator [].
- ``basis`` -- (default: ``None``) basis of ``self`` defining the
matrix representation; if ``None`` the default basis of ``self`` is
assumed.
- ``name`` -- (default: ``None``) string; name given to the
automorphism
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
denote the automorphism; if none is provided, the LaTeX symbol
is set to ``name``
OUTPUT:
- instance of
:class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`
EXAMPLES:
Automorphism of a rank-2 free `\ZZ`-module::
sage: M = FiniteRankFreeModule(ZZ, 2, name='M')
sage: e = M.basis('e')
sage: a = M.automorphism(matrix=[[1,2],[1,3]], basis=e, name='a') ; a
Automorphism a of the Rank-2 free module M over the Integer Ring
sage: a.parent()
General linear group of the Rank-2 free module M over the Integer Ring
sage: a.matrix(e)
[1 2]
[1 3]
An automorphism is a tensor of type (1,1)::
sage: a.tensor_type()
(1, 1)
sage: a.display(e)
a = e_0*e^0 + 2 e_0*e^1 + e_1*e^0 + 3 e_1*e^1
The automorphism components can be specified in a second step, as
components of a type-`(1,1)` tensor::
sage: a1 = M.automorphism(name='a')
sage: a1[e,:] = [[1,2],[1,3]]
sage: a1.matrix(e)
[1 2]
[1 3]
sage: a1 == a
True
Component by component specification::
sage: a2 = M.automorphism(name='a')
sage: a2[0,0] = 1 # component set in the module's default basis (e)
sage: a2[0,1] = 2
sage: a2[1,0] = 1
sage: a2[1,1] = 3
sage: a2.matrix(e)
[1 2]
[1 3]
sage: a2 == a
True
See
:class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`
for more documentation.
"""
resu = self.general_linear_group().element_class(self, name=name,
latex_name=latex_name)
if matrix:
if basis is None:
basis = self.default_basis()
resu.set_comp(basis)[:] = matrix
return resu
    def sym_bilinear_form(self, name=None, latex_name=None):
        r"""
        Construct a symmetric bilinear form on the free module ``self``.

        INPUT:

        - ``name`` -- (default: ``None``) string; name given to the symmetric
          bilinear form
        - ``latex_name`` -- (default: ``None``) string; LaTeX symbol to
          denote the symmetric bilinear form; if none is provided, the LaTeX
          symbol is set to ``name``

        OUTPUT:

        - instance of
          :class:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor`
          of tensor type `(0,2)` and symmetric

        EXAMPLES:

        Symmetric bilinear form on a rank-3 free module::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: a = M.sym_bilinear_form('A') ; a
            Symmetric bilinear form A on the
             Rank-3 free module M over the Integer Ring

        A symmetric bilinear form is a type-`(0,2)` tensor that is symmetric::

            sage: a.parent()
            Free module of type-(0,2) tensors on the
             Rank-3 free module M over the Integer Ring
            sage: a.tensor_type()
            (0, 2)
            sage: a.tensor_rank()
            2
            sage: a.symmetries()
            symmetry: (0, 1); no antisymmetry

        Components with respect to a given basis::

            sage: e = M.basis('e')
            sage: a[0,0], a[0,1], a[0,2] = 1, 2, 3
            sage: a[1,1], a[1,2] = 4, 5
            sage: a[2,2] = 6

        Only independent components have been set; the other ones are
        deduced by symmetry::

            sage: a[1,0], a[2,0], a[2,1]
            (2, 3, 5)
            sage: a[:]
            [1 2 3]
            [2 4 5]
            [3 5 6]

        A symmetric bilinear form acts on pairs of module elements::

            sage: u = M([2,-1,3]) ; v = M([-2,4,1])
            sage: a(u,v)
            61
            sage: a(v,u) == a(u,v)
            True

        The sum of two symmetric bilinear forms is another symmetric bilinear
        form::

            sage: b = M.sym_bilinear_form('B')
            sage: b[0,0], b[0,1], b[1,2] = -2, 1, -3
            sage: s = a + b ; s
            Symmetric bilinear form A+B on the
             Rank-3 free module M over the Integer Ring
            sage: a[:], b[:], s[:]
            (
            [1 2 3]  [-2  1  0]  [-1  3  3]
            [2 4 5]  [ 1  0 -3]  [ 3  4  2]
            [3 5 6], [ 0 -3  0], [ 3  2  6]
            )

        Adding a symmetric bilinear form with a non-symmetric one results in
        a generic type-`(0,2)` tensor::

            sage: c = M.tensor((0,2), name='C')
            sage: c[0,1] = 4
            sage: s = a + c ; s
            Type-(0,2) tensor A+C on the Rank-3 free module M over the Integer Ring
            sage: s.symmetries()
            no symmetry;  no antisymmetry
            sage: s[:]
            [1 6 3]
            [2 4 5]
            [3 5 6]

        See :class:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor`
        for more documentation.
        """
        # A symmetric bilinear form is simply a type-(0,2) tensor with the
        # symmetry (0,1) declared at creation.
        return self.tensor_module(0,2).element_class(self, (0,2), name=name,
                                          latex_name=latex_name, sym=(0,1))
#### End of methods to be redefined by derived classes ####
def _latex_(self):
r"""
LaTeX representation of ``self``.
EXAMPLES::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: M._latex_()
'M'
sage: latex(M)
M
sage: M1 = FiniteRankFreeModule(ZZ, 3, name='M', latex_name=r'\mathcal{M}')
sage: M1._latex_()
'\\mathcal{M}'
sage: latex(M1)
\mathcal{M}
"""
if self._latex_name is None:
return r'\mbox{' + str(self) + r'}'
else:
return self._latex_name
    def rank(self):
        r"""
        Return the rank of the free module ``self``.

        Since the ring over which ``self`` is built is assumed to be
        commutative (and hence has the invariant basis number property), the
        rank is defined uniquely, as the cardinality of any basis of ``self``.

        EXAMPLES:

        Rank of free modules over `\ZZ`::

            sage: M = FiniteRankFreeModule(ZZ, 3)
            sage: M.rank()
            3
            sage: M.tensor_module(0,1).rank()
            3
            sage: M.tensor_module(0,2).rank()
            9
            sage: M.tensor_module(1,0).rank()
            3
            sage: M.tensor_module(1,1).rank()
            9
            sage: M.tensor_module(1,2).rank()
            27
            sage: M.tensor_module(2,2).rank()
            81
        """
        # The rank is fixed at construction and cached in ``self._rank``.
        return self._rank
    def zero(self):
        r"""
        Return the zero element of ``self``.

        EXAMPLES:

        Zero elements of free modules over `\ZZ`::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: M.zero()
            Element zero of the Rank-3 free module M over the Integer Ring
            sage: M.zero().parent() is M
            True
            sage: M.zero() is M(0)
            True
            sage: T = M.tensor_module(1,1)
            sage: T.zero()
            Type-(1,1) tensor zero on the Rank-3 free module M over the Integer Ring
            sage: T.zero().parent() is T
            True
            sage: T.zero() is T(0)
            True

        Components of the zero element with respect to some basis::

            sage: e = M.basis('e')
            sage: M.zero()[e,:]
            [0, 0, 0]
            sage: all(M.zero()[e,i] == M.base_ring().zero() for i in M.irange())
            True
            sage: T.zero()[e,:]
            [0 0 0]
            [0 0 0]
            [0 0 0]
            sage: M.tensor_module(1,2).zero()[e,:]
            [[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
             [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
             [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]
        """
        # The zero element is created once (presumably at construction) and
        # cached, so that ``M.zero() is M.zero()`` holds.
        return self._zero_element
    def dual(self):
        r"""
        Return the dual module of ``self``.

        EXAMPLES:

        Dual of a free module over `\ZZ`::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
            sage: M.dual()
            Dual of the Rank-3 free module M over the Integer Ring
            sage: latex(M.dual())
            M^*

        The dual is a free module of the same rank as M::

            sage: isinstance(M.dual(), FiniteRankFreeModule)
            True
            sage: M.dual().rank()
            3

        It is formed by alternating forms of degree 1, i.e. linear forms::

            sage: M.dual() is M.dual_exterior_power(1)
            True
            sage: M.dual().an_element()
            Linear form on the Rank-3 free module M over the Integer Ring
            sage: a = M.linear_form()
            sage: a in M.dual()
            True

        The elements of a dual basis belong of course to the dual module::

            sage: e = M.basis('e')
            sage: e.dual_basis()[0] in M.dual()
            True
        """
        # The dual module is realized as the first dual exterior power.
        return self.dual_exterior_power(1)
def irange(self, start=None):
r"""
Single index generator, labelling the elements of a basis of ``self``.
INPUT:
- ``start`` -- (default: ``None``) integer; initial value of the
index; if none is provided, ``self._sindex`` is assumed
OUTPUT:
- an iterable index, starting from ``start`` and ending at
``self._sindex + self.rank() - 1``
EXAMPLES:
Index range on a rank-3 module::
sage: M = FiniteRankFreeModule(ZZ, 3)
sage: list(M.irange())
[0, 1, 2]
sage: list(M.irange(start=1))
[1, 2]
The default starting value corresponds to the parameter ``start_index``
provided at the module construction (the default value being 0)::
sage: M1 = FiniteRankFreeModule(ZZ, 3, start_index=1)
sage: list(M1.irange())
[1, 2, 3]
sage: M2 = FiniteRankFreeModule(ZZ, 3, start_index=-4)
sage: list(M2.irange())
[-4, -3, -2]
"""
si = self._sindex
imax = self._rank + si
if start is None:
i = si
else:
i = start
while i < imax:
yield i
i += 1
def default_basis(self):
r"""
Return the default basis of the free module ``self``.
The *default basis* is simply a basis whose name can be skipped in
methods requiring a basis as an argument. By default, it is the first
basis introduced on the module. It can be changed by the method
:meth:`set_default_basis`.
OUTPUT:
- instance of
:class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`
EXAMPLES:
At the module construction, no default basis is assumed::
sage: M = FiniteRankFreeModule(ZZ, 2, name='M', start_index=1)
sage: M.default_basis()
No default basis has been defined on the
Rank-2 free module M over the Integer Ring
The first defined basis becomes the default one::
sage: e = M.basis('e') ; e
Basis (e_1,e_2) on the Rank-2 free module M over the Integer Ring
sage: M.default_basis()
Basis (e_1,e_2) on the Rank-2 free module M over the Integer Ring
sage: f = M.basis('f') ; f
Basis (f_1,f_2) on the Rank-2 free module M over the Integer Ring
sage: M.default_basis()
Basis (e_1,e_2) on the Rank-2 free module M over the Integer Ring
"""
if self._def_basis is None:
print("No default basis has been defined on the {}".format(self))
return self._def_basis
    def set_default_basis(self, basis):
        r"""
        Sets the default basis of ``self``.

        The *default basis* is simply a basis whose name can be skipped in
        methods requiring a basis as an argument. By default, it is the first
        basis introduced on the module.

        INPUT:

        - ``basis`` -- instance of
          :class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`
          representing a basis on ``self``

        EXAMPLES:

        Changing the default basis on a rank-3 free module::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M', start_index=1)
            sage: e = M.basis('e') ; e
            Basis (e_1,e_2,e_3) on the Rank-3 free module M over the Integer Ring
            sage: f = M.basis('f') ; f
            Basis (f_1,f_2,f_3) on the Rank-3 free module M over the Integer Ring
            sage: M.default_basis()
            Basis (e_1,e_2,e_3) on the Rank-3 free module M over the Integer Ring
            sage: M.set_default_basis(f)
            sage: M.default_basis()
            Basis (f_1,f_2,f_3) on the Rank-3 free module M over the Integer Ring
        """
        # Local import to avoid a circular import at module load time.
        from .free_module_basis import FreeModuleBasis
        if not isinstance(basis, FreeModuleBasis):
            raise TypeError("the argument is not a free module basis")
        # The basis must have been constructed on this very module.
        if basis._fmodule is not self:
            raise ValueError("the basis is not defined on the current module")
        self._def_basis = basis
def print_bases(self):
r"""
Display the bases that have been defined on the free module ``self``.
Use the method :meth:`bases` to get the raw list of bases.
EXAMPLES:
Bases on a rank-4 free module::
sage: M = FiniteRankFreeModule(ZZ, 4, name='M', start_index=1)
sage: M.print_bases()
No basis has been defined on the
Rank-4 free module M over the Integer Ring
sage: e = M.basis('e')
sage: M.print_bases()
Bases defined on the Rank-4 free module M over the Integer Ring:
- (e_1,e_2,e_3,e_4) (default basis)
sage: f = M.basis('f')
sage: M.print_bases()
Bases defined on the Rank-4 free module M over the Integer Ring:
- (e_1,e_2,e_3,e_4) (default basis)
- (f_1,f_2,f_3,f_4)
sage: M.set_default_basis(f)
sage: M.print_bases()
Bases defined on the Rank-4 free module M over the Integer Ring:
- (e_1,e_2,e_3,e_4)
- (f_1,f_2,f_3,f_4) (default basis)
"""
if not self._known_bases:
print("No basis has been defined on the {}".format(self))
else:
print("Bases defined on the {}:".format(self))
for basis in self._known_bases:
item = " - " + basis._name
if basis is self._def_basis:
item += " (default basis)"
print(item)
    def bases(self):
        r"""
        Return the list of bases that have been defined on the free module
        ``self``.

        Use the method :meth:`print_bases` to get a formatted output with more
        information.

        OUTPUT:

        - list of instances of class
          :class:`~sage.tensor.modules.free_module_basis.FreeModuleBasis`

        EXAMPLES:

        Bases on a rank-3 free module::

            sage: M = FiniteRankFreeModule(ZZ, 3, name='M_3', start_index=1)
            sage: M.bases()
            []
            sage: e = M.basis('e')
            sage: M.bases()
            [Basis (e_1,e_2,e_3) on the Rank-3 free module M_3 over the Integer Ring]
            sage: f = M.basis('f')
            sage: M.bases()
            [Basis (e_1,e_2,e_3) on the Rank-3 free module M_3 over the Integer Ring,
             Basis (f_1,f_2,f_3) on the Rank-3 free module M_3 over the Integer Ring]
        """
        # NOTE(review): this returns the internal list itself (not a copy),
        # so callers can mutate the module's basis registry — confirm this
        # is intended before changing it.
        return self._known_bases
    def change_of_basis(self, basis1, basis2):
        r"""
        Return a module automorphism linking two bases defined on the free
        module ``self``.

        If the automorphism has not been recorded yet (in the internal
        dictionary ``self._basis_changes``), it is computed by transitivity,
        i.e. by performing products of recorded changes of basis.

        INPUT:

        - ``basis1`` -- a basis of ``self``, denoted `(e_i)` below
        - ``basis2`` -- a basis of ``self``, denoted `(f_i)` below

        OUTPUT:

        - instance of
          :class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`
          describing the automorphism `P` that relates the basis `(e_i)` to the
          basis `(f_i)` according to `f_i = P(e_i)`

        EXAMPLES:

        Changes of basis on a rank-2 free module::

            sage: FiniteRankFreeModule._clear_cache_()  # for doctests only
            sage: M = FiniteRankFreeModule(ZZ, 2, name='M', start_index=1)
            sage: e = M.basis('e')
            sage: f = M.basis('f', from_family=(e[1]+2*e[2], e[1]+3*e[2]))
            sage: P = M.change_of_basis(e,f) ; P
            Automorphism of the Rank-2 free module M over the Integer Ring
            sage: P.matrix(e)
            [1 1]
            [2 3]

        The change of basis is cached::

            sage: P is M.change_of_basis(e,f)
            True

        Check of the reverse change of basis::

            sage: M.change_of_basis(f,e) == P^(-1)
            True

        We have of course::

            sage: M.change_of_basis(e,e)
            Identity map of the Rank-2 free module M over the Integer Ring
            sage: M.change_of_basis(e,e) is M.identity_map()
            True

        Let us introduce a third basis on ``M``::

            sage: h = M.basis('h', from_family=(3*e[1]+4*e[2], 5*e[1]+7*e[2]))

        The change of basis ``f`` --> ``h`` is computed by transitivity, i.e.
        from the changes of basis ``f`` --> ``e`` and ``e`` --> ``h``::

            sage: Q = M.change_of_basis(e,h)
            sage: R = M.change_of_basis(f,h) ; R
            Automorphism of the Rank-2 free module M over the Integer Ring
            sage: R == Q*P^(-1)
            True
        """
        # Trivial case: a basis is related to itself by the identity.
        if basis1 == basis2:
            return self.identity_map()
        bc = self._basis_changes  # cache of already-known changes of basis
        if (basis1, basis2) not in bc:
            if basis1 not in self._known_bases:
                raise TypeError("{} is not a basis of the {}".format(basis1,
                                                                    self))
            if basis2 not in self._known_bases:
                raise TypeError("{} is not a basis of the {}".format(basis2,
                                                                    self))
            # Is the inverse already registered? If so, invert it and
            # record the result for future calls.
            if (basis2, basis1) in bc:
                inv = bc[(basis2, basis1)].inverse()
                bc[(basis1, basis2)] = inv
                return inv
            # Search for a third basis, basis say, such that either the changes
            #  basis1 --> basis  and  basis --> basis2
            # or
            #  basis2 --> basis  and  basis --> basis1
            # are known (single-hop transitivity only; longer chains are
            # not explored):
            for basis in self._known_bases:
                if (basis1, basis) in bc and (basis, basis2) in bc:
                    # Compose the two known changes; cache both directions.
                    transf = bc[(basis, basis2)] * bc[(basis1, basis)]
                    bc[(basis1, basis2)] = transf
                    bc[(basis2, basis1)] = transf.inverse()
                    break
                if (basis2, basis) in bc and (basis, basis1) in bc:
                    inv = bc[(basis, basis1)] * bc[(basis2, basis)]
                    bc[(basis2, basis1)] = inv
                    bc[(basis1, basis2)] = inv.inverse()
                    break
            else:
                # for-else: no intermediate basis found.
                raise ValueError(("the change of basis from '{!r}' to '{!r}'"
                                 + " cannot be computed"
                                 ).format(basis1, basis2))
        return bc[(basis1, basis2)]
def set_change_of_basis(self, basis1, basis2, change_of_basis,
compute_inverse=True):
r"""
Relates two bases by an automorphism of ``self``.
This updates the internal dictionary ``self._basis_changes``.
INPUT:
- ``basis1`` -- basis 1, denoted `(e_i)` below
- ``basis2`` -- basis 2, denoted `(f_i)` below
- ``change_of_basis`` -- instance of class
:class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`
describing the automorphism `P` that relates the basis `(e_i)` to
the basis `(f_i)` according to `f_i = P(e_i)`
- ``compute_inverse`` (default: ``True``) -- if set to ``True``, the
inverse automorphism is computed and the change from basis `(f_i)`
to `(e_i)` is set to it in the internal dictionary
``self._basis_changes``
EXAMPLES:
Defining a change of basis on a rank-2 free module::
sage: M = FiniteRankFreeModule(QQ, 2, name='M')
sage: e = M.basis('e')
sage: f = M.basis('f')
sage: a = M.automorphism()
sage: a[:] = [[1, 2], [-1, 3]]
sage: M.set_change_of_basis(e, f, a)
The change of basis and its inverse have been recorded::
sage: M.change_of_basis(e,f).matrix(e)
[ 1 2]
[-1 3]
sage: M.change_of_basis(f,e).matrix(e)
[ 3/5 -2/5]
[ 1/5 1/5]
and are effective::
sage: f[0].display(e)
f_0 = e_0 - e_1
sage: e[0].display(f)
e_0 = 3/5 f_0 + 1/5 f_1
"""
if basis1 not in self._known_bases:
raise TypeError("{} is not a basis of the {}".format(basis1,
self))
if basis2 not in self._known_bases:
raise TypeError("{} is not a basis of the {}".format(basis2,
self))
if change_of_basis not in self.general_linear_group():
raise TypeError("{} is not an automorphism of the {}".format(
change_of_basis, self))
self._basis_changes[(basis1, basis2)] = change_of_basis
if compute_inverse:
self._basis_changes[(basis2, basis1)] = change_of_basis.inverse()
def hom(self, codomain, matrix_rep, bases=None, name=None,
latex_name=None):
r"""
Homomorphism from ``self`` to a free module.
Define a module homomorphism
.. MATH::
\phi:\ M \longrightarrow N,
where `M` is ``self`` and `N` is a free module of finite rank
over the same ring `R` as ``self``.
.. NOTE::
This method is a redefinition of
:meth:`sage.structure.parent.Parent.hom` because the latter assumes
that ``self`` has some privileged generators, while an instance of
:class:`FiniteRankFreeModule` has no privileged basis.
INPUT:
- ``codomain`` -- the target module `N`
- ``matrix_rep`` -- matrix of size rank(N)*rank(M) representing the
homomorphism with respect to the pair of bases defined by ``bases``;
this entry can actually be any material from which a matrix of
elements of `R` can be constructed; the *columns* of
``matrix_rep`` must be the components w.r.t. ``basis_N`` of
the images of the elements of ``basis_M``.
- ``bases`` -- (default: ``None``) pair ``(basis_M, basis_N)`` defining
the matrix representation, ``basis_M`` being a basis of ``self`` and
``basis_N`` a basis of module `N` ; if None the pair formed by the
default bases of each module is assumed.
- ``name`` -- (default: ``None``) string; name given to the
homomorphism
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to denote
the homomorphism; if None, ``name`` will be used.
OUTPUT:
- the homomorphism `\phi: M \rightarrow N` corresponding to the given
specifications, as an instance of
:class:`~sage.tensor.modules.free_module_morphism.FiniteRankFreeModuleMorphism`
EXAMPLES:
Homomorphism between two free modules over `\ZZ`::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: N = FiniteRankFreeModule(ZZ, 2, name='N')
sage: e = M.basis('e')
sage: f = N.basis('f')
sage: phi = M.hom(N, [[-1,2,0], [5,1,2]]) ; phi
Generic morphism:
From: Rank-3 free module M over the Integer Ring
To: Rank-2 free module N over the Integer Ring
Homomorphism defined by a matrix w.r.t. bases that are not the
default ones::
sage: ep = M.basis('ep', latex_symbol=r"e'")
sage: fp = N.basis('fp', latex_symbol=r"f'")
sage: phi = M.hom(N, [[3,2,1], [1,2,3]], bases=(ep, fp)) ; phi
Generic morphism:
From: Rank-3 free module M over the Integer Ring
To: Rank-2 free module N over the Integer Ring
Call with all arguments specified::
sage: phi = M.hom(N, [[3,2,1], [1,2,3]], bases=(ep, fp),
....: name='phi', latex_name=r'\phi')
The parent::
sage: phi.parent() is Hom(M,N)
True
See class
:class:`~sage.tensor.modules.free_module_morphism.FiniteRankFreeModuleMorphism`
for more documentation.
"""
from sage.categories.homset import Hom
homset = Hom(self, codomain)
return homset(matrix_rep, bases=bases, name=name,
latex_name=latex_name)
def endomorphism(self, matrix_rep, basis=None, name=None, latex_name=None):
r"""
Construct an endomorphism of the free module ``self``.
The returned object is a module morphism `\phi: M \rightarrow M`,
where `M` is ``self``.
INPUT:
- ``matrix_rep`` -- matrix of size rank(M)*rank(M) representing the
endomorphism with respect to ``basis``;
this entry can actually be any material from which a matrix of
elements of ``self`` base ring can be constructed; the *columns* of
``matrix_rep`` must be the components w.r.t. ``basis`` of
the images of the elements of ``basis``.
- ``basis`` -- (default: ``None``) basis of ``self`` defining the
matrix representation; if None the default basis of ``self`` is
assumed.
- ``name`` -- (default: ``None``) string; name given to the
endomorphism
- ``latex_name`` -- (default: ``None``) string; LaTeX symbol to denote
the endomorphism; if none is provided, ``name`` will be used.
OUTPUT:
- the endomorphism `\phi: M \rightarrow M` corresponding to the given
specifications, as an instance of
:class:`~sage.tensor.modules.free_module_morphism.FiniteRankFreeModuleMorphism`
EXAMPLES:
Construction of an endomorphism with minimal data (module's default
basis and no name)::
sage: M = FiniteRankFreeModule(ZZ, 2, name='M')
sage: e = M.basis('e')
sage: phi = M.endomorphism([[1,-2], [-3,4]]) ; phi
Generic endomorphism of Rank-2 free module M over the Integer Ring
sage: phi.matrix() # matrix w.r.t the default basis
[ 1 -2]
[-3 4]
Construction with full list of arguments (matrix given a basis
different from the default one)::
sage: a = M.automorphism() ; a[0,1], a[1,0] = 1, -1
sage: ep = e.new_basis(a, 'ep', latex_symbol="e'")
sage: phi = M.endomorphism([[1,-2], [-3,4]], basis=ep, name='phi',
....: latex_name=r'\phi')
sage: phi
Generic endomorphism of Rank-2 free module M over the Integer Ring
sage: phi.matrix(ep) # the input matrix
[ 1 -2]
[-3 4]
sage: phi.matrix() # matrix w.r.t the default basis
[4 3]
[2 1]
See :class:`~sage.tensor.modules.free_module_morphism.FiniteRankFreeModuleMorphism`
for more documentation.
"""
from sage.categories.homset import End
if basis is None:
basis = self.default_basis()
return End(self)(matrix_rep, bases=(basis,basis), name=name,
latex_name=latex_name)
def identity_map(self, name='Id', latex_name=None):
r"""
Return the identity map of the free module ``self``.
INPUT:
- ``name`` -- (string; default: 'Id') name given to the identity
identity map
- ``latex_name`` -- (string; default: ``None``) LaTeX symbol to denote
the identity map; if none is provided, the LaTeX symbol is set to
'\mathrm{Id}' if ``name`` is 'Id' and to ``name`` otherwise
OUTPUT:
- the identity map of ``self`` as an instance of
:class:`~sage.tensor.modules.free_module_automorphism.FreeModuleAutomorphism`
EXAMPLES:
Identity map of a rank-3 `\ZZ`-module::
sage: M = FiniteRankFreeModule(ZZ, 3, name='M')
sage: e = M.basis('e')
sage: Id = M.identity_map() ; Id
Identity map of the Rank-3 free module M over the Integer Ring
sage: Id.parent()
General linear group of the Rank-3 free module M over the Integer Ring
sage: Id.matrix(e)
[1 0 0]
[0 1 0]
[0 0 1]
The default LaTeX symbol::
sage: latex(Id)
\mathrm{Id}
It can be changed by means of the method
:meth:`~sage.tensor.modules.free_module_tensor.FreeModuleTensor.set_name`::
sage: Id.set_name(latex_name=r'\mathrm{1}_M')
sage: latex(Id)
\mathrm{1}_M
The identity map is actually the identity element of GL(M)::
sage: Id is M.general_linear_group().one()
True
It is also a tensor of type-`(1,1)` on M::
sage: Id.tensor_type()
(1, 1)
sage: Id.comp(e)
Kronecker delta of size 3x3
sage: Id[:]
[1 0 0]
[0 1 0]
[0 0 1]
Example with a LaTeX symbol different from the default one and set
at the creation of the object::
sage: N = FiniteRankFreeModule(ZZ, 3, name='N')
sage: f = N.basis('f')
sage: Id = N.identity_map(name='Id_N', latex_name=r'\mathrm{Id}_N')
sage: Id
Identity map of the Rank-3 free module N over the Integer Ring
sage: latex(Id)
\mathrm{Id}_N
"""
if self._identity_map is None:
self._identity_map = self.general_linear_group().one()
if name != 'Id':
if latex_name is None:
latex_name = name
self._identity_map.set_name(name=name, latex_name=latex_name)
return self._identity_map
| 37.132657 | 110 | 0.576763 |
ace697af66ec3524a798d65803a9cb4106335136 | 998 | py | Python | feder/institutions/migrations/0016_auto_20180325_2244.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 16 | 2015-08-11T17:20:26.000Z | 2022-02-11T20:15:41.000Z | feder/institutions/migrations/0016_auto_20180325_2244.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 534 | 2015-08-04T00:10:54.000Z | 2022-03-17T10:44:47.000Z | feder/institutions/migrations/0016_auto_20180325_2244.py | dzemeuksis/feder | 32ef7793af6256d4ecada61505c7baf334b34419 | [
"MIT"
] | 10 | 2017-08-30T13:34:32.000Z | 2022-02-18T13:00:35.000Z | # Generated by Django 2.0.3 on 2018-03-25 22:44
import autoslug.fields
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
    # Auto-generated Django schema migration for the ``institutions`` app:
    # re-declares the slug/extra field definitions (no data migration).

    dependencies = [("institutions", "0015_auto_20170830_1408")]

    operations = [
        # Institution.extra: free-form JSON blob, optional.
        migrations.AlterField(
            model_name="institution",
            name="extra",
            field=jsonfield.fields.JSONField(
                blank=True, verbose_name="Unorganized additional information"
            ),
        ),
        # Institution.slug: auto-populated from ``name``, must be unique.
        migrations.AlterField(
            model_name="institution",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                editable=False, populate_from="name", unique=True, verbose_name="Slug"
            ),
        ),
        # Tag.slug: auto-populated from ``name`` (uniqueness not enforced here).
        migrations.AlterField(
            model_name="tag",
            name="slug",
            field=autoslug.fields.AutoSlugField(
                editable=False, populate_from="name", verbose_name="Slug"
            ),
        ),
    ]
| 28.514286 | 86 | 0.582164 |
ace6997e184fe38054b0affeed1b563309d0b532 | 5,930 | py | Python | exercises/classifiers_evaluation.py | Sefi4/IML.HUJI | c93111cce6568d84560ba88a2d2e1a6d34035746 | [
"MIT"
] | null | null | null | exercises/classifiers_evaluation.py | Sefi4/IML.HUJI | c93111cce6568d84560ba88a2d2e1a6d34035746 | [
"MIT"
] | null | null | null | exercises/classifiers_evaluation.py | Sefi4/IML.HUJI | c93111cce6568d84560ba88a2d2e1a6d34035746 | [
"MIT"
] | null | null | null | from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes
# Standard library
import os
from math import atan2, pi
from typing import Tuple

# Third-party
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# Project-local
from utils import *
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers.

    The file is assumed to hold an ndarray of shape (n_samples, 3) whose
    first two columns are features and whose third column is the class.

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used

    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    samples = np.load(filename)
    features = samples[:, :2]
    # Labels are stored as floats in the file; cast them to integers.
    labels = samples[:, 2].astype(int)
    return features, labels
def run_perceptron():
    """
    Fit and plot fit progression of the Perceptron algorithm over both the
    linearly separable and inseparable datasets.

    For each dataset, creates a line plot of the perceptron's training loss
    (y-axis) as a function of the training iteration (x-axis).
    """
    for n, f in [("Linearly Separable", "linearly_separable.npy"),
                 ("Linearly Inseparable", "linearly_inseparable.npy")]:
        # Load dataset. Use a path relative to this script instead of a
        # machine-specific absolute Windows path (which also contained
        # invalid string escapes such as ``\d``).
        X, Y = load_dataset(os.path.join("..", "datasets", f))

        # Fit Perceptron and record loss in each fit iteration via callback.
        losses = []
        p = Perceptron(callback=lambda p, _, __: losses.append(p.loss(X, Y)))
        p.fit(X, Y)

        # Plot figure. One x value per recorded loss: the original
        # ``range(1, len(losses))`` produced len(losses)-1 values and
        # silently dropped the last loss from the plot.
        fig = go.Figure()
        fig.add_scatter(x=list(range(1, len(losses) + 1)), y=losses)
        fig.update_layout(
            title=f'Preceptron losss over {n} data as a function '
                  f'of training iterations', xaxis_title='iterations',
            yaxis_title='loss')
        fig.show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Draw an ellipse centered at given location and according to specified covariance matrix

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse

    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # Eigenvalues of the (symmetric) covariance in decreasing order give
    # the two semi-axis lengths of the ellipse.
    major, minor = np.linalg.eigvalsh(cov)[::-1]
    # Orientation of the major axis; the off-diagonal-zero case is handled
    # separately to avoid atan2(., 0) ambiguity.
    if cov[0, 1] != 0:
        angle = atan2(major - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        angle = np.pi / 2
    else:
        angle = 0
    t = np.linspace(0, 2 * pi, 100)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    # Parametric ellipse, rotated by ``angle`` and shifted to ``mu``.
    xs = major * cos_a * np.cos(t) - minor * sin_a * np.sin(t)
    ys = major * sin_a * np.cos(t) + minor * cos_a * np.sin(t)
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines",
                      marker_color="black")
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and
    gaussians2 datasets and plot their predictions side by side.

    For each dataset a 1x2 figure is shown: marker color encodes the predicted
    class, marker symbol encodes the true class, and every fitted class
    Gaussian is drawn as an ellipse with an 'x' at its mean.
    """
    # Hoisted out of the loop: the import is loop-invariant.
    from IMLearn.metrics import accuracy

    for f in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset (raw string avoids invalid escape sequences in the
        # Windows path; the path value itself is unchanged)
        X, y = load_dataset(rf'C:\Projects\IML\IML.HUJI\datasets\{f}')

        # Fit models and predict over training set
        lda = LDA()
        lda.fit(X, y)
        lda_pred = lda.predict(X)
        g = GaussianNaiveBayes()
        g.fit(X, y)
        g_pred = g.predict(X)

        # Plot a figure with two subplots: LDA predictions on the left,
        # Gaussian Naive Bayes on the right; titles report training accuracy.
        fig = make_subplots(rows=1, cols=2,
                            subplot_titles=(f'LDA, accuracy: '
                                            f'{accuracy(y, lda_pred)}',
                                            f'Gaussian Naive Bayes, accuracy'
                                            f' {accuracy(y, g_pred)}'))
        fig.add_trace(
            go.Scatter(
                x=X[:, 0], y=X[:, 1], mode="markers",
                showlegend=False,
                marker=dict(color=lda_pred, symbol=y,
                            line=dict(color="black", width=1))),
            row=1, col=1)
        fig.add_trace(
            go.Scatter(
                x=X[:, 0], y=X[:, 1], mode="markers",
                showlegend=False,
                marker=dict(color=g_pred, symbol=y,
                            line=dict(color="black", width=1))),
            row=1, col=2)

        # Add ellipses for each class and classifier. LDA shares a single
        # covariance across classes; naive Bayes has per-class diagonal
        # variances, hence np.diag(g.vars_[k]).
        for k in range(len(lda.classes_)):
            fig.add_trace(get_ellipse(lda.mu_[k], lda.cov_), row=1, col=1)
            fig.add_trace(get_ellipse(g.mu_[k], np.diag(g.vars_[k])), row=1,
                          col=2)

        # Mark each fitted class mean with an 'x'
        fig.add_trace(
            go.Scatter(x=lda.mu_[:, 0], y=lda.mu_[:, 1], mode="markers",
                       marker_size=10,
                       showlegend=False,
                       marker=dict(color='black', symbol='x')),
            row=1, col=1)
        fig.add_trace(
            go.Scatter(x=g.mu_[:, 0], y=g.mu_[:, 1], mode="markers",
                       marker_size=10,
                       showlegend=False,
                       marker=dict(color='black', symbol='x')),
            row=1, col=2)
        fig.update_layout(title_text=f, xaxis_title='Feature 1',
                          yaxis_title='Feature 2')
        fig.show()
if __name__ == '__main__':
    # Fixed seed so both exercises are reproducible from run to run.
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
    # Removed leftover commented-out scratch code (coefs/xi dot-product test).
| 34.882353 | 119 | 0.563744 |
ace69a2ca4a989f2d4adcb9b20fbe696b38bf81f | 1,917 | py | Python | examples/xgboost_examples/regression.py | PaulGureghian1/Hyperparameter_Hunter | ab06844b72f8424d82cf7b26493d4cabf0d2eee8 | [
"MIT"
] | 1 | 2019-01-26T00:18:58.000Z | 2019-01-26T00:18:58.000Z | examples/xgboost_examples/regression.py | PaulGureghian1/Hyperparameter_Hunter | ab06844b72f8424d82cf7b26493d4cabf0d2eee8 | [
"MIT"
] | null | null | null | examples/xgboost_examples/regression.py | PaulGureghian1/Hyperparameter_Hunter | ab06844b72f8424d82cf7b26493d4cabf0d2eee8 | [
"MIT"
] | null | null | null | from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter import GBRT, Real, Integer, Categorical
import pandas as pd
from sklearn.datasets import load_diabetes
from xgboost import XGBRegressor
#################### Format DataFrame ####################
# Combine the diabetes features and the regression target into a single
# DataFrame: HyperparameterHunter expects one `train_dataset` with the
# target as a named column ("progression").
data = load_diabetes()
train_df = pd.DataFrame(data=data.data, columns=data.feature_names)
train_df["progression"] = data.target

#################### Set Up Environment ####################
# The Environment is the shared experiment context: dataset, target column,
# metric, and cross-validation scheme used by everything declared below.
# Results are persisted under `root_results_path`.
env = Environment(
    train_dataset=train_df,
    root_results_path="HyperparameterHunterAssets",
    target_column="progression",
    metrics_map=["mean_absolute_error"],
    cross_validation_type="KFold",
    cross_validation_params=dict(n_splits=12, shuffle=True, random_state=32),
    runs=2,  # NOTE(review): presumably repeats each CV evaluation twice — confirm against HH docs
)

# Now that HyperparameterHunter has an active `Environment`, we can do two things:

#################### 1. Perform Experiments ####################
# One concrete XGBoost configuration, cross-validated under `env`.
experiment = CVExperiment(
    model_initializer=XGBRegressor,
    model_init_params=dict(max_depth=4, n_estimators=400, subsample=0.5),
    model_extra_params=dict(fit=dict(eval_metric="mae")),
)

# And/or...
#################### 2. Hyperparameter Optimization ####################
# Search for 20 iterations. Plain values (subsample=0.5) are held fixed;
# Integer/Real/Categorical declare the search dimensions.
optimizer = GBRT(iterations=20, random_state=32)
optimizer.set_experiment_guidelines(
    model_initializer=XGBRegressor,
    model_init_params=dict(
        max_depth=Integer(2, 20),
        n_estimators=Integer(100, 900),
        learning_rate=Real(0.0001, 0.5),
        subsample=0.5,
        booster=Categorical(["gbtree", "gblinear"]),
    ),
    model_extra_params=dict(fit=dict(eval_metric=Categorical(["rmse", "mae"]))),
)
optimizer.go()

# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search
# space/guidelines set for `optimizer`.

# Then, when optimization is started, it automatically learns from `experiment`'s results
# - without any extra work for us!
| 36.865385 | 97 | 0.691706 |
ace69b023484283f1563c94466dbfbcb979a4898 | 8,860 | py | Python | pytorch_lightning/callbacks/gpu_stats_monitor.py | alanhdu/pytorch-lightning | 0e45220263f4e2045dfe7f68e3e0eaac0b2033d5 | [
"Apache-2.0"
] | 3 | 2021-04-09T14:03:03.000Z | 2021-04-10T02:58:23.000Z | pytorch_lightning/callbacks/gpu_stats_monitor.py | ethanwharris/pytorch-lightning | b7a22ba046ba57072a71b12d16caff000e66f798 | [
"Apache-2.0"
] | 1 | 2021-03-01T17:32:12.000Z | 2021-03-01T17:32:12.000Z | pytorch_lightning/callbacks/gpu_stats_monitor.py | ethanwharris/pytorch-lightning | b7a22ba046ba57072a71b12d16caff000e66f798 | [
"Apache-2.0"
] | 1 | 2021-02-16T00:47:46.000Z | 2021-02-16T00:47:46.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GPU Stats Monitor
=================
Monitor and logs GPU stats during training.
"""
import os
import shutil
import subprocess
import time
from typing import Dict, List, Tuple
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import DeviceType, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict
class GPUStatsMonitor(Callback):
    r"""
    Automatically monitors and logs GPU stats during training stage. ``GPUStatsMonitor``
    is a callback and in order to use it you need to assign a logger in the ``Trainer``.

    Args:
        memory_utilization: Set to ``True`` to monitor used, free and percentage of memory
            utilization at the start and end of each step. Default: ``True``.
        gpu_utilization: Set to ``True`` to monitor percentage of GPU utilization
            at the start and end of each step. Default: ``True``.
        intra_step_time: Set to ``True`` to monitor the time of each step. Default: ``False``.
        inter_step_time: Set to ``True`` to monitor the time between the end of one step
            and the start of the next step. Default: ``False``.
        fan_speed: Set to ``True`` to monitor percentage of fan speed. Default: ``False``.
        temperature: Set to ``True`` to monitor the memory and gpu temperature in degree Celsius.
            Default: ``False``.

    Raises:
        MisconfigurationException:
            If NVIDIA driver is not installed, not running on GPUs, or ``Trainer`` has no logger.

    Example::

        >>> from pytorch_lightning import Trainer
        >>> from pytorch_lightning.callbacks import GPUStatsMonitor
        >>> gpu_stats = GPUStatsMonitor() # doctest: +SKIP
        >>> trainer = Trainer(callbacks=[gpu_stats]) # doctest: +SKIP

    GPU stats are mainly based on `nvidia-smi --query-gpu` command. The description of the queries is as follows:

    - **fan.speed** – The fan speed value is the percent of maximum speed that the device's fan is currently
      intended to run at. It ranges from 0 to 100 %. Note: The reported speed is the intended fan speed.
      If the fan is physically blocked and unable to spin, this output will not match the actual fan speed.
      Many parts do not report fan speeds because they rely on cooling via fans in the surrounding enclosure.
    - **memory.used** – Total memory allocated by active contexts.
    - **memory.free** – Total free memory.
    - **utilization.gpu** – Percent of time over the past sample period during which one or more kernels was
      executing on the GPU. The sample period may be between 1 second and 1/6 second depending on the product.
    - **utilization.memory** – Percent of time over the past sample period during which global (device) memory was
      being read or written. The sample period may be between 1 second and 1/6 second depending on the product.
    - **temperature.gpu** – Core GPU temperature, in degrees C.
    - **temperature.memory** – HBM memory temperature, in degrees C.
    """

    def __init__(
        self,
        memory_utilization: bool = True,
        gpu_utilization: bool = True,
        intra_step_time: bool = False,
        inter_step_time: bool = False,
        fan_speed: bool = False,
        temperature: bool = False
    ):
        super().__init__()

        # Fail fast at construction time if nvidia-smi is unavailable.
        if shutil.which('nvidia-smi') is None:
            raise MisconfigurationException(
                'Cannot use GPUStatsMonitor callback because NVIDIA driver is not installed.'
            )

        self._log_stats = AttributeDict({
            'memory_utilization': memory_utilization,
            'gpu_utilization': gpu_utilization,
            'intra_step_time': intra_step_time,
            'inter_step_time': inter_step_time,
            'fan_speed': fan_speed,
            'temperature': temperature
        })

    def on_train_start(self, trainer, *args, **kwargs):
        if not trainer.logger:
            raise MisconfigurationException('Cannot use GPUStatsMonitor callback with Trainer that has no logger.')

        if trainer._device_type != DeviceType.GPU:
            raise MisconfigurationException(
                'You are using GPUStatsMonitor but are not running on GPU'
                f' since gpus attribute in Trainer is set to {trainer.gpus}.'
            )

        # Comma-separated device id list passed to nvidia-smi's --id flag.
        self._gpu_ids = ','.join(map(str, trainer.data_parallel_device_ids))

    def on_train_epoch_start(self, *args, **kwargs):
        self._snap_intra_step_time = None
        self._snap_inter_step_time = None

    @rank_zero_only
    def on_train_batch_start(self, trainer, *args, **kwargs):
        if self._log_stats.intra_step_time:
            self._snap_intra_step_time = time.time()

        if not self._should_log(trainer):
            return

        gpu_stat_keys = self._get_gpu_stat_keys()
        gpu_stats = self._get_gpu_stats([k for k, _ in gpu_stat_keys])
        logs = self._parse_gpu_stats(self._gpu_ids, gpu_stats, gpu_stat_keys)

        if self._log_stats.inter_step_time and self._snap_inter_step_time:
            # First log at beginning of second step
            logs['batch_time/inter_step (ms)'] = (time.time() - self._snap_inter_step_time) * 1000

        trainer.logger.log_metrics(logs, step=trainer.global_step)

    @rank_zero_only
    def on_train_batch_end(self, trainer, *args, **kwargs):
        if self._log_stats.inter_step_time:
            self._snap_inter_step_time = time.time()

        if not self._should_log(trainer):
            return

        gpu_stat_keys = self._get_gpu_stat_keys() + self._get_gpu_device_stat_keys()
        gpu_stats = self._get_gpu_stats([k for k, _ in gpu_stat_keys])
        logs = self._parse_gpu_stats(self._gpu_ids, gpu_stats, gpu_stat_keys)

        if self._log_stats.intra_step_time and self._snap_intra_step_time:
            logs['batch_time/intra_step (ms)'] = (time.time() - self._snap_intra_step_time) * 1000

        trainer.logger.log_metrics(logs, step=trainer.global_step)

    def _get_gpu_stats(self, queries: List[str]) -> List[List[float]]:
        """Run nvidia-smi to get the gpu stats"""
        gpu_query = ','.join(queries)
        # Renamed from `format`, which shadowed the builtin of the same name.
        output_format = 'csv,nounits,noheader'
        result = subprocess.run(
            [shutil.which('nvidia-smi'), f'--query-gpu={gpu_query}', f'--format={output_format}', f'--id={self._gpu_ids}'],
            encoding="utf-8",
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,  # for backward compatibility with python version 3.6
            check=True
        )

        def _to_float(x: str) -> float:
            try:
                return float(x)
            except ValueError:
                return 0.

        # FIX: use splitlines() instead of split(os.linesep). With an
        # encoding set, subprocess output uses universal newlines ('\n')
        # on every platform, while os.linesep is '\r\n' on Windows — the
        # old split produced a single unsplit blob there.
        stats = result.stdout.strip().splitlines()
        stats = [[_to_float(x) for x in s.split(', ')] for s in stats]
        return stats

    @staticmethod
    def _parse_gpu_stats(gpu_ids: str, stats: List[List[float]], keys: List[Tuple[str, str]]) -> Dict[str, float]:
        """Parse the gpu stats into a loggable dict"""
        logs = {}
        for i, gpu_id in enumerate(gpu_ids.split(',')):
            for j, (x, unit) in enumerate(keys):
                logs[f'gpu_id: {gpu_id}/{x} ({unit})'] = stats[i][j]
        return logs

    def _get_gpu_stat_keys(self) -> List[Tuple[str, str]]:
        """Get the GPU stats keys"""
        stat_keys = []

        if self._log_stats.gpu_utilization:
            stat_keys.append(('utilization.gpu', '%'))

        if self._log_stats.memory_utilization:
            stat_keys.extend([('memory.used', 'MB'), ('memory.free', 'MB'), ('utilization.memory', '%')])

        return stat_keys

    def _get_gpu_device_stat_keys(self) -> List[Tuple[str, str]]:
        """Get the device stats keys"""
        stat_keys = []

        if self._log_stats.fan_speed:
            stat_keys.append(('fan.speed', '%'))

        if self._log_stats.temperature:
            stat_keys.extend([('temperature.gpu', '°C'), ('temperature.memory', '°C')])

        return stat_keys

    @staticmethod
    def _should_log(trainer) -> bool:
        # Log every `log_every_n_steps` steps, and always on the final step.
        should_log = ((trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop)
        return should_log
| 41.209302 | 116 | 0.656433 |
ace69b7f24e1a433c8a266859750a123efe3bd78 | 5,523 | py | Python | matrx/logger/logger.py | thaije/matrx | a50441f4ddc84b13395bc2013dba1a7edd617d98 | [
"MIT"
] | 1 | 2020-01-31T17:16:08.000Z | 2020-01-31T17:16:08.000Z | matrx/logger/logger.py | thaije/matrx | a50441f4ddc84b13395bc2013dba1a7edd617d98 | [
"MIT"
] | 88 | 2019-12-12T14:06:39.000Z | 2020-02-27T13:42:34.000Z | matrx/logger/logger.py | thaije/matrx | a50441f4ddc84b13395bc2013dba1a7edd617d98 | [
"MIT"
] | null | null | null | import csv
import datetime
import os
class GridWorldLogger:
    """Base class for loggers that periodically write GridWorld data to a
    delimited (CSV-style) file.

    Subclasses override :meth:`log` to return a dict of column -> value;
    this class decides *when* to log (see the log strategies below) and
    handles file naming, directory creation and row writing.
    """

    # Log-strategy markers. `log_strategy` may also be a plain int, which
    # means "log every N ticks".
    LOG_ON_LAST_TICK = "log_last_tick"
    LOG_ON_FIRST_TICK = "log_first_tick"
    LOG_ON_GOAL_REACHED = "log_on_reached_goal"

    def __init__(self, log_strategy=1, save_path="/logs", file_name="", file_extension=".csv", delimiter=";"):
        """Configure when and where this logger writes its rows.

        :param log_strategy: int (tick frequency) or one of the LOG_ON_*
            markers above.
        :param save_path: base output directory (a per-world subdirectory is
            appended later by :meth:`_set_world_nr`).
        :param file_name: prefix for the output file; a timestamp is appended.
        :param file_extension: output file extension, e.g. ".csv".
        :param delimiter: column delimiter used by the csv writer.
        """
        self.__log_strategy = log_strategy
        self.__save_path = save_path
        self.__file_name_prefix = file_name
        self.__file_extension = file_extension
        self.__delimiter = delimiter

        # Create the file name
        current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
        self.__file_name = f"{file_name}_{current_time}{file_extension}"

        self.__last_logged_tick = -1  # to track when we logged last
        self.__columns = []  # place holder for the columns in our data file
        self.__prev_goal_status = {}  # to track if a goal was accomplished since last call

    def log(self, grid_world, agent_data):
        """Hook for subclasses: return a dict of column -> value to write.

        Returning an empty dict (the default) writes nothing for this tick.
        """
        return {}

    def _grid_world_log(self, grid_world, agent_data, last_tick=False, goal_status=None):
        """Called by the GridWorld each tick; writes one row when the
        configured strategy says it is time and :meth:`log` returned data."""
        if not self._needs_to_log(grid_world, last_tick, goal_status):
            return

        data = self.log(grid_world, agent_data)
        if data is None or data == {}:
            return

        self.__check_data(data)
        self.__write_data(data, grid_world.current_nr_ticks)

    def _needs_to_log(self, grid_world, last_tick, goal_status):
        """Decide whether the current tick should be logged under the
        configured strategy. May update internal bookkeeping as a side
        effect (last logged tick / previous goal status)."""
        current_tick = grid_world.current_nr_ticks

        # If the strategy is a tick frequency, check if enough ticks have passed
        if isinstance(self.__log_strategy, int):
            # the current nr ticks minus the tick we last logged should be smaller or equal to our frequency
            to_log = (current_tick - self.__last_logged_tick) >= self.__log_strategy
            if to_log:
                self.__last_logged_tick = current_tick

        # if the strategy is to log at the first tick, we do so if the current tick is zero
        elif self.__log_strategy == self.LOG_ON_FIRST_TICK:
            to_log = current_tick == 0

        # if the strategy is to log whenever one of the goals was reached
        elif self.__log_strategy == self.LOG_ON_GOAL_REACHED:
            # NOTE(review): this branch assumes `goal_status` is a dict; a
            # None here would raise — confirm callers always pass it when
            # this strategy is configured.
            to_log = False
            # we loop over all goals and see it's status became True whereas it was the previous time False
            for goal, status in goal_status.items():
                if goal in self.__prev_goal_status.keys():
                    if status and not self.__prev_goal_status[goal]:
                        to_log = True
                        break
            self.__prev_goal_status = goal_status.copy()

        # If we log on the last tick, only if the GridWorld says it is done
        elif self.__log_strategy == self.LOG_ON_LAST_TICK:
            to_log = last_tick

        # If the strategy is not found, we return an exception
        else:
            raise Exception(f"The log strategy {self.__log_strategy} is not recognized. Should be an integer or one of"
                            f"the GridWorld.ON_LOG_<...> values.")

        return to_log

    def _set_world_nr(self, world_nr):
        """Prepare the per-world output directory and finalize the file path.

        Called once per world (by the WorldBuilder) before any row is written.
        """
        # Set the world number
        self.__world_nr = world_nr

        # Create the total file name path based on the world number (set by the WorldBuilder on logger creation)
        self.__save_path = f"{self.__save_path}{os.sep}world_{world_nr}"

        # Create the directory if not given
        if not os.path.exists(self.__save_path):
            os.makedirs(self.__save_path)

        self.__file_name = f"{self.__save_path}{os.sep}{self.__file_name}"

    def __check_data(self, data):
        """Validate that `data` is a dict and introduces no columns beyond
        those written in the first row; raises otherwise."""
        if isinstance(data, dict):
            # Check if the data contains new columns, if so raise an exception that we cannot add columns on the fly
            if len(self.__columns) > 0:
                new_columns = set(data.keys()) - set(self.__columns)
                if len(new_columns) > 0:
                    raise Exception(f"Cannot append columns to the log file when we already logged with different "
                                    f"columns. THe following columns are new; {list(new_columns)}")
            return True
        else:
            raise Exception(f"The data in this {self.__class__} should be a dictionary.")

    def __write_data(self, data, tick_nr):
        """Append `data` as one delimited row; on the very first write the
        file is created, the column order is fixed, and a header is emitted."""
        # We always include the world number and the tick number
        if "world_nr" not in data.keys():
            data["world_nr"] = self.__world_nr
        if "tick_nr" not in data.keys():
            data["tick_nr"] = tick_nr

        # Check if we have columns to write to, this will be the order in which we write them
        if len(self.__columns) == 0:
            # Then we set the keys as column names
            self.__columns = list(data.keys())

        # Write the data to the file, create it when it does not exist and in that case write the columns as well
        if not os.path.isfile(self.__file_name):
            mode = "w+"
            write_columns = True
        else:
            mode = "a"
            write_columns = False

        with open(self.__file_name, mode=mode, newline='') as data_file:
            csv_writer = csv.DictWriter(data_file, delimiter=self.__delimiter, quotechar='"',
                                        quoting=csv.QUOTE_MINIMAL, fieldnames=self.__columns)

            # Write columns if we need to
            if write_columns:
                csv_writer.writeheader()

            csv_writer.writerow(data)
| 41.216418 | 119 | 0.627014 |
ace69dda9e8d8eaebde480cb8e2ee470d4e7d53a | 6,650 | py | Python | tests/test_seq2seq.py | lucienwang1009/tensorflow-onnx | aa197443579bba6fdf8990f5005d955ab515a39a | [
"MIT"
] | 1 | 2020-04-03T07:57:20.000Z | 2020-04-03T07:57:20.000Z | keras2onnx/ktf2onnx/tests/test_seq2seq.py | souptc/keras-onnx | c08d52bf4d4ec2bba69ec4ffd2ea14f47fecb1f5 | [
"MIT"
] | null | null | null | keras2onnx/ktf2onnx/tests/test_seq2seq.py | souptc/keras-onnx | c08d52bf4d4ec2bba69ec4ffd2ea14f47fecb1f5 | [
"MIT"
] | 1 | 2020-04-03T07:56:47.000Z | 2020-04-03T07:56:47.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
""" Unit Tests for tf.contrib.seq2seq """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main
# pylint: disable=missing-docstring
class Seq2SeqTests(Tf2OnnxBackendTestBase):
    """tf.contrib.seq2seq `dynamic_decode` graphs converted to ONNX.

    Both tests build a deterministic GreedyEmbeddingHelper/BasicDecoder
    graph (constant initializers, fixed start/end tokens) and compare the
    TF and ONNX outputs via ``run_test_case``.
    """

    def test_dynamic_decode_maximum_iterations(self):
        """Decoding cut off by `maximum_iterations` (all weights 0.5)."""
        batch_size = 2
        num_units = 4
        vocab_size = 5
        embedding_size = 3
        go_token = 0
        end_token = 1

        embedding = tf.constant(np.ones([vocab_size, embedding_size], dtype=np.float32))
        # Per-batch initial LSTM state: row i is all i's.
        state_val = np.reshape([np.ones([num_units], dtype=np.float32) * i for i in range(batch_size)],
                               [batch_size, num_units])
        encoder_state = tf.nn.rnn_cell.LSTMStateTuple(state_val, state_val)

        initializer = init_ops.constant_initializer(0.5)

        cell = rnn.LSTMCell(
            num_units=num_units,
            initializer=initializer,
            state_is_tuple=True)

        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            embedding=embedding,
            start_tokens=tf.tile([go_token], [batch_size]),
            end_token=end_token)

        output_layer = tf.layers.Dense(vocab_size, kernel_initializer=initializer)
        decoder = tf.contrib.seq2seq.BasicDecoder(
            cell=cell,
            helper=helper,
            initial_state=encoder_state,
            output_layer=output_layer)

        outputs, state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
            decoder=decoder,
            maximum_iterations=6)

        # Name the tensors we want exported and compared.
        _ = tf.identity(outputs.rnn_output, name="rnn_output")
        _ = tf.identity(outputs.sample_id, name="sample_id")
        _ = tf.identity(state, name="state")
        _ = tf.identity(sequence_lengths, name="sequence_lengths")

        output_names_with_port = [
            "rnn_output:0",
            # "sample_id:0", # incomplete type support for Transpose on onnxruntime 0.2.1
            "state:0",
        ]

        self.run_test_case({}, [], output_names_with_port, atol=1e-06, rtol=1e-6)

    def test_dynamic_decode_normal_stop(self):
        """Decoding that may terminate via the end token.

        Cell and dense weights are fixed constants so TF and ONNX runs are
        reproducible.
        """
        batch_size = 2
        num_units = 4
        vocab_size = 5
        embedding_size = 3
        go_token = 0
        end_token = 1

        embedding = tf.constant(np.ones([vocab_size, embedding_size], dtype=np.float32))
        # Per-batch initial LSTM state: row i is all i's.
        state_val = np.reshape([np.ones([num_units], dtype=np.float32) * i for i in range(batch_size)],
                               [batch_size, num_units])
        encoder_state = tf.nn.rnn_cell.LSTMStateTuple(state_val, state_val)

        # Fixed LSTM kernel, shape [num_units + embedding_size, 4 * num_units].
        cell_initializer = init_ops.constant_initializer(
            np.array([[-0.9592235, 0.42451382, 0.7437744, -0.54485345, -0.80763197,
                       0.19663906, -0.22738314, 0.7762785, 0.7464578, 0.27227187,
                       0.7661047, 0.3596425, -0.8528242, -0.89316916, -0.48946142,
                       0.87882376],
                      [0.86586094, -0.75018406, 0.25992537, -0.69368935, 0.2515502,
                       -0.26379275, 0.8954313, 0.5759742, -0.7753072, -0.4388857,
                       0.95751476, -0.82085776, -0.9467752, -0.37055635, -0.18570113,
                       -0.86504984],
                      [0.02305841, 0.3850248, 0.893692, -0.6866486, -0.83703446,
                       -0.9828961, 0.3989377, -0.59993076, 0.5330808, 0.6916566,
                       0.98468065, -0.6047034, 0.10823512, 0.34599304, -0.7834821,
                       -0.7852347],
                      [0.81643987, 0.31507468, -0.51369476, -0.12273741, 0.9701307,
                       -0.79669356, -0.34496522, -0.88750815, -0.17995334, 0.34707904,
                       -0.09201193, 0.5363934, -0.87229705, -0.5073328, -0.95894027,
                       0.5481839],
                      [-0.84093595, -0.2341497, -0.86047816, 0.43370056, -0.39073753,
                       0.37730122, 0.48026466, 0.3004985, -0.60727096, 0.9043884,
                       -0.37619448, 0.22490788, -0.03739262, 0.61672115, 0.478899,
                       -0.40780973],
                      [0.31202435, -0.22045255, -0.6087918, 0.95115066, 0.00199413,
                       -0.688287, -0.1103518, 0.4169519, 0.7913246, -0.9844644,
                       -0.6193857, 0.38659644, -0.4726901, -0.44781208, -0.5174744,
                       -0.605911],
                      [0.66771054, 0.34912825, 0.22297978, -0.4990945, 0.24057317,
                       -0.5540829, 0.92277217, 0.74939895, -0.35278273, -0.21587133,
                       -0.28613377, -0.8794241, -0.40119147, 0.67175174, -0.22741508,
                       0.37898326]], dtype=np.float32))

        # Fixed projection weights, shape [num_units, vocab_size].
        dense_initializer = init_ops.constant_initializer(
            np.array([[0.56177187, -0.6233454, 0.73997784, 0.35032558, 0.6479795],
                      [0.6831174, -0.34233975, 0.39330363, 0.45177555, -0.49649096],
                      [-0.98890066, 0.6175642, 0.09800482, -0.6721206, 0.48805737],
                      [0.19671416, 0.2623148, 0.742548, 0.13555217, 0.56009054]], dtype=np.float32))

        cell = rnn.LSTMCell(
            num_units=num_units,
            initializer=cell_initializer,
            state_is_tuple=True)

        helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
            embedding=embedding,
            start_tokens=tf.tile([go_token], [batch_size]),
            end_token=end_token)

        output_layer = tf.layers.Dense(vocab_size, kernel_initializer=dense_initializer)
        decoder = tf.contrib.seq2seq.BasicDecoder(
            cell=cell,
            helper=helper,
            initial_state=encoder_state,
            output_layer=output_layer)

        outputs, state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(
            decoder=decoder,
            maximum_iterations=6)

        # Name the tensors we want exported and compared.
        _ = tf.identity(outputs.rnn_output, name="rnn_output")
        _ = tf.identity(outputs.sample_id, name="sample_id")
        _ = tf.identity(state, name="state")
        _ = tf.identity(sequence_lengths, name="sequence_lengths")

        output_names_with_port = [
            "rnn_output:0",
            # "sample_id:0", # incomplete type support for Transpose on onnxruntime 0.2.1
            "state:0",
        ]

        self.run_test_case({}, [], output_names_with_port, atol=1e-06, rtol=1e-6)
if __name__ == '__main__':
    # Delegate to tf2onnx's shared unittest entry point (handles CLI flags).
    unittest_main()
| 43.464052 | 103 | 0.591128 |
ace69e1b46caa4979f5765d9ceed28ef1fdeaab2 | 220 | py | Python | utils/init.py | alanhuang122/skyless-utils | 773f23c37a95d97a3346948997b19eccbed8cc24 | [
"MIT"
] | 2 | 2018-09-12T19:47:19.000Z | 2019-11-07T07:46:10.000Z | utils/init.py | alanhuang122/skyless-utils | 773f23c37a95d97a3346948997b19eccbed8cc24 | [
"MIT"
] | 11 | 2018-08-29T01:49:29.000Z | 2019-07-08T18:54:16.000Z | utils/init.py | alanhuang122/skyless-utils | 773f23c37a95d97a3346948997b19eccbed8cc24 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import json
# Build a key -> value lookup table from the JSON-lines data file.
data = {}
print('loading...')
# JSON is defined over UTF-8; pin the encoding instead of relying on the
# platform's locale default (which can mis-decode on e.g. Windows).
with open('skyless.dat', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        data[record['key']] = record['value']

# Imported only after the table is assembled; its `data` attribute is set
# here — presumably read by the rest of the package (TODO confirm).
import skyless
skyless.data = data
| 15.714286 | 41 | 0.609091 |
ace69e2e1f2f1403a13d87fe993d56cb9495cfd2 | 6,278 | py | Python | src/testers/unittests/test_synthesizer.py | o2e/Triton | 0753a0c097fe637beb25b428ff2f0983f14f96d9 | [
"Apache-2.0"
] | null | null | null | src/testers/unittests/test_synthesizer.py | o2e/Triton | 0753a0c097fe637beb25b428ff2f0983f14f96d9 | [
"Apache-2.0"
] | null | null | null | src/testers/unittests/test_synthesizer.py | o2e/Triton | 0753a0c097fe637beb25b428ff2f0983f14f96d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
"""Test synthesizing."""
import unittest
import random
from triton import *
class TestSynth_1(unittest.TestCase):
    """Synthesis of hand-picked MBA-obfuscated expressions.

    Each entry of ``obf_exprs`` pairs the expected simple form (a string in
    Triton's PYTHON AST representation) with an equivalent obfuscated AST
    built over 8-bit (c, x, y) and 32-bit (z) symbolic variables.
    """

    def setUp(self):
        self.ctx = TritonContext(ARCH.X86_64)
        self.ast = self.ctx.getAstContext()
        self.ctx.setAstRepresentationMode(AST_REPRESENTATION.PYTHON)

        c = self.ast.variable(self.ctx.newSymbolicVariable(8, 'c'))
        x = self.ast.variable(self.ctx.newSymbolicVariable(8, 'x'))
        y = self.ast.variable(self.ctx.newSymbolicVariable(8, 'y'))
        z = self.ast.variable(self.ctx.newSymbolicVariable(32, 'z'))

        # Some obfuscated expressions
        self.obf_exprs = [
            ('(~(x) & 0xff)', (((0xff - x) & 0xff) + (1 * (1 - 1)))),
            ('((x + 0x1) & 0xff)', -~x & 0xff),
            ('((x + y) & 0xff)', (x | y) + y - (~x & y)),  # from http://archive.bar/pdfs/bar2020-preprint9.pdf
            ('(x ^ y)', (x | y) - y + (~x & y)),  # from http://archive.bar/pdfs/bar2020-preprint9.pdf
            ('(x ^ y)', (x & ~y) | (~x & y)),  # from ?
            ('(x | y)', (x ^ y) + y - (~x & y)),  # from http://archive.bar/pdfs/bar2020-preprint9.pdf
            ('(y & x)', -(x | y) + y + x),  # from http://archive.bar/pdfs/bar2020-preprint9.pdf
            ('(z & 0xffff00)', ((z << 8) >> 16) << 8),  # from https://blog.regehr.org/archives/1636
            ('((x + y) & 0xff)', (((x ^ y) + 2 * (x & y)) * 39 + 23) * 151 + 111),  # from Ninon Eyrolle's thesis
            ('(x ^ 0x5c)', self.x_xor_92_obfuscated(x)),  # from imassage
            ('((0x2 * (c ^ 0x1)) & 0xff)', self.opaque_constant(x, y, c)),  # from ?
            ('(((bswap(z, 32) ^ 0x23746fbe) + 0xfffffffd) & 0xffffffff)', self.bswap32_xor_const(z)),  # from UnityPlayer.dll
        ]

    def bswap32_xor_const(self, x):
        """byteswap(x) xor'ed with a constant, hidden behind two
        complementary AND masks (0xd7848ce1 | 0x287b731e == 0xffffffff)."""
        a = ((~((((((x & 0xff)) << 8 | ((x >> 8) & 0xff)) << 8 | ((x >> 16) & 0xff)) << 8 | ((x >> 24) & 0xff)))) & 0xd7848ce1)
        b = ((((((x & 0xff)) << 8 | ((x >> 8) & 0xff)) << 8 | ((x >> 16) & 0xff)) << 8 | ((x >> 24) & 0xff)) & 0x287b731e)
        return ((( a | b ) ^ 0xf4f0e35f) + 0xfffffffd)

    def x_xor_92_obfuscated(self, x):
        """MBA chain that reduces to `x ^ 0x5c` over 8-bit arithmetic."""
        a = 229 * x + 247
        b = 237 * a + 214 + ((38 * a + 85) & 254)
        c = (b + ((-(2 * b) + 255) & 254)) * 3 + 77
        d = ((86 * c + 36) & 70) * 75 + 231 * c + 118
        e = ((58 * d + 175) & 244) + 99 * d + 46
        f = (e & 148)
        g = (f - (e & 255) + f) * 103 + 13
        R = (237 * (45 * g + (174 * g | 34) * 229 + 194 - 247) & 255)
        return R

    def opaque_constant(self, a, b, c):
        """Builds `2 * (c ^ 1)` out of two opaque-zero subexpressions."""
        op1 = (2 * (b & ~a) + ( -1 * (~ a | b) + ( -1 * ~( a & b) + (2 * ~( a | b) + 1 * a ))))  # 0 (opaque constant)
        op2 = (a | b) - (a + b) + (a & b)  # 0 (opaque constant)
        n = op1 + (1 << op2)  # 0 + 1
        n = (op1 + 2) * (c ^ n)  # (0 + 2) * (c ^ 1)
        return n

    def test_1(self):
        """Every obfuscated AST must synthesize back to its expected string."""
        for org, obfu in self.obf_exprs:
            self.assertEqual(str(self.ctx.synthesize(obfu, constant=True, subexpr=True, opaque=True)), org)
class TestSynth_2(unittest.TestCase):
    """Synthesis of the eax result of an emulated, MBA-obfuscated
    x86-64 function of two 32-bit arguments (edi, esi)."""

    def setUp(self):
        # Raw machine code of the obfuscated function; mapped at 0x1000 and
        # emulated instruction-by-instruction in `emulate`.
        self.CODE = b"\x55\x48\x89\xE5\x89\x7D\xEC\x89\x75\xE8\x8B\x45\xE8\x23\x45\xEC"
        self.CODE += b"\x89\xC2\x8B\x45\xE8\x0B\x45\xEC\x89\xD1\x0F\xAF\xC8\x8B\x45\xEC"
        self.CODE += b"\xF7\xD0\x23\x45\xE8\x89\xC2\x8B\x45\xE8\xF7\xD0\x23\x45\xEC\x0F"
        self.CODE += b"\xAF\xC2\x01\xC8\x23\x45\xE8\x89\xC2\x8B\x45\xE8\x23\x45\xEC\x89"
        self.CODE += b"\xC1\x8B\x45\xE8\x0B\x45\xEC\x89\xCE\x0F\xAF\xF0\x8B\x45\xEC\xF7"
        self.CODE += b"\xD0\x23\x45\xE8\x89\xC1\x8B\x45\xE8\xF7\xD0\x23\x45\xEC\x0F\xAF"
        self.CODE += b"\xC1\x01\xF0\x0B\x45\xE8\x89\xD6\x0F\xAF\xF0\x8B\x45\xE8\x23\x45"
        self.CODE += b"\xEC\x89\xC2\x8B\x45\xE8\x0B\x45\xEC\x89\xD1\x0F\xAF\xC8\x8B\x45"
        self.CODE += b"\xEC\xF7\xD0\x23\x45\xE8\x89\xC2\x8B\x45\xE8\xF7\xD0\x23\x45\xEC"
        self.CODE += b"\x0F\xAF\xC2\x8D\x14\x01\x8B\x45\xE8\xF7\xD0\x89\xD1\x21\xC1\x8B"
        self.CODE += b"\x45\xE8\x23\x45\xEC\x89\xC2\x8B\x45\xE8\x0B\x45\xEC\x89\xD7\x0F"
        self.CODE += b"\xAF\xF8\x8B\x45\xEC\xF7\xD0\x23\x45\xE8\x89\xC2\x8B\x45\xE8\xF7"
        self.CODE += b"\xD0\x23\x45\xEC\x0F\xAF\xC2\x01\xF8\xF7\xD0\x23\x45\xE8\x0F\xAF"
        self.CODE += b"\xC1\x8D\x14\x06\x8B\x45\xEC\x01\xD0\x83\xC0\x01\x89\x45\xFC\x8B"
        self.CODE += b"\x45\xFC\x5D\xC3"

    def emulate(self, ctx, pc):
        """Process instructions starting at `pc` until RIP becomes zero."""
        while pc:
            opcode = ctx.getConcreteMemoryAreaValue(pc, 16)
            instruction = Instruction(pc, opcode)
            ctx.processing(instruction)
            pc = ctx.getConcreteRegisterValue(ctx.registers.rip)
        return

    def init(self):
        """Fresh context: symbolic edi ('a') / esi ('b'), code mapped at 0x1000."""
        self.ctx = TritonContext(ARCH.X86_64)
        self.ctx.setMode(MODE.AST_OPTIMIZATIONS, True)
        self.ctx.symbolizeRegister(self.ctx.registers.edi, "a")
        self.ctx.symbolizeRegister(self.ctx.registers.esi, "b")
        self.ctx.setConcreteMemoryAreaValue(0x1000, self.CODE)

    def test_2(self):
        """The synthesized eax expression should reduce to a small add/mul tree."""
        self.init()
        self.emulate(self.ctx, 0x1000)
        eax = self.ctx.getRegisterAst(self.ctx.registers.eax)
        ast = self.ctx.getAstContext()
        res = str(ast.unroll(self.ctx.synthesize(eax, constant=False, subexpr=True)))
        # NOTE(review): assertLessEqual on two strings is a lexicographic
        # comparison, not an equality check — confirm this looseness is
        # intentional (assertEqual looks like the intended assertion).
        self.assertLessEqual(res, "(bvadd (bvadd a (bvmul (bvmul a b) b)) (_ bv1 32))")
| 58.12963 | 181 | 0.462727 |
ace69e6272eea8855a5ac66387097fadb653cad5 | 3,922 | py | Python | cohesity_management_sdk/models/pvc_info_pvc_spec.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/pvc_info_pvc_spec.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/pvc_info_pvc_spec.py | cohesity/management-sdk-python | 867d8c0c40dd317cdb017902c895527da7ae31c0 | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.object_reference
import cohesity_management_sdk.models.pvc_info_pvc_spec_resources
import cohesity_management_sdk.models.label_selector
class PVCInfo_PVCSpec(object):

    """Implementation of the 'PVCInfo_PVCSpec' model.

    Attributes:
        access_modes (list of string): AccessModes contains the desired access
            modes the volume should have.
        data_source (ObjectReference): This field can be used to specify either:
            An existing VolumeSnapshot object
            An existing PVC (PersistentVolumeClaim)
            An existing custom resource/object that implements
            data population.
        resources (PVCInfo_PVCSpec_Resources): Resources represents the minimum
            resources the volume should have.
        selector (LabelSelector): A label query over volumes to consider for
            binding.
        storage_class_name (string): Name of the StorageClass required by the claim.
        volume_mode (string): volumeMode defines what type of volume is required
            by the claim.
            Value of Filesystem is implied when not included in claim spec.
        volume_name (string): Name of the volume that is using this PVC.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "access_modes": 'accessModes',
        "data_source": 'dataSource',
        "resources": 'resources',
        "selector": 'selector',
        "storage_class_name": 'storageClassName',
        "volume_mode": 'volumeMode',
        "volume_name": 'volumeName'
    }

    def __init__(self,
                 access_modes=None,
                 data_source=None,
                 resources=None,
                 selector=None,
                 storage_class_name=None,
                 volume_mode=None,
                 volume_name=None):
        """Constructor for the PVCInfo_PVCSpec class"""

        # Initialize members of the class
        self.access_modes = access_modes
        self.data_source = data_source
        self.resources = resources
        self.selector = selector
        self.storage_class_name = storage_class_name
        self.volume_mode = volume_mode
        self.volume_name = volume_name

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
                obtained from the deserialization of the server's response. The keys
                MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary. Nested model modules are only
        # touched when their key is present, so plain payloads deserialize
        # without importing anything extra.
        access_modes = dictionary.get('accessModes')
        data_source = cohesity_management_sdk.models.object_reference.ObjectReference.from_dictionary(dictionary.get('dataSource')) if dictionary.get('dataSource') else None
        resources = cohesity_management_sdk.models.pvc_info_pvc_spec_resources.PVCInfo_PVCSpec_Resources.from_dictionary(dictionary.get('resources')) if dictionary.get('resources') else None
        # BUG FIX: previously referenced the non-existent module
        # 'label_selectore' (typo); the module imported at the top of this
        # file is `label_selector`, so any payload containing a 'selector'
        # key raised AttributeError.
        selector = cohesity_management_sdk.models.label_selector.LabelSelector.from_dictionary(dictionary.get('selector')) if dictionary.get('selector') else None
        storage_class_name = dictionary.get('storageClassName')
        volume_mode = dictionary.get('volumeMode')
        volume_name = dictionary.get('volumeName')

        # Return an object of this model
        return cls(access_modes,
                   data_source,
                   resources,
                   selector,
                   storage_class_name,
                   volume_mode,
                   volume_name)
| 39.22 | 190 | 0.652728 |
ace69e831d87b3b795b9e04f7e139dce1ada34a1 | 402 | py | Python | Day_71/simple_threading_exp2.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_71/simple_threading_exp2.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_71/simple_threading_exp2.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | import threading
import time
threads =[]
start = time.perf_counter()
def do_something():
print("Will sleep for 1 second")
time.sleep(1)
print("Finished..")
for _ in range(10):
t = threading.Thread(target=do_something)
t.start()
threads.append(t)
for thread in threads:
thread.join()
finish = time.perf_counter();
print(f"Total time = {round(finish - start)}second(s)") | 18.272727 | 55 | 0.671642 |
ace69f65ef16c66da288485d5fb845fdd764dfbc | 1,561 | py | Python | pandas_ta/trend/decreasing.py | yssource/pandas-ta | 0f975320684a91db3c04f6ea3dd739177dcb65aa | [
"MIT"
] | null | null | null | pandas_ta/trend/decreasing.py | yssource/pandas-ta | 0f975320684a91db3c04f6ea3dd739177dcb65aa | [
"MIT"
] | null | null | null | pandas_ta/trend/decreasing.py | yssource/pandas-ta | 0f975320684a91db3c04f6ea3dd739177dcb65aa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from pandas_ta.utils import get_offset, verify_series
def decreasing(close, length=None, asint=True, offset=None, **kwargs):
    """Indicator: Decreasing

    Flags, for every bar, whether 'close' has fallen over the previous
    `length` periods (1/0 when `asint` is True, otherwise booleans).
    """
    # -- validate arguments -------------------------------------------------
    close = verify_series(close)
    length = int(length) if length and length > 0 else 1
    offset = get_offset(offset)
    # -- core computation ---------------------------------------------------
    dec = close.diff(length) < 0
    if asint:
        dec = dec.astype(int)
    # -- apply offset -------------------------------------------------------
    if offset != 0:
        dec = dec.shift(offset)
    # -- optional NaN handling ----------------------------------------------
    if "fillna" in kwargs:
        dec.fillna(kwargs["fillna"], inplace=True)
    if "fill_method" in kwargs:
        dec.fillna(method=kwargs["fill_method"], inplace=True)
    # -- name and categorize ------------------------------------------------
    dec.name = f"DEC_{length}"
    dec.category = "trend"
    return dec
decreasing.__doc__ = \
"""Decreasing
Returns True or False if the series is decreasing over a periods. By default,
it returns True and False as 1 and 0 respectively with kwarg 'asint'.
Sources:
Calculation:
    decreasing = close.diff(length) < 0
    if asint:
        decreasing = decreasing.astype(int)
Args:
    close (pd.Series): Series of 'close's
    length (int): It's period. Default: 1
    asint (bool): Returns as binary. Default: True
    offset (int): How many periods to offset the result. Default: 0
Kwargs:
    fillna (value, optional): pd.DataFrame.fillna(value)
    fill_method (value, optional): Type of fill method
Returns:
    pd.Series: New feature generated.
"""
| 26.016667 | 78 | 0.676489 |
ace6a139ffb46b27e8434f4b86ff0b024b45f704 | 2,392 | py | Python | opsdroid/message.py | sbeesm/opsdroid | 19db07fd568abfd9c9b9f080fd0dea5d919f8f90 | [
"Apache-2.0"
] | null | null | null | opsdroid/message.py | sbeesm/opsdroid | 19db07fd568abfd9c9b9f080fd0dea5d919f8f90 | [
"Apache-2.0"
] | 1 | 2018-03-02T19:41:41.000Z | 2018-03-02T19:46:04.000Z | opsdroid/message.py | TarunRKaushik/opsdroid | 7835e1218d81f7bc6191ede5858e6732706288c4 | [
"Apache-2.0"
] | null | null | null | """Class to encapsulate a message."""
from datetime import datetime
from copy import copy
import asyncio
from random import randrange
from opsdroid.helper import get_opsdroid
class Message:
    # pylint: disable=too-few-public-methods
    """A message object.

    Wraps a single chat message together with the connector it arrived on,
    and offers helpers to respond/react through that same connector with
    optional human-like "thinking" and "typing" delays.
    """
    def __init__(self, text, user, room, connector, raw_message=None):
        """Create object with minimum properties.

        Args:
            text: message body as received from the chat service.
            user: name/id of the user who sent the message.
            room: room/channel the message was posted in.
            connector: connector instance that produced this message.
            raw_message: optional connector-specific raw payload.
        """
        self.created = datetime.now()
        self.text = text
        self.user = user
        self.room = room
        self.connector = connector
        self.raw_message = raw_message
        self.regex = None  # set later by skill matching, if any
        self.responded_to = False  # guards the one-shot stats update below
    async def _thinking_delay(self):
        """Make opsdroid wait x-seconds before responding."""
        # 'thinking-delay' may be a single number or a [min, max] range.
        seconds = self.connector.configuration.get('thinking-delay', 0)
        if isinstance(seconds, list):
            seconds = randrange(seconds[0], seconds[1])
        await asyncio.sleep(seconds)
    async def _typing_delay(self, text):
        """Simulate typing, takes an int(characters per second typed)."""
        try:
            char_per_sec = self.connector.configuration['typing-delay']
            char_count = len(text)
            # Integer division: any sub-second remainder is dropped.
            await asyncio.sleep(char_count//char_per_sec)
        except KeyError:
            # No 'typing-delay' configured; skip the simulated delay.
            pass
    async def respond(self, text):
        """Respond to this message using the connector it was created by."""
        opsdroid = get_opsdroid()
        # Copy so the original incoming message keeps its own text.
        response = copy(self)
        response.text = text
        if 'thinking-delay' in self.connector.configuration or \
                'typing-delay' in self.connector.configuration:
            await self._thinking_delay()
            await self._typing_delay(response.text)
        await self.connector.respond(response)
        # Only the first response to a message updates the global stats.
        if not self.responded_to:
            now = datetime.now()
            opsdroid.stats["total_responses"] = \
                opsdroid.stats["total_responses"] + 1
            opsdroid.stats["total_response_time"] = \
                opsdroid.stats["total_response_time"] + \
                (now - self.created).total_seconds()
            self.responded_to = True
    async def react(self, emoji):
        """React to this message using the connector it was created by."""
        if 'thinking-delay' in self.connector.configuration:
            await self._thinking_delay()
        return await self.connector.react(self, emoji)
| 34.171429 | 76 | 0.629181 |
ace6a2517473324981e796310ef8be5679db87c3 | 13,050 | py | Python | homeassistant/components/device_tracker/owntracks.py | olskar/home-assistant | 5986d9ff5b068b221e9d2c675f388b80070e8d87 | [
"Apache-2.0"
] | 1 | 2017-09-26T06:13:10.000Z | 2017-09-26T06:13:10.000Z | homeassistant/components/device_tracker/owntracks.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/device_tracker/owntracks.py | moose51789/home-assistant | 63c9d59d5455850fd4b37c2475fe6f10effb5245 | [
"Apache-2.0"
] | null | null | null | """
Device tracker platform that adds support for OwnTracks over MQTT.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import asyncio
import base64
import json
import logging
from collections import defaultdict
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
import homeassistant.helpers.config_validation as cv
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import STATE_HOME
from homeassistant.core import callback
from homeassistant.util import slugify, decorator
REQUIREMENTS = ['libnacl==1.6.0']
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
BEACON_DEV_ID = 'beacon'
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_SECRET = 'secret'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
DEPENDENCIES = ['mqtt']
OWNTRACKS_TOPIC = 'owntracks/#'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    # Drop GPS fixes whose reported accuracy (meters) exceeds this value.
    vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
    # Whether OwnTracks waypoints are imported as Home Assistant zones.
    vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
    # Restrict waypoint import to these users (all users when omitted).
    vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
        cv.ensure_list, [cv.string]),
    # Either one shared key, or a mapping of MQTT topic -> key.
    vol.Optional(CONF_SECRET): vol.Any(
        vol.Schema({vol.Optional(cv.string): cv.string}),
        cv.string)
})
def get_cipher():
    """Return the libnacl secret-box key length and a decrypt callable.

    Imported lazily so the platform loads even when libsodium is missing.

    Async friendly.
    """
    from libnacl.secret import SecretBox
    from libnacl import crypto_secretbox_KEYBYTES as KEYLEN

    def _decrypt(ciphertext, key):
        """Decrypt ciphertext using key."""
        return SecretBox(key).decrypt(ciphertext)

    return (KEYLEN, _decrypt)
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
    """Set up an OwnTracks tracker.

    Subscribes to the OwnTracks MQTT topic and routes every incoming JSON
    payload through ``async_handle_message``. Always returns True.
    """
    context = context_from_config(async_see, config)
    @asyncio.coroutine
    def async_handle_mqtt_message(topic, payload, qos):
        """Handle incoming OwnTracks message."""
        try:
            message = json.loads(payload)
        except ValueError:
            # If invalid JSON
            _LOGGER.error("Unable to parse payload as JSON: %s", payload)
            return
        # Keep the originating topic so handlers can derive user/device.
        message['topic'] = topic
        yield from async_handle_message(hass, context, message)
    yield from mqtt.async_subscribe(
        hass, OWNTRACKS_TOPIC, async_handle_mqtt_message, 1)
    return True
def _parse_topic(topic):
"""Parse an MQTT topic owntracks/user/dev, return (user, dev) tuple.
Async friendly.
"""
try:
_, user, device, *_ = topic.split('/', 3)
except ValueError:
_LOGGER.error("Can't parse topic: '%s'", topic)
raise
return user, device
def _parse_see_args(message):
    """Parse the OwnTracks location parameters, into the format see expects.

    Async friendly.
    """
    user, device = _parse_topic(message['topic'])
    dev_id = slugify('{}_{}'.format(user, device))
    kwargs = {
        'dev_id': dev_id,
        'host_name': user,
        'gps': (message['lat'], message['lon']),
        'attributes': {},
    }
    if 'acc' in message:
        kwargs['gps_accuracy'] = message['acc']
    if 'batt' in message:
        kwargs['battery'] = message['batt']
    # Optional extras are forwarded as entity attributes.
    for source_key, attr_key in (('vel', 'velocity'),
                                 ('tid', 'tid'),
                                 ('addr', 'address')):
        if source_key in message:
            kwargs['attributes'][attr_key] = message[source_key]
    return dev_id, kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
def _decrypt_payload(secret, topic, ciphertext):
    """Decrypt encrypted payload.

    Returns the plaintext string, or None when libsodium is unavailable,
    no key is known for the topic, or decryption fails.
    """
    try:
        keylen, decrypt = get_cipher()
    except OSError:
        _LOGGER.warning(
            "Ignoring encrypted payload because libsodium not installed")
        return None

    # Per-topic keys may be configured as a dict; otherwise one shared key.
    key = secret.get(topic) if isinstance(secret, dict) else secret
    if key is None:
        _LOGGER.warning(
            "Ignoring encrypted payload because no decryption key known "
            "for topic %s", topic)
        return None

    # Truncate/pad the key to exactly the secret-box key length.
    key = key.encode("utf-8")[:keylen].ljust(keylen, b'\0')

    try:
        message = decrypt(base64.b64decode(ciphertext), key).decode("utf-8")
    except ValueError:
        _LOGGER.warning(
            "Ignoring encrypted payload because unable to decrypt using "
            "key for topic %s", topic)
        return None
    _LOGGER.debug("Decrypted payload: %s", message)
    return message
def context_from_config(async_see, config):
    """Create an async context from Home Assistant config."""
    return OwnTracksContext(
        async_see,
        config.get(CONF_SECRET),
        config.get(CONF_MAX_GPS_ACCURACY),
        config.get(CONF_WAYPOINT_IMPORT),
        config.get(CONF_WAYPOINT_WHITELIST),
    )
class OwnTracksContext:
    """Hold the current OwnTracks context.

    Bundles the platform configuration together with the mutable tracking
    state (active mobile beacons and entered regions, both keyed by
    device id) that the message handlers share.
    """
    def __init__(self, async_see, secret, max_gps_accuracy, import_waypoints,
                 waypoint_whitelist):
        """Initialize an OwnTracks context."""
        self.async_see = async_see
        self.secret = secret
        self.max_gps_accuracy = max_gps_accuracy
        # Per-device stacks of active beacons / entered regions.
        self.mobile_beacons_active = defaultdict(list)
        self.regions_entered = defaultdict(list)
        self.import_waypoints = import_waypoints
        self.waypoint_whitelist = waypoint_whitelist
    @callback
    def async_valid_accuracy(self, message):
        """Check if we should ignore this message.

        True only when the message carries a usable, non-zero GPS accuracy
        that does not exceed the configured maximum.
        """
        acc = message.get('acc')
        if acc is None:
            return False
        try:
            acc = float(acc)
        except ValueError:
            return False
        if acc == 0:
            # A zero accuracy means the fix is meaningless.
            _LOGGER.warning(
                "Ignoring %s update because GPS accuracy is zero: %s",
                message['_type'], message)
            return False
        if self.max_gps_accuracy is not None and \
                acc > self.max_gps_accuracy:
            _LOGGER.info("Ignoring %s update because expected GPS "
                         "accuracy %s is not met: %s",
                         message['_type'], self.max_gps_accuracy,
                         message)
            return False
        return True
    @asyncio.coroutine
    def async_see_beacons(self, dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # the battery state applies to the tracking device, not the beacon
        kwargs.pop('battery', None)
        for beacon in self.mobile_beacons_active[dev_id]:
            kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
            kwargs['host_name'] = beacon
            yield from self.async_see(**kwargs)
@HANDLERS.register('location')
@asyncio.coroutine
def async_handle_location_message(hass, context, message):
    """Handle a location message.

    Raw GPS fixes are ignored while the device is inside a region, because
    the region coordinates are considered more reliable there.
    """
    if not context.async_valid_accuracy(message):
        return
    dev_id, kwargs = _parse_see_args(message)
    if context.regions_entered[dev_id]:
        # Bug fix: the original logged context.regions_entered[-1], which
        # indexed the defaultdict itself — inserting a bogus -1 key and
        # logging an empty list instead of the current region name.
        _LOGGER.debug(
            "Location update ignored, inside region %s",
            context.regions_entered[dev_id][-1])
        return
    yield from context.async_see(**kwargs)
    yield from context.async_see_beacons(dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_enter(hass, context, message, location):
    """Execute enter event.

    A beacon region without a matching Home Assistant zone is treated as a
    mobile beacon; otherwise the region is pushed on the device's region
    stack and the device is placed at the zone's coordinates.
    """
    zone = hass.states.get("zone.{}".format(slugify(location)))
    dev_id, kwargs = _parse_see_args(message)
    if zone is None and message.get('t') == 'b':
        # Not a HA zone, and a beacon so assume mobile
        beacons = context.mobile_beacons_active[dev_id]
        if location not in beacons:
            beacons.append(location)
        _LOGGER.info("Added beacon %s", location)
    else:
        # Normal region
        regions = context.regions_entered[dev_id]
        if location not in regions:
            regions.append(location)
        _LOGGER.info("Enter region %s", location)
        _set_gps_from_zone(kwargs, location, zone)
    yield from context.async_see(**kwargs)
    yield from context.async_see_beacons(dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_leave(hass, context, message, location):
    """Execute leave event.

    Pops the region from the per-device stack; if another region remains
    the device is placed there, otherwise it falls back to the GPS fix in
    the message. A mobile beacon with the same name is deactivated.
    """
    dev_id, kwargs = _parse_see_args(message)
    regions = context.regions_entered[dev_id]
    if location in regions:
        regions.remove(location)
    new_region = regions[-1] if regions else None
    if new_region:
        # Exit to previous region
        zone = hass.states.get(
            "zone.{}".format(slugify(new_region)))
        _set_gps_from_zone(kwargs, new_region, zone)
        _LOGGER.info("Exit to %s", new_region)
        yield from context.async_see(**kwargs)
        yield from context.async_see_beacons(dev_id, kwargs)
        # Still inside a region: keep any beacon state untouched.
        return
    else:
        _LOGGER.info("Exit to GPS")
        # Check for GPS accuracy
        if context.async_valid_accuracy(message):
            yield from context.async_see(**kwargs)
            yield from context.async_see_beacons(dev_id, kwargs)
    # Leaving a beacon region also deactivates the beacon itself.
    beacons = context.mobile_beacons_active[dev_id]
    if location in beacons:
        beacons.remove(location)
        _LOGGER.info("Remove beacon %s", location)
@HANDLERS.register('transition')
@asyncio.coroutine
def async_handle_transition_message(hass, context, message):
    """Handle a transition message.

    Dispatches enter/leave region events to the dedicated helpers.
    """
    if message.get('desc') is None:
        _LOGGER.error(
            "Location missing from `Entering/Leaving` message - "
            "please turn `Share` on in OwnTracks app")
        return
    # OwnTracks prefixes a beacon zone name with '-' to switch on
    # 'hold mode'; strip that marker before comparing names.
    location = message['desc'].lstrip("-")
    if location.lower() == 'home':
        location = STATE_HOME

    event = message['event']
    if event == 'enter':
        yield from _async_transition_message_enter(
            hass, context, message, location)
    elif event == 'leave':
        yield from _async_transition_message_leave(
            hass, context, message, location)
    else:
        _LOGGER.error(
            "Misformatted mqtt msgs, _type=transition, event=%s",
            event)
@HANDLERS.register('waypoints')
@asyncio.coroutine
def async_handle_waypoints_message(hass, context, message):
    """Handle a waypoints message.

    Imports OwnTracks waypoints as Home Assistant zones, honoring the
    import toggle and the optional user whitelist. Existing zones are
    left untouched.
    """
    if not context.import_waypoints:
        return
    if context.waypoint_whitelist is not None:
        user = _parse_topic(message['topic'])[0]
        if user not in context.waypoint_whitelist:
            return
    wayps = message['waypoints']
    _LOGGER.info("Got %d waypoints from %s", len(wayps), message['topic'])
    # Zone names are prefixed with "user device" to keep them unique.
    name_base = ' '.join(_parse_topic(message['topic']))
    for wayp in wayps:
        name = wayp['desc']
        pretty_name = '{} - {}'.format(name_base, name)
        lat = wayp['lat']
        lon = wayp['lon']
        rad = wayp['rad']
        # check zone exists
        entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))
        # Check if state already exists
        if hass.states.get(entity_id) is not None:
            continue
        zone = zone_comp.Zone(hass, pretty_name, lat, lon, rad,
                              zone_comp.ICON_IMPORT, False)
        zone.entity_id = entity_id
        yield from zone.async_update_ha_state()
@HANDLERS.register('encrypted')
@asyncio.coroutine
def async_handle_encrypted_message(hass, context, message):
    """Handle an encrypted message.

    Decrypts the payload and re-dispatches the inner message, preserving
    the original topic. Silently drops messages that cannot be decrypted.
    """
    topic = message['topic']
    plaintext_payload = _decrypt_payload(context.secret, topic,
                                         message['data'])
    if plaintext_payload is None:
        return

    inner_message = json.loads(plaintext_payload)
    inner_message['topic'] = topic
    yield from async_handle_message(hass, context, inner_message)
@HANDLERS.register('lwt')
@asyncio.coroutine
def async_handle_lwt_message(hass, context, message):
    """Handle an lwt message."""
    # MQTT "last will and testament" messages carry no tracking data,
    # so they are logged and otherwise ignored.
    _LOGGER.debug('Not handling lwt message: %s', message)
@asyncio.coroutine
def async_handle_message(hass, context, message):
    """Handle an OwnTracks message.

    Looks up the handler registered for the message's '_type' and
    delegates to it; unknown types are logged and dropped.
    """
    msgtype = message.get('_type')
    handler = HANDLERS.get(msgtype)
    if handler is not None:
        yield from handler(hass, context, message)
    else:
        _LOGGER.warning(
            'Received unsupported message type: %s.', msgtype)
| 30.490654 | 77 | 0.650421 |
ace6a2b1a3ccb92263b437d0a55646500e3542b9 | 159 | py | Python | venv_rc/Scripts/django-admin.py | vinit-kanani/RetailConnect | 01e0a75ab817c6a441f05cc1ce971d7d3a95cfde | [
"MIT"
] | null | null | null | venv_rc/Scripts/django-admin.py | vinit-kanani/RetailConnect | 01e0a75ab817c6a441f05cc1ce971d7d3a95cfde | [
"MIT"
] | null | null | null | venv_rc/Scripts/django-admin.py | vinit-kanani/RetailConnect | 01e0a75ab817c6a441f05cc1ce971d7d3a95cfde | [
"MIT"
] | null | null | null | #!D:\RetailConnect\Server\venv_rc\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 26.5 | 52 | 0.792453 |
ace6a2f4d0c1d20f588df1e5bb4789318b533cbf | 2,310 | py | Python | utils_1.py | melikeakalan/twitter-sentiment-analysis | c5440c5fb2dd4b805060b7c62947671eabdc5d7a | [
"Apache-2.0"
] | 1 | 2021-06-03T15:33:21.000Z | 2021-06-03T15:33:21.000Z | utils_1.py | melikeakalan/twitter-sentiment-analysis | c5440c5fb2dd4b805060b7c62947671eabdc5d7a | [
"Apache-2.0"
] | null | null | null | utils_1.py | melikeakalan/twitter-sentiment-analysis | c5440c5fb2dd4b805060b7c62947671eabdc5d7a | [
"Apache-2.0"
] | null | null | null | import re
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
## PREPROCESSING function ##
def process_tweet(tweet):
    """Clean and tokenize a tweet for sentiment analysis.

    Removes stock tickers, retweet markers, URLs and the '#' sign, then
    lowercases/tokenizes the text and keeps only stemmed tokens that are
    neither English stopwords nor punctuation.

    Args:
        tweet (str): raw tweet text.

    Returns:
        list of str: processed (stemmed) tokens.
    """
    stemmer = PorterStemmer()
    stopwords_english = stopwords.words('english')
    # Strip artifacts that carry no sentiment signal.
    tweet = re.sub(r'\$\w*', '', tweet)  # stock tickers like $GE
    tweet = re.sub(r'^RT[\s]+', '', tweet)  # old style retweet text "RT"
    tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)  # hyperlinks
    tweet = re.sub(r'#', '', tweet)  # keep the hashtag word, drop the '#'
    # Tokenize: lowercase, strip @handles, shorten repeated characters.
    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
                               reduce_len=True)
    tweet_tokens = tokenizer.tokenize(tweet)
    # Keep stemmed tokens that are neither stopwords nor punctuation.
    return [stemmer.stem(word) for word in tweet_tokens
            if word not in stopwords_english
            and word not in string.punctuation]
## BUILD THE FREQUENCY DICTIONARY ##
def build_freqs(tweets, ys):
    """Build a {(word, label): count} frequency dictionary.

    Args:
        tweets (list of str): raw tweets.
        ys (array-like): sentiment labels (0/1), shape (m,) or (m, 1).

    Returns:
        dict: maps (processed_word, label) tuples to occurrence counts.
    """
    # np.squeeze collapses an (m, 1) label array to shape (m,) without
    # data loss so it can be zipped with the tweets; tolist() then turns
    # the numpy array into a plain Python list.
    yslist = np.squeeze(ys).tolist()
    # Walk every tweet, process its tokens, and count each (word, label)
    # pair: the first occurrence starts from 0, later ones increment.
    freqs = {}
    for y, tweet in zip(yslist, tweets):
        for word in process_tweet(tweet):
            pair = (word, y)
            freqs[pair] = freqs.get(pair, 0) + 1
    return freqs
ace6a3005b66b3d0fb785f1e08fa5e7c31312975 | 417 | py | Python | server/helper.py | azharaiz/sistem-parkir | 842767e00a47d8dcff6a9361ff352a162dd9a59c | [
"MIT"
] | null | null | null | server/helper.py | azharaiz/sistem-parkir | 842767e00a47d8dcff6a9361ff352a162dd9a59c | [
"MIT"
] | 1 | 2019-12-24T09:14:18.000Z | 2019-12-24T09:14:18.000Z | server/helper.py | azharaiz/sistem-parkir | 842767e00a47d8dcff6a9361ff352a162dd9a59c | [
"MIT"
] | 1 | 2021-04-28T13:40:33.000Z | 2021-04-28T13:40:33.000Z | import os
def get_img():
    """Return the absolute path of the image data directory: <cwd>/data/img."""
    base_dir = os.path.abspath(os.getcwd())
    return os.path.join(base_dir, "data", "img")
def get_log():
    """Return the absolute path of the log file: <cwd>/server/static/data/log.txt."""
    base_dir = os.path.abspath(os.getcwd())
    return os.path.join(base_dir, "server", "static", "data", "log.txt")
def get_file():
    """Parse the log file into a list of rows.

    Returns:
        list: one list of comma-separated fields per line of the log file
        (newlines are kept inside the last field, as before).
    """
    # 'with' guarantees the handle is closed even if reading raises,
    # and iterating the file replaces the manual readline loop.
    with open(get_log(), 'r') as log_file:
        return [line.split(',') for line in log_file]
ace6a31d2df439d793edbfe92027bcd5ed4d223d | 2,723 | py | Python | src/tox/exception.py | schinckel/tox | e797317f6f5b816253c875d613fc55f00ef88e7d | [
"MIT"
] | null | null | null | src/tox/exception.py | schinckel/tox | e797317f6f5b816253c875d613fc55f00ef88e7d | [
"MIT"
] | null | null | null | src/tox/exception.py | schinckel/tox | e797317f6f5b816253c875d613fc55f00ef88e7d | [
"MIT"
] | null | null | null | import os
import signal
def exit_code_str(exception_name, command, exit_code):
    """String representation for an InvocationError, with exit code

    NOTE: this might also be used by plugin tests (tox-venv at the time of
    writing), so some coordination is needed if this is ever moved or a
    different solution for this hack is found.

    NOTE: this is a separate function because pytest-mock `spy` does not work
    on Exceptions. We can use neither a class method nor a static because of
    https://bugs.python.org/issue23078. Even a normal method failed with
    "TypeError: descriptor '__getattribute__' requires a 'BaseException'
    object but received a 'type'".
    """
    text = "{} for command {}".format(exception_name, command)
    if exit_code is None:
        return text
    text += " (exited with code {:d})".format(exit_code)
    if os.name != "posix" or exit_code <= 128:
        return text
    # On POSIX, exit codes above 128 conventionally mean 128 + signal number.
    number = exit_code - 128
    signal_names = {
        num: sig for sig, num in vars(signal).items() if sig.startswith("SIG")
    }
    name = signal_names.get(number)
    if name:
        text += (
            "\nNote: this might indicate a fatal error signal "
            "({:d} - 128 = {:d}: {})".format(number + 128, number, name)
        )
    return text
class Error(Exception):
    """Base class for all tox errors; renders as '<ClassName>: <message>'."""

    def __str__(self):
        return "{}: {}".format(self.__class__.__name__, self.args[0])


class MissingSubstitution(Error):
    """Placeholder for debugging configurations."""
    # Bug fix: the original placed this docstring *after* FLAG, so it was
    # a stray string expression rather than the class docstring.

    # Marker value used to flag an unresolved substitution.
    FLAG = "TOX_MISSING_SUBSTITUTION"

    def __init__(self, name):
        self.name = name


class ConfigError(Error):
    """Error in tox configuration."""


class UnsupportedInterpreter(Error):
    """Signals an unsupported Interpreter."""


class InterpreterNotFound(Error):
    """Signals that an interpreter could not be found."""


class InvocationError(Error):
    """An error while invoking a script."""

    def __init__(self, command, exit_code=None):
        super(Error, self).__init__(command, exit_code)
        self.command = command
        self.exit_code = exit_code

    def __str__(self):
        return exit_code_str(self.__class__.__name__, self.command,
                             self.exit_code)


class MissingDirectory(Error):
    """A directory did not exist."""


class MissingDependency(Error):
    """A dependency could not be found or determined."""


class MissingRequirement(Error):
    """A requirement defined in :config:`require` is not met."""


class MinVersionError(Error):
    """The installed tox version is lower than requested minversion."""

    def __init__(self, message):
        self.message = message
        super(MinVersionError, self).__init__(message)
ace6a3a461b4d6d6129fc05dea5f36b35cf078c5 | 9,968 | py | Python | stablecompoper.py | philcarmona/conda | 80ea5e0e30aab2817ab7e2883aff49fa654bb79b | [
"BSD-3-Clause"
] | null | null | null | stablecompoper.py | philcarmona/conda | 80ea5e0e30aab2817ab7e2883aff49fa654bb79b | [
"BSD-3-Clause"
] | null | null | null | stablecompoper.py | philcarmona/conda | 80ea5e0e30aab2817ab7e2883aff49fa654bb79b | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import numpy.random as rnd
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvas
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
from scipy.integrate import odeint,quad
from scipy.stats import kde,beta
import seaborn as sns
#%matplotlib
from importlib import reload
pi=np.pi
from scipy.optimize import brentq
from numpy import linalg as LA
from scipy.linalg import expm
from bdp import periodise
def partiefrac(x,T):
y=x/T
return(T*(y-int(y)))
def zchi(lam,mu,A,T,N):
    r"""Generate N points of the point process formed by the phases of the
    individuals in an inhomogeneous birth-and-death process.

    Uses thinning of a Poisson process of intensity ``A`` (assumed to
    dominate ``lam(t) + mu(t)``). If the process dies out before N points
    are produced, fewer points (possibly none) are returned.
    """
    z=[[0,0]]  # initialisation: one individual at time 0 (hence its phase is 0)
    S=0  # S is the current jump time of the Poisson process of rate A
    i=0  # i+1 should be the length of z (NOTE: i is never updated/used)
    while (len(z) <= N):
        echelle=len(z)*A  # total jump rate for the whole population
        S=S+rnd.exponential(scale=1/echelle)
        p=(lam(S) + mu(S))/A  # acceptance probability for this jump time
        #print("S,p,z",S,p,z)
        u=rnd.uniform()
        if (u< p):  # accept the jump time
            q=lam(S)/(lam(S)+mu(S))  # probability that the jump is a birth
            v=rnd.uniform()
            if (v<q):
                z.append([S,partiefrac(S,T)])  # birth: record (time, phase)
            else:
                j=rnd.choice(len(z))
                del z[j]  # death: remove one individual at random
        if (len(z)==0):
            break  # the process has died out
    return(z)
def estimdenszchi(lzero,muzero,T,N):
    """Simulate the phase process with a sinusoidal birth rate and plot a
    kernel-density estimate of the sampled phases against the birth rate.

    lzero/muzero are the base birth/death rates, T the period and N the
    number of points requested from ``zchi``. Nothing is plotted when the
    process dies out before producing any point.
    """
    def lam(t):
        # T-periodic sinusoidal birth rate, oscillating in [0, 2*lzero].
        return(lzero*(1+np.cos((2*pi*t)/T)))
    def mu(t):
        # Constant death rate.
        return(muzero)
    A=2*lzero+muzero  # dominating rate for the thinning in zchi
    z=np.array(zchi(lam,mu,A,T,N))
    if (len(z) != 0):  # the process may have died out with no points
        w=z[:,1]  # second column holds the phases
        k=kde.gaussian_kde(w)
        tt=np.linspace(0,T,100)
        plt.plot(tt,k(tt))
        plt.plot(tt,[1+np.cos((2*pi*t)/T) for t in tt],color="red")
#estimdenszchi(2,1,1,5000)
def sestimdenszchi(lzero,muzero,T,N):
    """Simulate the phase process with a sinusoidal birth rate and compare
    the kernel-density estimate of the sampled phases with the theoretical
    stable-composition density lam(t)*exp(A(t)) and with lam itself.

    Retries the simulation until a run survives (does not die out).
    """
    def lam(t):
        # T-periodic sinusoidal birth rate, oscillating in [0, 2*lzero].
        return(lzero*(1+np.cos((2*pi*t)/T)))
    def primlam(t):
        r"""An antiderivative of ``lam``."""
        # Bug fix: the original used (T/2*pi), i.e. (T/2)*pi, instead of
        # the correct T/(2*pi) factor (as used in nsestimdenszchi).
        return (lzero*(t + (T/(2*pi))*np.sin((2*pi*t)/T)))
    def mu(t):
        # Constant death rate.
        return(muzero)
    A=2*lzero+muzero  # dominating rate for the thinning in zchi
    tt=np.linspace(0,T,100)
    # Theoretical stable-composition density, normalised to sum to 100.
    lamepa=np.array([lam(t)*np.exp(primlam(t)) for t in tt])
    lamepa=(lamepa/(lamepa.sum()))*100
    tlam=np.array([lam(t) for t in tt])
    tlam=(tlam/tlam.sum())*100
    print("tlam.sum()",tlam.sum())
    while True:
        z=np.array(zchi(lam,mu,A,T,N))
        if (len(z) != 0):  # retry until the process survives
            w=z[:,1]  # phases of the surviving sample
            k=kde.gaussian_kde(w)
            plt.plot(tt,k(tt),label="estim par noyau echantillon")
            plt.plot(tt,lamepa,color="green",label="densite")
            plt.plot(tt,tlam,color="red",label=r"$\lambda$")
            plt.legend()
            break
def nzchi(lam,mu,A,T,N):
    r"""Generate the points of the phase point process up to time N*T for an
    inhomogeneous birth-and-death process, by thinning a Poisson process of
    rate ``A`` (which must dominate ``lam(t) + mu(t)``).

    If the process dies out before time N*T, fewer points are returned.
    """
    z=[[0,0]]  # initialisation: one individual at time 0 (hence its phase is 0)
    S=0  # S is the current jump time of the Poisson process of rate A
    i=0  # i+1 should be the length of z (NOTE: i is never updated/used)
    while (S < N*T):
        echelle=len(z)*A  # total jump rate for the whole population
        S=S+rnd.exponential(scale=1/echelle)
        p=(lam(S) + mu(S))/A  # acceptance probability for this jump time
        #print("S,p,z",S,p,z)
        u=rnd.uniform()
        if (u< p):  # accept the jump time
            q=lam(S)/(lam(S)+mu(S))  # probability that the jump is a birth
            v=rnd.uniform()
            if (v<q):
                z.append([S,partiefrac(S,T)])  # birth: record (time, phase)
            else:
                j=rnd.choice(len(z))
                del z[j]  # death: remove one individual at random
        if (len(z)==0):
            break  # the process has died out
    return(z)
def toto(lzero=0.8, muzero=0.1, T=2, N=8, coeff=1.0):
    """Convenience wrapper: forward all arguments to nsestimdenszchi."""
    return nsestimdenszchi(lzero=lzero, muzero=muzero, T=T, N=N, coeff=coeff)
def nsestimdenszchi(lzero=0.8,muzero=0.1,T=2,N=8,coeff=1.0,estimnoyau=False,image=False):
    r"""Simulate the phase process over N periods and plot its histogram
    against the theoretical stable-composition density and the birth rate.

    ``coeff`` is the intensity of the sinusoidal modulation of the birth
    rate. We wait for the first run in which the process has not died out,
    hence the while loop. Returns a Figure object.
    """
    nbpts=100
    def lam(t):
        # T-periodic sinusoidal birth rate.
        return(lzero*(1+coeff*np.cos(2*pi*t/T)))
    def primlam(t):
        r"""An antiderivative of ``lam``."""
        return (lzero*(t + coeff*(T/(2*pi))*np.sin((2*pi*t)/T)))
    def mu(t):
        # Constant death rate.
        return(muzero)
    A=(1+coeff)*lzero+muzero  # dominating rate for the thinning in nzchi
    tt=np.linspace(0,T,nbpts)
    # Theoretical stable-composition density lam(t)*exp(A(t)), normalised
    # so it integrates to 1 over one period.
    lamepa=np.array([lam(t)*np.exp(primlam(t)) for t in tt])
    lamepa=(lamepa/(lamepa.mean()))/T  # normalisation
    tlam=np.array([lam(t) for t in tt])
    tlam=(tlam/tlam.mean())/T  # normalisation
    # NOTE(review): this Figure is returned, but the plotting below goes
    # through pyplot's current figure — confirm this is intentional.
    fig=matplotlib.figure.Figure()
    FigureCanvas(fig)
    while True:
        z=np.array(nzchi(lam,mu,A,T,N))
        if (len(z) != 0):  # retry until the process survives
            w=z[:,1]  # phases of the surviving sample
            k=kde.gaussian_kde(w)
            print("tlam.sum()",tlam.sum(),"Total population after N periods",len(w))
            plt.hist(w,density=True,label="histogram")
            if estimnoyau:
                plt.plot(tt,k(tt),label="echantillon")
            #plt.plot(tt,[(1+np.cos((2*pi*t)/T))/T for t in tt],color="red")
            #plt.plot(tt,lamepa,color="green",label=r"stable composition density $\lambda(t) e^{A(t)}$ ")
            plt.plot(tt,lamepa,color="green",label=r"$\pi(0)(t)$")
            #plt.plot(tt,tlam,color="red",label=r"birth rate $\lambda(t)$")
            plt.plot(tt,tlam,color="red",label=r"birth rate $\lambda(t)$")
            #plt.plot(tt,tlam,color="red",label="birth rate")
            plt.title("Stable composition density")
            plt.legend()
            if image:
                plt.savefig("stablecompolbdsinusoid.pdf",bbox_inches='tight',dpi=150)
            break
    return(fig)
# Thursday 23 April: let us simplify by taking lambda constant and mu = 0, T = 1
def sisestimdenszchi(lzero,muzero,T,N):
    """Sanity-check variant with constant birth and death rates: compare
    the sampled phase density with lam(t)*exp(A(t)) and with lam itself.
    """
    def lam(t):
        # Constant birth rate.
        return(lzero)
    def primlam(t):
        r"""An antiderivative of ``lam``."""
        return (lzero*t)
    def mu(t):
        # Constant death rate.
        return(muzero)
    A=2*lzero+muzero  # dominating rate for the thinning in nzchi
    tt=np.linspace(0,T,100)
    # Theoretical density and birth rate, each normalised to sum to 100.
    lamepa=np.array([lam(t)*np.exp(primlam(t)) for t in tt])
    lamepa=(lamepa/(lamepa.sum()))*100
    tlam=np.array([lam(t) for t in tt])
    tlam=(tlam/tlam.sum())*100
    while True:
        z=np.array(nzchi(lam,mu,A,T,N))
        if (len(z) != 0):  # retry until the process survives
            w=z[:,1]  # phases of the surviving sample
            k=kde.gaussian_kde(w)
            print("tlam.sum()",tlam.sum(),"Taille de l'echantillon",len(w))
            plt.hist(w,density=True,label="histogramme")
            plt.plot(tt,k(tt),label="estim noyau")
            #plt.plot(tt,[(1+np.cos((2*pi*t)/T))/T for t in tt],color="red")
            plt.plot(tt,lamepa,color="green",label="densite")
            plt.plot(tt,tlam,color="red",label=r"$\lambda$")
            plt.legend()
            break
# Tuesday 18 April 2020
# An example of regime switching for Sylvain
def swreg(t, xzero=None, T=1, nbpts=50):
    r"""Compare a periodically switching linear ODE with its fixed regimes.

    The system follows matrix B during the first half of each period and
    its transpose BT during the second half; trajectories for the fixed
    matrix BT are plotted alongside for comparison and the figure is saved
    to ``switchingvsfixed.pdf``.

    Args:
        t: total integration time.
        xzero: initial condition (defaults to [1, 0]).
        T: switching period.
        nbpts: number of sample points per period.
    """
    # Bug fix: avoid the mutable default argument xzero=[1, 0].
    if xzero is None:
        xzero = [1, 0]
    B = (2 * np.log(2) / 3) * np.array([[-2, 2], [1, -1]])
    BT = B.transpose()

    def msisi(x, s):
        # Switching right-hand side: B on the first half-period, BT after.
        y = s / T
        M = B if (y - int(y) < 0.5) else BT
        return np.dot(M, x)

    def msisi1(x, s):
        # Fixed regime 1: always B.
        return np.dot(B, x)

    def msisi2(x, s):
        # Fixed regime 2: always BT.
        return np.dot(BT, x)

    timeint = np.linspace(0, t, 1 + int(t / T) * nbpts)
    z = np.array(odeint(msisi, xzero, timeint))
    z1 = np.array(odeint(msisi1, xzero, timeint))  # computed as before; not plotted
    z2 = np.array(odeint(msisi2, xzero, timeint))
    plt.plot(timeint, z[:, 0], label=" H switching")
    plt.plot(timeint, z[:, 1], label=" V switching")
    plt.plot(timeint, z2[:, 0], label="H fixed")
    plt.plot(timeint, z2[:, 1], label="V fixed")
    plt.legend()
    plt.savefig("switchingvsfixed.pdf", bbox_inches='tight', dpi=150)
####################################################################
########### 6 April 2022: modification of the figure
### addition of the periodic reproductive values v(tau)
def stablerepro(lzero=0.8,muzero=0.1,T=2,N=8,coeff=0.5,image=False):
    r"""coeff is the intensity of the sinusoidal modulation of the birth
    rate. We wait for the first time the process is not extinct, hence the
    while loop.
    Left panel: histogram of simulated phases vs the stable composition
    density and the birth rate. Right panel: periodic reproductive values
    v(tau) over two periods. Returns the total population after N periods.
    NOTE(review): relies on the module-level helper ``nzchi`` and on
    ``np``/``kde``/``plt``/``pi`` imported elsewhere in this file.
    """
    nbpts=100
    def lam(t):
        # sinusoidally modulated birth rate
        return(lzero*(1+coeff*np.cos(2*pi*t/T)))
    def primlam(t):
        r"An antiderivative of the preceding function."
        return (lzero*(t + coeff*(T/(2*pi))*np.sin((2*pi*t)/T)))
    def mu(t):
        # constant death rate
        return(muzero)
    # dominating rate: max of lam plus mu
    A=(1+coeff)*lzero+muzero
    tt=np.linspace(0,T,nbpts)
    lamepa=np.array([lam(t)*np.exp(primlam(t)) for t in tt])
    lamepa=(lamepa/(lamepa.mean()))/T #normalization
    tlam=np.array([lam(t) for t in tt])
    #tlam=(tlam/tlam.mean())/T #normalization
    #plt.figure(figsize=(10,6))
    fig,(ax1,ax2)=plt.subplots(1,2,sharey=True)
    # retry until the simulated process is not extinct
    while True:
        z=np.array(nzchi(lam,mu,A,T,N))
        if (len(z) != 0):
            w=z[:,1]
            k=kde.gaussian_kde(w)
            print("tlam.sum()",tlam.sum(),"Total population after N periods",len(w))
            ax1.hist(w,density=True,label="histogram")
            ax1.plot(tt,lamepa,color="green",label=r"$\pi(0)(t)$")
            ax1.plot(tt,tlam,color="red",label=r"birth rate $\lambda(t)$")
            ax1.set_title("Stable composition density",size=20)
            ax1.legend()
            break
    # right panel: reproductive values over two periods
    dtt=np.linspace(0,2*T,nbpts)
    vt=np.array([np.exp(-primlam(t)+lzero*t) for t in dtt])
    ax2.plot(dtt,vt,label=r"$v(\tau)$")
    ax2.set_title("Reproductive values",size=20)
    dtlam=np.array([lam(t) for t in dtt])
    #dtlam=(tlam/tlam.mean())/(2*T) #normalization
    ax2.plot(dtt,dtlam,color="red",label=r"birth rate $\lambda(t)$")
    ax2.legend()
    if image:
        plt.savefig("stablerepro.pdf",bbox_inches='tight',dpi=300)
    return(len(w))
| 36.379562 | 165 | 0.579856 |
ace6a405390018067a00413110462a404035ebbb | 6,362 | py | Python | zipline/data/bundles/yahoo.py | tianhm/zipline | 5343344929558ef42dc6ea75d433218471e91a0d | [
"Apache-2.0"
] | 18 | 2016-11-06T05:45:47.000Z | 2021-04-24T13:59:25.000Z | zipline/data/bundles/yahoo.py | tianhm/zipline | 5343344929558ef42dc6ea75d433218471e91a0d | [
"Apache-2.0"
] | 1 | 2021-03-20T05:45:32.000Z | 2021-03-20T05:45:32.000Z | zipline/data/bundles/yahoo.py | varunrai/zipline | 28060bf11fc34966438c24c688caf335bf15f1d7 | [
"Apache-2.0"
] | 7 | 2017-06-29T21:24:25.000Z | 2019-09-25T12:43:34.000Z | import os
import numpy as np
import pandas as pd
from pandas_datareader.data import DataReader
import requests
from zipline.utils.calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from .core import register
def _cachpath(symbol, type_):
return '-'.join((symbol.replace(os.path.sep, '_'), type_))
def yahoo_equities(symbols, start=None, end=None):
    """Create a data bundle ingest function from a set of symbols loaded from
    yahoo.
    Parameters
    ----------
    symbols : iterable[str]
        The ticker symbols to load data for.
    start : datetime, optional
        The start date to query for. By default this pulls the full history
        for the calendar.
    end : datetime, optional
        The end date to query for. By default this pulls the full history
        for the calendar.
    Returns
    -------
    ingest : callable
        The bundle ingest function for the given set of symbols.
    Examples
    --------
    This code should be added to ~/.zipline/extension.py
    .. code-block:: python
        from zipline.data.bundles import yahoo_equities, register
        symbols = (
            'AAPL',
            'IBM',
            'MSFT',
        )
        register('my_bundle', yahoo_equities(symbols))
    Notes
    -----
    The sids for each symbol will be the index into the symbols sequence.
    """
    # store this in memory so that we can reiterate over it
    symbols = tuple(symbols)
    def ingest(environ,
               asset_db_writer,
               minute_bar_writer,  # unused
               daily_bar_writer,
               adjustment_writer,
               calendar,
               start_session,
               end_session,
               cache,
               show_progress,
               output_dir,
               # pass these as defaults to make them 'nonlocal' in py2
               start=start,
               end=end):
        if start is None:
            start = start_session
        if end is None:
            end = None
        # pre-allocated per-sid metadata; filled in by _pricing_iter below
        metadata = pd.DataFrame(np.empty(len(symbols), dtype=[
            ('start_date', 'datetime64[ns]'),
            ('end_date', 'datetime64[ns]'),
            ('auto_close_date', 'datetime64[ns]'),
            ('symbol', 'object'),
        ]))
        def _pricing_iter():
            # Yields (sid, ohlcv_dataframe) pairs; sid is the position of the
            # symbol in the input tuple. Also populates `metadata` as a side
            # effect.
            sid = 0
            with maybe_show_progress(
                    symbols,
                    show_progress,
                    label='Downloading Yahoo pricing data: ') as it, \
                    requests.Session() as session:
                for symbol in it:
                    path = _cachpath(symbol, 'ohlcv')
                    try:
                        df = cache[path]
                    except KeyError:
                        df = cache[path] = DataReader(
                            symbol,
                            'yahoo',
                            start,
                            end,
                            session=session,
                        ).sort_index()
                    # the start date is the date of the first trade and
                    # the end date is the date of the last trade
                    start_date = df.index[0]
                    end_date = df.index[-1]
                    # The auto_close date is the day after the last trade.
                    ac_date = end_date + pd.Timedelta(days=1)
                    metadata.iloc[sid] = start_date, end_date, ac_date, symbol
                    # normalize Yahoo's column names to zipline's lowercase schema
                    df.rename(
                        columns={
                            'Open': 'open',
                            'High': 'high',
                            'Low': 'low',
                            'Close': 'close',
                            'Volume': 'volume',
                        },
                        inplace=True,
                    )
                    yield sid, df
                    sid += 1
        daily_bar_writer.write(_pricing_iter(), show_progress=show_progress)
        symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)
        # Hardcode the exchange to "YAHOO" for all assets and (elsewhere)
        # register "YAHOO" to resolve to the NYSE calendar, because these are
        # all equities and thus can use the NYSE calendar.
        metadata['exchange'] = "YAHOO"
        asset_db_writer.write(equities=metadata)
        # second pass: download split/dividend actions per symbol
        adjustments = []
        with maybe_show_progress(
                symbols,
                show_progress,
                label='Downloading Yahoo adjustment data: ') as it, \
                requests.Session() as session:
            for symbol in it:
                path = _cachpath(symbol, 'adjustment')
                try:
                    df = cache[path]
                except KeyError:
                    df = cache[path] = DataReader(
                        symbol,
                        'yahoo-actions',
                        start,
                        end,
                        session=session,
                    ).sort_index()
                df['sid'] = symbol_map[symbol]
                adjustments.append(df)
        adj_df = pd.concat(adjustments)
        adj_df.index.name = 'date'
        adj_df.reset_index(inplace=True)
        # split the combined actions frame into the two tables the
        # adjustment writer expects
        splits = adj_df[adj_df.action == 'SPLIT']
        splits = splits.rename(
            columns={'value': 'ratio', 'date': 'effective_date'},
        )
        splits.drop('action', axis=1, inplace=True)
        dividends = adj_df[adj_df.action == 'DIVIDEND']
        dividends = dividends.rename(
            columns={'value': 'amount', 'date': 'ex_date'},
        )
        dividends.drop('action', axis=1, inplace=True)
        # we do not have this data in the yahoo dataset
        dividends['record_date'] = pd.NaT
        dividends['declared_date'] = pd.NaT
        dividends['pay_date'] = pd.NaT
        adjustment_writer.write(splits=splits, dividends=dividends)
    return ingest
# bundle used when creating test data
register(
'.test',
yahoo_equities(
(
'AMD',
'CERN',
'COST',
'DELL',
'GPS',
'INTC',
'MMM',
'AAPL',
'MSFT',
),
pd.Timestamp('2004-01-02', tz='utc'),
pd.Timestamp('2015-01-01', tz='utc'),
),
)
register_calendar_alias("YAHOO", "NYSE")
| 31.186275 | 78 | 0.497957 |
ace6a474afe59baaa80b604446d195c305d7d51c | 97 | py | Python | complex/complex_test.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | complex/complex_test.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | complex/complex_test.py | baijianhua/pymath | a96ebbd8c8ac646c436d8bf33cb01764a948255d | [
"MIT"
] | null | null | null | import numpy
z = 1 + 1j*3
z1 = 2 + 1j*4
print(z * z)
print(z)
print(z**2)
print(numpy.e ** z)
| 8.818182 | 19 | 0.556701 |
ace6a4a176b80dd81a0623d83a80db2523537952 | 310 | py | Python | Codewars/find the odd int/find_the_odd_int.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | Codewars/find the odd int/find_the_odd_int.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | Codewars/find the odd int/find_the_odd_int.py | adoreblvnk/code_solutions | 03e4261241dd33a4232dabe0e9450d344f7ccc6d | [
"MIT"
] | null | null | null | def find_it(seq):
unique_seq = set(seq)
for i in unique_seq:
no_of_appearances = seq.count(i)
if no_of_appearances % 2 == 1:
return i
# solution
def find_it_solution(seq):
return [x for x in set(seq) if seq.count(x) % 2][0]
print(find_it_solution([1, 1, 2, 3, 2]))
| 19.375 | 55 | 0.603226 |
ace6a5100e73e428aec07ac579295e5354012717 | 2,204 | py | Python | src/aecgviewer/backgroundworks.py | FDA/aecgviewer | f93e27971968581149feb1f11b078b98006c9ed3 | [
"CC0-1.0"
] | 4 | 2021-11-30T16:19:22.000Z | 2021-12-28T02:41:43.000Z | src/aecgviewer/backgroundworks.py | FDA/aecgviewer | f93e27971968581149feb1f11b078b98006c9ed3 | [
"CC0-1.0"
] | null | null | null | src/aecgviewer/backgroundworks.py | FDA/aecgviewer | f93e27971968581149feb1f11b078b98006c9ed3 | [
"CC0-1.0"
] | null | null | null | """This submodule implements classes to support execution of asyncronous taks.
See authors, license and disclaimer at the top level directory of this project.
"""
from PySide2.QtCore import (QRunnable, Signal, Slot, QObject)
import sys
import traceback
class WorkerSignals(QObject):
'''
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` ith processed element
`int` total number of elements to process
'''
finished = Signal()
error = Signal(tuple)
result = Signal(object)
progress = Signal(int, int)
class Worker(QRunnable):
"""
Worker thread
Inherits from QRunnable to handler worker thread setup, signals and wrap-up
Args:
fn (function): The function callback to run on this worker thread.
Supplied args and kwargs will be passed through to the
runner.
args: Arguments to pass to the callback function
kwargs: Keywords to pass to the callback function
"""
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the progress callback to our kwargs
self.kwargs['progress_callback'] = self.signals.progress
@Slot()
def run(self):
"""
Initialise the runner function with passed args, kwargs.
"""
# Retrieve args/kwargs here; and fire processing using them
try:
result = self.fn(*self.args, **self.kwargs)
except Exception as ex:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result
finally:
self.signals.finished.emit() # Done
| 26.878049 | 79 | 0.625681 |
ace6a51e2b2f8ff4dd8fb2dc354dbb18e4f9f3cc | 807 | py | Python | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/strings/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 9 | 2019-11-22T04:58:40.000Z | 2022-02-26T16:47:28.000Z | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/strings/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | null | null | null | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/strings/errors.py | bidhata/EquationGroupLeaks | 1ff4bc115cb2bd5bf2ed6bf769af44392926830c | [
"Unlicense"
] | 8 | 2017-09-27T10:31:18.000Z | 2022-01-08T10:30:46.000Z | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_UNAVAILABLE_FULLPATH = mcl.status.framework.ERR_START + 1
ERR_UNABLE_TO_OPEN_FILE = mcl.status.framework.ERR_START + 2
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 3
ERR_NO_UNICODE = mcl.status.framework.ERR_START + 4
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_UNAVAILABLE_FULLPATH: 'Unable to get the full path',
ERR_UNABLE_TO_OPEN_FILE: 'Unable to open file',
ERR_MARSHAL_FAILED: 'Marshal of return data failed',
ERR_NO_UNICODE: "This platform doesn't support UNICODE"
} | 44.833333 | 67 | 0.778191 |
ace6a5892851620763f7a1d0b1c2dec8c77b330b | 4,345 | py | Python | mlbase/hyper_param.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | null | null | null | mlbase/hyper_param.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | 2 | 2018-09-23T18:39:01.000Z | 2018-09-24T18:02:21.000Z | mlbase/hyper_param.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | null | null | null | import os
import json
import yaml
META_DATA_COLLECTION = "meta"
GLOBAL_HYPER_PARAM_COLLECTION = "global"
class CollectionMissing(Exception):
def __init__(self, key):
message = f"設定ファイルで、{key}の値を定める必要があります。"
super().__init__(message)
def _test_collection(obj, collection):
assert obj is not None and collection in obj, CollectionMissing(collection)
class ItemMissing(Exception):
def __init__(self, key, collection):
message = f"設定ファイルの{collection}で、{key}の値を定める必要があります。"
super().__init__(message)
def _test_item(obj, item, collection):
    # Assert that the collection contains the given item key.
    # NOTE(review): same caveats as _test_collection — `assert` is stripped
    # under `python -O`, and ItemMissing is only the assertion message.
    assert obj is not None and item in obj, ItemMissing(item, collection)
class HyperParamManager:
    """Facade over a HyperParamStore; refuses reads until load() succeeds."""
    def __init__(self):
        # name-mangled attributes: only touched through the methods below
        self.__loaded = False
        self.__data_store = None
    def load(self, obj):
        # obj is a Python object parsed from JSON or YAML
        _test_collection(obj, META_DATA_COLLECTION)
        self.__data_store = self.__select_store(obj[META_DATA_COLLECTION])
        self.__data_store.load(obj)
        self.__loaded = True
    def get_all(self):
        # Return the whole loaded object (requires a prior load()).
        assert self.__loaded
        return self.__data_store.get_all()
    def get_collection(self, collection):
        assert self.__loaded
        return self.__data_store.get_collection(collection)
    def get(self, name, collection, dtype):
        assert self.__loaded
        return self.__data_store.get(name, collection, dtype)
    def __select_store(self, meta_data):
        # Currently always returns a plain HyperParamStore; the metadata is
        # only used for the verbose flag.
        if meta_data is not None and "verbose" in meta_data and meta_data["verbose"]:
            print(meta_data)  # TODO: route this to a proper logger
        return HyperParamStore()
class HyperParamStore:
    """Thin wrapper around the parsed config object with validated access."""
    def __init__(self):
        self.__data = None
    def load(self, obj):
        self.__data = obj
    def get_all(self):
        return self.__data
    def get_collection(self, collection):
        # Raises (via assert) when the collection key is missing.
        _test_collection(self.__data, collection)
        return self.__data[collection]
    def get(self, name, collection, dtype):
        # Fetch the value named `name` inside `collection`, optionally
        # converting it with `dtype` (any callable, e.g. int or float).
        obj = self.get_collection(collection)
        _test_item(obj, name, collection)
        value = obj[name]
        if dtype is not None:
            value = dtype(value)
        return value
def _detect_loader(input_path):
# ファイルをロードする方法を決める
ext = os.path.splitext(input_path)[-1]
if ext == ".json":
return lambda file_path: json.load(open(file_path))
elif ext == ".yaml" or ext == ".yml":
return lambda file_path: yaml.load(open(file_path))
else:
raise Exception(
"cannot open {input_path}".format(input_path=input_path))
def _load_by_ext(input_path):
    """Load *input_path* with the loader matching its extension."""
    return _detect_loader(input_path)(input_path)
def _detect_saver(output_path):
# ファイルを保存する方法を決める
ext = os.path.splitext(output_path)[-1]
if ext == ".json":
return lambda obj, file_path: json.load(open(output_path))
elif ext == ".yaml" or ext == ".yml":
return lambda obj, file_path: yaml.load(open(output_path))
else:
raise Exception(
"cannot open {output_path}".format(output_path=output_path))
def _save_object_by_ext(obj, output_path):
    """Persist *obj* to *output_path* with the saver matching its extension."""
    return _detect_saver(output_path)(obj, output_path)
_global_hyper_param_manager = HyperParamManager()
def open_hyper_param(input_path):
    """
    Load hyper-parameters into the global manager.
    input_path: path to a JSON or YAML configuration file
    """
    obj = _load_by_ext(input_path)
    _global_hyper_param_manager.load(obj)
def save_hyper_param(output_path):
    """
    Save the currently loaded hyper-parameters.
    output_path: destination path (format chosen by extension)
    """
    obj = _global_hyper_param_manager.get_all()
    _save_object_by_ext(obj, output_path)
def get_hyper_param(name, collection=GLOBAL_HYPER_PARAM_COLLECTION,
                    dtype=None):
    """
    Fetch a hyper-parameter from the global manager.
    name: parameter name
    collection: hyper-parameter collection (category)
    dtype: optional callable applied to the raw value (e.g. int, float)
    """
    return _global_hyper_param_manager.get(name, collection, dtype)
def get_all():
    """Return the full hyper-parameter object held by the global manager."""
    return _global_hyper_param_manager.get_all()
def get_hyper_param_or_default(name,
                               collection=GLOBAL_HYPER_PARAM_COLLECTION,
                               dtype=None,
                               default=None):
    """Like get_hyper_param, but return *default* when *name* is absent
    from *collection* instead of failing."""
    if name not in _global_hyper_param_manager.get_collection(collection):
        return default
    return _global_hyper_param_manager.get(name, collection, dtype)
ace6a666e44782664cd443ad81e77a7ae7397c03 | 10,313 | py | Python | cartoframes/credentials.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/credentials.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/credentials.py | oss-spanish-geoserver/cartoframes | 2e1a1aa785180f8c031688012b3c2663f7c7edc7 | [
"BSD-3-Clause"
] | null | null | null | """Credentials management for cartoframes usage."""
import appdirs
import os
import json
import sys
import warnings
if sys.version_info >= (3, 0):
from urllib.parse import urlparse
else:
from urlparse import urlparse
_USER_CONFIG_DIR = appdirs.user_config_dir('cartoframes')
_DEFAULT_PATH = os.path.join(_USER_CONFIG_DIR,
'cartocreds.json')
class Credentials(object):
    """Credentials class for managing and storing user CARTO credentials. The
    arguments are listed in order of precedence: :obj:`Credentials` instances
    are first, `key` and `base_url`/`username` are taken next, and
    `config_file` (if given) is taken last. If no arguments are passed, then
    there will be an attempt to retrieve credentials from a previously saved
    session. One of the above scenarios needs to be met to successfully
    instantiate a :obj:`Credentials` object.
    Args:
        creds (:obj:`cartoframes.Credentials`, optional): Credentials instance
        key (str, optional): API key of user's CARTO account
        username (str, optional): Username of CARTO account
        base_url (str, optional): Base URL used for API calls. This is usually
            of the form `https://eschbacher.carto.com/` for user `eschbacher`.
            On premises installations (and others) have a different URL
            pattern.
        cred_file (str, optional): Pull credentials from a stored file. If this
            and all other args are not entered, Credentials will attempt to
            load a user config credentials file that was previously set with
            Credentials(...).save().
    Raises:
        RuntimeError: If not enough credential information is passed and no
            stored credentials file is found, this error will be raised.
    Example:
        .. code::
            from cartoframes import Credentials, CartoContext
            creds = Credentials(key='abcdefg', username='eschbacher')
            cc = CartoContext(creds=creds)
    """
    def __init__(self, creds=None, key=None, username=None, base_url=None,
                 cred_file=None):
        self._key = None
        self._username = None
        self._base_url = None
        if creds and isinstance(creds, Credentials):
            # copy constructor: clone another Credentials instance
            self.key(key=creds.key())
            self.username(username=creds.username())
            self.base_url(base_url=creds.base_url())
        elif (key and username) or (key and base_url):
            self.key(key=key)
            self.username(username=username)
            if base_url:
                self.base_url(base_url=base_url)
            else:
                # derive the cloud base URL from the username
                self.base_url(
                    base_url='https://{}.carto.com/'.format(self._username)
                )
        elif cred_file:
            self._retrieve(cred_file)
        else:
            # last resort: previously saved session in the default location
            try:
                self._retrieve(_DEFAULT_PATH)
            except Exception:
                raise RuntimeError(
                    'Could not load CARTO credentials. Try setting them with '
                    'the `key` and `username` arguments.'
                )
        self._norm_creds()

    def __repr__(self):
        return ('Credentials(username={username}, '
                'key={key}, '
                'base_url={base_url})').format(username=self._username,
                                               key=self._key,
                                               base_url=self._base_url)

    def __eq__(self, obj):
        # Fix: comparing against a non-Credentials object previously raised
        # AttributeError; deferring with NotImplemented yields False instead.
        if not isinstance(obj, Credentials):
            return NotImplemented
        return (self._key == obj._key
                and self._username == obj._username
                and self._base_url == obj._base_url)

    def _norm_creds(self):
        """Standardize credentials"""
        if self._base_url:
            # drop trailing slashes so equality and URL joining are consistent
            self._base_url = self._base_url.strip('/')

    def save(self, config_loc=None):
        """Saves current user credentials to user directory.
        Args:
            config_loc (str, optional): Location where credentials are to be
                stored. If no argument is provided, they are sent to the
                default location.
        Example:
            .. code::
                from cartoframes import Credentials
                creds = Credentials(username='eschbacher', key='abcdefg')
                creds.save()  # save to default location
        """
        # Fix: `config_loc` was previously ignored and the default path was
        # always written. Honor the argument and create its directory.
        dest = config_loc or _DEFAULT_PATH
        dest_dir = os.path.dirname(dest)
        if dest_dir and not os.path.exists(dest_dir):
            # create directory if not exists
            os.makedirs(dest_dir)
        with open(dest, 'w') as f:
            json.dump({'key': self._key, 'base_url': self._base_url,
                       'username': self._username}, f)

    def _retrieve(self, config_file=None):
        """Retrieves credentials from a file. Defaults to the user config
        directory"""
        with open(config_file or _DEFAULT_PATH, 'r') as f:
            creds = json.load(f)
        self._key = creds.get('key')
        self._base_url = creds.get('base_url')
        self._username = creds.get('username')

    def delete(self, config_file=None):
        """Deletes the credentials file specified in `config_file`. If no
        file is specified, it deletes the default user credential file.
        Args:
            config_file (str): Path to configuration file. Defaults to delete
                the user default location if `None`.
        .. Tip::
            To see if there is a default user credential file stored, do the
            following::
                >>> creds = Credentials()
                >>> print(creds)
                Credentials(username=eschbacher, key=abcdefg,
                        base_url=https://eschbacher.carto.com/)
        """
        path_to_remove = config_file or _DEFAULT_PATH
        try:
            os.remove(path_to_remove)
            print('Credentials at {} successfully removed.'.format(
                path_to_remove))
        except OSError:
            # missing file is not fatal; just inform the caller
            warnings.warn('No credential file found at {}.'.format(
                path_to_remove))

    def set(self, key=None, username=None, base_url=None):
        """Update the credentials of a Credentials instance instead with new
        values.
        Args:
            key (str): API key of user account. Defaults to previous value if
                not specified.
            username (str): User name of account. This parameter is optional if
                `base_url` is not specified, but defaults to the previous
                value if not set.
            base_url (str): Base URL of user account. This parameter is
                optional if `username` is specified and on CARTO's
                cloud-based account. Generally of the form
                ``https://your_user_name.carto.com/`` for cloud-based accounts.
                If on-prem or otherwise, contact your admin.
        Example:
            .. code::
                from cartoframes import Credentials
                # load credentials saved in previous session
                creds = Credentials()
                # set new API key
                creds.set(key='new_api_key')
                # save new creds to default user config directory
                creds.save()
        Note:
            If the `username` is specified but the `base_url` is not, the
            `base_url` will be updated to ``https://<username>.carto.com/``.
        """
        # reuse the constructor's precedence logic
        self.__init__(key=(key or self._key),
                      username=(username or self._username),
                      base_url=base_url)

    def key(self, key=None):
        """Return or set API `key`.
        Args:
            key (str, optional): If set, updates the API key, otherwise returns
                current API key.
        Example:
            .. code::
                >>> from cartoframes import Credentials
                # load credentials saved in previous session
                >>> creds = Credentials()
                # returns current API key
                >>> creds.key()
                'abcdefg'
                # updates API key with new value
                >>> creds.key('new_api_key')
        """
        if key:
            self._key = key
        else:
            return self._key

    def username(self, username=None):
        """Return or set `username`.
        Args:
            username (str, optional): If set, updates the `username`. Otherwise
                returns current `username`.
        Note:
            This does not update the `base_url` attribute. Use
            `Credentials.set` to have that updated with `username`.
        Example:
            .. code::
                >>> from cartoframes import Credentials
                # load credentials saved in previous session
                >>> creds = Credentials()
                # returns current username
                >>> creds.username()
                'eschbacher'
                # updates username with new value
                >>> creds.username('new_username')
        """
        if username:
            self._username = username
        else:
            return self._username

    def base_url(self, base_url=None):
        """Return or set `base_url`.
        Args:
            base_url (str, optional): If set, updates the `base_url`. Otherwise
                returns current `base_url`.
        Note:
            This does not update the `username` attribute. Separately update
            the username with ``Credentials.username`` or update `base_url` and
            `username` at the same time with ``Credentials.set``.
        Example:
            .. code::
                >>> from cartoframes import Credentials
                # load credentials saved in previous session
                >>> creds = Credentials()
                # returns current base_url
                >>> creds.base_url()
                'https://eschbacher.carto.com/'
                # updates base_url with new value
                >>> creds.base_url('new_base_url')
        """
        if base_url:
            # POSTs need to be over HTTPS (e.g., Import API reverts to a GET)
            if urlparse(base_url).scheme != 'https':
                raise ValueError(
                    '`base_url`s need to be over `https`. Update your '
                    '`base_url`.'
                )
            self._base_url = base_url
        else:
            return self._base_url
ace6a6d1f2e1befa4a9675b1eec688d870f00d65 | 631 | py | Python | deepthought/bricks/swapaxes.py | maosenGao/openmiir-rl-2016 | d2e5744b1fa503a896994d8a70b3ca45d521db14 | [
"BSD-3-Clause"
] | 8 | 2017-01-27T11:08:12.000Z | 2022-02-07T15:54:39.000Z | deepthought/bricks/swapaxes.py | maosenGao/openmiir-rl-2016 | d2e5744b1fa503a896994d8a70b3ca45d521db14 | [
"BSD-3-Clause"
] | null | null | null | deepthought/bricks/swapaxes.py | maosenGao/openmiir-rl-2016 | d2e5744b1fa503a896994d8a70b3ca45d521db14 | [
"BSD-3-Clause"
] | 2 | 2017-01-26T23:49:55.000Z | 2019-10-30T05:31:35.000Z | from theano import tensor
from blocks.bricks import Brick
from blocks.bricks.base import application
class SwapAxes(Brick):
    """Blocks brick that swaps two axes of its input tensor.
    Args:
        axis1 (int): first axis to swap
        axis2 (int): second axis to swap
        debug (bool): if True, print the output's shape at runtime
    """
    def __init__(self, axis1, axis2, debug=False, **kwargs):
        super(SwapAxes, self).__init__(**kwargs)
        self.axis1 = axis1
        self.axis2 = axis2
        self.debug = debug
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Return `input_` with `axis1` and `axis2` exchanged."""
        output = tensor.swapaxes(input_, self.axis1, self.axis2)
        if self.debug:
            import theano
            # wraps the variable so its shape is printed when evaluated
            output = theano.printing.Print('output:', attrs=('shape',))(output)
        return output
ace6a7648d3775f8ed88ccac7d11457df472b98f | 6,037 | py | Python | snuba/query/validation/signature.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | snuba/query/validation/signature.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | snuba/query/validation/signature.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import ABC
from datetime import date, datetime
from typing import Sequence, Set, Type, Union
from snuba.clickhouse.columns import (
UUID,
Array,
ColumnSet,
Date,
DateTime,
FixedString,
Float,
IPv4,
IPv6,
Nullable,
String,
UInt,
)
from snuba.query.expressions import (
Expression,
Literal as LiteralType,
)
from snuba.query.matchers import (
Any as AnyMatcher,
Column as ColumnMatcher,
Literal as LiteralMatcher,
Param,
)
from snuba.query.validation import FunctionCallValidator, InvalidFunctionCall
logger = logging.getLogger(__name__)
class ParamType(ABC):
    """Base class for a single function-argument type check."""
    def validate(self, expression: Expression, schema: ColumnSet) -> None:
        raise NotImplementedError
class Any(ParamType):
    """Parameter type that accepts any expression (no validation)."""
    def validate(self, expression: Expression, schema: ColumnSet) -> None:
        return
    def __str__(self) -> str:
        return "Any"
COLUMN_PATTERN = ColumnMatcher(
table_name=None, column_name=Param("column_name", AnyMatcher(str)),
)
LITERAL_PATTERN = LiteralMatcher()
AllowedTypes = Union[
Type[Array],
Type[String],
Type[UUID],
Type[IPv4],
Type[IPv6],
Type[FixedString],
Type[UInt],
Type[Float],
Type[Date],
Type[DateTime],
]
AllowedScalarTypes = Union[
Type[None],
Type[bool],
Type[str],
Type[float],
Type[int],
Type[date],
Type[datetime],
]
class Column(ParamType):
    """
    Validates that the type of a Column expression is in a set of
    allowed types.
    If the expression provided is not a Column, it accepts it.
    We may consider later whether we want to enforce only column
    expressions can be passed as arguments in certain functions.
    This class discriminates between Nullable columns and non Nullable.
    If the allow_nullable field is True this will accept both; if it
    is False it will require non nullable columns.
    """
    def __init__(self, types: Set[AllowedTypes], allow_nullable: bool = True) -> None:
        self.__valid_types = types
        self.__allow_nullable = allow_nullable
    def __str__(self) -> str:
        return f"{'Nullable ' if self.__allow_nullable else ''}{self.__valid_types}"
    def validate(self, expression: Expression, schema: ColumnSet) -> None:
        match = COLUMN_PATTERN.match(expression)
        if match is None:
            # not a column reference: nothing to validate
            return
        column_name = match.string("column_name")
        column = schema.get(column_name)
        if column is None:
            # TODO: We cannot raise exceptions if the column is not present
            # on the schema just yet because the current logical schemas are
            # sadly not complete. Fix them and then raise an exception in this
            # case.
            return
        nullable = column.type.has_modifier(Nullable)
        if not isinstance(column.type, tuple(self.__valid_types)) or (
            nullable and not self.__allow_nullable
        ):
            raise InvalidFunctionCall(
                (
                    f"Illegal type {'Nullable ' if nullable else ''}{str(column.type)} "
                    f"of argument `{column_name}`. Required types {self.__valid_types}"
                )
            )
class Literal(ParamType):
    """
    Validates that the type of a Literal expression is in a set of
    allowed types.
    If the expression provided is not a Literal, it accepts it.
    We may consider later whether we want to enforce only literal
    expressions can be passed as arguments in certain functions.
    """
    def __init__(
        self, types: Set[AllowedScalarTypes], allow_nullable: bool = False
    ) -> None:
        self.__valid_types = types
        if allow_nullable:
            # allowing nullable literals means accepting a None value
            self.__valid_types.add(type(None))
    def __str__(self) -> str:
        return f"{self.__valid_types}"
    def validate(self, expression: Expression, schema: ColumnSet) -> None:
        if not isinstance(expression, LiteralType):
            # not a literal: nothing to validate
            return None
        value = expression.value
        if not isinstance(value, tuple(self.__valid_types)):
            raise InvalidFunctionCall(
                f"Illegal type {type(value)} of argument {value}. Required types {self.__valid_types}"
            )
class SignatureValidator(FunctionCallValidator):
    """
    Validates the signature of the function call.
    The signature is defined as a sequence of ParamType objects, checked
    positionally against the call's parameters.
    """
    def __init__(
        self,
        param_types: Sequence[ParamType],
        allow_extra_params: bool = False,
        enforce: bool = True,
    ):
        self.__param_types = param_types
        # If True, this signature allows extra parameters after those
        # specified by param_types. The extra parameters are not
        # validated.
        self.__allow_extra_params = allow_extra_params
        # If False it would simply log invalid functions instead of raising
        # exceptions.
        self.__enforce = enforce
    def validate(self, parameters: Sequence[Expression], schema: ColumnSet) -> None:
        try:
            self.__validate_impl(parameters, schema)
        except InvalidFunctionCall as exception:
            if self.__enforce:
                raise exception
            else:
                # log-only mode: surface the problem without failing the query
                logger.warning(
                    f"Query validation exception. Validator: {self}", exc_info=True
                )
    def __validate_impl(
        self, parameters: Sequence[Expression], schema: ColumnSet
    ) -> None:
        # arity checks first, then per-position type checks
        if len(parameters) < len(self.__param_types):
            raise InvalidFunctionCall(
                f"Too few arguments. Required {[str(t) for t in self.__param_types]}"
            )
        if not self.__allow_extra_params and len(parameters) > len(self.__param_types):
            raise InvalidFunctionCall(
                f"Too many arguments. Required {[str(t) for t in self.__param_types]}"
            )
        # zip stops at the shorter sequence, so extra params (when allowed)
        # are silently skipped
        for validator, param in zip(self.__param_types, parameters):
            validator.validate(param, schema)
| 29.593137 | 102 | 0.641875 |
ace6a77bd1cbfecad11009037b197b90edc67d4e | 3,874 | py | Python | tests/test_state.py | CSeq/manticore | 6133a0e2ed98de6a58f3bf574498ec320ccbc43e | [
"Apache-2.0"
] | null | null | null | tests/test_state.py | CSeq/manticore | 6133a0e2ed98de6a58f3bf574498ec320ccbc43e | [
"Apache-2.0"
] | null | null | null | tests/test_state.py | CSeq/manticore | 6133a0e2ed98de6a58f3bf574498ec320ccbc43e | [
"Apache-2.0"
] | 1 | 2021-12-26T12:57:01.000Z | 2021-12-26T12:57:01.000Z | import unittest
from manticore.core.state import State
from manticore.core.smtlib import BitVecVariable
from manticore.core.smtlib import ConstraintSet
from manticore.platforms import linux
class FakeMemory(object):
    """Minimal stub of a Manticore memory object exposing `constraints`."""
    def __init__(self):
        self._constraints = None
    @property
    def constraints(self):
        return self._constraints
class FakeCpu(object):
    """Stub CPU that owns a FakeMemory instance."""
    def __init__(self):
        self._memory = FakeMemory()
    @property
    def memory(self):
        return self._memory
class FakePlatform(object):
    """Stub platform with a single FakeCpu, mirroring the platform API used
    by State in these tests."""
    def __init__(self):
        self._constraints = None
        self.procs = [FakeCpu()]
    @property
    def current(self):
        # the currently scheduled processor (always the single stub CPU)
        return self.procs[0]
    @property
    def constraints(self):
        return self._constraints
class StateTest(unittest.TestCase):
    """Unit tests for manticore's State: solving, forking and branch bookkeeping."""

    def setUp(self):
        # Build a fresh state around a real Linux platform for each test.
        platform = linux.Linux('/bin/ls')
        self.state = State(ConstraintSet(), platform)

    def test_solve_one(self):
        """solve_one returns the single value satisfying the constraints."""
        val = 42
        expr = BitVecVariable(32, 'tmp')
        self.state.constrain(expr == val)
        solved = self.state.solve_one(expr)
        self.assertEqual(solved, val)

    def test_solve_n(self):
        """solve_n enumerates all values when the domain is small enough."""
        expr = BitVecVariable(32, 'tmp')
        self.state.constrain(expr > 4)
        self.state.constrain(expr < 7)
        solved = self.state.solve_n(expr, 2)
        self.assertEqual(solved, [5, 6])

    def test_solve_n2(self):
        """solve_n returns exactly `nsolves` values when more are feasible."""
        expr = BitVecVariable(32, 'tmp')
        self.state.constrain(expr > 4)
        self.state.constrain(expr < 100)
        solved = self.state.solve_n(expr, 5)
        self.assertEqual(len(solved), 5)

    def test_policy_one(self):
        """Concretizing with the ONE policy yields a single in-range value."""
        expr = BitVecVariable(32, 'tmp')
        self.state.constrain(expr > 0)
        self.state.constrain(expr < 100)
        solved = self.state.concretize(expr, 'ONE')
        self.assertEqual(len(solved), 1)
        # Py3 fix: `xrange` does not exist in Python 3; `range` behaves
        # identically for membership tests on both Python 2 and 3.
        self.assertIn(solved[0], range(100))

    def test_state(self):
        """Forking a state must not leak new constraints into the parent."""
        constraints = ConstraintSet()
        initial_state = State(constraints, FakePlatform())
        arr = initial_state.symbolicate_buffer('+' * 100, label='SYMBA')
        initial_state.constrain(arr[0] > 0x41)
        self.assertTrue(len(initial_state.constraints.declarations) == 1)
        with initial_state as new_state:
            self.assertTrue(len(initial_state.constraints.declarations) == 1)
            self.assertTrue(len(new_state.constraints.declarations) == 1)
            arrb = new_state.symbolicate_buffer('+' * 100, label='SYMBB')
            # Declaring a buffer alone adds no constraint declarations.
            self.assertTrue(len(initial_state.constraints.declarations) == 1)
            self.assertTrue(len(new_state.constraints.declarations) == 1)
            new_state.constrain(arrb[0] > 0x42)
            # Only the child picks up the new declaration...
            self.assertTrue(len(new_state.constraints.declarations) == 2)
        # ...and the parent stays untouched after the fork context closes.
        self.assertTrue(len(initial_state.constraints.declarations) == 1)

    def test_new_symbolic_buffer(self):
        """new_symbolic_buffer honors the requested byte length."""
        length = 64
        expr = self.state.new_symbolic_buffer(length)
        self.assertEqual(len(expr), length)

    def test_new_symbolic_value(self):
        """new_symbolic_value produces a bitvector of the requested width."""
        length = 64
        expr = self.state.new_symbolic_value(length)
        self.assertEqual(expr.size, length)

    def test_new_bad_symbolic_value(self):
        """Unsupported bit widths are rejected."""
        length = 62
        with self.assertRaises(Exception):
            expr = self.state.new_symbolic_value(length)

    def test_record_branches(self):
        """record_branches counts each (source, destination) edge taken."""
        branch = 0x80488bb
        target = 0x8048997
        fallthrough = 0x80488c1
        self.state.last_pc = (0, branch)
        self.state.record_branches([target, fallthrough])
        self.assertEqual(self.state.branches[(branch, target)], 1)
        self.assertEqual(self.state.branches[(branch, fallthrough)], 1)
        self.state.record_branches([target, fallthrough])
        self.assertEqual(self.state.branches[(branch, target)], 2)
        self.assertEqual(self.state.branches[(branch, fallthrough)], 2)
| 30.503937 | 79 | 0.646618 |
ace6a80d57de59725c5dbba356e31c98446ba214 | 18,833 | py | Python | code/src/lm/BLANC/pytorch_pretrained_bert/tokenization.py | noble6emc2/MoCo-SSPT | e6d7cf3f0a3b5a467318dfc32096e4929adbe646 | [
"MIT"
] | null | null | null | code/src/lm/BLANC/pytorch_pretrained_bert/tokenization.py | noble6emc2/MoCo-SSPT | e6d7cf3f0a3b5a467318dfc32096e4929adbe646 | [
"MIT"
] | null | null | null | code/src/lm/BLANC/pytorch_pretrained_bert/tokenization.py | noble6emc2/MoCo-SSPT | e6d7cf3f0a3b5a467318dfc32096e4929adbe646 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright (c) 2019, Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
logger = logging.getLogger(__name__)
# Download URLs for the vocabulary file shipped with each pretrained model.
PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
    # SpanBERT variants reuse the corresponding BERT vocabularies.
    'spanbert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
    'spanbert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt"
}
# Maximum sequence length for each model; used by from_pretrained() to cap
# the tokenizer's `max_len`.
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'spanbert-base-cased': 512,
    'spanbert-large-cased': 512
}
# File name used when a directory (rather than a file) is given for the vocab.
VOCAB_NAME = 'vocab.txt'
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index mapping."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # One token per line; the line number (0-based) is the token id.
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses runs of any whitespace.
    return stripped.split() if stripped else []
class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece"""

    def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]", "[unused1]")):
        """Constructs a BertTokenizer.

        Args:
          vocab_file: Path to a one-wordpiece-per-line vocabulary file
          do_lower_case: Whether to lower case the input
                         Only has an effect when do_basic_tokenize=True
          do_basic_tokenize: Whether to do basic tokenization before wordpiece.
          max_len: An artificial maximum length to truncate tokenized sequences to;
                         Effective maximum length is always the minimum of this
                         value (if specified) and the underlying BERT model's
                         sequence length.
          never_split: List of tokens which will never be split during tokenization.
                         Only has an effect when do_basic_tokenize=True
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token, used by convert_ids_to_tokens().
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        # int(1e12) acts as "effectively unlimited" when max_len is unset.
        self.max_len = max_len if max_len is not None else int(1e12)

    def tokenize(self, text):
        """Split `text` into wordpiece tokens (basic tokenization first, if enabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        """Converts a sequence of tokens into ids using the vocab."""
        ids = []
        for token in tokens:
            ids.append(self.vocab[token])
        if len(ids) > self.max_len:
            # Only warns; the sequence is NOT truncated here.
            logger.warning(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids):
        """Converts a sequence of ids in wordpiece tokens using the vocab."""
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file."""
        # NOTE(review): if vocab_path is NOT a directory, vocab_file is never
        # assigned and the open() below raises NameError — confirm callers
        # always pass a directory.
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Write tokens ordered by id, one per line; warn on index gaps.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                                   " Please check that the vocabulary is not corrupted!".format(vocab_file))
                    index = token_index
                writer.write(token + u'\n')
                index += 1
        return vocab_file

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            # Force do_lower_case to agree with the cased/uncased model name.
            if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                               "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                               "you may want to check this behavior.")
                kwargs['do_lower_case'] = False
            elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                               "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                               "but you may want to check this behavior.")
                kwargs['do_lower_case'] = True
        else:
            vocab_file = pretrained_model_name_or_path
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
        if resolved_vocab_file == vocab_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self,
                 do_lower_case=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]", "[unused1]")):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
          never_split: Tokens kept verbatim (no lower-casing or accent
              stripping, and never split on punctuation).
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        # Join and re-split so punctuation pieces become separate tokens.
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposes accented characters into base char + combining
        # mark (category "Mn"), which is then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Each punctuation character becomes its own token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    @staticmethod
    def _is_chinese_char(cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True
        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement char and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization (greedy longest-match-first)."""

    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the given
        vocabulary; continuation pieces carry a "##" prefix.

        For example:
          input = "unaffable"
          output = ["un", "##aff", "##able"]

        Args:
          text: A single token or whitespace separated tokens. This should
            have already been passed through `BasicTokenizer`.

        Returns:
          A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(text):
            # Over-long words are mapped straight to the unknown token.
            if len(word) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            position = 0
            matched_all = True
            while position < len(word):
                end = len(word)
                current_piece = None
                # Shrink the candidate suffix until it appears in the vocab.
                while position < end:
                    candidate = word[position:end]
                    if position > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        current_piece = candidate
                        break
                    end -= 1
                if current_piece is None:
                    # No prefix of the remainder is in the vocab: give up.
                    matched_all = False
                    break
                pieces.append(current_piece)
                position = end
            if matched_all:
                output_tokens.extend(pieces)
            else:
                output_tokens.append(self.unk_token)
        return output_tokens
def _is_whitespace(char):
    """Checks whether `chars` is a whitespace character."""
    # NOTE(review): this function (and _is_control/_is_punctuation below) is
    # redefined verbatim near the bottom of this module; the later copies
    # shadow these. Consider deleting one set.
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    cat = unicodedata.category(char)
    if cat == "Zs":
        return True
    return False
def _is_control(char):
    """Checks whether `chars` is a control character."""
    # NOTE(review): shadowed by an identical redefinition later in this module.
    # These are technically control characters but we count them as whitespace
    # characters.
    if char == "\t" or char == "\n" or char == "\r":
        return False
    cat = unicodedata.category(char)
    if cat.startswith("C"):
        return True
    return False
def _is_punctuation(char):
    """Checks whether `chars` is a punctuation character."""
    # NOTE(review): shadowed by an identical redefinition later in this module.
    cp = ord(char)
    # We treat all non-letter/number ASCII as punctuation.
    # Characters such as "^", "$", and "`" are not in the Unicode
    # Punctuation class but we treat them as punctuation anyways, for
    # consistency.
    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
cp = ord(char)
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | 40.852495 | 133 | 0.606064 |
ace6a8d1fe530405853fc33e9bfba811c954831d | 1,044 | py | Python | platyplus/urls.py | plmercereau/platyplus-api | e883e6a3a092252af628cd4637cae6eeb826b083 | [
"MIT"
] | null | null | null | platyplus/urls.py | plmercereau/platyplus-api | e883e6a3a092252af628cd4637cae6eeb826b083 | [
"MIT"
] | null | null | null | platyplus/urls.py | plmercereau/platyplus-api | e883e6a3a092252af628cd4637cae6eeb826b083 | [
"MIT"
] | null | null | null | """platyplus URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import RedirectView
from graphene_django.views import GraphQLView
urlpatterns = [
    # Django admin UI.
    path('admin/', admin.site.urls),
    # GraphQL endpoint. CSRF is exempted so API clients can POST without a
    # CSRF token; graphiql=True also serves the in-browser IDE.
    path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True)), name='graphql'),
    # Site root redirects to the GraphQL endpoint.
    path('', RedirectView.as_view(pattern_name='graphql')),
]
| 38.666667 | 86 | 0.733716 |
ace6a904974f224abdca8bdcb3cd721128342ba6 | 1,360 | py | Python | karlooper/utils/base64encrypt.py | karldoenitz/karlooper | 2e1df83ed1ec9b343cdd930162a4de7ecd149c04 | [
"MIT"
] | 161 | 2016-05-17T12:44:07.000Z | 2020-07-30T02:18:34.000Z | karlooper/utils/base64encrypt.py | karldoenitz/karlooper | 2e1df83ed1ec9b343cdd930162a4de7ecd149c04 | [
"MIT"
] | 6 | 2016-08-29T01:40:26.000Z | 2017-12-29T09:20:41.000Z | karlooper/utils/base64encrypt.py | karldoenitz/karlooper | 2e1df83ed1ec9b343cdd930162a4de7ecd149c04 | [
"MIT"
] | 16 | 2016-06-27T02:56:54.000Z | 2019-08-08T08:18:48.000Z | # -*-coding:utf-8-*-
"""
base64encrypt
~~~~~~~~~~~~~
Use this model to encrypt string.
Usage
=====
    >>> d = Encryption()
    >>> d.input_key("123456789")
    >>> s = "/static/hello.js"
    >>> a = d.encode(s)
    >>> d.decode(a) == s
    True
"""
import base64
from karlooper.utils import PY3
class Encryption(object):
    """Reversible obfuscation helper.

    encode() appends a secret key to the plain text and base64-encodes the
    result; decode() reverses the process and strips the key again.
    """

    def __init__(self):
        # Secret suffix appended before encoding; set via input_key().
        self.__key = ""

    def input_key(self, key):
        """ set base key

        :param key: str type, the base key
        :return: None
        """
        self.__key = key

    def encode(self, s):
        """ encode a string

        :param s: the string will be encoded
        :return: encoded result
        """
        ss = s + self.__key
        # `str is not bytes` is True exactly on Python 3; this mirrors the
        # module-level karlooper.utils.PY3 flag (assumed to be a py3 check —
        # TODO confirm) so the class is self-contained.
        if str is not bytes:
            return base64.b64encode(ss.encode()).decode()
        return base64.b64encode(ss)

    def decode(self, s):
        """ decode a string

        Restores any '=' padding stripped from the token, decodes it and
        removes the trailing key.

        :param s: the string will be decoded
        :return: decoded result
        """
        # Bug fix: only pad when the length is NOT already a multiple of
        # four. The old code computed `4 - len(s) % 4`, which is 4 (truthy)
        # for well-padded input, so it always appended spurious '=' chars
        # and relied on the decoder silently ignoring them.
        missing_padding = len(s) % 4
        if missing_padding:
            s += '=' * (4 - missing_padding)
        if str is not bytes:
            decode_result = base64.b64decode(s.encode()).decode()
        else:
            decode_result = base64.b64decode(s)
        result = decode_result[:len(decode_result) - len(self.__key)]
        return result
| 19.428571 | 67 | 0.55 |
ace6a93790b3822e256c990b5cef0285899b6160 | 2,054 | py | Python | adjutant_ui/content/email/forms.py | elastx/adjutant-ui | 104c474479e9a89d22b96777378ff59983c03393 | [
"Apache-2.0"
] | 8 | 2017-09-05T15:33:31.000Z | 2019-02-13T10:10:06.000Z | adjutant_ui/content/email/forms.py | elastx/adjutant-ui | 104c474479e9a89d22b96777378ff59983c03393 | [
"Apache-2.0"
] | null | null | null | adjutant_ui/content/email/forms.py | elastx/adjutant-ui | 104c474479e9a89d22b96777378ff59983c03393 | [
"Apache-2.0"
] | 5 | 2019-04-04T20:47:12.000Z | 2021-07-06T16:36:13.000Z | # Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import messages
from adjutant_ui import api
class EmailForm(forms.SelfHandlingForm):
    # Horizon self-handling form that asks the user to type (and confirm) a
    # new email address and submits the change through the Adjutant API.
    new_email = forms.EmailField(
        label=_("New email address"),
        required=True)
    confirm_email = forms.CharField(
        label=_("Confirm email address"),
        required=True)
    no_autocomplete = True

    def clean(self):
        '''Check to make sure email fields match.'''
        # NOTE(review): super(forms.Form, self).clean() skips both
        # SelfHandlingForm.clean() and forms.Form.clean() in the MRO;
        # confirm this is intentional and not meant to be
        # super(EmailForm, self).clean().
        data = super(forms.Form, self).clean()
        if data.get('new_email') != data.get('confirm_email', None):
            raise ValidationError(_('Email addresses do not match.'))
        return data

    def handle(self, request, data):
        """Submit the email update to Adjutant; return True unless it raised."""
        try:
            response = api.adjutant.email_update(request, data['new_email'])
            # 202: accepted — a confirmation email is on its way.
            if response.status_code == 202:
                msg = _("Confirmation email sent to %s.")
                messages.success(request, msg % data['new_email'])
            elif response.status_code == 400:
                messages.warning(request, _(
                    'Unable to update email. May already be in use.'))
            else:
                messages.error(request, _('Failed to update email.'))
            return True
        except Exception as e:
            # NOTE(review): interpolating before _() defeats translation
            # lookup — the composed message has no catalog entry.
            messages.error(request, _('Failed to update email. %s' % str(e)))
            return False
| 36.678571 | 78 | 0.646543 |
ace6a98208c99e3b9489f7b4a4954bc751f76cfe | 127 | py | Python | _version.py | 4dn-dcic/submitpipeline | 702c4f936877afd6d3926224d6f2d6890805fbb7 | [
"MIT"
] | null | null | null | _version.py | 4dn-dcic/submitpipeline | 702c4f936877afd6d3926224d6f2d6890805fbb7 | [
"MIT"
] | null | null | null | _version.py | 4dn-dcic/submitpipeline | 702c4f936877afd6d3926224d6f2d6890805fbb7 | [
"MIT"
] | null | null | null | """Version information."""
# Single source of truth for the package version string (presumably read by
# packaging/release tooling — verify before changing its format).
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "0.0.1"
| 25.4 | 76 | 0.708661 |
ace6a9d11228af5728b28f97f08f2c9bacdd00c8 | 772 | py | Python | sample/my_tools/dict2vocab.py | MiuGod0126/wmt16-scripts | df693aca7da284a320c9f396dc890ea2e3028051 | [
"MIT"
] | 3 | 2022-03-15T10:45:51.000Z | 2022-03-21T09:56:12.000Z | sample/my_tools/dict2vocab.py | MiuGod0126/wmt16-scripts | df693aca7da284a320c9f396dc890ea2e3028051 | [
"MIT"
] | null | null | null | sample/my_tools/dict2vocab.py | MiuGod0126/wmt16-scripts | df693aca7da284a320c9f396dc890ea2e3028051 | [
"MIT"
] | null | null | null | import os
import sys
# Special tokens prepended to every generated vocabulary, in this fixed order
# (presumably matching the downstream toolkit's reserved indices — verify).
skip_words=['<s>','<pad>','</s>','<unk>']
def read_file(file):
    """Return every line of *file* (newlines preserved) as a list."""
    with open(file, 'r', encoding='utf-8') as handle:
        return handle.readlines()
def write_file(res, file):
    """Concatenate the strings in *res*, write them to *file*, and report."""
    content = ''.join(res)
    with open(file, 'w', encoding='utf-8') as handle:
        handle.write(content)
    print(f'write to {file} success.')
def dic2vocab(in_file, out_file):
    """Convert a dict file ("token count" per line) into a vocab file.

    The first whitespace-separated field of each line is kept, the
    skip_words specials are prepended, and one token per line is written
    to *out_file*.
    """
    entries = read_file(in_file)
    tokens = [line.strip().split(' ')[0] for line in entries]
    vocab_lines = [token + '\n' for token in skip_words + tokens]
    write_file(vocab_lines, out_file)
if __name__ == '__main__':
    # CLI entry point: dict2vocab.py <lang> <folder>
    # Reads <folder>/dict.<lang>.txt and writes <folder>/vocab.<lang>.
    lang=sys.argv[1]
    folder=sys.argv[2]
    in_path=os.path.join(folder,f"dict.{lang}.txt")
    out_path=os.path.join(folder,f"vocab.{lang}")
    dic2vocab(in_file=in_path,out_file=out_path)
| 27.571429 | 52 | 0.615285 |
ace6aa1fa4dddb6e8628433f25801b369c27cfba | 631 | py | Python | Python3/0897-Increasing-Order-Search-Tree/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0897-Increasing-Order-Search-Tree/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0897-Increasing-Order-Search-Tree/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def increasingBST(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode

        Rebuild the BST as a right-skewed chain in increasing order.

        Performs a REVERSE in-order walk (right, node, left) while keeping
        `self.pre`, the smallest node processed so far. Each visited node
        gets its right pointer aimed at `self.pre` and its left pointer
        cleared; after the walk `self.pre` is the minimum node, i.e. the
        head of the increasing-order tree.
        """
        self.pre = None
        def traverse(node):
            if node:
                traverse(node.right)
                # Save the left subtree BEFORE clearing node.left, since it
                # still has to be traversed after this node is relinked.
                left = node.left
                node.left = None
                node.right = self.pre
                self.pre = node
                traverse(left)
        traverse(root)
        return self.pre
ace6aa4b0d6ff67b8ca46bcbf7c8d1b83d85707d | 151 | py | Python | landing/context_processors.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | landing/context_processors.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | 3 | 2015-12-08T17:14:31.000Z | 2016-01-29T18:46:59.000Z | landing/context_processors.py | XeryusTC/projman | 3db118d51a9fc362153593f5a862187bdaf0a73c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib.sites.shortcuts import get_current_site
def site(request):
    """Template context processor exposing the current Site as `site`."""
    current_site = get_current_site(request)
    return {'site': current_site}
| 25.166667 | 59 | 0.728477 |
ace6aa5cff109267cbed6fe65a8d0b008e62c45e | 4,010 | py | Python | hexa/plugins/connector_dhis2/api.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | null | null | null | hexa/plugins/connector_dhis2/api.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | 1 | 2021-09-07T08:10:18.000Z | 2021-09-07T08:10:18.000Z | hexa/plugins/connector_dhis2/api.py | hastakecorp/openhexa-app | dde38f14701c3bfe7fb51f29d73b35e6b8d45b7b | [
"MIT"
] | null | null | null | import datetime
from dhis2 import Api
from django.utils import timezone, dateparse
class Dhis2Result:
    """Base class for DHIS2 result items - handles translations"""

    # Mapping dhis2 field name -> field type, field default.
    # Subclasses extend this via their own FIELD_SPECS (see `fields`).
    FIELD_SPECS = {
        "id": (str, None),
        "name": (str, None),
        "shortName": (str, ""),
        "description": (str, ""),
        "externalAccess": (bool, None),
        "favorite": (bool, None),
        "created": (datetime.datetime, None),
        "lastUpdated": (datetime.datetime, None),
    }

    def __init__(self, data):
        # Raw item dict as returned by the DHIS2 API.
        self._data = data

    @property
    def fields(self):
        # Base specs merged with the subclass' specs (subclass wins on clash).
        return {**Dhis2Result.FIELD_SPECS, **self.FIELD_SPECS}

    def get_values(self, locale=None):
        """Return a {field_name: value} dict for every known field."""
        return {
            field_name: self.get_value(field_name, locale) for field_name in self.fields
        }

    def get_value(self, field_name, locale=None):
        """Return the value of `field_name`, translated when possible.

        Raises ValueError for unknown fields. When the requested locale has
        no translation, falls back to any available translation, then to
        the field's declared default.
        """
        try:
            field_type, field_default = self.fields[field_name]
        except KeyError:
            raise ValueError(
                f'The "{field_name}" field does not exist in {self.__class__.__name__}'
            )

        # If "dict" type, references another record - return as is (or default)
        if field_type is dict:
            return self._data.get(field_name, {"id": field_default})

        # If "datetime" type, convert to time-aware datetime
        if field_type is datetime.datetime:
            return timezone.make_aware(
                dateparse.parse_datetime(self._data.get(field_name, field_default))
            )

        # If not a translated property (or no translations), early return
        if "translations" not in self._data or not any(
            p for p in self._data["translations"] if p["property"] == field_name.upper()
        ):
            return self._data.get(field_name, field_default)

        try:
            # Attempt to extract the translated value for the provided locale (which can be None)
            return next(
                p
                for p in self._data["translations"]
                if p["property"] == field_name.upper()
                # If locale is None, the first description will be returned
                # NOTE(review): substring match — "fr" also matches "fr_BE";
                # confirm this loose matching is intended.
                and (locale is None or locale in p["locale"])
            )["value"]
        except StopIteration:
            if (
                locale is None
            ):  # Locale is None: if no description at all, return the default
                return field_default
            # Could not find a description for the provided locale, find any description
            return self.get_value(field_name, None)
class DataElementResult(Dhis2Result):
    # Extra fields specific to DHIS2 data elements; merged with the base
    # specs by Dhis2Result.fields.
    FIELD_SPECS = {
        "code": (str, ""),
        "domainType": (str, None),
        "valueType": (str, None),
        "aggregationType": (str, None),
    }
class IndicatorTypeResult(Dhis2Result):
    # Extra fields specific to DHIS2 indicator types; merged with the base
    # specs by Dhis2Result.fields.
    FIELD_SPECS = {
        "number": (bool, None),
        "factor": (int, None),
    }
class IndicatorResult(Dhis2Result):
    # Extra fields specific to DHIS2 indicators; "indicatorType" references
    # another record, so it is declared with the dict type.
    FIELD_SPECS = {
        "code": (str, ""),
        "indicatorType": (dict, None),
        "annualized": (bool, None),
    }
class Dhis2Client:
    """Thin wrapper around dhis2.Api that pages through metadata endpoints."""

    def __init__(self, *, url, username, password):
        self._api = Api(url, username, password)

    def fetch_info(self):
        """Return the DHIS2 system info document."""
        return self._api.get_info()

    def _fetch_paged(self, collection, result_class):
        """Yield pages of `collection`, each as a list of `result_class` items.

        Consolidates the pagination pattern previously copy-pasted across
        every fetch_* method.
        """
        for page in self._api.get_paged(
            collection, params={"fields": ":all"}, page_size=100
        ):
            yield [result_class(data) for data in page[collection]]

    def fetch_data_elements(self):
        """Yield pages (lists) of DataElementResult items."""
        return self._fetch_paged("dataElements", DataElementResult)

    def fetch_indicator_types(self):
        """Yield pages (lists) of IndicatorTypeResult items."""
        return self._fetch_paged("indicatorTypes", IndicatorTypeResult)

    def fetch_indicators(self):
        """Yield pages (lists) of IndicatorResult items."""
        return self._fetch_paged("indicators", IndicatorResult)
| 32.08 | 97 | 0.58803 |
ace6aa6b216ac0f41a7f5e7276b53f42cf5fa007 | 26,450 | py | Python | keystone/logic/types/auth.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | keystone/logic/types/auth.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | keystone/logic/types/auth.py | admiyo/keystone | 9452cf04bc8b0a4dc66dc640615d5ace1ca715f2 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=C0103,R0912,R0913,R0914
import json
from lxml import etree
from keystone.logic.types import fault
import keystone.backends.api as db_api
from keystone import utils
class AuthBase(object):
    """Common base for auth credential types: holds the optional tenant scope."""

    def __init__(self, tenant_id=None, tenant_name=None):
        self.tenant_id = tenant_id
        self.tenant_name = tenant_name

    @staticmethod
    def _validate_auth(obj, *valid_keys):
        """Validate the top-level 'auth' dict of a parsed request body.

        Every attribute must be one of `valid_keys`, and tenantId and
        tenantName must not both be present. Returns the 'auth' sub-dict.
        Raises fault.BadRequestFault on any violation.
        """
        if not 'auth' in obj:
            raise fault.BadRequestFault('Expecting auth')
        auth = obj.get('auth')
        for key in auth:
            if not key in valid_keys:
                raise fault.BadRequestFault('Invalid attribute(s): %s' % key)
        if auth.get('tenantId') and auth.get('tenantName'):
            raise fault.BadRequestFault(
                'Expecting either Tenant ID or Tenant Name, but not both')
        return auth

    @staticmethod
    def _validate_key(obj, key, *required_keys):
        """Validate that obj[key] exists and holds exactly `required_keys`.

        Every attribute of obj[key] must be one of `required_keys`, and
        each required key must be present with a truthy value. Returns
        obj[key]. Raises fault.BadRequestFault on any violation.
        """
        if not key in obj:
            raise fault.BadRequestFault('Expecting %s' % key)
        ret = obj[key]
        for skey in ret:
            if not skey in required_keys:
                raise fault.BadRequestFault('Invalid attribute(s): %s' % skey)
        for required_key in required_keys:
            if not ret.get(required_key):
                raise fault.BadRequestFault('Expecting %s:%s' %
                                            (key, required_key))
        return ret
class AuthWithUnscopedToken(AuthBase):
    """Auth request presenting an existing token.

    The token may optionally be rescoped by supplying a tenant id or a
    tenant name, but not both.
    """
    def __init__(self, token_id, tenant_id=None, tenant_name=None):
        super(AuthWithUnscopedToken, self).__init__(tenant_id, tenant_name)
        self.token_id = token_id
    @staticmethod
    def from_xml(xml_str):
        """Parse an XML auth request carrying a token element.

        Raises BadRequestFault on a missing auth/token element, an empty
        token id, both tenant id and name present, or malformed XML.
        """
        try:
            # Wrap in a synthetic root so dom.find can locate the
            # namespaced auth element uniformly.
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "auth")
            if root is None:
                raise fault.BadRequestFault("Expecting auth")
            token = root.find("{http://docs.openstack.org/identity/api/v2.0}"
                              "token")
            if token is None:
                raise fault.BadRequestFault("Expecting token")
            token_id = token.get("id")
            tenant_id = root.get("tenantId")
            tenant_name = root.get("tenantName")
            utils.check_empty_string(token_id, "Expecting a token id.")
            if tenant_id and tenant_name:
                raise fault.BadRequestFault(
                    "Expecting either Tenant ID or Tenant Name, but not both")
            return AuthWithUnscopedToken(token_id, tenant_id, tenant_name)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse password access", str(e))
    @staticmethod
    def from_json(json_str):
        """Parse a JSON auth request carrying a token object.

        Raises BadRequestFault on invalid structure or unparseable JSON.
        """
        try:
            obj = json.loads(json_str)
            auth = AuthBase._validate_auth(obj, 'tenantId', 'tenantName',
                                           'token')
            token = AuthBase._validate_key(auth, 'token', 'id')
            return AuthWithUnscopedToken(token['id'],
                                         auth.get('tenantId'),
                                         auth.get('tenantName'))
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse auth", str(e))
class AuthWithPasswordCredentials(AuthBase):
    """Auth request carrying a username/password pair.

    The request may optionally be scoped by a tenant id or a tenant
    name, but not both.
    """
    def __init__(self, username, password, tenant_id=None, tenant_name=None):
        super(AuthWithPasswordCredentials, self).__init__(tenant_id,
                                                          tenant_name)
        self.username = username
        self.password = password
    @staticmethod
    def from_xml(xml_str):
        """Parse an XML auth request with a passwordCredentials element.

        Raises BadRequestFault on missing elements, empty username or
        password, both tenant id and name present, or malformed XML.
        """
        try:
            # Wrap in a synthetic root so dom.find can locate the
            # namespaced auth element uniformly.
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "auth")
            if root is None:
                raise fault.BadRequestFault("Expecting auth")
            tenant_id = root.get("tenantId")
            tenant_name = root.get("tenantName")
            password_credentials = \
                root.find("{http://docs.openstack.org/identity/api/v2.0}"
                          "passwordCredentials")
            if password_credentials is None:
                raise fault.BadRequestFault("Expecting passwordCredentials")
            username = password_credentials.get("username")
            utils.check_empty_string(username, "Expecting a username")
            password = password_credentials.get("password")
            utils.check_empty_string(password, "Expecting a password")
            if tenant_id and tenant_name:
                raise fault.BadRequestFault(
                    "Expecting either Tenant ID or Tenant Name, but not both")
            return AuthWithPasswordCredentials(username, password, tenant_id,
                                               tenant_name)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse password access", str(e))
    @staticmethod
    def from_json(json_str):
        """Parse a JSON auth request with a passwordCredentials object.

        Raises BadRequestFault on invalid structure or unparseable JSON.
        """
        try:
            obj = json.loads(json_str)
            auth = AuthBase._validate_auth(obj, 'tenantId', 'tenantName',
                                           'passwordCredentials', 'token')
            cred = AuthBase._validate_key(auth, 'passwordCredentials',
                                          'username', 'password')
            return AuthWithPasswordCredentials(cred['username'],
                                               cred['password'],
                                               auth.get('tenantId'),
                                               auth.get('tenantName'))
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse auth", str(e))
class Ec2Credentials(object):
    """Credentials based on username, access_key, signature and data.

    @type access: str
    @param access: Access key for user in the form of access:project.
    @type signature: str
    @param signature: Signature of the request.
    @type params: dictionary of str
    @param params: Web parameters used for the signature.
    @type verb: str
    @param verb: Web request verb ('GET' or 'POST').
    @type host: str
    @param host: Web request host string (including port).
    @type path: str
    @param path: Web request path.
    """
    def __init__(self, access, signature, verb,
                 host, path, params):
        self.access = access
        self.signature = signature
        self.verb = verb
        self.host = host
        self.path = path
        self.params = params
    @staticmethod
    def from_xml(xml_str):
        """Build Ec2Credentials from an XML request body.

        Accepts the ec2Credentials element either nested inside an auth
        element or at the top level.  Raises BadRequestFault on missing
        or empty attributes and on malformed XML.
        """
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "auth")
            xmlns = "http://docs.openstack.org/identity/api/ext/OS-KSEC2/v1.0"
            if root is None:
                root = dom.find("{%s}ec2Credentials" % xmlns)
            else:
                root = root.find("{%s}ec2Credentials" % xmlns)
            if root is None:
                raise fault.BadRequestFault("Expecting ec2Credentials")
            access = root.get("key")
            utils.check_empty_string(access, "Expecting an access key.")
            signature = root.get("signature")
            utils.check_empty_string(signature, "Expecting a signature.")
            verb = root.get("verb")
            utils.check_empty_string(verb, "Expecting a verb.")
            host = root.get("host")
            # Bug fix: previously re-checked `signature` here, so an
            # empty host slipped through validation.
            utils.check_empty_string(host, "Expecting a host.")
            path = root.get("path")
            # Bug fix: previously re-checked `signature` here, so an
            # empty path slipped through validation.
            utils.check_empty_string(path, "Expecting a path.")
            # TODO(vish): parse xml params
            params = {}
            return Ec2Credentials(access, signature, verb, host, path, params)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))
    @staticmethod
    def from_json(json_str):
        """Build Ec2Credentials from a JSON request body.

        Accepts the credentials under 'OS-KSEC2:ec2Credentials' or the
        legacy 'ec2Credentials' key, nested inside 'auth' or top-level.
        Raises BadRequestFault on unknown or missing fields and on
        unparseable JSON.
        """
        try:
            root = json.loads(json_str)
            if "auth" in root:
                obj = root['auth']
            else:
                obj = root
            if "OS-KSEC2:ec2Credentials" in obj:
                cred = obj["OS-KSEC2:ec2Credentials"]
            elif "ec2Credentials" in obj:
                cred = obj["ec2Credentials"]
            else:
                raise fault.BadRequestFault("Expecting ec2Credentials")
            # Reject any attribute outside the known set.
            invalid = [key for key in cred if key not in
                       ['username', 'access', 'signature', 'params',
                        'verb', 'host', 'path']]
            if invalid != []:
                raise fault.BadRequestFault("Invalid attribute(s): %s"
                                            % invalid)
            # Required fields, each with its historical error message.
            required = [("access", "Expecting an access key"),
                        ("signature", "Expecting a signature"),
                        ("verb", "Expecting a verb"),
                        ("host", "Expecting a host"),
                        ("path", "Expecting a path"),
                        ("params", "Expecting params")]
            for field, message in required:
                if field not in cred:
                    raise fault.BadRequestFault(message)
            return Ec2Credentials(cred["access"], cred["signature"],
                                  cred["verb"], cred["host"], cred["path"],
                                  cred["params"])
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))
# pylint: disable=R0902
class S3Credentials(object):
    """Credentials based on username, access_key, signature and data.

    @type access: str
    @param access: Access key for user in the form of access:project.
    @type signature: str
    @param signature: Signature of the request.
    @type verb: str
    @param verb: Web request verb ('GET' or 'POST').
    @type path: str
    @param path: Web request path.
    @type expire: str
    @param expire: Web request expire time.
    @type content_type: str
    @param content_type: Web request content type.
    @type content_md5: str
    @param content_md5: Web request content MD5.
    @type xheaders: str
    @param xheaders: Web request extended (x-amz-*) headers.
    """
    def __init__(self, access, signature, verb, path, expire, content_type,
                 content_md5, xheaders):
        self.access = access
        self.signature = signature
        self.verb = verb
        self.path = path
        self.expire = expire
        self.content_type = content_type
        self.content_md5 = content_md5
        self.xheaders = xheaders
    @staticmethod
    def from_xml(xml_str):
        """Build S3Credentials from an XML request body.

        Accepts the s3Credentials element either nested inside an auth
        element or at the top level.  Raises BadRequestFault on missing
        attributes and on malformed XML.
        """
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "auth")
            xmlns = "http://docs.openstack.org/identity/api/ext/OS-KSS3/v1.0"
            if root is None:
                root = dom.find("{%s}s3Credentials" % xmlns)
            else:
                root = root.find("{%s}s3Credentials" % xmlns)
            if root is None:
                raise fault.BadRequestFault("Expecting s3Credentials")
            # Idiom fix: `is None` instead of `== None` throughout.
            access = root.get("access")
            if access is None:
                raise fault.BadRequestFault("Expecting an access key")
            signature = root.get("signature")
            if signature is None:
                raise fault.BadRequestFault("Expecting a signature")
            verb = root.get("verb")
            if verb is None:
                raise fault.BadRequestFault("Expecting a verb")
            path = root.get("path")
            if path is None:
                raise fault.BadRequestFault("Expecting a path")
            expire = root.get("expire")
            if expire is None:
                raise fault.BadRequestFault("Expecting a expire")
            # Optional attributes default to empty string / None.
            content_type = root.get("content_type", '')
            content_md5 = root.get("content_md5", '')
            xheaders = root.get("xheaders", None)
            return S3Credentials(access, signature, verb, path, expire,
                                 content_type, content_md5, xheaders)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))
    @staticmethod
    def from_json(json_str):
        """Build S3Credentials from a JSON request body.

        Accepts the credentials under 'OS-KSS3:s3Credentials' or the
        legacy 's3Credentials' key, nested inside 'auth' or top-level.
        Raises BadRequestFault on unknown or missing fields and on
        unparseable JSON.
        """
        try:
            root = json.loads(json_str)
            if "auth" in root:
                obj = root['auth']
            else:
                obj = root
            if "OS-KSS3:s3Credentials" in obj:
                cred = obj["OS-KSS3:s3Credentials"]
            elif "s3Credentials" in obj:
                cred = obj["s3Credentials"]
            else:
                raise fault.BadRequestFault("Expecting s3Credentials")
            # Reject any attribute outside the known set.
            invalid = [key for key in cred if key not in
                       ['username', 'access', 'signature', 'verb', 'expire',
                        'path', 'content_type', 'content_md5', 'xheaders']]
            if invalid != []:
                raise fault.BadRequestFault("Invalid attribute(s): %s"
                                            % invalid)
            # Required fields, each with its historical error message.
            required = [("access", "Expecting an access key"),
                        ("signature", "Expecting a signature"),
                        ("verb", "Expecting a verb"),
                        ("path", "Expecting a path"),
                        ("expire", "Expecting a expire")]
            for field, message in required:
                if field not in cred:
                    raise fault.BadRequestFault(message)
            content_type = cred.get("content_type", '')
            content_md5 = cred.get("content_md5", '')
            xheaders = cred.get("xheaders", None)
            return S3Credentials(cred["access"], cred["signature"],
                                 cred["verb"], cred["path"], cred["expire"],
                                 content_type, content_md5, xheaders)
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))
class Tenant(object):
    """Scope of a token: the tenant it was issued for."""
    def __init__(self, id, name):
        # `id` shadows the builtin; name kept for API compatibility.
        self.name = name
        self.id = id
class Token(object):
    """An auth token, optionally scoped to a tenant."""
    def __init__(self, expires, token_id, tenant=None):
        # A scoped token must carry a Tenant instance (or nothing at all).
        assert tenant is None or isinstance(tenant, Tenant)
        self.id = token_id
        self.expires = expires
        self.tenant = tenant
class User(object):
    """A user, with optional tenant scope and role grants."""
    # Class-level defaults mirror the constructor parameters.
    id = None
    username = None
    tenant_id = None
    tenant_name = None
    rolegrants = None
    def __init__(self, id, username, tenant_id, tenant_name, rolegrants=None):
        self.rolegrants = rolegrants
        self.tenant_name = tenant_name
        self.tenant_id = tenant_id
        self.username = username
        self.id = id
class AuthData(object):
    """Authentication information returned upon successful login.

    This class handles rendering to JSON and XML. It renders
    the token, the user data, the roles, and the service catalog.
    The list of endpoint URLs in the service catalog can be filtered by
    URL type. For example, when we respond to a public call from a user
    without elevated privileges, the "adminURL" is not returned. The
    url_types parameter in the initializer lists the types to return.
    The actual authorization is done in logic/service.py
    """
    def __init__(self, token, user, base_urls=None, url_types=None):
        self.token = token
        self.user = user
        self.base_urls = base_urls
        if url_types is None:
            self.url_types = ["internal", "public", "admin"]
        else:
            self.url_types = url_types
        # self.d maps service_id -> list of base URLs for that service
        self.d = {}
        if self.base_urls is not None:
            self.__convert_baseurls_to_dict()
    def to_xml(self):
        """Render the access response (token, user, catalog) as XML."""
        dom = etree.Element("access",
            xmlns="http://docs.openstack.org/identity/api/v2.0")
        token = etree.Element("token",
            expires=self.token.expires.isoformat())
        token.set("id", self.token.id)
        if self.token.tenant:
            tenant = etree.Element("tenant",
                id=unicode(self.token.tenant.id),
                name=unicode(self.token.tenant.name))
            token.append(tenant)
        dom.append(token)
        user = etree.Element("user",
            id=unicode(self.user.id),
            name=unicode(self.user.username))
        dom.append(user)
        if self.user.rolegrants is not None:
            user.append(self.user.rolegrants.to_dom())
        if self.base_urls is not None and len(self.base_urls) > 0:
            service_catalog = etree.Element("serviceCatalog")
            for key, key_base_urls in self.d.items():
                dservice = db_api.SERVICE.get(key)
                if not dservice:
                    raise fault.ItemNotFoundFault(
                        "The service could not be found")
                service = etree.Element("service",
                    name=dservice.name, type=dservice.type)
                for base_url in key_base_urls:
                    include_this_endpoint = False
                    endpoint = etree.Element("endpoint")
                    if base_url.region:
                        endpoint.set("region", base_url.region)
                    for url_kind in self.url_types:
                        # Endpoint attributes are internal_url/public_url/
                        # admin_url; missing kinds are simply skipped.
                        base_url_item = getattr(base_url, url_kind + "_url")
                        if base_url_item:
                            if '%tenant_id%' in base_url_item:
                                if self.token.tenant:
                                    # Don't return tenant endpoints if token
                                    # not scoped to a tenant
                                    endpoint.set(url_kind + "URL",
                                        base_url_item.replace('%tenant_id%',
                                            str(self.token.tenant.id)))
                                    endpoint.set('tenantId',
                                        str(self.token.tenant.id))
                                    include_this_endpoint = True
                            else:
                                endpoint.set(url_kind + "URL", base_url_item)
                                include_this_endpoint = True
                    if include_this_endpoint:
                        endpoint.set("id", str(base_url.id))
                        if hasattr(base_url, "version_id"):
                            if base_url.version_id:
                                endpoint.set("versionId",
                                    str(base_url.version_id))
                        service.append(endpoint)
                # Only emit services that produced at least one endpoint.
                if service.find("endpoint") is not None:
                    service_catalog.append(service)
            dom.append(service_catalog)
        return etree.tostring(dom)
    def __convert_baseurls_to_dict(self):
        """Group self.base_urls by service_id into self.d."""
        for base_url in self.base_urls:
            if base_url.service_id not in self.d:
                self.d[base_url.service_id] = list()
            self.d[base_url.service_id].append(base_url)
    def to_json(self):
        """Render the access response (token, user, catalog) as JSON."""
        token = {}
        token["id"] = self.token.id
        token["expires"] = self.token.expires.isoformat()
        if self.token.tenant:
            tenant = {
                'id': unicode(self.token.tenant.id),
                'name': unicode(self.token.tenant.name)}
            token['tenant'] = tenant  # v2.0/Diablo contract
            token['tenants'] = [tenant]  # missed use case in v2.0
        auth = {}
        auth["token"] = token
        auth['user'] = {
            'id': unicode(self.user.id),
            'name': unicode(self.user.username)}
        if self.user.rolegrants is not None:
            auth['user']["roles"] = self.user.rolegrants.to_json_values()
        if self.base_urls is not None and len(self.base_urls) > 0:
            service_catalog = []
            for key, key_base_urls in self.d.items():
                service = {}
                endpoints = []
                for base_url in key_base_urls:
                    include_this_endpoint = False
                    endpoint = {}
                    if base_url.region:
                        endpoint["region"] = base_url.region
                    for url_kind in self.url_types:
                        base_url_item = getattr(base_url, url_kind + "_url")
                        if base_url_item:
                            if '%tenant_id%' in base_url_item:
                                if self.token.tenant:
                                    # Don't return tenant endpoints if token
                                    # not scoped to a tenant
                                    endpoint[url_kind + "URL"] = \
                                        base_url_item.replace('%tenant_id%',
                                            str(self.token.tenant.id))
                                    endpoint['tenantId'] = \
                                        str(self.token.tenant.id)
                                    include_this_endpoint = True
                            else:
                                endpoint[url_kind + "URL"] = base_url_item
                                include_this_endpoint = True
                    if include_this_endpoint:
                        endpoint['id'] = str(base_url.id)
                        if hasattr(base_url, 'version_id'):
                            if base_url.version_id:
                                endpoint['versionId'] = \
                                    str(base_url.version_id)
                        endpoints.append(endpoint)
                dservice = db_api.SERVICE.get(key)
                if not dservice:
                    raise fault.ItemNotFoundFault(
                        "The service could not be found for" + str(key))
                # Only emit services that produced at least one endpoint.
                if len(endpoints):
                    service["name"] = dservice.name
                    service["type"] = dservice.type
                    service["endpoints"] = endpoints
                    service_catalog.append(service)
            auth["serviceCatalog"] = service_catalog
        ret = {}
        ret["access"] = auth
        return json.dumps(ret)
class ValidateData(object):
    """Authentication information returned upon successful token validation."""
    # token: the validated Token; user: the User it belongs to
    token = None
    user = None
    def __init__(self, token, user):
        self.token = token
        self.user = user
    def to_xml(self):
        """Render the validation response (token + user) as XML."""
        dom = etree.Element("access",
            xmlns="http://docs.openstack.org/identity/api/v2.0")
        token = etree.Element("token",
            id=unicode(self.token.id),
            expires=self.token.expires.isoformat())
        if self.token.tenant:
            tenant = etree.Element("tenant",
                id=unicode(self.token.tenant.id),
                name=unicode(self.token.tenant.name))
            token.append(tenant)
        user = etree.Element("user",
            id=unicode(self.user.id),
            name=unicode(self.user.username))
        if self.user.tenant_id is not None:
            user.set('tenantId', unicode(self.user.tenant_id))
        if self.user.tenant_name is not None:
            user.set('tenantName', unicode(self.user.tenant_name))
        if self.user.rolegrants is not None:
            user.append(self.user.rolegrants.to_dom())
        dom.append(token)
        dom.append(user)
        return etree.tostring(dom)
    def to_json(self):
        """Render the validation response (token + user) as JSON."""
        token = {
            "id": unicode(self.token.id),
            "expires": self.token.expires.isoformat()}
        if self.token.tenant:
            tenant = {
                'id': unicode(self.token.tenant.id),
                'name': unicode(self.token.tenant.name)}
            token['tenant'] = tenant  # v2.0/Diablo contract
            token['tenants'] = [tenant]  # missed use case in v2.0
        user = {
            "id": unicode(self.user.id),
            "name": unicode(self.user.username),
            # TODO(ziad) temporary until we are comfortable clients are updated
            "username": unicode(self.user.username)}
        if self.user.tenant_id is not None:
            user['tenantId'] = unicode(self.user.tenant_id)
        if self.user.tenant_name is not None:
            user['tenantName'] = unicode(self.user.tenant_name)
        if self.user.rolegrants is not None:
            user["roles"] = self.user.rolegrants.to_json_values()
        return json.dumps({
            "access": {
                "token": token,
                "user": user}})
| 39.243323 | 79 | 0.538299 |
ace6aab7b09f07bf313c7fd0c6c6aca239958d4e | 5,016 | py | Python | fpn/operator_py/proposal_target.py | qilei123/sod_v1 | 901f20283865ab2882c9fb5f6a48db0c52285e80 | [
"MIT"
] | 2 | 2019-09-16T05:40:31.000Z | 2019-12-23T06:52:58.000Z | fpn/operator_py/proposal_target.py | qilei123/sod_v1_demo | a38f76e5a3af13f8f16d32aa40369f1a4f4fd839 | [
"MIT"
] | null | null | null | fpn/operator_py/proposal_target.py | qilei123/sod_v1_demo | a38f76e5a3af13f8f16d32aa40369f1a4f4fd839 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
"""
Proposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.
"""
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from easydict import EasyDict as edict
import cPickle
import gc
from datetime import datetime
from core.rcnn import sample_rois
DEBUG = False
class ProposalTargetOperator(mx.operator.CustomOp):
    """Custom MXNet op: sample fg/bg ROIs and build per-ROI training targets."""
    def __init__(self, num_classes, batch_images, batch_rois, cfg, fg_fraction):
        super(ProposalTargetOperator, self).__init__()
        self._num_classes = num_classes
        self._batch_images = batch_images
        self._batch_rois = batch_rois  # -1 means "keep all rois + gt boxes"
        self._cfg = cfg
        self._fg_fraction = fg_fraction  # target fraction of foreground ROIs
        if DEBUG:
            # running statistics of sampled foreground/background counts
            self._count = 0
            self._fg_num = 0
            self._bg_num = 0
    def forward(self, is_train, req, in_data, out_data, aux):
        """Sample ROIs against gt boxes and emit rois/labels/bbox targets.

        in_data[0]: RPN proposals; in_data[1]: ground-truth boxes.
        Outputs are assigned to out_data in the order declared by
        ProposalTargetProp.list_outputs().
        """
        before_proposal_target = datetime.now()
        assert self._batch_rois == -1 or self._batch_rois % self._batch_images == 0, \
            'batchimages {} must devide batch_rois {}'.format(self._batch_images, self._batch_rois)
        all_rois = in_data[0].asnumpy()
        gt_boxes = in_data[1].asnumpy()
        if self._batch_rois == -1:
            # keep every proposal plus every ground-truth box as foreground pool
            rois_per_image = all_rois.shape[0] + gt_boxes.shape[0]
            fg_rois_per_image = rois_per_image
        else:
            # NOTE(review): integer division under Python 2 — confirm intended
            rois_per_image = self._batch_rois / self._batch_images
            fg_rois_per_image = np.round(self._fg_fraction * rois_per_image).astype(int)
        # Include ground-truth boxes in the set of candidate rois
        zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
        all_rois = np.vstack((all_rois, np.hstack((zeros, gt_boxes[:, :-1]))))
        # Sanity check: single batch only
        assert np.all(all_rois[:, 0] == 0), 'Only single item batches are supported'
        rois, labels, bbox_targets, bbox_weights = \
            sample_rois(all_rois, fg_rois_per_image, rois_per_image, self._num_classes, self._cfg, gt_boxes=gt_boxes)
        if DEBUG:
            print "labels=", labels
            print 'num fg: {}'.format((labels > 0).sum())
            print 'num bg: {}'.format((labels == 0).sum())
            self._count += 1
            self._fg_num += (labels > 0).sum()
            self._bg_num += (labels == 0).sum()
            print "self._count=", self._count
            print 'num fg avg: {}'.format(self._fg_num / self._count)
            print 'num bg avg: {}'.format(self._bg_num / self._count)
            print 'ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num))
        for ind, val in enumerate([rois, labels, bbox_targets, bbox_weights]):
            self.assign(out_data[ind], req[ind], val)
        after_proposal_target = datetime.now()
        #print 'proposal_target times:'
        #print (after_proposal_target-before_proposal_target).seconds
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        """No gradient flows: targets are sampled, not differentiated through."""
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)
        gc.collect()
@mx.operator.register('proposal_target')
class ProposalTargetProp(mx.operator.CustomOpProp):
    """Property class describing shapes/IO for the 'proposal_target' op."""
    def __init__(self, num_classes, batch_images, batch_rois, cfg, fg_fraction='0.25'):
        # need_top_grad=False: the op emits training targets, not a
        # differentiable output, so no gradient arrives from above.
        super(ProposalTargetProp, self).__init__(need_top_grad=False)
        # Custom-op parameters arrive as strings; convert explicitly.
        self._num_classes = int(num_classes)
        self._batch_images = int(batch_images)
        self._batch_rois = int(batch_rois)
        self._cfg = cPickle.loads(cfg)  # cfg is passed as a pickled string
        self._fg_fraction = float(fg_fraction)
    def list_arguments(self):
        return ['rois', 'gt_boxes']
    def list_outputs(self):
        return ['rois_output', 'label', 'bbox_target', 'bbox_weight']
    def infer_shape(self, in_shape):
        """Derive output shapes from the input rois/gt_boxes shapes."""
        rpn_rois_shape = in_shape[0]
        gt_boxes_shape = in_shape[1]
        # batch_rois == -1 means "all proposals plus all gt boxes"
        rois = rpn_rois_shape[0] + gt_boxes_shape[0] if self._batch_rois == -1 else self._batch_rois
        output_rois_shape = (rois, 5)
        label_shape = (rois, )
        bbox_target_shape = (rois, self._num_classes * 4)
        bbox_weight_shape = (rois, self._num_classes * 4)
        return [rpn_rois_shape, gt_boxes_shape], \
               [output_rois_shape, label_shape, bbox_target_shape, bbox_weight_shape]
    def create_operator(self, ctx, shapes, dtypes):
        return ProposalTargetOperator(self._num_classes, self._batch_images, self._batch_rois, self._cfg, self._fg_fraction)
    def declare_backward_dependency(self, out_grad, in_data, out_data):
        # No inputs are needed to compute the (zero) backward pass.
        return []
| 38.584615 | 124 | 0.63138 |
ace6aac53871fd6bc422849e145ce7c386f7a7b9 | 2,906 | py | Python | src/secondaires/navigation/equipage/signaux/attendre.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/navigation/equipage/signaux/attendre.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/navigation/equipage/signaux/attendre.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe SignalAttendre."""
from secondaires.navigation.equipage.signaux.base import Signal
class SignalAttendre(Signal):
    """Signal used to wait until a child order finishes executing.

    This class tells a parent order that the current signal is waiting
    for a child order to complete. For example, the order to move
    through several rooms does not execute instantly: it pauses briefly
    between moves. When that multi-room move is used as a sub-order,
    the parent generator is told to pause until the child action has
    run.

    The signal takes as parameter the generator of the order whose
    execution is awaited.
    """
    def __init__(self, generateur_enfant):
        Signal.__init__(self)
        # attendre=True flags this signal as a "wait" signal
        self.attendre = True
        # generator of the child order being awaited
        self.generateur_enfant = generateur_enfant
    def __repr__(self):
        return "<signal attendre>"
    def traiter(self, generateur, profondeur):
        """Process the generator: launch the awaited child generator."""
        ordre = generateur.ordre
        matelot = ordre.matelot
        differe = self.generateur_enfant
        # Propagate the parent's volonte and link the child back to it
        differe.ordre.volonte = ordre.volonte
        differe.parent = generateur
        matelot.executer_generateur(differe, profondeur + 1)
| 42.735294 | 79 | 0.749484 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.