Dataset schema (column dtypes and value ranges; ⌀ marks columns that contain nulls):

| Column | Type and range |
|---|---|
| hexsha | stringlengths 40–40 |
| size | int64 4–1.02M |
| ext | stringclasses 8 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 4–209 |
| max_stars_repo_name | stringlengths 5–121 |
| max_stars_repo_head_hexsha | stringlengths 40–40 |
| max_stars_repo_licenses | listlengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 4–209 |
| max_issues_repo_name | stringlengths 5–121 |
| max_issues_repo_head_hexsha | stringlengths 40–40 |
| max_issues_repo_licenses | listlengths 1–10 |
| max_issues_count | int64 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 4–209 |
| max_forks_repo_name | stringlengths 5–121 |
| max_forks_repo_head_hexsha | stringlengths 40–40 |
| max_forks_repo_licenses | listlengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 4–1.02M |
| avg_line_length | float64 1.07–66.1k |
| max_line_length | int64 4–266k |
| alphanum_fraction | float64 0.01–1 |
5158fa476d500280071fd98277d8c194d877355a
| 4,330
|
py
|
Python
|
final_project.py
|
jetboom1/workshop1
|
9f3d33876e6c2592386fca4f8df6659606059901
|
[
"CC0-1.0"
] | null | null | null |
final_project.py
|
jetboom1/workshop1
|
9f3d33876e6c2592386fca4f8df6659606059901
|
[
"CC0-1.0"
] | null | null | null |
final_project.py
|
jetboom1/workshop1
|
9f3d33876e6c2592386fca4f8df6659606059901
|
[
"CC0-1.0"
] | 1
|
2021-12-15T08:44:41.000Z
|
2021-12-15T08:44:41.000Z
|
import doctest
def read_csv(path_file):
"""
:param path_file: path to csv file
    :return: list of lists, each inner list consisting of 5 elements:
    0 - name of task
    1 - place
2 - teammates
3 - deadline
4 - priority
>>> 1 == 1
True
"""
all_list = []
with open(path_file, 'r', encoding='utf-8') as csv_file:
for line in csv_file:
line = line.strip()
line = line.split(',')
all_list.append(line)
return all_list
def print_csv(all_list):
"""
:param all_list: list of all tasks
:return: nothing
prints all tasks
>>> print_csv([["поїсти", "БФК", "сам", "17.12.2021", "5"]])
['поїсти', 'БФК', 'сам', '17.12.2021', '5']
<BLANKLINE>
>>> 110 * 2 == 221
False
"""
    all_list = sorted(all_list, key=lambda x: int(x[4]))  # sort priorities numerically, not as strings
for i in range(len(all_list)):
print(all_list[i])
print()
def delete_notion(filepath, name_task):
"""
Delete task from csv file
:param filepath:
:param name_task:
:return:
>>> 10==1123
False
"""
with open(filepath, mode='r', encoding='utf-8') as file:
data = file.readlines()
for i in range(len(data)):
data[i] = data[i].strip("\n")
data[i] = data[i].split(',')
for i in data:
if name_task in i:
data.pop(data.index(i))
with open(filepath, mode='w', encoding='utf-8') as file:
for item in data:
file.write(",".join(item))
file.write('\n')
def tasks_today(list_of_tasks):
"""
list[list[str]] --> list[list[str]]
Return tasks for today.
>>> tasks_today([['task', 's', 's', '18.12.2001', '1'], ['task2', 's', 's', '18.12.2001', '2']])
No task for today, Relax :)
<BLANKLINE>
"""
from datetime import date
today = str(date.today().strftime('%d.%m.%Y'))
# today = today.replace("/", ".")
today_tasks = []
for i in range(len(list_of_tasks)):
if today in list_of_tasks[i]:
today_tasks.append(list_of_tasks[i])
if len(today_tasks) == 0:
print('No task for today, Relax :)')
else:
print(today_tasks)
    print()
def write_csv(path_file, new_task):
"""
:param new_task: what to write in csv file
:param path_file: path to csv file
:return: nothing
writes a new line (task) to csv file
>>> write_csv('my_csv.csv', 'task, s, s, 18.12.2001, 1')
"""
with open(path_file, 'a', encoding='utf-8') as csv_file:
csv_file.write('\n' + new_task)
def add_tasks():
"""Asks information about task and returns it in csv format
>>> print('Doctest will not work here')
Doctest will not work here
"""
task = input('Write a task: ')
location = input('Write a location: ')
collaborators = input('Write your coworkers: ')
date = input('Write the date by which the task must be completed in format dd.mm.yyyy: ')
priority = input('Write a priority from 1 to the number of the last task: ')
lst = [task,location,collaborators,date,priority]
return ','.join(lst)
if __name__ == '__main__':
doctest.testmod(raise_on_error=True)
print('enter your path to csv file with tasks')
path = input()
while True:
print('Enter 1 if you want to add task')
print('Enter 2 if you want to delete task')
print('Enter 3 if you want to see today task')
print('Enter 4 to see all task, sorted by priority')
print('Enter exit if you want to exit')
action = input()
if action == '1':
print("What task do you want to add ?")
task = add_tasks()
write_csv(path, task)
elif action == '2':
print("What task do you want to delete ?")
task = input()
delete_notion(path, task)
elif action == '3':
print("Do you want to see today tasks ?")
tasks_today(read_csv(path))
elif action == '4':
print_csv(read_csv(path))
elif action == "exit":
print('thanks for using, bye')
break
else:
print('wrong input, repeat one more time')
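A minimal, non-interactive usage sketch of the helpers above (the file name `demo_tasks.csv` and the sample rows are invented for illustration); each row holds five comma-separated fields: task name, place, teammates, deadline in dd.mm.yyyy, and priority:

```python
# Hypothetical demo; 'demo_tasks.csv' is a made-up file name.
with open('demo_tasks.csv', 'w', encoding='utf-8') as f:
    f.write('submit report,office,team,17.12.2021,1')

write_csv('demo_tasks.csv', 'buy groceries,supermarket,alone,18.12.2021,2')
tasks = read_csv('demo_tasks.csv')   # list of 5-element lists
print_csv(tasks)                     # all tasks, sorted by priority
tasks_today(tasks)                   # only tasks whose deadline is today's date
delete_notion('demo_tasks.csv', 'buy groceries')
```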
| 30.27972
| 101
| 0.546882
|
a028b6fb41a464d2260076a9b0771dd1e9f8a72a
| 5,276
|
py
|
Python
|
MIT-6.006/01-PeakFinding/ps1/peak.py
|
ashirwadsangwan/Python
|
b4e570bb31783178d241b9f2a7145343d830b698
|
[
"MIT"
] | null | null | null |
MIT-6.006/01-PeakFinding/ps1/peak.py
|
ashirwadsangwan/Python
|
b4e570bb31783178d241b9f2a7145343d830b698
|
[
"MIT"
] | null | null | null |
MIT-6.006/01-PeakFinding/ps1/peak.py
|
ashirwadsangwan/Python
|
b4e570bb31783178d241b9f2a7145343d830b698
|
[
"MIT"
] | 1
|
2022-02-22T16:08:43.000Z
|
2022-02-22T16:08:43.000Z
|
import trace
################################################################################
########################### Class for Peak Problems ############################
################################################################################
class PeakProblem(object):
"""
A class representing an instance of a peak-finding problem.
"""
def __init__(self, array, bounds):
"""
A method for initializing an instance of the PeakProblem class.
Takes an array and an argument indicating which rows to include.
RUNTIME: O(1)
"""
(startRow, startCol, numRow, numCol) = bounds
self.array = array
self.bounds = bounds
self.startRow = startRow
self.startCol = startCol
self.numRow = numRow
self.numCol = numCol
def get(self, location):
"""
Returns the value of the array at the given location, offset by
the coordinates (startRow, startCol).
RUNTIME: O(1)
"""
(r, c) = location
if not (0 <= r and r < self.numRow):
return 0
if not (0 <= c and c < self.numCol):
return 0
return self.array[self.startRow + r][self.startCol + c]
def getBetterNeighbor(self, location, trace=None):
"""
If (r, c) has a better neighbor, return the neighbor. Otherwise,
return the location (r, c).
RUNTIME: O(1)
"""
(r, c) = location
best = location
if r - 1 >= 0 and self.get((r - 1, c)) > self.get(best):
best = (r - 1, c)
if c - 1 >= 0 and self.get((r, c - 1)) > self.get(best):
best = (r, c - 1)
if r + 1 < self.numRow and self.get((r + 1, c)) > self.get(best):
best = (r + 1, c)
if c + 1 < self.numCol and self.get((r, c + 1)) > self.get(best):
best = (r, c + 1)
        if trace is not None:
trace.getBetterNeighbor(location, best)
return best
def getMaximum(self, locations, trace=None):
"""
Finds the location in the current problem with the greatest value.
RUNTIME: O(len(locations))
"""
(bestLoc, bestVal) = (None, 0)
for loc in locations:
if bestLoc is None or self.get(loc) > bestVal:
(bestLoc, bestVal) = (loc, self.get(loc))
        if trace is not None:
trace.getMaximum(locations, bestLoc)
return bestLoc
def isPeak(self, location):
"""
Returns true if the given location is a peak in the current subproblem.
RUNTIME: O(1)
"""
return self.getBetterNeighbor(location) == location
def getSubproblem(self, bounds):
"""
Returns a subproblem with the given bounds. The bounds is a quadruple
of numbers: (starting row, starting column, # of rows, # of columns).
RUNTIME: O(1)
"""
(sRow, sCol, nRow, nCol) = bounds
newBounds = (self.startRow + sRow, self.startCol + sCol, nRow, nCol)
return PeakProblem(self.array, newBounds)
def getSubproblemContaining(self, boundList, location):
"""
Returns the subproblem containing the given location. Picks the first
of the subproblems in the list which satisfies that constraint, and
then constructs the subproblem using getSubproblem().
RUNTIME: O(len(boundList))
"""
(row, col) = location
for (sRow, sCol, nRow, nCol) in boundList:
if sRow <= row and row < sRow + nRow:
if sCol <= col and col < sCol + nCol:
return self.getSubproblem((sRow, sCol, nRow, nCol))
# shouldn't reach here
return self
def getLocationInSelf(self, problem, location):
"""
Remaps the location in the given problem to the same location in
the problem that this function is being called from.
RUNTIME: O(1)
"""
(row, col) = location
newRow = row + problem.startRow - self.startRow
newCol = col + problem.startCol - self.startCol
return (newRow, newCol)
################################################################################
################################ Helper Methods ################################
################################################################################
def getDimensions(array):
"""
Gets the dimensions for a two-dimensional array. The first dimension
is simply the number of items in the list; the second dimension is the
length of the shortest row. This ensures that any location (row, col)
that is less than the resulting bounds will in fact map to a valid
location in the array.
RUNTIME: O(len(array))
"""
    rows = len(array)
    # track the shortest row so that any (row, col) within the bounds is valid
    cols = len(array[0]) if rows > 0 else 0
    for row in array:
        if len(row) < cols:
            cols = len(row)
return (rows, cols)
def createProblem(array):
"""
Constructs an instance of the PeakProblem object for the given array,
using bounds derived from the array using the getDimensions function.
RUNTIME: O(len(array))
"""
(rows, cols) = getDimensions(array)
return PeakProblem(array, (0, 0, rows, cols))
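The file above only defines the `PeakProblem` container and its helpers; the course's algorithms live elsewhere. As a rough illustration of how the API fits together, here is a hedged sketch (not part of the original problem set) of greedy ascent built solely on `createProblem`, `getBetterNeighbor`, and `isPeak`:

```python
def greedy_ascent(problem, start=(0, 0), trace=None):
    """Follow strictly better neighbors until a peak is reached (sketch only)."""
    location = start
    while not problem.isPeak(location):
        location = problem.getBetterNeighbor(location, trace)
    return location

# example: the value 9 at (row 2, column 1) is the unique peak
problem = createProblem([[1, 2, 3],
                         [4, 5, 6],
                         [7, 9, 8]])
print(greedy_ascent(problem))  # (2, 1)
```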
| 29.80791
| 80
| 0.530136
|
5168c2f4d07d8a8dc2631dcdd6edfcca3eb821f0
| 156
|
py
|
Python
|
test.py
|
aseemxandr/repo1
|
4e1b7c1e42e61c0ec348d73effeb819ec940f5d1
|
[
"MIT"
] | null | null | null |
test.py
|
aseemxandr/repo1
|
4e1b7c1e42e61c0ec348d73effeb819ec940f5d1
|
[
"MIT"
] | null | null | null |
test.py
|
aseemxandr/repo1
|
4e1b7c1e42e61c0ec348d73effeb819ec940f5d1
|
[
"MIT"
] | null | null | null |
class main:
x = 'aseem'
def func1(self):
self.x += 'something'
print('x=', self.x)
if __name__ == '__main__':
main().func1()
| 14.181818
| 29
| 0.50641
|
7acef55594eaaf92ab862d9b54f84fdccc5f265f
| 25,083
|
py
|
Python
|
salt/modules/rpmbuild_pkgbuild.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/rpmbuild_pkgbuild.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 1
|
2017-07-10T21:44:39.000Z
|
2017-07-10T21:44:39.000Z
|
salt/modules/rpmbuild_pkgbuild.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | null | null | null |
"""
RPM Package builder system
.. versionadded:: 2015.8.0
This system allows for all of the components to build rpms safely in chrooted
environments. This also provides a function to generate yum repositories
This module implements the pkgbuild interface
"""
import errno
import functools
import logging
import os
import re
import shutil
import tempfile
import time
import traceback
import urllib.parse
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
import salt.utils.user
import salt.utils.vt
from salt.exceptions import CommandExecutionError, SaltInvocationError
HAS_LIBS = False
try:
import gnupg # pylint: disable=unused-import
import salt.modules.gpg
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = "pkgbuild"
def __virtual__():
"""
Confirm this module is on a RPM based system, and has required utilities
"""
missing_util = False
utils_reqd = ["gpg", "rpm", "rpmbuild", "mock", "createrepo"]
for named_util in utils_reqd:
if not salt.utils.path.which(named_util):
missing_util = True
break
if HAS_LIBS and not missing_util:
if __grains__.get("os_family", False) in ("RedHat", "Suse"):
return __virtualname__
else:
# The module will be exposed as `rpmbuild` on non-RPM based systems
return "rpmbuild"
else:
return (
False,
"The rpmbuild module could not be loaded: requires python-gnupg, "
"gpg, rpm, rpmbuild, mock and createrepo utilities to be installed",
)
def _create_rpmmacros(runas="root"):
"""
Create the .rpmmacros file in user's home directory
"""
home = os.path.expanduser("~" + runas)
rpmbuilddir = os.path.join(home, "rpmbuild")
if not os.path.isdir(rpmbuilddir):
__salt__["file.makedirs_perms"](name=rpmbuilddir, user=runas, group="mock")
mockdir = os.path.join(home, "mock")
if not os.path.isdir(mockdir):
__salt__["file.makedirs_perms"](name=mockdir, user=runas, group="mock")
rpmmacros = os.path.join(home, ".rpmmacros")
with salt.utils.files.fopen(rpmmacros, "w") as afile:
afile.write(salt.utils.stringutils.to_str("%_topdir {}\n".format(rpmbuilddir)))
afile.write("%signature gpg\n")
afile.write("%_source_filedigest_algorithm 8\n")
afile.write("%_binary_filedigest_algorithm 8\n")
afile.write("%_gpg_name packaging@saltstack.com\n")
def _mk_tree(runas="root"):
"""
Create the rpm build tree
"""
basedir = tempfile.mkdtemp()
paths = ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]
for path in paths:
full = os.path.join(basedir, path)
__salt__["file.makedirs_perms"](name=full, user=runas, group="mock")
return basedir
def _get_spec(tree_base, spec, template, saltenv="base"):
"""
Get the spec file and place it in the SPECS dir
"""
spec_tgt = os.path.basename(spec)
dest = os.path.join(tree_base, "SPECS", spec_tgt)
return __salt__["cp.get_url"](spec, dest, saltenv=saltenv)
def _get_src(tree_base, source, saltenv="base", runas="root"):
"""
Get the named sources and place them into the tree_base
"""
parsed = urllib.parse.urlparse(source)
sbase = os.path.basename(source)
dest = os.path.join(tree_base, "SOURCES", sbase)
if parsed.scheme:
lsrc = __salt__["cp.get_url"](source, dest, saltenv=saltenv)
else:
shutil.copy(source, dest)
__salt__["file.chown"](path=dest, user=runas, group="mock")
def _get_distset(tgt):
"""
Get the distribution string for use with rpmbuild and mock
"""
# Centos adds 'centos' string to rpm names, removing that to have
# consistent naming on Centos and Redhat, and allow for Amazon naming
tgtattrs = tgt.split("-")
if tgtattrs[0] == "amzn2":
distset = '--define "dist .{}"'.format(tgtattrs[0])
elif tgtattrs[1] in ["6", "7", "8"]:
distset = '--define "dist .el{}"'.format(tgtattrs[1])
else:
distset = ""
return distset
def _get_deps(deps, tree_base, saltenv="base"):
"""
Get include string for list of dependent rpms to build package
"""
deps_list = ""
if deps is None:
return deps_list
if not isinstance(deps, list):
raise SaltInvocationError(
"'deps' must be a Python list or comma-separated string"
)
for deprpm in deps:
        parsed = urllib.parse.urlparse(deprpm)
depbase = os.path.basename(deprpm)
dest = os.path.join(tree_base, depbase)
if parsed.scheme:
__salt__["cp.get_url"](deprpm, dest, saltenv=saltenv)
else:
shutil.copy(deprpm, dest)
deps_list += " {}".format(dest)
return deps_list
def _check_repo_gpg_phrase_utils():
"""
    Check that /usr/libexec/gpg-preset-passphrase is installed
"""
util_name = "/usr/libexec/gpg-preset-passphrase"
if __salt__["file.file_exists"](util_name):
return True
else:
raise CommandExecutionError(
"utility '{}' needs to be installed".format(util_name)
)
def _get_gpg_key_resources(keyid, env, use_passphrase, gnupghome, runas):
"""
    Obtain gpg key resource information to sign repo files with
keyid
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
env
A dictionary of environment variables to be utilized in creating the
repository.
use_passphrase : False
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter.
gnupghome : /etc/salt/gpgkeys
Location where GPG related files are stored, used with ``keyid``.
runas : root
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
Returns:
tuple
            use_gpg_agent True | False, Redhat 8 now makes use of a gpg-agent similar to Debian
local_keyid key id to use in signing
define_gpg_name string containing definition to use with addsign (use_gpg_agent False)
phrase pass phrase (may not be used)
"""
local_keygrip_to_use = None
local_key_fingerprint = None
local_keyid = None
local_uids = None
define_gpg_name = ""
phrase = ""
retrc = 0
use_gpg_agent = False
if (
__grains__.get("os_family") == "RedHat"
and __grains__.get("osmajorrelease") >= 8
):
use_gpg_agent = True
if keyid is not None:
# import_keys
pkg_pub_key_file = "{}/{}".format(
gnupghome, __salt__["pillar.get"]("gpg_pkg_pub_keyname", None)
)
pkg_priv_key_file = "{}/{}".format(
gnupghome, __salt__["pillar.get"]("gpg_pkg_priv_keyname", None)
)
if pkg_pub_key_file is None or pkg_priv_key_file is None:
raise SaltInvocationError(
"Pillar data should contain Public and Private keys associated with 'keyid'"
)
try:
__salt__["gpg.import_key"](
user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome
)
__salt__["gpg.import_key"](
user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome
)
except SaltInvocationError:
raise SaltInvocationError(
"Public and Private key files associated with Pillar data and 'keyid' "
"{} could not be found".format(keyid)
)
# gpg keys should have been loaded as part of setup
# retrieve specified key and preset passphrase
local_keys = __salt__["gpg.list_keys"](user=runas, gnupghome=gnupghome)
for gpg_key in local_keys:
if keyid == gpg_key["keyid"][8:]:
local_uids = gpg_key["uids"]
local_keyid = gpg_key["keyid"]
if use_gpg_agent:
local_keygrip_to_use = gpg_key["fingerprint"]
local_key_fingerprint = gpg_key["fingerprint"]
break
if use_gpg_agent:
cmd = "gpg --with-keygrip --list-secret-keys"
local_keys2_keygrip = __salt__["cmd.run"](cmd, runas=runas, env=env)
local_keys2 = iter(local_keys2_keygrip.splitlines())
try:
for line in local_keys2:
if line.startswith("sec"):
line_fingerprint = next(local_keys2).lstrip().rstrip()
if local_key_fingerprint == line_fingerprint:
lkeygrip = next(local_keys2).split("=")
local_keygrip_to_use = lkeygrip[1].lstrip().rstrip()
break
except StopIteration:
raise SaltInvocationError(
"unable to find keygrip associated with fingerprint '{}' for keyid '{}'".format(
local_key_fingerprint, local_keyid
)
)
if local_keyid is None:
raise SaltInvocationError(
"The key ID '{}' was not found in GnuPG keyring at '{}'".format(
keyid, gnupghome
)
)
if use_passphrase:
phrase = __salt__["pillar.get"]("gpg_passphrase")
if use_gpg_agent:
_check_repo_gpg_phrase_utils()
cmd = (
"/usr/libexec/gpg-preset-passphrase --verbose --preset "
'--passphrase "{}" {}'.format(phrase, local_keygrip_to_use)
)
retrc = __salt__["cmd.retcode"](cmd, runas=runas, env=env)
if retrc != 0:
raise SaltInvocationError(
"Failed to preset passphrase, error {1}, "
"check logs for further details".format(retrc)
)
if local_uids:
define_gpg_name = "--define='%_signature gpg' --define='%_gpg_name {}'".format(
local_uids[0]
)
# need to update rpm with public key
cmd = "rpm --import {}".format(pkg_pub_key_file)
retrc = __salt__["cmd.retcode"](cmd, runas=runas, use_vt=True)
if retrc != 0:
raise SaltInvocationError(
"Failed to import public key from file {} with return "
"error {}, check logs for further details".format(
pkg_pub_key_file, retrc
)
)
return (use_gpg_agent, local_keyid, define_gpg_name, phrase)
def _sign_file(runas, define_gpg_name, phrase, abs_file, timeout):
"""
Sign file with provided key and definition
"""
SIGN_PROMPT_RE = re.compile(r"Enter pass phrase: ", re.M)
# interval of 0.125 is really too fast on some systems
interval = 0.5
number_retries = timeout / interval
times_looped = 0
error_msg = "Failed to sign file {}".format(abs_file)
cmd = "rpm {} --addsign {}".format(define_gpg_name, abs_file)
preexec_fn = functools.partial(salt.utils.user.chugid_and_umask, runas, None)
try:
stdout, stderr = None, None
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
preexec_fn=preexec_fn,
stream_stdout=True,
stream_stderr=True,
)
while proc.has_unread_data:
stdout, stderr = proc.recv()
if stdout and SIGN_PROMPT_RE.search(stdout):
# have the prompt for inputting the passphrase
proc.sendline(phrase)
else:
times_looped += 1
if times_looped > number_retries:
raise SaltInvocationError(
"Attemping to sign file {} failed, timed out after {} seconds".format(
abs_file, int(times_looped * interval)
)
)
time.sleep(interval)
proc_exitstatus = proc.exitstatus
if proc_exitstatus != 0:
raise SaltInvocationError(
"Signing file {} failed with proc.status {}".format(
abs_file, proc_exitstatus
)
)
except salt.utils.vt.TerminalException as err:
trace = traceback.format_exc()
log.error(error_msg, err, trace)
finally:
proc.close(terminate=True, kill=True)
def _sign_files_with_gpg_agent(runas, local_keyid, abs_file, repodir, env, timeout):
"""
Sign file with provided key utilizing gpg-agent
"""
cmd = "rpmsign --verbose --key-id={} --addsign {}".format(local_keyid, abs_file)
retrc = __salt__["cmd.retcode"](cmd, runas=runas, cwd=repodir, use_vt=True, env=env)
if retrc != 0:
raise SaltInvocationError(
"Signing encountered errors for command '{}', "
"return error {}, check logs for further details".format(cmd, retrc)
)
def make_src_pkg(
dest_dir, spec, sources, env=None, template=None, saltenv="base", runas="root"
):
"""
Create a source rpm from the given spec file and sources
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
        Optional argument; if it is not supplied, no templating engine is used.
saltenv
The saltenv to use for files downloaded from the salt filesever
runas
The user to run the build process as
.. versionadded:: 2018.3.3
.. note::
using SHA256 as digest and minimum level dist el6
"""
_create_rpmmacros(runas)
tree_base = _mk_tree(runas)
spec_path = _get_spec(tree_base, spec, template, saltenv)
__salt__["file.chown"](path=spec_path, user=runas, group="mock")
__salt__["file.chown"](path=tree_base, user=runas, group="mock")
if isinstance(sources, str):
sources = sources.split(",")
for src in sources:
_get_src(tree_base, src, saltenv, runas)
# make source rpms for dist el6 with SHA256, usable with mock on other dists
cmd = 'rpmbuild --verbose --define "_topdir {}" -bs --define "dist .el6" {}'.format(
tree_base, spec_path
)
retrc = __salt__["cmd.retcode"](cmd, runas=runas)
if retrc != 0:
raise SaltInvocationError(
"Make source package for destination directory {}, spec {}, sources {}, failed "
"with return error {}, check logs for further details".format(
dest_dir, spec, sources, retrc
)
)
srpms = os.path.join(tree_base, "SRPMS")
ret = []
if not os.path.isdir(dest_dir):
__salt__["file.makedirs_perms"](name=dest_dir, user=runas, group="mock")
for fn_ in os.listdir(srpms):
full = os.path.join(srpms, fn_)
tgt = os.path.join(dest_dir, fn_)
shutil.copy(full, tgt)
ret.append(tgt)
return ret
def build(
runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv="base",
log_dir="/var/log/salt/pkgbuild",
):
"""
Given the package destination directory, the spec file source and package
sources, use mock to safely build the rpm defined in the spec file
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion
"""
ret = {}
try:
__salt__["file.chown"](path=dest_dir, user=runas, group="mock")
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
srpm_dir = os.path.join(dest_dir, "SRPMS")
srpm_build_dir = tempfile.mkdtemp()
try:
srpms = make_src_pkg(
srpm_build_dir, spec, sources, env, template, saltenv, runas
)
except Exception as exc: # pylint: disable=broad-except
shutil.rmtree(srpm_build_dir)
log.error("Failed to make src package")
return ret
distset = _get_distset(tgt)
noclean = ""
deps_dir = tempfile.mkdtemp()
deps_list = _get_deps(deps, deps_dir, saltenv)
retrc = 0
for srpm in srpms:
dbase = os.path.dirname(srpm)
results_dir = tempfile.mkdtemp()
try:
__salt__["file.chown"](path=dbase, user=runas, group="mock")
__salt__["file.chown"](path=results_dir, user=runas, group="mock")
cmd = "mock --root={} --resultdir={} --init".format(tgt, results_dir)
retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
if deps_list and not deps_list.isspace():
cmd = "mock --root={} --resultdir={} --install {} {}".format(
tgt, results_dir, deps_list, noclean
)
retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
noclean += " --no-clean"
cmd = "mock --root={} --resultdir={} {} {} {}".format(
tgt, results_dir, distset, noclean, srpm
)
retrc |= __salt__["cmd.retcode"](cmd, runas=runas)
cmdlist = [
"rpm",
"-qp",
"--queryformat",
"{0}/%{{name}}/%{{version}}-%{{release}}".format(log_dir),
srpm,
]
log_dest = __salt__["cmd.run_stdout"](cmdlist, python_shell=False)
for filename in os.listdir(results_dir):
full = os.path.join(results_dir, filename)
if filename.endswith("src.rpm"):
sdest = os.path.join(srpm_dir, filename)
try:
__salt__["file.makedirs_perms"](
name=srpm_dir, user=runas, group="mock"
)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, sdest)
ret.setdefault("Source Packages", []).append(sdest)
elif filename.endswith(".rpm"):
bdist = os.path.join(dest_dir, filename)
shutil.copy(full, bdist)
ret.setdefault("Packages", []).append(bdist)
else:
log_file = os.path.join(log_dest, filename)
try:
__salt__["file.makedirs_perms"](
name=log_dest, user=runas, group="mock"
)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, log_file)
ret.setdefault("Log Files", []).append(log_file)
except Exception as exc: # pylint: disable=broad-except
log.error("Error building from %s: %s", srpm, exc)
finally:
shutil.rmtree(results_dir)
if retrc != 0:
raise SaltInvocationError(
"Building packages for destination directory {}, spec {}, sources {}, failed "
"with return error {}, check logs for further details".format(
dest_dir, spec, sources, retrc
)
)
shutil.rmtree(deps_dir)
shutil.rmtree(srpm_build_dir)
return ret
def make_repo(
repodir,
keyid=None,
env=None,
use_passphrase=False,
gnupghome="/etc/salt/gpgkeys",
runas="root",
timeout=15.0,
):
"""
Make a package repository and optionally sign packages present
Given the repodir, create a ``yum`` repository out of the rpms therein
    and optionally sign it and the packages present; the name given is the directory to
turn into a repo. This state is best used with onchanges linked to
your package building states.
repodir
The directory to find packages that will be in the repository.
keyid
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
.. versionchanged:: 2016.3.0
A dictionary of environment variables to be utilized in creating the
repository.
.. note::
This parameter is not used for making ``yum`` repositories.
use_passphrase : False
.. versionadded:: 2016.3.0
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter.
.. code-block:: bash
pillar='{ "gpg_passphrase" : "my_passphrase" }'
.. versionadded:: 3001.1
RHEL 8 and above leverages gpg-agent and gpg-preset-passphrase for
caching keys, etc.
gnupghome : /etc/salt/gpgkeys
.. versionadded:: 2016.3.0
Location where GPG related files are stored, used with ``keyid``.
runas : root
.. versionadded:: 2016.3.0
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
timeout : 15.0
.. versionadded:: 2016.3.4
Timeout in seconds to wait for the prompt for inputting the passphrase.
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_repo /var/www/html/
"""
home = os.path.expanduser("~" + runas)
rpmmacros = os.path.join(home, ".rpmmacros")
if not os.path.exists(rpmmacros):
_create_rpmmacros(runas)
if gnupghome and env is None:
env = {}
env["GNUPGHOME"] = gnupghome
use_gpg_agent, local_keyid, define_gpg_name, phrase = _get_gpg_key_resources(
keyid, env, use_passphrase, gnupghome, runas
)
# sign_it_here
for fileused in os.listdir(repodir):
if fileused.endswith(".rpm"):
abs_file = os.path.join(repodir, fileused)
if use_gpg_agent:
_sign_files_with_gpg_agent(
runas, local_keyid, abs_file, repodir, env, timeout
)
else:
_sign_file(runas, define_gpg_name, phrase, abs_file, timeout)
cmd = "createrepo --update {}".format(repodir)
retrc = __salt__["cmd.run_all"](cmd, runas=runas)
return retrc
| 33.178571
| 106
| 0.593948
|
b20671d98ccf682d00dbae959a1dea5d3856055a
| 213
|
py
|
Python
|
Mean/calMean.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 199
|
2019-12-01T01:23:34.000Z
|
2022-02-28T10:30:40.000Z
|
Mean/calMean.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 35
|
2020-06-08T17:59:22.000Z
|
2021-11-11T04:00:29.000Z
|
Mean/calMean.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 106
|
2020-02-05T01:28:19.000Z
|
2022-03-11T05:38:54.000Z
|
def calMean(array):
mean = 0
for i in range(len(array)):
mean = mean + array[i]
mean = mean/float(len(array))
return mean
if __name__ == "__main__":
array = [1,2,3,4]
mean = calMean(array)
print (mean)
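For reference, the same result can be checked against built-ins and the standard library (a quick sanity check, assuming the `calMean` defined above):

```python
from statistics import mean

values = [1, 2, 3, 4]
assert calMean(values) == sum(values) / len(values) == mean(values) == 2.5
```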
| 17.75
| 30
| 0.638498
|
56e7a8ba6c61ede75655db3a33853c41ff6c5222
| 1,072
|
py
|
Python
|
var/spack/repos/builtin/packages/py-griddataformats/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2021-09-29T02:14:40.000Z
|
2022-01-27T20:50:36.000Z
|
var/spack/repos/builtin/packages/py-griddataformats/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2022-02-28T11:30:18.000Z
|
2022-03-23T19:34:56.000Z
|
var/spack/repos/builtin/packages/py-griddataformats/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGriddataformats(PythonPackage):
"""The gridDataFormats package provides classes to unify reading
and writing n-dimensional datasets. One can read grid data from
files, make them available as a Grid object, and write out the
data again."""
homepage = "http://www.mdanalysis.org/GridDataFormats"
pypi = "GridDataFormats/GridDataFormats-0.5.0.tar.gz"
version('0.5.0', sha256='f317ed60708de22d1b2a76ce89a00f722d903291b1055ff1018d441870c39d69')
version('0.4.1', sha256='b362662c2dc475e2a3895fe044eaaa9a707bd660fd109a63dac84a47236690a3')
version('0.3.3', sha256='938f0efcb3bc2f58ec85048b933942da8a52c134170acc97cb095f09d3698fbd')
depends_on('python@2.7:')
depends_on('py-setuptools', type='build')
depends_on('py-numpy@1.0.3:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
| 41.230769
| 95
| 0.747201
|
aec964e58f67ad5360f33c72ae2fe6ed99fe9afa
| 1,076
|
py
|
Python
|
tests/nlpzero/common/test_util.py
|
ftnext/nlp-zero
|
5bb010d1eed58cc75924aebc47b8b771bc9e203e
|
[
"MIT"
] | null | null | null |
tests/nlpzero/common/test_util.py
|
ftnext/nlp-zero
|
5bb010d1eed58cc75924aebc47b8b771bc9e203e
|
[
"MIT"
] | 1
|
2020-09-08T10:33:40.000Z
|
2020-09-08T10:52:46.000Z
|
tests/nlpzero/common/test_util.py
|
ftnext/nlp-zero
|
5bb010d1eed58cc75924aebc47b8b771bc9e203e
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import numpy as np
from nlpzero.common import util
class PreprocessTestCase(TestCase):
def test_return_numpy_array_as_corpus(self):
text = "You say goodbye and I say hello."
expected = np.array([0, 1, 2, 3, 4, 1, 5, 6])
actual, _, _ = util.preprocess(text)
np.testing.assert_array_equal(actual, expected)
class CreateCoMatrixTestCase(TestCase):
def test_return_numpy_array_as_cooccurrence_matrix(self):
text = "You say goodbye and I say hello."
corpus, word_to_id, _ = util.preprocess(text)
expected = np.array(
[
[0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0],
],
dtype=np.int32,
)
actual = util.create_co_matrix(corpus, len(word_to_id))
np.testing.assert_array_equal(actual, expected)
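The real `preprocess` and `create_co_matrix` live in `nlpzero.common.util` and are not shown here. A minimal sketch consistent with the expectations encoded in these tests (assuming lower-casing, whitespace tokenization with the final period split off, and a symmetric co-occurrence window of 1) could look like:

```python
import numpy as np

def preprocess(text):
    # lower-case, isolate the trailing period, then map each word to an integer id
    words = text.lower().replace('.', ' .').split(' ')
    word_to_id, id_to_word = {}, {}
    for word in words:
        if word not in word_to_id:
            new_id = len(word_to_id)
            word_to_id[word] = new_id
            id_to_word[new_id] = word
    corpus = np.array([word_to_id[w] for w in words])
    return corpus, word_to_id, id_to_word

def create_co_matrix(corpus, vocab_size, window_size=1):
    # count, for every word id, how often each other id appears within the window
    co_matrix = np.zeros((vocab_size, vocab_size), dtype=np.int32)
    for idx, word_id in enumerate(corpus):
        for offset in range(1, window_size + 1):
            left, right = idx - offset, idx + offset
            if left >= 0:
                co_matrix[word_id, corpus[left]] += 1
            if right < len(corpus):
                co_matrix[word_id, corpus[right]] += 1
    return co_matrix
```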
| 28.315789
| 63
| 0.531599
|
7c2e287cb8251ebf11dbb922e50012ce4e01b692
| 8,456
|
py
|
Python
|
options/base_options.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 9
|
2020-10-14T03:32:51.000Z
|
2022-01-14T20:38:05.000Z
|
options/base_options.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 1
|
2021-03-03T20:15:42.000Z
|
2021-08-07T12:29:22.000Z
|
options/base_options.py
|
Huage001/Artistic-Video-Partial-Conv-Depth-Loss
|
c990b8bcc88ce0655f3ac78b526324b1ff5deb41
|
[
"BSD-3-Clause"
] | 2
|
2020-09-02T00:58:29.000Z
|
2020-11-25T16:41:51.000Z
|
import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size_w', type=int, default=512, help='scale images to this size')
parser.add_argument('--crop_size_w', type=int, default=512, help='then crop to this size')
parser.add_argument('--load_size_h', type=int, default=256, help='scale images to this size')
parser.add_argument('--crop_size_h', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG} size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
assert(opt.crop_size_h % 4 == 0 and opt.crop_size_w % 4 == 0)
opt.load_size = (opt.load_size_h, opt.load_size_w)
opt.crop_size = (opt.crop_size_h, opt.crop_size_w)
self.opt = opt
return self.opt
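`BaseOptions` never sets `self.isTrain` itself; concrete option classes are expected to do that (and to add a `--phase` argument, which `print_options` relies on). A minimal sketch of that subclassing pattern, not taken from the original repository, might be:

```python
# Hypothetical subclass for illustration; the real project defines
# TrainOptions/TestOptions in separate files and still needs the
# repo's `models` and `data` packages (and a --dataroot argument) at parse time.
class TestOptions(BaseOptions):
    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--phase', type=str, default='test',
                            help='train, val, test, etc')
        self.isTrain = False
        return parser

# opt = TestOptions().parse()   # parses CLI args, then prints and saves them
```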
| 58.722222
| 235
| 0.661897
|
7755438c11804a09332e06897a8e996432ecc858
| 3,142
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py
|
abhahn/azure-sdk-for-python
|
09521dfb517e0859ec961cae006fb728d787b565
|
[
"MIT"
] | 2
|
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py
|
rakshith91/azure-sdk-for-python
|
3c4f2575d31260fa1bda870b04e34c082ac5702b
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py
|
rakshith91/azure-sdk-for-python
|
3c4f2575d31260fa1bda870b04e34c082ac5702b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_analyze_healthcare_entities.py
DESCRIPTION:
This sample demonstrates how to detect healthcare entities in a batch of documents.
Each entity found in the document will have a link associated with it from a
data source. Relations between entities will also be included in the response.
USAGE:
python sample_analyze_healthcare_entities.py
Set the environment variables with your own values before running the sample:
1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_TEXT_ANALYTICS_KEY - your Text Analytics subscription key
"""
import os
class AnalyzeHealthcareEntitiesSample(object):
def analyze_healthcare_entities(self):
# [START analyze_healthcare_entities]
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint,
credential=AzureKeyCredential(key),
)
documents = [
"Subject is taking 100mg of ibuprofen twice daily"
]
poller = text_analytics_client.begin_analyze_healthcare_entities(documents, show_stats=True)
result = poller.result()
docs = [doc for doc in result if not doc.is_error]
print("Results of Healthcare Entities Analysis:")
for idx, doc in enumerate(docs):
for entity in doc.entities:
print("Entity: {}".format(entity.text))
print("...Category: {}".format(entity.category))
print("...Subcategory: {}".format(entity.subcategory))
print("...Offset: {}".format(entity.offset))
print("...Confidence score: {}".format(entity.confidence_score))
if entity.data_sources is not None:
print("...Data Sources:")
for data_source in entity.data_sources:
print("......Entity ID: {}".format(data_source.entity_id))
print("......Name: {}".format(data_source.name))
if len(entity.related_entities) > 0:
print("...Related Entities:")
for related_entity, relation_type in entity.related_entities.items():
print("......Entity Text: {}".format(related_entity.text))
print("......Relation Type: {}".format(relation_type))
print("------------------------------------------")
# [END analyze_healthcare_entities]
if __name__ == "__main__":
sample = AnalyzeHealthcareEntitiesSample()
sample.analyze_healthcare_entities()
| 40.282051
| 100
| 0.606938
|
3efffbafaeceaded5a9d81932a7b7410f958c107
| 6,749
|
py
|
Python
|
official/transformer/utils/tokenizer_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | 1
|
2021-05-17T01:42:29.000Z
|
2021-05-17T01:42:29.000Z
|
official/transformer/utils/tokenizer_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
official/transformer/utils/tokenizer_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.transformer.utils import tokenizer
class SubtokenizerTest(tf.test.TestCase):
@staticmethod
def _init_subtokenizer(vocab_list):
temp_file = tempfile.NamedTemporaryFile(delete=False)
with tf.gfile.Open(temp_file.name, 'w') as w:
for subtoken in vocab_list:
w.write("'%s'" % subtoken)
w.write("\n")
return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])
def test_encode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
s = "testing 123"
encoded_list = subtokenizer.encode(s)
self.assertEqual([1, 2, 0], encoded_list)
def test_decode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
decoded_str = subtokenizer.decode(encoded_list)
self.assertEqual("testing 123", decoded_str)
def test_subtoken_ids_to_tokens(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
self.assertEqual([u"testing", u"123"], token_list)
class StringHelperTest(tf.test.TestCase):
def test_split_string_to_tokens(self):
text = "test? testing 123."
tokens = tokenizer._split_string_to_tokens(text)
self.assertEqual(["test", "? ", "testing", "123", "."], tokens)
def test_join_tokens_to_string(self):
tokens = ["test", "? ", "testing", "123", "."]
s = tokenizer._join_tokens_to_string(tokens)
self.assertEqual("test? testing 123.", s)
def test_escape_token(self):
token = u"abc_\\4"
alphabet = set("abc_\\u;")
escaped_token = tokenizer._escape_token(token, alphabet)
self.assertEqual("abc\\u\\\\\\52;_", escaped_token)
def test_unescape_token(self):
escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
unescaped_token = tokenizer._unescape_token(escaped_token)
self.assertEqual(
"Underline: _, Backslash: \\, Unicode: 4", unescaped_token)
def test_list_to_index_dict(self):
lst = ["test", "strings"]
d = tokenizer._list_to_index_dict(lst)
self.assertDictEqual({"test": 0, "strings": 1}, d)
def test_split_token_to_subtokens(self):
token = "abc"
subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
max_subtoken_length = 2
subtokens = tokenizer._split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
self.assertEqual(["ab", "c"], subtokens)
def test_generate_alphabet_dict(self):
s = ["testing", "123"]
reserved_tokens = ["???"]
alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
self.assertIn("?", alphabet)
self.assertIn("t", alphabet)
self.assertIn("e", alphabet)
self.assertIn("s", alphabet)
self.assertIn("i", alphabet)
self.assertIn("n", alphabet)
self.assertIn("g", alphabet)
self.assertIn("1", alphabet)
self.assertIn("2", alphabet)
self.assertIn("3", alphabet)
def test_count_and_gen_subtokens(self):
token_counts = {"abc": 5}
alphabet = set("abc_")
subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
max_subtoken_length = 2
subtoken_counts = tokenizer._count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
self.assertIsInstance(subtoken_counts, collections.defaultdict)
self.assertDictEqual(
{"a": 5, "b": 5, "c": 5, "_": 5, "ab": 5, "bc": 5, "c_": 5,
"abc": 5, "bc_": 5, "abc_": 5}, subtoken_counts)
def test_filter_and_bucket_subtokens(self):
subtoken_counts = collections.defaultdict(
int, {"a": 2, "b": 4, "c": 1, "ab": 6, "ac": 3, "abbc": 5})
min_count = 3
subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
subtoken_counts, min_count)
self.assertEqual(len(subtoken_buckets[0]), 0)
self.assertEqual(set("b"), subtoken_buckets[1])
self.assertEqual({"ab", "ac"}, subtoken_buckets[2])
self.assertEqual(len(subtoken_buckets[3]), 0)
self.assertEqual({"abbc"}, subtoken_buckets[4])
def test_gen_new_subtoken_list(self):
subtoken_counts = collections.defaultdict(
int, {"translate": 10, "t": 40, "tr": 16, "tra": 12})
min_count = 5
alphabet = set("translate")
reserved_tokens = ["reserved", "tokens"]
subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
# Check that "tra" isn"t in the list (its count should be decremented to 2,
# so it should not be added to the canddiate list).
self.assertNotIn("tra", subtoken_list)
self.assertIn("tr", subtoken_list)
self.assertIn("t", subtoken_list)
self.assertEqual(len("translate"), max_token_length)
def test_generate_subtokens(self):
token_counts = {"ab": 1, "bc": 3, "abc": 5}
alphabet = set("abc_")
min_count = 100
num_iterations = 1
reserved_tokens = ["reserved", "tokens"]
vocab_list = tokenizer._generate_subtokens(
token_counts, alphabet, min_count, num_iterations, reserved_tokens)
# Check that reserved tokens are at the front of the list
self.assertEqual(vocab_list[:2], reserved_tokens)
# Check that each character in alphabet is in the vocab list
for c in alphabet:
self.assertIn(c, vocab_list)
if __name__ == "__main__":
tf.test.main()
| 36.679348
| 83
| 0.63076
|
e0c01a90365b4453ca612d0098c2f7c97bca6a0f
| 3,582
|
py
|
Python
|
PaddleRec/ctr/deepfm/data/preprocess.py
|
JianzhouZhan/models
|
85d08586f388c194c12f8ffa842ab0648d525dc2
|
[
"Apache-2.0"
] | null | null | null |
PaddleRec/ctr/deepfm/data/preprocess.py
|
JianzhouZhan/models
|
85d08586f388c194c12f8ffa842ab0648d525dc2
|
[
"Apache-2.0"
] | null | null | null |
PaddleRec/ctr/deepfm/data/preprocess.py
|
JianzhouZhan/models
|
85d08586f388c194c12f8ffa842ab0648d525dc2
|
[
"Apache-2.0"
] | 1
|
2020-06-10T07:46:25.000Z
|
2020-06-10T07:46:25.000Z
|
import os
import numpy
from collections import Counter
import shutil
import pickle
def get_raw_data():
if not os.path.isdir('raw_data'):
os.mkdir('raw_data')
fin = open('train.txt', 'r')
fout = open('raw_data/part-0', 'w')
for line_idx, line in enumerate(fin):
if line_idx % 200000 == 0 and line_idx != 0:
fout.close()
cur_part_idx = int(line_idx / 200000)
fout = open('raw_data/part-' + str(cur_part_idx), 'w')
fout.write(line)
fout.close()
fin.close()
def split_data():
split_rate_ = 0.9
dir_train_file_idx_ = 'aid_data/train_file_idx.txt'
filelist_ = [
'raw_data/part-%d' % x for x in range(len(os.listdir('raw_data')))
]
if not os.path.exists(dir_train_file_idx_):
train_file_idx = list(
numpy.random.choice(
len(filelist_), int(len(filelist_) * split_rate_), False))
with open(dir_train_file_idx_, 'w') as fout:
fout.write(str(train_file_idx))
else:
with open(dir_train_file_idx_, 'r') as fin:
train_file_idx = eval(fin.read())
for idx in range(len(filelist_)):
if idx in train_file_idx:
shutil.move(filelist_[idx], 'train_data')
else:
shutil.move(filelist_[idx], 'test_data')
def get_feat_dict():
freq_ = 10
dir_feat_dict_ = 'aid_data/feat_dict_' + str(freq_) + '.pkl2'
continuous_range_ = range(1, 14)
categorical_range_ = range(14, 40)
if not os.path.exists(dir_feat_dict_):
# print('generate a feature dict')
# Count the number of occurrences of discrete features
feat_cnt = Counter()
with open('train.txt', 'r') as fin:
for line_idx, line in enumerate(fin):
if line_idx % 100000 == 0:
print('generating feature dict', line_idx / 45000000)
features = line.rstrip('\n').split('\t')
for idx in categorical_range_:
if features[idx] == '': continue
feat_cnt.update([features[idx]])
# Only retain discrete features with high frequency
dis_feat_set = set()
for feat, ot in feat_cnt.items():
if ot >= freq_:
dis_feat_set.add(feat)
# Create a dictionary for continuous and discrete features
feat_dict = {}
tc = 1
# Continuous features
for idx in continuous_range_:
feat_dict[idx] = tc
tc += 1
# Discrete features
cnt_feat_set = set()
with open('train.txt', 'r') as fin:
for line_idx, line in enumerate(fin):
features = line.rstrip('\n').split('\t')
for idx in categorical_range_:
if features[idx] == '' or features[idx] not in dis_feat_set:
continue
if features[idx] not in cnt_feat_set:
cnt_feat_set.add(features[idx])
feat_dict[features[idx]] = tc
tc += 1
# Save dictionary
with open(dir_feat_dict_, 'wb') as fout:
pickle.dump(feat_dict, fout)
print('args.num_feat ', len(feat_dict) + 1)
if __name__ == '__main__':
if not os.path.isdir('train_data'):
os.mkdir('train_data')
if not os.path.isdir('test_data'):
os.mkdir('test_data')
if not os.path.isdir('aid_data'):
os.mkdir('aid_data')
get_raw_data()
split_data()
get_feat_dict()
print('Done!')
| 31.982143
| 80
| 0.563372
|
38c3c53873ba311585900575c4c5addb130c7abf
| 2,393
|
py
|
Python
|
BST/largest_smaller_bst_key.py
|
SebastianOpiyo/algorithms
|
3fd00dca4631e8515af89d86de290cad2e029843
|
[
"MIT"
] | 6
|
2022-01-06T13:51:11.000Z
|
2022-01-06T15:25:23.000Z
|
BST/largest_smaller_bst_key.py
|
SebastianOpiyo/Algorithms-tuts
|
ebf2f595a8cf018fd42a40c759922184a4696952
|
[
"MIT"
] | null | null | null |
BST/largest_smaller_bst_key.py
|
SebastianOpiyo/Algorithms-tuts
|
ebf2f595a8cf018fd42a40c759922184a4696952
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# Author: Sebastian Opiyo
# Date Created: August 26, 2020
# Date Modified: August 26, 2020
# Description: bst challenge, Python3 Version.
"""
Problem: Given an int value, find smaller values of it from the bst,
from the smaller values, find the largest. If none exists, return -1
algo:
- start from root node
- s = -1
- repeat -- until leaf while (node):
- if num > node.key:
- s = node.key
        - go to the right // node = node.right
- else:
- go to the left // node = node.left
- return s
"""
from samples.commons.python import bst
def largest_smaller_key(tree: bst, num):
node = tree.root
print(node.key)
print(f'Confirm {node.key} is root: {node.is_root()}')
smaller = -1
while node:
if num > node.key:
smaller = node.key
node = node.right_child
else:
node = node.left_child
return smaller
# print(node)
# Helper code for tests.
let_arr = [20, 9, 25, 5, 12, 11, 14, 18, 19]
my_bst = bst.BST()
my_bst.create_bst_from_list(let_arr)
# test if the bst works ok.
# print(my_bst.__contains__(5))
# print(my_bst[0])
# print(my_bst[5])
# print(my_bst.postorder())
# largest = largest_smaller_key(my_bst, 20) # 19
# largest = largest_smaller_key(my_bst, 5) # -1
# largest = largest_smaller_key(my_bst, 17) # 14
largest = largest_smaller_key(my_bst, 23) # 20
# largest = largest_smaller_key(my_bst, 30) # 25
print(f'The largest of the smallest is: {largest}')
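# --- Illustrative sketch (added; not part of the original file) -------------
# The helper code above depends on the repository's `bst` module, so it cannot
# run on its own.  Below is a self-contained restatement of the same traversal
# on a tiny ad-hoc tree; the names `_Node` and `_largest_smaller_key_demo` are
# ours, not from the original source.
class _Node:
    def __init__(self, key, left=None, right=None):
        self.key = key
        self.left_child = left
        self.right_child = right
def _largest_smaller_key_demo(root, num):
    smaller = -1
    node = root
    while node:
        if num > node.key:
            smaller = node.key  # candidate found; look right for a larger one
            node = node.right_child
        else:
            node = node.left_child  # current key too large; move to smaller keys
    return smaller
# Tree:      20
#           /  \
#          9    25
#         / \
#        5   12
_demo_root = _Node(20, _Node(9, _Node(5), _Node(12)), _Node(25))
assert _largest_smaller_key_demo(_demo_root, 17) == 12  # 12 < 17 < 20
assert _largest_smaller_key_demo(_demo_root, 5) == -1   # nothing smaller than 5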
"""
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
max_length = 1000000
def solution(a: list):
# write your code in Python 3.6
# checking whether a is an array of integers.
if not len(a):
raise ValueError("Empty list, need a list with odd number of elements")
if len(a) > max_length:
raise ValueError("Error, length beyond required threshold!")
if not isinstance(a, list):
raise TypeError("Input must be a list of integers")
for i in a:
if not isinstance(i, int):
raise TypeError("Error, only integer values accepted!")
    elements_dict = {}
    for elem in a:
        if elem in elements_dict:
            # Second occurrence: the pair cancels out, drop the key.
            del elements_dict[elem]
        else:
            elements_dict[elem] = True
    # Exactly one key should remain: the element without a pair.
    if len(elements_dict) == 1:
        return next(iter(elements_dict))
"""
| 26.01087
| 79
| 0.640201
|
2f80f76da1b3c0717d98e5b2c58f53e1529b1b2e
| 711
|
py
|
Python
|
pf_modeler/app/controller.py
|
Kitware/pf-simulation-modeler
|
e2129570d149991f73aaabdb8a261f7dda296f92
|
[
"BSD-3-Clause"
] | null | null | null |
pf_modeler/app/controller.py
|
Kitware/pf-simulation-modeler
|
e2129570d149991f73aaabdb8a261f7dda296f92
|
[
"BSD-3-Clause"
] | 1
|
2022-02-07T22:04:36.000Z
|
2022-02-07T22:04:36.000Z
|
pf_modeler/app/controller.py
|
Kitware/pf-simulation-modeler
|
e2129570d149991f73aaabdb8a261f7dda296f92
|
[
"BSD-3-Clause"
] | null | null | null |
r"""
Bind methods to the trame controller
"""
from pf_modeler.app.engine.simput import KeyDatabase
from trame import controller as ctrl
from . import engine
from . import ui
def bind_instances():
ctrl.ui_set_key_database()
def bind_methods():
ctrl.simput_save = KeyDatabase().save
ctrl.validate_run = engine.validate_run
def on_start():
engine.initialize()
ui.initialize()
bind_instances()
bind_methods()
def on_reload(reload_modules):
"""Method called when the module is reloaded
reload_modules is a function that takes modules to reload
We only need to reload the controller if the engine is reloaded.
"""
# reload_modules(engine)
bind_methods()
| 19.75
| 68
| 0.724332
|
932556662c61245a5bd217eb3fa418f9c1431d8d
| 71,956
|
py
|
Python
|
tests/chainer_tests/test_link.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | null | null | null |
tests/chainer_tests/test_link.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | null | null | null |
tests/chainer_tests/test_link.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | null | null | null |
import copy
import unittest
import warnings
import mock
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import initializers
from chainer import testing
from chainer.testing import attr
class TestLink(unittest.TestCase):
def setUp(self):
x_shape_0 = 2
x_shape_1 = numpy.int64(3)
with testing.assert_warns(DeprecationWarning):
self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
u=(None, 'd'))
with self.link.init_scope():
self.link.y = chainer.Parameter(shape=(2,))
self.link.v = chainer.Parameter()
self.p = numpy.array([1, 2, 3], dtype='f')
self.link.add_persistent('p', self.p)
self.link.name = 'a'
self.link.x.update_rule = chainer.UpdateRule()
self.link.x.update_rule.enabled = False
self.link.u.update_rule = chainer.UpdateRule()
if cuda.available:
self.current_device_id = cuda.cupy.cuda.get_device_id()
def tearDown(self):
if cuda.available \
and cuda.cupy.cuda.get_device_id() != self.current_device_id:
cuda.Device(self.current_device_id).use()
def check_param_init(self, name, shape, dtype, data_value=numpy.nan):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertEqual(var.name, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.data.shape, shape)
self.assertEqual(var.data.dtype, dtype)
numpy.testing.assert_array_equal(var.data, data_value)
self.assertEqual(var.grad.shape, shape)
self.assertEqual(var.grad.dtype, dtype)
numpy.testing.assert_array_equal(var.grad, numpy.nan)
def check_param_uninit(self, name, initializer=None):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.name, name)
self.assertIsNone(var.data)
if initializer is not None:
self.assertIs(var.initializer, initializer)
def test_init(self):
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
self.link.u.initialize((2, 3))
self.check_param_init('u', (2, 3), 'd')
self.check_param_uninit('v')
self.link.v.initialize((2, 3))
self.check_param_init('v', (2, 3), 'f')
def test_assign_param_outside_of_init_scope(self):
p = chainer.Parameter()
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_assign_var_in_init_scope(self):
p = chainer.Variable()
with self.link.init_scope():
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_add_param(self):
with testing.assert_warns(DeprecationWarning):
self.link.add_param('z', (2, 3))
self.check_param_init('z', (2, 3), 'f')
with testing.assert_warns(DeprecationWarning):
self.link.add_param('w', (2, 3), dtype='d')
self.check_param_init('w', (2, 3), 'd')
with testing.assert_warns(DeprecationWarning):
self.link.add_param('r')
self.check_param_uninit('r')
self.link.r.initialize((2, 3))
self.check_param_init('r', (2, 3), 'f')
with testing.assert_warns(DeprecationWarning):
self.link.add_param('s', dtype='d')
self.check_param_uninit('s')
self.link.s.initialize((2, 3))
self.check_param_init('s', (2, 3), 'd')
initializer = initializers.Zero('d')
with testing.assert_warns(DeprecationWarning):
self.link.add_param('t', initializer=initializer)
self.check_param_uninit('t', initializer)
self.link.t.initialize((2, 3))
self.check_param_init('t', (2, 3), 'd', 0)
def test_add_param_direct_initialization(self):
z = numpy.random.rand(2, 3).astype('f')
with testing.assert_warns(DeprecationWarning):
self.link.add_param('z', initializer=z)
self.assertIsInstance(self.link.z.data, numpy.ndarray)
numpy.testing.assert_array_equal(self.link.z.data, z)
def test_add_param_duplicated_with_persistent(self):
self.link.add_persistent('z', 'abc')
with self.assertRaises(AttributeError):
with testing.assert_warns(DeprecationWarning):
self.link.add_param('z', (2, 3))
def test_add_persistent(self):
self.assertTrue(hasattr(self.link, 'p'))
self.assertIs(self.link.p, self.p)
self.link.add_persistent('q', 'abc')
self.assertTrue(hasattr(self.link, 'q'))
self.assertEqual(self.link.q, 'abc')
def test_delete(self):
del self.link.x
self.assertFalse(hasattr(self.link, 'x'))
self.assertNotIn('x', self.link._params)
self.assertNotIn('x', self.link._persistent)
del self.link.p
self.assertFalse(hasattr(self.link, 'p'))
self.assertNotIn('p', self.link._params)
self.assertNotIn('p', self.link._persistent)
def test_copy_with_share_mode(self):
link = self.link.copy(mode='share')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIs(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIs(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIs(link.p, self.link.p)
self.assertIs(link.name, None)
def test_copy_with_copy_mode(self):
link = self.link.copy(mode='copy')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
def test_copy_with_init_mode(self):
self.link.u.initializer = initializers.Normal(
dtype=self.link.u.initializer.dtype)
self.link.u.initialize((2, 3))
link = self.link.copy(mode='init')
self.assertFalse(numpy.array_equal(self.link.u.array, link.u.array))
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
@attr.gpu
def test_copy_and_to_gpu_init(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIs(l0.x.data, l1.x.data)
l1.to_gpu()
self.assertIsNot(l0.x.data, l1.x.data)
self.assertIsInstance(l0.x.data, numpy.ndarray)
self.assertIsInstance(l1.x.data, cupy.ndarray)
@attr.gpu
def test_copy_and_to_gpu_uninit(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
l1.to_gpu()
l1.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_to_gpu_uninit_multi_gpu(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
l2 = l0.copy()
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
self.assertIsNone(l2.u.data)
l1.to_gpu()
l1.u.initialize((2, 3))
l2.to_gpu()
l2.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
self.assertIsInstance(l2.u.data, cupy.ndarray)
self.assertNotEqual(l1.u.data.data, l2.u.data.data)
def _check_deepcopy(self, link):
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.data, self.link.x.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.x.data),
cuda.to_cpu(self.link.x.data))
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.data, self.link.y.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.y.data),
cuda.to_cpu(self.link.y.data))
self.assertIsNone(link.u.data)
self.assertIsNot(link.p, self.link.p)
self.assertEqual(link.name, self.link.name)
def test_deepcopy(self):
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertIsNone(link._device_id)
@attr.multi_gpu(2)
def test_deepcopy_multi_device(self):
device_id = 1
self.link.to_gpu(device_id)
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertEqual(link._device_id, device_id)
self.assertEqual(link.x.data.device.id, device_id)
self.assertEqual(link.y.data.device.id, device_id)
def test_to_cpu_on_cpu(self):
x = self.link.x.data
gx = self.link.x.grad
y = self.link.y.data
gy = self.link.y.grad
p = self.link.p
self.link.to_cpu()
self.assertIs(self.link.x.data, x)
self.assertIs(self.link.x.grad, gx)
self.assertIs(self.link.y.data, y)
self.assertIs(self.link.y.grad, gy)
self.assertIsNone(self.link.u.data)
self.assertIsNone(self.link.u.grad)
self.assertIs(self.link.p, p)
@attr.gpu
def test_to_cpu(self):
self.link.to_gpu()
self.link.to_cpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.x.data, numpy.ndarray)
self.assertIsInstance(self.link.x.grad, numpy.ndarray)
self.assertIsInstance(self.link.y.data, numpy.ndarray)
self.assertIsInstance(self.link.y.grad, numpy.ndarray)
self.assertIsNone(self.link.u.data)
self.assertIsNone(self.link.u.grad)
self.assertIsInstance(self.link.v.data, numpy.ndarray)
self.assertIsInstance(self.link.v.grad, numpy.ndarray)
self.assertIsInstance(self.link.p, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
self.link.to_gpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, cupy)
self.assertIsInstance(self.link.x.data, cupy.ndarray)
self.assertIsInstance(self.link.x.grad, cupy.ndarray)
self.assertIsInstance(self.link.y.data, cupy.ndarray)
self.assertIsInstance(self.link.y.grad, cupy.ndarray)
self.assertIsNone(self.link.u.data)
self.assertIsNone(self.link.u.grad)
self.assertIsInstance(self.link.v.data, cupy.ndarray)
self.assertIsInstance(self.link.v.grad, cupy.ndarray)
self.assertIsInstance(self.link.p, cupy.ndarray)
@attr.multi_gpu(2)
def test_to_gpu_different_device(self):
cuda.Device(1).use()
self.link.to_gpu(0)
self.assertEqual(self.link._device_id, 0)
@attr.multi_gpu(2)
def test_to_gpu_current_device(self):
cuda.Device(1).use()
self.link.to_gpu()
self.assertEqual(self.link._device_id, 1)
def test_params(self):
params = list(self.link.params())
self.assertEqual({id(p) for p in params},
{id(self.link.x), id(self.link.y),
id(self.link.u), id(self.link.v)})
def test_params_skip_uninit(self):
params = list(self.link.params(include_uninit=False))
self.assertEqual({id(p) for p in params},
{id(self.link.x), id(self.link.y)})
def test_namedparams(self):
namedparams = list(self.link.namedparams())
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/x', id(self.link.x)), ('/y', id(self.link.y)),
('/u', id(self.link.u)), ('/v', id(self.link.v))})
def test_namedparams_skip_uninit(self):
namedparams = list(self.link.namedparams(include_uninit=False))
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/x', id(self.link.x)), ('/y', id(self.link.y))})
def test_links(self):
links = list(self.link.links())
self.assertIs(links[0], self.link)
def test_links_skipself(self):
links = list(self.link.links(skipself=True))
self.assertFalse(links) # empty
def test_namedlinks(self):
pl = list(self.link.namedlinks())
self.assertEqual(len(pl), 1)
self.assertEqual(pl[0][0], '/')
self.assertIs(pl[0][1], self.link)
def _setup_test_copyparams(self):
self.link.x.grad.fill(0)
self.link.y.grad.fill(1)
self.link.u.initialize((2, 3))
self.link.u.data.fill(0)
self.link.u.grad.fill(1)
self.link.v.cleargrad()
gx = self.link.x.grad.copy()
gy = self.link.y.grad.copy()
gu = self.link.u.grad.copy()
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3))
l.v = chainer.Parameter(shape=(3, 2))
l.x.data.fill(2)
l.x.grad.fill(3)
l.y.data.fill(4)
l.y.grad.fill(5)
l.u.data.fill(6)
l.u.grad.fill(7)
l.v.data.fill(8)
l.v.grad.fill(9)
l.add_persistent('p', numpy.full_like(self.link.p, 10))
return l, (gx, gy, gu)
def _check_copyparams(self, l, gs):
gx, gy, gu = gs
numpy.testing.assert_array_equal(self.link.x.data, l.x.data)
numpy.testing.assert_array_equal(self.link.x.grad, gx)
numpy.testing.assert_array_equal(self.link.y.data, l.y.data)
numpy.testing.assert_array_equal(self.link.y.grad, gy)
numpy.testing.assert_array_equal(self.link.u.data, l.u.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu)
numpy.testing.assert_array_equal(self.link.v.data, l.v.data)
numpy.testing.assert_array_equal(self.link.v.grad, None)
def test_copyparams(self):
l, gs = self._setup_test_copyparams()
self.link.copyparams(l)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, l.p)
def test_copyparams_no_copy_persistent(self):
orig_p = self.link.p.copy()
l, gs = self._setup_test_copyparams()
numpy.testing.assert_array_equal(False, orig_p == l.p)
self.link.copyparams(l, copy_persistent=False)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, orig_p)
def test_cleargrads(self):
self.link.cleargrads()
self.assertIsNone(self.link.x.grad)
self.assertIsNone(self.link.y.grad)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
self.assertIsNone(self.link.u.grad)
self.assertIsNone(self.link.v.grad)
def test_zerograds(self):
gx_expect = numpy.zeros_like(self.link.x.data)
gy_expect = numpy.zeros_like(self.link.y.data)
with testing.assert_warns(DeprecationWarning):
self.link.zerograds()
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
gu_expect = numpy.zeros_like(self.link.u.data)
gv_expect = numpy.zeros_like(self.link.v.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
numpy.testing.assert_array_equal(self.link.v.grad, gv_expect)
def test_addgrads(self):
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3),
initializer=initializers.NaN('d'))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3))
l.v = chainer.Parameter()
l.x.grad.fill(1)
l.y.grad.fill(2)
l.u.grad.fill(3)
self.link.x.grad.fill(-1)
self.link.y.grad.fill(-2)
self.link.u.cleargrad()
self.link.addgrads(l)
gx_expect = numpy.zeros_like(l.x.grad)
gy_expect = numpy.zeros_like(l.y.grad)
gu_expect = l.u.grad
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
self.assertIsNone(self.link.v.grad, None)
def test_serialize(self):
serializer = mock.MagicMock(return_value=3)
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3))
l.y = chainer.Parameter(shape=2)
l.add_persistent('z', 1)
l.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
serializer.assert_any_call('x', l.x.data)
serializer.assert_any_call('y', l.y.data)
serializer.assert_any_call('z', 1)
self.assertEqual(l.z, 3)
def test_serialize_param_shape_placeholder(self):
serializer = mock.MagicMock(return_value=3)
l = chainer.Link()
with l.init_scope():
l.y = chainer.Parameter(shape=2)
l.x = chainer.Parameter()
l.x.initialize((2, 3))
l.add_persistent('z', 1)
l.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
serializer.assert_any_call('x', l.x.data)
serializer.assert_any_call('y', l.y.data)
serializer.assert_any_call('z', 1)
self.assertEqual(l.z, 3)
def test_serialize_deserialize_to_uninitialized_param(self):
ret = numpy.random.rand(2, 3).astype('f')
serializer = mock.MagicMock(return_value=ret)
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter()
l.serialize(serializer)
self.assertEqual(serializer.call_count, 1)
serializer.assert_any_call('x', None)
self.assertIsInstance(l.x.data, numpy.ndarray)
numpy.testing.assert_array_equal(l.x.data, ret)
def test_enable_update(self):
self.link.enable_update()
self.assertTrue(self.link.x.update_rule.enabled)
self.assertTrue(self.link.u.update_rule.enabled)
def test_disable_update(self):
self.link.disable_update()
self.assertFalse(self.link.x.update_rule.enabled)
self.assertFalse(self.link.u.update_rule.enabled)
def test_update_enabled(self):
self.assertTrue(self.link.update_enabled)
self.link.disable_update()
self.assertFalse(self.link.update_enabled)
self.link.enable_update()
self.assertTrue(self.link.update_enabled)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.link.count_params() == 8
assert len(w) == 2
assert w[0].category is UserWarning
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.link.count_params()
assert not w
class TestLinkRepeat(unittest.TestCase):
def setUp(self):
class Layer(chainer.Link):
def __init__(self):
super(Layer, self).__init__()
with self.init_scope():
self.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def forward(self):
pass
self.link = Layer()
def test_no_repeat(self):
ret = self.link.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_init(self):
ret = self.link.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But shape and type of parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
# Parameters are re-initialized, so the values should be different
self.assertFalse(numpy.all(ret[0].x.array == ret[1].x.array))
def test_repeat_with_copy(self):
ret = self.link.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But shape, type, and value of parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
def test_repeat_with_share(self):
ret = self.link.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
# But the array objects should be the same
self.assertIs(ret[0].x.array, ret[1].x.array)
        # But shape, type, and value of parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
class CountParameter(chainer.Parameter):
def __init__(self, v):
super(CountParameter, self).__init__(v.data, name=v.name)
self.data = v.data
self.grad = v.grad
self.count_to_cpu = 0
self.count_to_gpu = 0
self.count_zerograd = 0
def to_cpu(self):
self.count_to_cpu += 1
super(CountParameter, self).to_cpu()
def to_gpu(self, device=None):
self.count_to_gpu += 1
super(CountParameter, self).to_gpu(device)
def zerograd(self):
self.count_zerograd += 1
super(CountParameter, self).zerograd()
class TestChain(unittest.TestCase):
def setUp(self):
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter()
self.c1 = chainer.Chain()
with self.c1.init_scope():
self.c1.l1 = self.l1
with testing.assert_warns(DeprecationWarning):
self.c1.add_link('l2', self.l2)
self.c2 = chainer.Chain()
with self.c2.init_scope():
self.c2.c1 = self.c1
self.c2.l3 = self.l3
def test_init(self):
self.assertIs(self.c1.l1, self.l1)
self.assertIs(self.c1['l1'], self.l1)
self.assertEqual(self.l1.name, 'l1')
self.assertIs(self.c2.c1, self.c1)
self.assertIs(self.c2['c1'], self.c1)
self.assertEqual(self.c1.name, 'c1')
self.assertIs(self.c2.l3, self.l3)
self.assertIs(self.c2['l3'], self.l3)
self.assertEqual(self.l3.name, 'l3')
def test_add_link(self):
self.assertIs(self.c1.l2, self.l2)
self.assertEqual(self.l2.name, 'l2')
def test_add_link_to_existing_attribute(self):
self.l1.z = 0
with self.assertRaises(AttributeError):
self.l1.add_link('z', chainer.Link())
def test_assign_link_outside_of_init_scope(self):
l = chainer.Link()
self.l1.l = l
self.assertTrue(all(l is not link for link in self.l1.links()))
def test_delete_link(self):
del self.c1.l1
self.assertFalse(hasattr(self.c1, 'l1'))
self.assertNotIn('l1', self.c1._children)
def test_copy_with_share_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='share')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIs(c2.c1.l1.x.data, self.l1.x.data)
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIs(c2.c1.l2.x.data, self.l2.x.data)
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
self.assertIs(c2.l3.x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
self.assertIs(c2.l3.x.grad, None)
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l1.x.grad).all())
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l2.x.grad).all())
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
# A Parameter constructed with shape argument but not initialized
# has None in grad
self.assertIs(c2.l3.x.grad, None)
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
gx3 = self.l3.x.grad
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
self.assertIs(self.l3.x.grad, gx3)
def set_count_parameters(self):
self.l1.x = CountParameter(self.l1.x)
self.l2.x = CountParameter(self.l2.x)
self.l3.x = CountParameter(self.l3.x)
@attr.gpu
def test_to_cpu(self):
self.set_count_parameters()
self.c2.to_gpu()
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.assertEqual(self.l1.x.count_to_cpu, 1)
self.assertEqual(self.l1.x.count_to_gpu, 1)
self.assertEqual(self.l2.x.count_to_cpu, 1)
self.assertEqual(self.l2.x.count_to_gpu, 1)
self.assertEqual(self.l3.x.count_to_cpu, 1)
self.assertEqual(self.l3.x.count_to_gpu, 1)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
self.set_count_parameters()
cupy = cuda.cupy
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.assertEqual(self.l1.x.count_to_gpu, 1)
self.assertEqual(self.l2.x.count_to_gpu, 1)
self.assertEqual(self.l3.x.count_to_gpu, 1)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual({id(p) for p in params},
{id(self.l1.x), id(self.l2.x), id(self.l3.x)})
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual({id(p) for p in params},
{id(self.l1.x), id(self.l2.x)})
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x)),
('/l3/x', id(self.l3.x))})
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x))})
def test_links(self):
links = list(self.c2.links())
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3,
self.c1, self.c2]})
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3, self.c1]})
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/', id(self.c2)),
('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))})
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))})
def test_children(self):
children = list(self.c2.children())
self.assertEqual({id(c) for c in children}, {id(self.c1), id(self.l3)})
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
def test_zerograds(self):
self.set_count_parameters()
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
self.assertEqual(self.l1.x.count_zerograd, 1)
self.assertEqual(self.l2.x.count_zerograd, 1)
self.assertEqual(self.l3.x.count_zerograd, 1)
self.l3.x.initialize(3)
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
self.l1.x.grad.fill(-1)
self.l2.x.grad.fill(-2)
self.l3.cleargrads()
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.full(3, 3.))
def test_serialize(self):
mocks = {'l1': mock.MagicMock(), 'l2': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
self.c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('l1')
serializer.__getitem__.assert_any_call('l2')
mocks['l1'].assert_called_with('x', self.l1.x.data)
mocks['l2'].assert_called_with('x', self.l2.x.data)
def test_count_params(self):
assert self.c1.count_params() == 8
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2.l3.x.initialize((3,))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
class TestChainRepeat(unittest.TestCase):
def setUp(self):
class ChainForTest(chainer.Chain):
def __init__(self):
super(ChainForTest, self).__init__()
with self.init_scope():
self.link = chainer.Link()
def forward(self):
pass
self.chain = ChainForTest()
self.link = self.chain.link
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chain.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chain.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.chain.link.x)
self.assertIsNot(ret[1].link.x, self.chain.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIs(ret[0].link.x.data, self.chain.link.x.data)
self.assertIs(ret[0].link.x.data, ret[1].link.x.data)
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chain.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.link.x)
self.assertIsNot(ret[1].link.x, self.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chain.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[1].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
class TestChainList(unittest.TestCase):
def setUp(self):
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l1.y = chainer.Parameter()
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter(shape=3)
self.l4 = chainer.Link()
self.l5 = chainer.Link()
self.l6 = chainer.Link()
self.c1 = chainer.ChainList(self.l1)
self.c1.add_link(self.l2)
self.c2 = chainer.ChainList(self.c1)
self.c2.append(self.l3)
self.c3 = chainer.ChainList(self.l4)
def test_init(self):
self.assertIs(self.c1[0], self.l1)
self.assertEqual(self.l1.name, '0')
self.assertIs(self.c2[0], self.c1)
self.assertEqual(self.c1.name, '0')
def test_add_link(self):
self.assertIs(self.c1[1], self.l2)
self.assertEqual(self.l2.name, '1')
def test_append(self):
self.assertIs(self.c2[1], self.l3)
self.assertEqual(self.l3.name, '1')
def test_setitem(self):
self.c1[1] = self.l3
self.assertEqual(self.l3.name, '1')
def test_setitem_slice(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[3:0:-1] = [self.l4, self.l5] # l1 l5 l4
self.assertEqual(len(self.c1), 3)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '2')
self.assertEqual(self.l5.name, '1')
def test_setitem_slice_short(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4] # l1 l4
self.assertEqual(len(self.c1), 2)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
def test_setitem_slice_long(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4, self.l5, self.l6] # l1 l4 l5 l6
self.assertEqual(len(self.c1), 4)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
self.assertEqual(self.l5.name, '2')
self.assertEqual(self.l6.name, '3')
def test_iadd(self):
self.c2 += self.c3
        self.assertEqual(len(self.c2), 3)
self.assertEqual(self.l4.name, '2')
def test_delete_item(self):
del self.c2[0]
self.assertEqual(len(self.c2), 1)
self.assertEqual(self.l3.name, '0')
def test_assign_param_in_init_scope(self):
p = chainer.Parameter()
with self.c1.init_scope():
self.c1.p = p
self.assertIn(p, self.c1.params())
def test_assign_link_in_init_scope(self):
l = chainer.Link()
with self.c1.init_scope():
with self.assertRaises(TypeError):
self.c1.l = l
def test_iter(self):
links = list(self.c2)
self.assertEqual(2, len(links))
self.assertIs(links[0], self.c1)
self.assertIs(links[1], self.l3)
def test_len(self):
self.assertEqual(len(self.c1), 2)
self.assertEqual(len(self.c2), 2)
def test_copy_with_share_mode(self):
c2 = self.c2.copy(mode='share')
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertIsNot(c2[0], self.c1)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIs(c2[0][0].x.data, self.l1.x.data)
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIs(c2[0][1].x.data, self.l2.x.data)
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIs(c2[1].x.data, self.l3.x.data)
self.assertIs(c2[1].x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIsNot(c2[1].x.data, self.l3.x.data)
# l3 is constructed with shape argument but not initialized
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][0].x.grad).all())
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][1].x.grad).all())
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertTrue(numpy.isnan(c2[1].x.data).all())
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
@attr.gpu
def test_copy_and_send_to_gpu(self):
c2 = self.c2.copy()
self.c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][1].x.data, numpy.ndarray)
@attr.gpu
def test_copy_and_send_to_gpu_2(self):
c2 = self.c2.copy()
c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][1].x.data, cuda.cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_send_to_gpu_multi(self):
c2 = self.c2.copy()
self.c2.to_gpu(0)
c2.to_gpu(1)
self.assertEqual(self.c2[0][0].x.data.device.id, 0)
self.assertEqual(self.c2[0][1].x.data.device.id, 0)
self.assertEqual(c2[0][0].x.data.device.id, 1)
self.assertEqual(c2[0][1].x.data.device.id, 1)
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
gx3 = self.l3.x.grad
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
self.assertIs(self.l3.x.grad, gx3)
@attr.gpu
def test_to_cpu(self):
self.c2.to_gpu()
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual({id(p) for p in params},
{id(self.l1.x), id(self.l1.y),
id(self.l2.x), id(self.l3.x)})
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual({id(p) for p in params},
{id(self.l1.x), id(self.l2.x), id(self.l3.x)})
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/x', id(self.l1.x)),
('/0/0/y', id(self.l1.y)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))})
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/x', id(self.l1.x)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))})
def test_links(self):
links = list(self.c2.links())
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3,
self.c1, self.c2]})
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3, self.c1]})
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/', id(self.c2)),
('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_children(self):
self.assertEqual(tuple(id(c) for c in self.c2.children()),
(id(self.c1), id(self.l3)))
self.assertEqual(tuple(id(c) for c in self.c1.children()),
(id(self.l1), id(self.l2)))
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter()
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
def test_zerograds(self):
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
self.l1.y.initialize((2, 3))
numpy.testing.assert_array_equal(self.l1.y.grad, numpy.zeros((2, 3)))
def test_cleargrads(self):
self.c2.cleargrads()
self.assertIsNone(self.l1.x.grad)
self.assertIsNone(self.l2.x.grad)
self.assertIsNone(self.l3.x.grad)
self.l1.y.initialize((2, 3))
self.assertIsNone(self.l1.y.grad)
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
l1.y.grad.fill(4)
self.l1.x.grad.fill(-1)
self.l1.y.cleargrad()
self.l2.x.grad.fill(-2)
self.l3.x.grad.fill(-3)
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l1.y.grad, l1.y.grad)
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_serialize(self):
l1 = chainer.Link()
with l1.init_scope():
l1.y = chainer.Parameter(shape=(1, 1))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(0, 2)
c1 = chainer.ChainList(l1, l2)
mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
serializer.return_value = None
mocks['0'].return_value = None
mocks['1'].return_value = None
c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('0')
serializer.__getitem__.assert_any_call('1')
mocks['0'].assert_called_with('y', l1.y.data)
mocks['1'].assert_called_with('x', l2.x.data)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.c1.count_params() == 8
assert len(w) == 1
assert w[0].category is UserWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2[0][0].y.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
class TestChainListRepeat(unittest.TestCase):
def setUp(self):
class ChainListForTest(chainer.ChainList):
def __init__(self):
super(ChainListForTest, self).__init__(chainer.Link())
def forward(self):
pass
self.chainlist = ChainListForTest()
self.link = self.chainlist[0]
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chainlist.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chainlist.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIs(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIs(ret[0][0].x.data, ret[1][0].x.data)
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chainlist.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chainlist.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[1][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
@attr.ideep
class TestIntel64(unittest.TestCase):
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
def _assert_variable_array_equal(self, var, expected_array):
assert var.shape == expected_array.shape
assert var.dtype == expected_array.dtype
self._assert_arrays_equal(var.data, expected_array)
def _assert_arrays_equal(self, array, expected_array):
if isinstance(array, cuda.ndarray):
array = array.get()
assert array.shape == expected_array.shape
assert array.dtype == expected_array.dtype
assert (array == expected_array).all()
def test_cpu_to_intel64(self):
link = self.link
link.to_intel64()
assert link._device_id is None
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
self._assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
self._assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_intel64(self):
link = self.link
link.to_intel64()
prev_y = link.y
prev_v = link.v
prev_pa = link.pa
prev_ps = link.ps
link.to_intel64()
assert link._device_id is None
# Everything should be left untouched
# Initialized parameter
assert link.y is prev_y
# Uninitialized parameter
assert link.v is prev_v
# Persistent ndarray
assert link.pa is prev_pa
# Persistent scalar
assert link.ps is prev_ps
@attr.gpu
def test_gpu_to_intel64(self):
link = self.link
link.to_gpu()
assert link._device_id == 0
link.to_intel64()
assert link._device_id is None
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
self._assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
self._assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
@attr.gpu
def test_intel64_to_gpu(self):
link = self.link
link.to_intel64()
assert link._device_id is None
link.to_gpu()
assert link._device_id == 0
# Arrays should be converted to cupy.ndarray
# Initialized parameter
assert isinstance(link.y.data, cuda.cupy.ndarray)
self._assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, cuda.ndarray)
self._assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_cpu(self):
link = self.link
link.to_intel64()
assert link._device_id is None
link.to_cpu()
assert link._device_id is None
# Arrays should be converted to numpy.ndarray
# Initialized parameter
assert isinstance(link.y.data, numpy.ndarray)
self._assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, numpy.ndarray)
self._assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_cpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = numpy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
@attr.gpu
def test_gpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = cuda.cupy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
class TestCallMethod(unittest.TestCase):
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.model = Model()
def test_has_forward_no_call(self):
self.model.forward = mock.MagicMock()
self.model(0) # model.forward is called
self.model.forward.assert_called_once()
def test_has_call_and_forward(self):
self.model.__call__ = mock.MagicMock()
self.model.forward = mock.MagicMock()
self.model(0) # Link.__call__ is called
self.model.forward.assert_called_with(0)
self.model.__call__.assert_not_called()
def test_has_call_no_forward(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.mock = mock.MagicMock()
def __call__(self, x):
self.mock(x)
model = Model()
model(0) # model.__call__ is called
model.mock.assert_called_with(0)
def test_no_call_no_forward(self):
with self.assertRaises(AttributeError):
self.model(0)
testing.run_module(__name__, __file__)
| 38.665234
| 79
| 0.6053
|
225ecef8113c50e2dc003b79b4b73e233b2fa579
| 1,729
|
py
|
Python
|
cv2/file.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | 3
|
2021-01-09T23:55:34.000Z
|
2021-08-15T22:04:34.000Z
|
cv2/file.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | null | null | null |
cv2/file.py
|
NNDEV1/CV2Stuff
|
fc31a51cdd816e313351f0d83ffb0c7339e73ef3
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
###################################
widthImg=540
heightImg =640
#####################################
cap = cv2.VideoCapture(0)
cap.set(10,150)
def preProcessing(img):
imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
imgCanny = cv2.Canny(imgBlur,200,200)
kernel = np.ones((5,5))
imgDial = cv2.dilate(imgCanny,kernel,iterations=2)
imgThres = cv2.erode(imgDial,kernel,iterations=1)
return imgThres
def getContours(img):
biggest = np.array([])
maxArea = 0
contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area>5000:
#cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
peri = cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,0.02*peri,True)
if area >maxArea and len(approx) == 4:
biggest = approx
maxArea = area
cv2.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)
return biggest
def getWarp(img,biggest):
pts1 = np.float32(biggest)
pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgOutput = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
return imgOutput
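# --- Hypothetical helper (a sketch, not part of the original script): the four
# corner points returned by getContours come back in contour order, while
# getWarp's pts2 assumes [top-left, top-right, bottom-left, bottom-right].
# Reordering the points before warping avoids a flipped or skewed output.
def reorderPoints(points):
    points = points.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), np.int32)
    add = points.sum(1)
    ordered[0] = points[np.argmin(add)]    # top-left: smallest x + y
    ordered[3] = points[np.argmax(add)]    # bottom-right: largest x + y
    diff = np.diff(points, axis=1)
    ordered[1] = points[np.argmin(diff)]   # top-right: smallest y - x
    ordered[2] = points[np.argmax(diff)]   # bottom-left: largest y - x
    return ordered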
while True:
success, img = cap.read()
img = cv2.resize(img,(widthImg,heightImg))
imgContour = img.copy()
imgThres = preProcessing(img)
biggest = getContours(imgThres)
imgWarped = getWarp(img, biggest)
cv2.imshow("WorkFlow", imgWarped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
break
| 28.816667
| 86
| 0.614228
|
190c12b23432b6b102b09c682c468d30f51e80af
| 3,291
|
py
|
Python
|
CPAC/GUI/interface/pages/mdmr.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | null | null | null |
CPAC/GUI/interface/pages/mdmr.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | null | null | null |
CPAC/GUI/interface/pages/mdmr.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | null | null | null |
import wx
import wx.html
from ..utils.generic_class import GenericClass
from ..utils.constants import control, dtype
import os
import pkg_resources as p
class MDMRSettings(wx.ScrolledWindow):
def __init__(self, parent, counter = 0):
wx.ScrolledWindow.__init__(self, parent)
self.counter = counter
self.page = GenericClass(self, "Multivariate Distance Matrix Regression (MDMR)")
self.page.add(label="Run MDMR?",
control=control.CHOICE_BOX,
name="runMDMR",
type=dtype.LSTR,
comment="Used to determine if Multivariate Distance Matrix Regression (MDMR) "
"will be added to the pipeline or not.",
values=["Off", "On"],
wkf_switch = True)
self.page.add(label="Mask ROI File",
control=control.COMBO_BOX,
name='mdmr_roi_file',
type=dtype.STR,
values=str(""),
validation_req=False,
comment="Path to a mask file. Voxels outside of the mask will "
"be excluded from MDMR.")
self.page.add(label="Regressor file",
control=control.COMBO_BOX,
name="mdmr_regressor_file",
type=dtype.STR,
values="",
comment="Path to a CSV file containing the phenotypic "
"regressor.")
self.page.add(label="Regressor Participant Column Name",
control=control.TEXT_BOX,
name="mdmr_regressor_participant_column",
type=dtype.STR,
comment="Name of the participants column in your "
"regressor file.",
values="")
self.page.add(label="Regressor of Interest columns",
control=control.TEXT_BOX,
name="mdmr_regressor_columns",
type=dtype.STR,
values="",
comment="Columns from the CSV file indicating factor "
"variables. Other columns will be handled as covariates. "
"Separated by commas.")
self.page.add(label="Permutations",
control=control.INT_CTRL,
name="mdmr_permutations",
type=dtype.NUM,
comment="Number of permutation tests to run on the "
"Pseudo-F statistics.",
values=500)
self.page.add(label="Parallel nodes",
control=control.INT_CTRL,
name="mdmr_parallel_nodes",
type=dtype.NUM,
comment="Number of Nipype nodes created while "
"computing MDMR. Dependent upon computing resources.",
values=1)
self.page.set_sizer()
parent.get_page_list().append(self)
def get_counter(self):
return self.counter
| 41.1375
| 100
| 0.488909
|
c74ecca7e483a94c7461c0e8edbe04da0d3c08c0
| 8,875
|
py
|
Python
|
azext_iot/sdk/dps/operations/registration_state_operations.py
|
srinivasaraogattupalli/azure-iot-cli-extension
|
9885dd528fe363f5c118f3b735ad22fd4e1a86f4
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/dps/operations/registration_state_operations.py
|
srinivasaraogattupalli/azure-iot-cli-extension
|
9885dd528fe363f5c118f3b735ad22fd4e1a86f4
|
[
"MIT"
] | null | null | null |
azext_iot/sdk/dps/operations/registration_state_operations.py
|
srinivasaraogattupalli/azure-iot-cli-extension
|
9885dd528fe363f5c118f3b735ad22fd4e1a86f4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class RegistrationStateOperations(object):
"""RegistrationStateOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the request. Supported versions include: 2019-03-31. Constant value: "2019-03-31".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-03-31"
self.config = config
def get_registration_state(
self, id, custom_headers=None, raw=False, **operation_config):
"""Gets the device registration state.
:param id: Registration ID.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DeviceRegistrationState or ClientRawResponse if raw=true
:rtype:
~microsoft.azure.management.provisioningservices.models.DeviceRegistrationState
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<microsoft.azure.management.provisioningservices.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.get_registration_state.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DeviceRegistrationState', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_registration_state.metadata = {'url': '/registrations/{id}'}
def delete_registration_state(
self, id, if_match=None, custom_headers=None, raw=False, **operation_config):
"""Deletes the device registration.
:param id: Registration ID.
:type id: str
:param if_match: The ETag of the registration status record.
:type if_match: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<microsoft.azure.management.provisioningservices.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.delete_registration_state.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [204]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete_registration_state.metadata = {'url': '/registrations/{id}'}
def query_registration_state(
self, id, custom_headers=None, raw=False, **operation_config):
"""Gets the registration state of devices in this enrollmentGroup.
:param id: Enrollment group ID.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list or ClientRawResponse if raw=true
:rtype:
list[~microsoft.azure.management.provisioningservices.models.DeviceRegistrationState]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ProvisioningServiceErrorDetailsException<microsoft.azure.management.provisioningservices.models.ProvisioningServiceErrorDetailsException>`
"""
# Construct URL
url = self.query_registration_state.metadata['url']
path_format_arguments = {
'id': self._serialize.url("id", id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ProvisioningServiceErrorDetailsException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[DeviceRegistrationState]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
query_registration_state.metadata = {'url': '/registrations/{id}/query'}
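# A minimal usage sketch (hypothetical wiring; the concrete service client,
# serializer and credential setup are not part of this module):
#
#     ops = RegistrationStateOperations(client, config, serializer, deserializer)
#     state = ops.get_registration_state("my-registration-id")
#     ops.delete_registration_state("my-registration-id", if_match="*")
#     states = ops.query_registration_state("my-enrollment-group-id")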
| 43.719212
| 155
| 0.673239
|
dd752578edce92b22d8d7ac060ad279da50c3740
| 10,359
|
py
|
Python
|
geneticml/algorithms.py
|
albarsil/geneticml
|
27f4eb7b73343493c6e5cac1d3f7500b0d819880
|
[
"MIT"
] | 7
|
2021-12-07T21:36:00.000Z
|
2021-12-25T02:01:59.000Z
|
geneticml/algorithms.py
|
albarsil/geneticml
|
27f4eb7b73343493c6e5cac1d3f7500b0d819880
|
[
"MIT"
] | 3
|
2021-12-08T20:50:26.000Z
|
2022-02-17T11:28:06.000Z
|
geneticml/algorithms.py
|
albarsil/geneticml
|
27f4eb7b73343493c6e5cac1d3f7500b0d819880
|
[
"MIT"
] | 1
|
2021-12-07T21:08:20.000Z
|
2021-12-07T21:08:20.000Z
|
from typing import Callable, Tuple
class DataLoader(object):
"""
A data loader object to create an abstraction for data points and targets
"""
def __init__(self, data, target):
"""
Create a class instance
Parameters:
data (?): The X data
            target (?): The target data
"""
self._data = data
self._target = target
@property
def data(self):
return self._data
@property
def target(self):
return self._target
class EstimatorParameters(object):
"""
A class to wrap the estimator parameters
"""
def __init__(self, model_parameters: dict, data_balancing_parameters: dict = None):
"""
Create a class instance
Parameters:
model_parameters (?): The model parameters
data_balancing_parameters (?): The data balancing parameters
"""
self._model_parameters = model_parameters
self._data_balancing_parameters = data_balancing_parameters
@property
def model_parameters(self) -> dict:
"""
Property to get the model parameters
Returns:
(dict): The model parameters
"""
return self._model_parameters
@model_parameters.setter
def model_parameters(self, value: dict) -> None:
"""
Setter
Parameters:
value (dict): The new parameters
"""
self._model_parameters = value
@property
def data_balancing_parameters(self) -> dict:
"""
Property to get the data balancing parameters
Returns:
(dict): The data balancing parameters
"""
return self._data_balancing_parameters
@data_balancing_parameters.setter
def data_balancing_parameters(self, value: dict) -> None:
"""
Setter
Parameters:
value (dict): The new parameters
"""
self._data_balancing_parameters = value
class DefaultEstimatorMethods(object):
"""
A class with static methods and most common options to fill the estimators
"""
@staticmethod
def fit(model, data, target):
"""
A simple fit function
Parameters:
model (?): A model instance
data (?): The data that will be used to fit the algorithm
target (?): The target that will be used to fit the algorithm
Returns:
(?): The model fitted
"""
return model.fit(data, target)
@staticmethod
def predict(model, data):
"""
A simple predict function
Parameters:
model (?): A model instance
data (?): The data that will be used for predict
Returns:
(?): The model prediction
"""
return model.predict(data)
@staticmethod
def data_balance(balancing_model, data, target) -> Tuple:
"""
        A simple data-balancing function
Parameters:
balancing_model (?): A balancing model instance
data (?): The data that will be used to fit the algorithm
target (?): The target that will be used to fit the algorithm
Returns:
(tuple): A tuple containing the balanced data and targets
"""
return balancing_model.fit_resample(data, target)
class EstimatorBuilder(object):
"""
Examples
--------
    estimator = EstimatorBuilder().of(model_type).fit_with(fit_func).predict_with(predict_func).build()
"""
def of(self, model_type) -> 'EstimatorBuilder':
"""
Assign a model type for the estimator
Parameters:
model_type (?): A model type
Returns:
(EstimatorBuilder): The current object
"""
self._model_type = model_type
self._data_balance_model_type = None
self._data_balance = None
return self
def data_balance_algorithm(self, data_balance_model_type) -> 'EstimatorBuilder':
"""
Assign a data balance algorithm for the estimator
Parameters:
data_balance_model_type (?): A data balance model type
Returns:
(EstimatorBuilder): The current object
"""
self._data_balance_model_type = data_balance_model_type
return self
def fit_with(self, func: Callable = DefaultEstimatorMethods.fit) -> 'EstimatorBuilder':
"""
Define a function that will be used for the model training
Parameters:
func (Callable): A fit function used for model training
Returns:
(EstimatorBuilder): The current object
"""
self._fit = func
return self
def predict_with(self, func: Callable = DefaultEstimatorMethods.predict) -> 'EstimatorBuilder':
"""
Define a function that will be used for the model inference
Parameters:
func (Callable): A predict function used for model inference
Returns:
(EstimatorBuilder): The current object
"""
self._predict = func
return self
def data_balance_with(self, func: Callable) -> 'EstimatorBuilder':
"""
Define a function that will be used for the data balancing
Parameters:
func (Callable): A predict function used for data balancing
Returns:
(EstimatorBuilder): The current object
"""
self._data_balance = func
return self
def build(self) -> 'BaseEstimator':
"""
Creates an instance of BaseEstimator
Returns:
(BaseEstimator): An instance of BaseEstimator that will be used for the optimization
"""
return BaseEstimator(model_type=self._model_type, fit_func=self._fit, predict_func=self._predict, data_balance_model_type=self._data_balance_model_type,balance_func=self._data_balance)
class BaseEstimator(object):
"""
A base class to be used for model optimization
"""
def __init__(self, model_type, fit_func: Callable, predict_func: Callable, data_balance_model_type = None, balance_func: Callable = None):
"""
Create a class instance
Parameters:
model_type (?): A model type
fit_func (Callable): A fit function used for model training
predict_func (Callable): A predict function used for model inference
data_balance_model_type (?): A data balancing model type
balance_func (Callable): A data balancing function used for train data balancing
"""
self._parameters = None
self._model = None
self._model_type = model_type
self._data_balance_model = None
self._data_balance_model_type = data_balance_model_type
self._fit_func = fit_func
self._predict_func = predict_func
self._balance_func = balance_func
self._fitness = -1
def initialize(self, parameters: EstimatorParameters) -> 'BaseEstimator':
"""
        Create a class instance. It's used by the strategy and shouldn't be called directly
Parameters:
parameters (algorithms.EstimatorParameters): Possible parameters of the estimator
Returns:
(BaseEstimator): The current object with the model initialized
"""
self._parameters = parameters
self._model = self._model_type(**parameters.model_parameters)
if self._parameters.data_balancing_parameters is not None:
self._data_balance_model = self._data_balance_model_type(**parameters.data_balancing_parameters)
return self
@property
def has_data_balancing(self) -> bool:
"""
        Property to discover whether the estimator has a data balancing algorithm
        Returns:
            (bool): True if a data balancing model is configured, False otherwise
"""
return self._data_balance_model is not None
@property
def model_type(self):
        return self._model_type
@property
def parameters(self) -> dict:
"""
Property to access the base model parameters
Returns:
(dict): The model parameters
"""
return self._parameters
@property
def model(self):
"""
Property to create a model from the model type and parameters
Returns:
(?): The base model
"""
return self._model
@property
def fitness(self) -> float:
"""
Property to access the base model metric value
Returns:
(float): The metric value
"""
return self._fitness
@fitness.setter
def fitness(self, value) -> None:
"""
Setter
Parameters:
value (float): The new fitness value
"""
self._fitness = value
@property
def parameters_balance(self) -> dict:
"""
Property to access the base model data balance parameters
Returns:
(dict): The data balance parameters
"""
        return self._parameters.data_balancing_parameters if self._parameters else None
def fit(self, x, y) -> None:
"""
        Method that performs model training using the configured fit function.
Parameters:
x (?): The data set
y (?): The correct label set
"""
self._model = self._fit_func(self._model, x, y)
def predict(self, x) -> list:
"""
        Method that performs model inference using the configured predict function.
Parameters:
x (?): The data set
Returns:
(list): The predict output
"""
return self._predict_func(self._model, x)
def data_balance(self, data, target) -> Tuple:
"""
        Balance the data and targets using the configured balancing algorithm
Parameters:
data (?): The data that will be used to fit the algorithm
target (?): The target that will be used to fit the algorithm
Returns:
(tuple): A tuple containing the balanced data and targets
"""
if self._data_balance_model is None:
raise ValueError('A data_balance_type was not specified on the estimator __init__ function')
else:
return self._balance_func(self._data_balance_model, data, target)
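# --- A minimal usage sketch (not part of the library; assumes scikit-learn is
# available and reuses the DefaultEstimatorMethods helpers defined above):
if __name__ == "__main__":
    from sklearn.tree import DecisionTreeClassifier

    estimator = (
        EstimatorBuilder()
        .of(DecisionTreeClassifier)
        .fit_with(DefaultEstimatorMethods.fit)
        .predict_with(DefaultEstimatorMethods.predict)
        .build()
        .initialize(EstimatorParameters(model_parameters={"max_depth": 3}))
    )
    loader = DataLoader(data=[[0], [1], [2], [3]], target=[0, 0, 1, 1])
    estimator.fit(loader.data, loader.target)
    print(estimator.predict([[1.5]]))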
| 27.624
| 192
| 0.602761
|
8f8f502757836f4a9093100bfff38839bf46a9d2
| 5,200
|
py
|
Python
|
src/clients/ctm_api_client/models/request_parameters_wrapper_em_default_request_parameters_why_job_parameter.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 5
|
2021-12-01T18:40:00.000Z
|
2022-03-04T10:51:44.000Z
|
src/clients/ctm_api_client/models/request_parameters_wrapper_em_default_request_parameters_why_job_parameter.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 3
|
2022-02-21T20:08:32.000Z
|
2022-03-16T17:41:03.000Z
|
src/clients/ctm_api_client/models/request_parameters_wrapper_em_default_request_parameters_why_job_parameter.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 7
|
2021-12-01T11:59:16.000Z
|
2022-03-01T18:16:40.000Z
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_api_client.configuration import Configuration
class RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"ctm_parameters": "WhyJobParameters",
"em_parameters": "EMDefaultRequestParameters",
}
attribute_map = {
"ctm_parameters": "ctm_parameters",
"em_parameters": "em_parameters",
}
def __init__(
self, ctm_parameters=None, em_parameters=None, _configuration=None
): # noqa: E501
"""RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._ctm_parameters = None
self._em_parameters = None
self.discriminator = None
if ctm_parameters is not None:
self.ctm_parameters = ctm_parameters
if em_parameters is not None:
self.em_parameters = em_parameters
@property
def ctm_parameters(self):
"""Gets the ctm_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:return: The ctm_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:rtype: WhyJobParameters
"""
return self._ctm_parameters
@ctm_parameters.setter
def ctm_parameters(self, ctm_parameters):
"""Sets the ctm_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter.
:param ctm_parameters: The ctm_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:type: WhyJobParameters
"""
self._ctm_parameters = ctm_parameters
@property
def em_parameters(self):
"""Gets the em_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:return: The em_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:rtype: EMDefaultRequestParameters
"""
return self._em_parameters
@em_parameters.setter
def em_parameters(self, em_parameters):
"""Sets the em_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter.
:param em_parameters: The em_parameters of this RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter. # noqa: E501
:type: EMDefaultRequestParameters
"""
self._em_parameters = em_parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(
RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter, dict
):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(
other, RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter
):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(
other, RequestParametersWrapperEMDefaultRequestParametersWhyJobParameter
):
return True
return self.to_dict() != other.to_dict()
| 32.5
| 138
| 0.637692
|
47d32c68c2b3db25dccb399c0eea2b403951fd3b
| 5,023
|
py
|
Python
|
snipy/sympy_helpers.py
|
navdeeprana/snipy
|
55a5286a17d1c471d147e9ed7cbe06945c3eef02
|
[
"MIT"
] | null | null | null |
snipy/sympy_helpers.py
|
navdeeprana/snipy
|
55a5286a17d1c471d147e9ed7cbe06945c3eef02
|
[
"MIT"
] | null | null | null |
snipy/sympy_helpers.py
|
navdeeprana/snipy
|
55a5286a17d1c471d147e9ed7cbe06945c3eef02
|
[
"MIT"
] | null | null | null |
"""
A Printer which converts an expression into its LaTeX equivalent.
"""
from sympy.core.function import _coeff_isneg, AppliedUndef
# sympy.printing imports
from sympy.printing.conventions import requires_partial
from sympy.printing.precedence import PRECEDENCE
from sympy.printing.latex import LatexPrinter, accepted_latex_functions
class MyLatexPrinter(LatexPrinter):
def _print_Derivative(self, expr):
if requires_partial(expr.expr):
diff_symbol = r"\partial"
else:
diff_symbol = r"d"
tex = r"%s_{" % (diff_symbol)
for x, num in reversed(expr.variable_count):
if num == 1:
tex += r"%s " % (self._print(x))
else:
for i in range(num):
tex += r"%s " % (self._print(x))
tex += r"}"
# if dim == 1:
# tex = r"\frac{%s}{%s}" % (diff_symbol, tex)
# else:
# tex = r"\frac{%s^{%s}}{%s}" % (diff_symbol, self._print(dim), tex)
if any(_coeff_isneg(i) for i in expr.args):
return r"%s %s" % (
tex,
self.parenthesize(
expr.expr, PRECEDENCE["Mul"], is_neg=True, strict=True
),
)
return r"%s %s" % (
tex,
self.parenthesize(
expr.expr, PRECEDENCE["Mul"], is_neg=False, strict=True
),
)
def _print_Function(self, expr, exp=None):
r"""
Render functions to LaTeX, handling functions that LaTeX knows about
e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
For single-letter function names, render them as regular LaTeX math
symbols. For multi-letter function names that LaTeX does not know
about, (e.g., Li, sech) use \operatorname{} so that the function name
is rendered in Roman font and LaTeX handles spacing properly.
expr is the expression involving the function
exp is an exponent
"""
func = expr.func.__name__
if hasattr(self, "_print_" + func) and not isinstance(
expr, AppliedUndef
):
return getattr(self, "_print_" + func)(expr, exp)
else:
args = [str(self._print(arg)) for arg in expr.args]
# How inverse trig functions should be displayed, formats are:
# abbreviated: asin, full: arcsin, power: sin^-1
inv_trig_style = self._settings["inv_trig_style"]
# If we are dealing with a power-style inverse trig function
inv_trig_power_case = False
# If it is applicable to fold the argument brackets
can_fold_brackets = (
self._settings["fold_func_brackets"]
and len(args) == 1
and not self._needs_function_brackets(expr.args[0])
)
inv_trig_table = [
"asin",
"acos",
"atan",
"acsc",
"asec",
"acot",
"asinh",
"acosh",
"atanh",
"acsch",
"asech",
"acoth",
]
# If the function is an inverse trig function, handle the style
if func in inv_trig_table:
if inv_trig_style == "abbreviated":
pass
elif inv_trig_style == "full":
func = "arc" + func[1:]
elif inv_trig_style == "power":
func = func[1:]
inv_trig_power_case = True
# Can never fold brackets if we're raised to a power
if exp is not None:
can_fold_brackets = False
if inv_trig_power_case:
if func in accepted_latex_functions:
name = r"\%s^{-1}" % func
else:
name = r"\operatorname{%s}^{-1}" % func
elif exp is not None:
func_tex = self._hprint_Function(func)
func_tex = self.parenthesize_super(func_tex)
name = r"%s^{%s}" % (func_tex, exp)
else:
name = self._hprint_Function(func)
if not (
func in accepted_latex_functions or func in inv_trig_table
):
return name
else:
if can_fold_brackets:
if func in accepted_latex_functions:
# Wrap argument safely to avoid parse-time conflicts
# with the function name itself
name += r" {%s}"
else:
name += r"%s"
else:
name += r"{\left(%s \right)}"
if inv_trig_power_case and exp is not None:
name += r"^{%s}" % exp
return name % ",".join(args)
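# --- A minimal usage sketch (assumes SymPy is installed; the expression below
# is purely illustrative):
if __name__ == "__main__":
    from sympy import Function, symbols

    x, t = symbols("x t")
    u = Function("u")
    printer = MyLatexPrinter()
    # Renders the derivative with the compact subscript notation implemented in
    # _print_Derivative above (roughly "\partial_{x } u{\left(x,t \right)}").
    print(printer.doprint(u(x, t).diff(x)))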
| 35.878571
| 80
| 0.497511
|
7d30169cbef6793d867c59971075417462db8f41
| 4,632
|
py
|
Python
|
erlterm/types.py
|
fycheung/python-erlterm
|
4f6dcb3ad4997f503d42640b840dd50d337deaf0
|
[
"BSD-3-Clause"
] | null | null | null |
erlterm/types.py
|
fycheung/python-erlterm
|
4f6dcb3ad4997f503d42640b840dd50d337deaf0
|
[
"BSD-3-Clause"
] | null | null | null |
erlterm/types.py
|
fycheung/python-erlterm
|
4f6dcb3ad4997f503d42640b840dd50d337deaf0
|
[
"BSD-3-Clause"
] | null | null | null |
__all__ = ['Atom', 'Binary', 'Tuple', 'Maps', 'ErlString',
'Reference', 'Port', 'PID', 'Export', 'List']
class Atom(str):
def is_simple_atom(self):
if not (self[0] >= 'a' and self[0] <= 'z'):
return False
for c in self:
if (c >= 'a' and c <= 'z') or \
(c >= 'A' and c <= 'Z') or \
(c >= '0' and c <= '9') or \
c == '_' or c == '@':
continue
else:
return False
return True
    def __repr__(self):
        return "Atom(%s)" % super(Atom, self).__repr__()
def __str__(self):
if self.is_simple_atom():
return super(Atom, self).__str__()
else:
return "'%s'"% super(Atom, self).__str__()
class Binary(bytes):
def is_visible(self):
for c in self:
if c < 32 or c > 126:
return False
return True
def __str__(self):
# b = self.hex()
# num_list = [str(int(b, 16)) for b in [b[i:i+2] for i in range(0, len(b), 2)]]
if self.is_visible():
return "<<\"%s\">>"% super(Binary, self).__str__()[2:-1]
else:
return "<<%s>>"%(",".join([str(c) for c in self]), )
class Tuple(tuple):
def __str__(self) -> str:
return "{%s}"%(",".join([str(i) for i in self]),)
class Maps(dict):
def __str__(self) -> str:
return "#{%s}"%(",".join(["%s => %s"%(k,v) for k,v in self.items()]),)
class List(list):
def __str__(self) -> str:
return "[%s]"%(",".join([str(item) for item in self]),)
# visible simple string
class ErlString(str):
def __init__(self, string) -> None:
if len(string) > 65535:
raise ValueError("string len over 65535")
for i in self:
if ord(i) > 126 or ord(i) < 32:
raise ValueError("string char must in range 32-126")
super(ErlString, self).__init__()
def __str__(self) -> str:
return '"%s"'%(super(ErlString,self).__str__(),)
class Reference(object):
def __init__(self, node, ref_id, creation):
if not isinstance(ref_id, tuple):
ref_id = tuple(ref_id)
self.node = node
self.ref_id = ref_id
self.creation = creation
def __eq__(self, other):
return isinstance(other, Reference) and self.node == other.node and self.ref_id == other.ref_id and self.creation == other.creation
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "#Ref<%d.%s>" % (self.creation, ".".join(str(i) for i in self.ref_id))
def __repr__(self):
return "%s::%s" % (self.__str__(), self.node)
class Port(object):
def __init__(self, node, port_id, creation):
self.node = node
self.port_id = port_id
self.creation = creation
def __eq__(self, other):
return isinstance(other, Port) and self.node == other.node and self.port_id == other.port_id and self.creation == other.creation
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "#Port<%d.%d>" % (self.creation, self.port_id)
def __repr__(self):
return "%s::%s" % (self.__str__(), self.node)
class PID(object):
def __init__(self, node, pid_id, serial, creation):
self.node = node
self.pid_id = pid_id
self.serial = serial
self.creation = creation
def __eq__(self, other):
return isinstance(other, PID) and self.node == other.node and self.pid_id == other.pid_id and self.serial == other.serial and self.creation == other.creation
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%d.%d.%d>" % (self.creation, self.pid_id, self.serial)
def __repr__(self):
return "%s::%s" % (self.__str__(), self.node)
class Export(object):
def __init__(self, module, function, arity):
self.module = module
self.function = function
self.arity = arity
def __eq__(self, other):
return isinstance(other, Export) and self.module == other.module and self.function == other.function and self.arity == other.arity
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "#Fun<%s.%s.%d>" % (self.module, self.function, self.arity)
def __repr__(self):
return self.__str__()
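# A few illustrative renderings (values chosen for this sketch only):
#
#   >>> print(Atom("ok"))
#   ok
#   >>> print(Atom("hello world"))      # not a simple atom, so it is quoted
#   'hello world'
#   >>> print(Tuple((Atom("reply"), Binary(b"ping"), List([1, 2, 3]))))
#   {reply,<<"ping">>,[1,2,3]}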
| 31.297297
| 165
| 0.558722
|
865a7944fb1ed7f14cdbb8498fd7083a049c7960
| 389
|
py
|
Python
|
test/categories/test_language.py
|
Augilar/qual-id
|
32c5d961fc4784a108deedde97f4632420ec6392
|
[
"MIT"
] | null | null | null |
test/categories/test_language.py
|
Augilar/qual-id
|
32c5d961fc4784a108deedde97f4632420ec6392
|
[
"MIT"
] | null | null | null |
test/categories/test_language.py
|
Augilar/qual-id
|
32c5d961fc4784a108deedde97f4632420ec6392
|
[
"MIT"
] | null | null | null |
import unittest
from qual_id.categories.language import Language
from test.utils.category_helper import CategoryHelper
class TestLanguage(unittest.TestCase):
def setUp(self):
self.language = Language()
def test__get_values__is_valid(self):
self.assertTrue(CategoryHelper.get_values_is_valid(self.language))
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 24.3125
| 70
| 0.784062
|
605f9e4e3027693e11e2300b7fb4834364da7164
| 394
|
py
|
Python
|
src/deep_rlsp/model/gridworlds_feature_space.py
|
HumanCompatibleAI/deep-rlsp
|
81941693aba2aa9157ca96e96567f4e3cb95fbc3
|
[
"MIT"
] | 24
|
2021-04-17T21:32:43.000Z
|
2021-08-07T17:20:15.000Z
|
src/deep_rlsp/model/gridworlds_feature_space.py
|
HumanCompatibleAI/deep-rlsp
|
81941693aba2aa9157ca96e96567f4e3cb95fbc3
|
[
"MIT"
] | null | null | null |
src/deep_rlsp/model/gridworlds_feature_space.py
|
HumanCompatibleAI/deep-rlsp
|
81941693aba2aa9157ca96e96567f4e3cb95fbc3
|
[
"MIT"
] | 7
|
2021-04-17T21:32:48.000Z
|
2022-02-09T04:18:39.000Z
|
class GridworldsFeatureSpace:
def __init__(self, env):
self.env = env
s = self.env.init_state
f = self.env.s_to_f(s)
assert len(f.shape) == 1
self.state_size = f.shape[0]
def encoder(self, obs):
s = self.env.obs_to_s(obs)
f = self.env.s_to_f(s)
return f
def decoder(self, state):
raise NotImplementedError()
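# A minimal usage sketch (assumes `env` follows the gridworld interface used
# above, i.e. it exposes init_state, obs_to_s and s_to_f):
#
#     feature_space = GridworldsFeatureSpace(env)
#     features = feature_space.encoder(observation)  # 1-D vector of length state_size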
| 24.625
| 36
| 0.573604
|
cdfa1c727d142e61721c01869b5592b271cdebb1
| 22,402
|
py
|
Python
|
lib/tool_shed/util/shed_util_common.py
|
rhpvorderman/galaxy
|
178015f8eff0b0c7a59c0d6756658f6428222837
|
[
"CC-BY-3.0"
] | 47
|
2015-10-21T23:30:30.000Z
|
2022-03-09T06:51:32.000Z
|
lib/tool_shed/util/shed_util_common.py
|
rhpvorderman/galaxy
|
178015f8eff0b0c7a59c0d6756658f6428222837
|
[
"CC-BY-3.0"
] | 78
|
2019-01-18T08:12:49.000Z
|
2022-03-13T08:56:41.000Z
|
lib/tool_shed/util/shed_util_common.py
|
rhpvorderman/galaxy
|
178015f8eff0b0c7a59c0d6756658f6428222837
|
[
"CC-BY-3.0"
] | 35
|
2015-10-30T13:09:40.000Z
|
2021-05-03T23:17:46.000Z
|
import json
import logging
import os
import socket
import string
import sqlalchemy.orm.exc
from sqlalchemy import and_, false, true
import galaxy.tool_util.deps.requirements
from galaxy import util
from galaxy.tool_shed.util.shed_util_common import (
can_eliminate_repository_dependency,
can_eliminate_tool_dependency,
clean_dependency_relationships,
generate_tool_guid,
get_ctx_rev,
get_next_prior_import_or_install_required_dict_entry,
get_tool_panel_config_tool_path_install_dir,
get_user,
have_shed_tool_conf_for_install,
set_image_paths,
tool_shed_is_this_tool_shed,
)
from galaxy.util import (
checkers,
unicodify,
)
from tool_shed.util import (
basic_util,
common_util,
hg_util,
repository_util
)
log = logging.getLogger(__name__)
MAX_CONTENT_SIZE = 1048576
DATATYPES_CONFIG_FILENAME = 'datatypes_conf.xml'
REPOSITORY_DATA_MANAGER_CONFIG_FILENAME = 'data_manager_conf.xml'
new_repo_email_alert_template = """
Sharable link: ${sharable_link}
Repository name: ${repository_name}
Revision: ${revision}
Change description:
${description}
Uploaded by: ${username}
Date content uploaded: ${display_date}
${content_alert_str}
-----------------------------------------------------------------------------
This change alert was sent from the Galaxy tool shed hosted on the server
"${host}"
-----------------------------------------------------------------------------
You received this alert because you registered to receive email when
new repositories were created in the Galaxy tool shed named "${host}".
-----------------------------------------------------------------------------
"""
email_alert_template = """
Sharable link: ${sharable_link}
Repository name: ${repository_name}
Revision: ${revision}
Change description:
${description}
Changed by: ${username}
Date of change: ${display_date}
${content_alert_str}
-----------------------------------------------------------------------------
This change alert was sent from the Galaxy tool shed hosted on the server
"${host}"
-----------------------------------------------------------------------------
You received this alert because you registered to receive email whenever
changes were made to the repository named "${repository_name}".
-----------------------------------------------------------------------------
"""
contact_owner_template = """
GALAXY TOOL SHED REPOSITORY MESSAGE
------------------------
The user '${username}' sent you the following message regarding your tool shed
repository named '${repository_name}'. You can respond by sending a reply to
the user's email address: ${email}.
-----------------------------------------------------------------------------
${message}
-----------------------------------------------------------------------------
This message was sent from the Galaxy Tool Shed instance hosted on the server
'${host}'
"""
def count_repositories_in_category(app, category_id):
sa_session = app.model.session
return sa_session.query(app.model.RepositoryCategoryAssociation) \
.filter(app.model.RepositoryCategoryAssociation.table.c.category_id == app.security.decode_id(category_id)) \
.count()
def get_categories(app):
"""Get all categories from the database."""
sa_session = app.model.session
return sa_session.query(app.model.Category) \
.filter(app.model.Category.table.c.deleted == false()) \
.order_by(app.model.Category.table.c.name) \
.all()
def get_category(app, id):
"""Get a category from the database."""
sa_session = app.model.session
return sa_session.query(app.model.Category).get(app.security.decode_id(id))
def get_category_by_name(app, name):
"""Get a category from the database via name."""
sa_session = app.model.session
try:
return sa_session.query(app.model.Category).filter_by(name=name).one()
except sqlalchemy.orm.exc.NoResultFound:
return None
def get_tool_shed_repo_requirements(app, tool_shed_url, repositories=None, repo_info_dicts=None):
"""
Contact tool_shed_url for a list of requirements for a repository or a list of repositories.
Returns a list of requirements, where each requirement is a dictionary with name and version as keys.
"""
if not repositories and not repo_info_dicts:
raise Exception("Need to pass either repository or repo_info_dicts")
if repositories:
if not isinstance(repositories, list):
repositories = [repositories]
repository_params = [{'name': repository.name,
'owner': repository.owner,
'changeset_revision': repository.changeset_revision} for repository in repositories]
else:
if not isinstance(repo_info_dicts, list):
repo_info_dicts = [repo_info_dicts]
repository_params = []
for repo_info_dict in repo_info_dicts:
for name, repo_info_tuple in repo_info_dict.items():
# repo_info_tuple is a list, but keep terminology
owner = repo_info_tuple[4]
changeset_revision = repo_info_tuple[2]
repository_params.append({'name': name,
'owner': owner,
'changeset_revision': changeset_revision})
pathspec = ["api", "repositories", "get_repository_revision_install_info"]
tools = []
for params in repository_params:
response = util.url_get(tool_shed_url,
auth=app.tool_shed_registry.url_auth(tool_shed_url),
pathspec=pathspec,
params=params
)
json_response = json.loads(response)
valid_tools = json_response[1].get('valid_tools', [])
if valid_tools:
tools.extend(valid_tools)
return get_requirements_from_tools(tools)
def get_requirements_from_tools(tools):
return {tool['id']: galaxy.tool_util.deps.requirements.ToolRequirements.from_list(tool['requirements']) for tool in tools}
def get_requirements_from_repository(repository):
if not repository.includes_tools:
return {}
else:
return get_requirements_from_tools(repository.metadata.get('tools', []))
def get_repository_categories(app, id):
"""Get categories of a repository on the tool shed side from the database via id"""
sa_session = app.model.session
return sa_session.query(app.model.RepositoryCategoryAssociation) \
.filter(app.model.RepositoryCategoryAssociation.table.c.repository_id == app.security.decode_id(id))
def get_repository_file_contents(app, file_path, repository_id, is_admin=False):
"""Return the display-safe contents of a repository file for display in a browser."""
safe_str = ''
if not is_path_browsable(app, file_path, repository_id, is_admin):
log.warning('Request tries to access a file outside of the repository location. File path: %s', file_path)
return 'Invalid file path'
# Symlink targets are checked by is_path_browsable
if os.path.islink(file_path):
safe_str = f"link to: {basic_util.to_html_string(os.readlink(file_path))}"
return safe_str
elif checkers.is_gzip(file_path):
return '<br/>gzip compressed file<br/>'
elif checkers.is_bz2(file_path):
return '<br/>bz2 compressed file<br/>'
elif checkers.is_zip(file_path):
return '<br/>zip compressed file<br/>'
elif checkers.check_binary(file_path):
return '<br/>Binary file<br/>'
else:
for line in open(file_path):
safe_str = f'{safe_str}{basic_util.to_html_string(line)}'
# Stop reading after string is larger than MAX_CONTENT_SIZE.
if len(safe_str) > MAX_CONTENT_SIZE:
large_str = \
'<br/>File contents truncated because file size is larger than maximum viewing size of %s<br/>' % \
util.nice_size(MAX_CONTENT_SIZE)
safe_str = f'{safe_str}{large_str}'
break
if len(safe_str) > basic_util.MAX_DISPLAY_SIZE:
# Eliminate the middle of the file to display a file no larger than basic_util.MAX_DISPLAY_SIZE.
# This may not be ideal if the file is larger than MAX_CONTENT_SIZE.
join_by_str = \
"<br/><br/>...some text eliminated here because file size is larger than maximum viewing size of %s...<br/><br/>" % \
util.nice_size(basic_util.MAX_DISPLAY_SIZE)
safe_str = util.shrink_string_by_size(safe_str,
basic_util.MAX_DISPLAY_SIZE,
join_by=join_by_str,
left_larger=True,
beginning_on_size_error=True)
return safe_str
def get_repository_files(folder_path):
"""Return the file hierarchy of a tool shed repository."""
contents = []
for item in os.listdir(folder_path):
# Skip .hg directories
if item.startswith('.hg'):
continue
contents.append(item)
if contents:
contents.sort()
return contents
def get_repository_from_refresh_on_change(app, **kwd):
# The changeset_revision_select_field in several grids performs a refresh_on_change which sends in request parameters like
    # changeset_revision_1, changeset_revision_2, etc. One of the many select fields on the grid performed the refresh_on_change,
# so we loop through all of the received values to see which value is not the repository tip. If we find it, we know the
# refresh_on_change occurred and we have the necessary repository id and change set revision to pass on.
repository_id = None
v = None
for k, v in kwd.items():
changeset_revision_str = 'changeset_revision_'
if k.startswith(changeset_revision_str):
repository_id = app.security.encode_id(int(k.lstrip(changeset_revision_str)))
repository = repository_util.get_repository_in_tool_shed(app, repository_id)
if repository.tip() != v:
return v, repository
# This should never be reached - raise an exception?
return v, None
def get_repository_type_from_tool_shed(app, tool_shed_url, name, owner):
"""
Send a request to the tool shed to retrieve the type for a repository defined by the
combination of a name and owner.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url)
params = dict(name=name, owner=owner)
pathspec = ['repository', 'get_repository_type']
repository_type = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
return repository_type
def get_tool_dependency_definition_metadata_from_tool_shed(app, tool_shed_url, name, owner):
"""
Send a request to the tool shed to retrieve the current metadata for a
repository of type tool_dependency_definition defined by the combination
of a name and owner.
"""
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(app, tool_shed_url)
params = dict(name=name, owner=owner)
pathspec = ['repository', 'get_tool_dependency_definition_metadata']
metadata = util.url_get(tool_shed_url, auth=app.tool_shed_registry.url_auth(tool_shed_url), pathspec=pathspec, params=params)
return metadata
def get_tool_path_by_shed_tool_conf_filename(app, shed_tool_conf):
"""
Return the tool_path config setting for the received shed_tool_conf file by searching the tool box's in-memory list of shed_tool_confs for the
dictionary whose config_filename key has a value matching the received shed_tool_conf.
"""
for shed_tool_conf_dict in app.toolbox.dynamic_confs(include_migrated_tool_conf=True):
config_filename = shed_tool_conf_dict['config_filename']
if config_filename == shed_tool_conf:
return shed_tool_conf_dict['tool_path']
else:
file_name = basic_util.strip_path(config_filename)
if file_name == shed_tool_conf:
return shed_tool_conf_dict['tool_path']
return None
def handle_email_alerts(app, host, repository, content_alert_str='', new_repo_alert=False, admin_only=False):
"""
There are 2 complementary features that enable a tool shed user to receive email notification:
1. Within User Preferences, they can elect to receive email when the first (or first valid)
change set is produced for a new repository.
2. When viewing or managing a repository, they can check the box labeled "Receive email alerts"
       which causes them to receive email alerts when updates to the repository occur. This same feature
is available on a per-repository basis on the repository grid within the tool shed.
There are currently 4 scenarios for sending email notification when a change is made to a repository:
1. An admin user elects to receive email when the first change set is produced for a new repository
from User Preferences. The change set does not have to include any valid content. This allows for
the capture of inappropriate content being uploaded to new repositories.
2. A regular user elects to receive email when the first valid change set is produced for a new repository
from User Preferences. This differs from 1 above in that the user will not receive email until a
change set that includes valid content is produced.
3. An admin user checks the "Receive email alerts" check box on the manage repository page. Since the
user is an admin user, the email will include information about both HTML and image content that was
included in the change set.
4. A regular user checks the "Receive email alerts" check box on the manage repository page. Since the
user is not an admin user, the email will not include any information about both HTML and image content
that was included in the change set.
"""
sa_session = app.model.session
repo = repository.hg_repo
sharable_link = repository_util.generate_sharable_link_for_repository_in_tool_shed(repository, changeset_revision=None)
smtp_server = app.config.smtp_server
if smtp_server and (new_repo_alert or repository.email_alerts):
# Send email alert to users that want them.
if app.config.email_from is not None:
email_from = app.config.email_from
elif host.split(':')[0] in ['localhost', '127.0.0.1', '0.0.0.0']:
email_from = f"galaxy-no-reply@{socket.getfqdn()}"
else:
email_from = f"galaxy-no-reply@{host.split(':')[0]}"
ctx = repo[repo.changelog.tip()]
username = unicodify(ctx.user())
try:
username = username.split()[0]
except Exception:
pass
# We'll use 2 template bodies because we only want to send content
# alerts to tool shed admin users.
if new_repo_alert:
template = new_repo_email_alert_template
else:
template = email_alert_template
display_date = hg_util.get_readable_ctx_date(ctx)
description = unicodify(ctx.description())
revision = f'{ctx.rev()}:{ctx}'
admin_body = string.Template(template).safe_substitute(host=host,
sharable_link=sharable_link,
repository_name=repository.name,
revision=revision,
display_date=display_date,
description=description,
username=username,
content_alert_str=content_alert_str)
body = string.Template(template).safe_substitute(host=host,
sharable_link=sharable_link,
repository_name=repository.name,
revision=revision,
display_date=display_date,
description=description,
username=username,
content_alert_str='')
admin_users = app.config.get("admin_users", "").split(",")
frm = email_from
if new_repo_alert:
subject = f"Galaxy tool shed alert for new repository named {str(repository.name)}"
subject = subject[:80]
email_alerts = []
for user in sa_session.query(app.model.User) \
.filter(and_(app.model.User.table.c.deleted == false(),
app.model.User.table.c.new_repo_alert == true())):
if admin_only:
if user.email in admin_users:
email_alerts.append(user.email)
else:
email_alerts.append(user.email)
else:
subject = f"Galaxy tool shed update alert for repository named {str(repository.name)}"
email_alerts = json.loads(repository.email_alerts)
for email in email_alerts:
to = email.strip()
# Send it
try:
if to in admin_users:
util.send_mail(frm, to, subject, admin_body, app.config)
else:
util.send_mail(frm, to, subject, body, app.config)
except Exception:
log.exception("An error occurred sending a tool shed repository update alert by email.")
def is_path_browsable(app, path, repository_id, is_admin=False):
"""
Detects whether the given path is browsable i.e. is within the
    allowed repository folders. Admins can additionally browse folders
with tool dependencies.
"""
if is_admin and is_path_within_dependency_dir(app, path):
return True
return is_path_within_repo(app, path, repository_id)
def is_path_within_dependency_dir(app, path):
"""
Detect whether the given path is within the tool_dependency_dir folder on the disk.
(Specified by the config option). Use to filter malicious symlinks targeting outside paths.
"""
allowed = False
resolved_path = os.path.realpath(path)
tool_dependency_dir = app.config.get('tool_dependency_dir', None)
if tool_dependency_dir:
dependency_path = os.path.abspath(tool_dependency_dir)
allowed = os.path.commonprefix([dependency_path, resolved_path]) == dependency_path
return allowed
def is_path_within_repo(app, path, repository_id):
"""
Detect whether the given path is within the repository folder on the disk.
Use to filter malicious symlinks targeting outside paths.
"""
repo_path = os.path.abspath(repository_util.get_repository_by_id(app, repository_id).repo_path(app))
resolved_path = os.path.realpath(path)
return os.path.commonprefix([repo_path, resolved_path]) == repo_path
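# A minimal sketch (not part of the original module) of how the realpath/commonprefix
# containment check above behaves. The paths are hypothetical. Note that
# os.path.commonprefix compares strings, so a sibling directory whose name merely
# shares a prefix (e.g. /srv/repo vs. /srv/repo2) would also pass the check.
#
#   repo_path = os.path.abspath('/srv/repos/my_repo')
#   inside = os.path.realpath('/srv/repos/my_repo/tools/../tools/tool.xml')
#   outside = os.path.realpath('/srv/repos/my_repo/evil_symlink')   # resolves outside the repo
#   os.path.commonprefix([repo_path, inside]) == repo_path    # True  -> browsable
#   os.path.commonprefix([repo_path, outside]) == repo_path   # False -> filtered out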
def open_repository_files_folder(app, folder_path, repository_id, is_admin=False):
"""
Return a list of dictionaries, each of which contains information for a file or directory contained
within a directory in a repository file hierarchy.
"""
if not is_path_browsable(app, folder_path, repository_id, is_admin):
log.warning('Request tries to access a folder outside of the allowed locations. Folder path: %s', folder_path)
return []
    try:
        files_list = get_repository_files(folder_path)
    except OSError as e:
        if str(e).find('No such file or directory') >= 0:
            # We have a repository with no contents.
            return []
        # Any other OSError is unexpected; re-raise it instead of falling through
        # with files_list undefined.
        raise
folder_contents = []
for filename in files_list:
is_folder = False
full_path = os.path.join(folder_path, filename)
is_link = os.path.islink(full_path)
path_is_browsable = is_path_browsable(app, full_path, repository_id)
if is_link and not path_is_browsable:
log.warning(f"Valid folder contains a symlink outside of the repository location. Link found in: {str(full_path)}")
if filename:
if os.path.isdir(full_path) and path_is_browsable:
# Append a '/' character so that our jquery dynatree will function properly.
filename = f'{filename}/'
full_path = f'{full_path}/'
is_folder = True
node = {"title": filename,
"isFolder": is_folder,
"isLazy": is_folder,
"tooltip": full_path,
"key": full_path}
folder_contents.append(node)
return folder_contents
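# Illustrative only: each node returned above is a plain dict shaped for the jquery
# dynatree widget, e.g. (hypothetical paths):
#   {"title": "tools/", "isFolder": True, "isLazy": True,
#    "tooltip": "/repos/my_repo/tools/", "key": "/repos/my_repo/tools/"}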
__all__ = (
'can_eliminate_repository_dependency',
'can_eliminate_tool_dependency',
'clean_dependency_relationships',
'count_repositories_in_category',
'generate_tool_guid',
'get_categories',
'get_category',
'get_category_by_name',
'get_requirements_from_tools',
'get_requirements_from_repository',
'get_tool_shed_repo_requirements',
'get_ctx_rev',
'get_next_prior_import_or_install_required_dict_entry',
'get_repository_categories',
'get_repository_file_contents',
'get_repository_type_from_tool_shed',
'get_tool_dependency_definition_metadata_from_tool_shed',
'get_tool_panel_config_tool_path_install_dir',
'get_tool_path_by_shed_tool_conf_filename',
'get_user',
'handle_email_alerts',
'have_shed_tool_conf_for_install',
'is_path_browsable',
'is_path_within_dependency_dir',
'is_path_within_repo',
'open_repository_files_folder',
'set_image_paths',
'tool_shed_is_this_tool_shed',
)
| 44.536779 | 146 | 0.63847 |
b9698bb0bd7b0202b91cb77c1a9b33550397f8c1 | 4,602 | py | Python | tests/test_dependency_loop.py | hunter-packages/fruit | 71d9ada48f7bf1749ce2889250955404582a7c6b | ["Apache-2.0"] | 1 | 2018-08-29T11:10:35.000Z | 2018-08-29T11:10:35.000Z | tests/test_dependency_loop.py | hunter-packages/fruit | 71d9ada48f7bf1749ce2889250955404582a7c6b | ["Apache-2.0"] | 1 | 2018-08-29T11:29:53.000Z | 2018-08-29T11:29:53.000Z | tests/test_dependency_loop.py | hunter-packages/fruit | 71d9ada48f7bf1749ce2889250955404582a7c6b | ["Apache-2.0"] | 2 | 2020-10-01T04:19:30.000Z | 2021-07-01T07:50:22.000Z |
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from fruit_test_common import *
COMMON_DEFINITIONS = '''
#include "test_common.h"
struct X;
struct Annotation1 {};
using XAnnot1 = fruit::Annotated<Annotation1, X>;
struct Annotation2 {};
using XAnnot2 = fruit::Annotated<Annotation2, X>;
struct Annotation3 {};
using XAnnot3 = fruit::Annotated<Annotation3, X>;
'''
@pytest.mark.parametrize('XAnnot,XConstRefAnnot,YAnnot,YConstRefAnnot', [
('X', 'const X&', 'Y', 'const Y&'),
('fruit::Annotated<Annotation1, X>', 'ANNOTATED(Annotation1, const X&)',
'fruit::Annotated<Annotation2, Y>', 'ANNOTATED(Annotation2, const Y&)')
])
def test_loop_in_autoinject(XAnnot, XConstRefAnnot, YAnnot, YConstRefAnnot):
source = '''
struct Y;
struct X {
INJECT(X(YConstRefAnnot)) {};
};
struct Y {
INJECT(Y(XConstRefAnnot)) {};
};
fruit::Component<XAnnot> mutuallyConstructibleComponent() {
return fruit::createComponent();
}
'''
expect_compile_error(
'SelfLoopError<XAnnot,YAnnot>',
'Found a loop in the dependencies',
COMMON_DEFINITIONS,
source,
locals())
@pytest.mark.parametrize('XAnnot,ConstXAnnot,XConstRefAnnot,YAnnot,YConstRefAnnot', [
('X', 'const X', 'const X&', 'Y', 'const Y&'),
('fruit::Annotated<Annotation1, X>', 'ANNOTATED(Annotation1, const X)', 'ANNOTATED(Annotation1, const X&)',
'fruit::Annotated<Annotation2, Y>', 'ANNOTATED(Annotation2, const Y&)')
])
def test_loop_in_autoinject_const(XAnnot, ConstXAnnot, XConstRefAnnot, YAnnot, YConstRefAnnot):
source = '''
struct Y;
struct X {
INJECT(X(YConstRefAnnot)) {};
};
struct Y {
INJECT(Y(XConstRefAnnot)) {};
};
fruit::Component<ConstXAnnot> mutuallyConstructibleComponent() {
return fruit::createComponent();
}
'''
expect_compile_error(
'SelfLoopError<XAnnot,YAnnot>',
'Found a loop in the dependencies',
COMMON_DEFINITIONS,
source,
locals())
def test_loop_in_register_provider():
source = '''
struct X {};
struct Y {};
fruit::Component<X> mutuallyConstructibleComponent() {
return fruit::createComponent()
.registerProvider<X(Y)>([](Y) {return X();})
.registerProvider<Y(X)>([](X) {return Y();});
}
'''
expect_compile_error(
'SelfLoopError<X,Y>',
'Found a loop in the dependencies',
COMMON_DEFINITIONS,
source,
locals())
def test_loop_in_register_provider_with_annotations():
source = '''
struct X {};
fruit::Component<fruit::Annotated<Annotation1, X>> mutuallyConstructibleComponent() {
return fruit::createComponent()
.registerProvider<fruit::Annotated<Annotation1, X>(fruit::Annotated<Annotation2, X>)>([](X x) {return x;})
.registerProvider<fruit::Annotated<Annotation2, X>(fruit::Annotated<Annotation1, X>)>([](X x) {return x;});
}
'''
expect_compile_error(
'SelfLoopError<fruit::Annotated<Annotation1, X>, fruit::Annotated<Annotation2, X>>',
'Found a loop in the dependencies',
COMMON_DEFINITIONS,
source,
locals())
def test_with_different_annotations_ok():
source = '''
struct X {};
fruit::Component<XAnnot3> getComponent() {
return fruit::createComponent()
.registerProvider<XAnnot1()>([](){return X();})
.registerProvider<XAnnot2(XAnnot1)>([](X x){return x;})
.registerProvider<XAnnot3(XAnnot2)>([](X x){return x;});
}
int main() {
fruit::Injector<XAnnot3> injector(getComponent);
injector.get<XAnnot3>();
}
'''
expect_success(
COMMON_DEFINITIONS,
source)
if __name__ == '__main__':
main(__file__)
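# Illustrative only: expect_compile_error asserts that compiling each snippet fails with
# a diagnostic roughly like
#   static assertion failed: SelfLoopError<X, Y> ... Found a loop in the dependencies
# (exact wording depends on the compiler and fruit version).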
| 31.306122 | 121 | 0.61734 |
2ed3ebc3d967b37b2d87d680c31a7fde1e7ef1e8 | 1,159 | py | Python | distributions/Generaldistribution.py | AmanMander123/probabilitydistributionspackage | a5fc5d6c0d5c3b35268bb4b0dcabb4e472a79812 | ["Unlicense"] | null | null | null | distributions/Generaldistribution.py | AmanMander123/probabilitydistributionspackage | a5fc5d6c0d5c3b35268bb4b0dcabb4e472a79812 | ["Unlicense"] | null | null | null | distributions/Generaldistribution.py | AmanMander123/probabilitydistributionspackage | a5fc5d6c0d5c3b35268bb4b0dcabb4e472a79812 | ["Unlicense"] | null | null | null |
class Distribution:
def __init__(self, mu, sigma=1):
"""
Distribution class used for calculating and visualizing a probability
distribution
Attributes:
mean (float): representing the mean value of the distribution
stdev (float): representing the standard deviation of the
distribution
data_list (list of floats): data points extracted from the data
file
"""
self.mean = mu
self.stdev = sigma
self.data = []
def read_data_file(self, file_name):
"""
Function that reads in data from a txt file. txt file must contain
one number (float) per line. The numbers are stored in the data
attribute.
:param file_name: string representing the name of the txt file
containing the data points
:return: None
"""
        with open(file_name) as file:
            data_list = []
            line = file.readline()
            while line:
                # One number per line, parsed as float to match the docstring.
                data_list.append(float(line))
                line = file.readline()
        self.data = data_list
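# A small usage sketch, assuming a text file 'numbers.txt' with one number per line:
#
#   dist = Distribution(mu=0, sigma=1)
#   dist.read_data_file('numbers.txt')
#   dist.data            # e.g. [1.0, 3.0, 99.0, 100.0]
#   dist.mean, dist.stdev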
| 30.5 | 77 | 0.57377 |
776ca466b98a07ca51a88e537064fd2c298450a3 | 1,556 | py | Python | src/env/data/imunizacaograph.py | caiobarretta/TPEL6 | 97decd3310efb7ba613702a4a8b43b6ce664b57f | ["Apache-2.0"] | null | null | null | src/env/data/imunizacaograph.py | caiobarretta/TPEL6 | 97decd3310efb7ba613702a4a8b43b6ce664b57f | ["Apache-2.0"] | null | null | null | src/env/data/imunizacaograph.py | caiobarretta/TPEL6 | 97decd3310efb7ba613702a4a8b43b6ce664b57f | ["Apache-2.0"] | null | null | null |
from data.helpers.datahelper import lista_dados_anos
from helpers import listhelper
from data.helpers import dataextractionhelper, csvhelper
from models.graph import Graph
"""função que monta o modelo gráfico"""
def build(UF):
serie_x = lista_dados_anos()
title = "Imunização X Mortalidade Tardia" # Título do gráfico
codigo_uf = dataextractionhelper.retorna_codigo_uf(UF)
serie_y = processa_dados_imunizacao(codigo_uf, serie_x)
serie_y2 = processa_dados_mortalidade_tardia(codigo_uf, serie_x)
grafico = Graph(title, serie_x, serie_y, serie_y2, "Anos", "Imunização", "Mortalidade Tardia", uf=UF)
return grafico
"""Retorna a lista de valores da mortalidade no respectivo código uf"""
def processa_dados_imunizacao(codigo_uf, lista_ano):
    caminho_arquivo = dataextractionhelper.caminho_arquivo_imunizacoes_csv()  # Returns the path to the CSV file containing the immunization data
dic_valores = csvhelper.processar_csv_para_dicionario_lista_de_dados(caminho_arquivo, 0)
return listhelper.filtra_converte_dicionario_para_serie(dic_valores, codigo_uf, lista_ano)
"""Retorna a lista de valores da mortalidade no respectivo código uf"""
def processa_dados_mortalidade_tardia(codigo_uf, lista_ano):
    caminho_arquivo = dataextractionhelper.caminho_arquivo_mortalidade_tardia_csv()  # Returns the path to the CSV file containing the late-mortality data
dic_valores = csvhelper.processar_csv_para_dicionario_lista_de_dados(caminho_arquivo, 0)
return listhelper.filtra_converte_dicionario_para_serie(dic_valores, codigo_uf, lista_ano)
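# Usage sketch (the UF value below is hypothetical; the CSV paths are resolved by the
# helper modules imported above):
#
#   grafico = build('SP')
#   # grafico is a Graph holding the immunization series, the late-mortality series,
#   # the year axis and the axis labels set in build().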
| 57.62963 | 142 | 0.813625 |
af8742c426f75572f6cc4e09b5bbab22eb6cbd44 | 5,056 | py | Python | iotronic/api/hooks.py | Zakaria-Ben/iotronic | 646cd053366d4e5cd3c675eb537cda8348761abd | ["Apache-2.0"] | 3 | 2016-05-13T16:00:19.000Z | 2021-07-18T12:01:08.000Z | iotronic/api/hooks.py | Zakaria-Ben/iotronic | 646cd053366d4e5cd3c675eb537cda8348761abd | ["Apache-2.0"] | null | null | null | iotronic/api/hooks.py | Zakaria-Ben/iotronic | 646cd053366d4e5cd3c675eb537cda8348761abd | ["Apache-2.0"] | 1 | 2015-07-23T15:40:05.000Z | 2015-07-23T15:40:05.000Z |
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from pecan import hooks
from six.moves import http_client
from iotronic.common import context
from iotronic.common import policy
from iotronic.conductor import rpcapi
from iotronic.db import api as dbapi
LOG = log.getLogger(__name__)
CHECKED_DEPRECATED_POLICY_ARGS = False
class ConfigHook(hooks.PecanHook):
"""Attach the config object to the request so controllers can get to it."""
def before(self, state):
state.request.cfg = cfg.CONF
class DBHook(hooks.PecanHook):
"""Attach the dbapi object to the request so controllers can get to it."""
def before(self, state):
state.request.dbapi = dbapi.get_instance()
class ContextHook(hooks.PecanHook):
"""Configures a request context and attaches it to the request."""
def __init__(self, public_api_routes):
self.public_api_routes = public_api_routes
super(ContextHook, self).__init__()
def before(self, state):
headers = state.request.headers
is_public_api = state.request.environ.get(
'is_public_api', False)
ctx = context.RequestContext.from_environ(
state.request.environ,
is_public_api=is_public_api,
project_id=headers.get('X-Project-Id'),
user_id=headers.get('X-User-Id'),
)
# Do not pass any token with context for noauth mode
if cfg.CONF.auth_strategy == 'noauth':
ctx.auth_token = None
creds = ctx.to_policy_values()
is_admin = policy.check('is_admin', creds, creds)
ctx.is_admin = is_admin
state.request.context = ctx
def after(self, state):
if state.request.context == {}:
# An incorrect url path will not create RequestContext
return
# NOTE(lintan): RequestContext will generate a request_id if no one
# passing outside, so it always contain a request_id.
request_id = state.request.context.request_id
state.response.headers['Openstack-Request-Id'] = request_id
class RPCHook(hooks.PecanHook):
"""Attach the rpcapi object to the request so controllers can get to it."""
def before(self, state):
state.request.rpcapi = rpcapi.ConductorAPI()
class NoExceptionTracebackHook(hooks.PecanHook):
"""Workaround rpc.common: deserialize_remote_exception.
deserialize_remote_exception builds rpc exception traceback into error
message which is then sent to the client. Such behavior is a security
concern so this hook is aimed to cut-off traceback from the error message.
"""
# NOTE(max_lobur): 'after' hook used instead of 'on_error' because
# 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
# catches and handles all the errors, so 'on_error' dedicated for unhandled
# exceptions never fired.
def after(self, state):
# Omit empty body. Some errors may not have body at this level yet.
if not state.response.body:
return
# Do nothing if there is no error.
# Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
# an error.
if (http_client.OK <= state.response.status_int <
http_client.BAD_REQUEST):
return
json_body = state.response.json
# Do not remove traceback when traceback config is set
if cfg.CONF.debug_tracebacks_in_api:
return
faultstring = json_body.get('faultstring')
traceback_marker = 'Traceback (most recent call last):'
if faultstring and traceback_marker in faultstring:
# Cut-off traceback.
faultstring = faultstring.split(traceback_marker, 1)[0]
# Remove trailing newlines and spaces if any.
json_body['faultstring'] = faultstring.rstrip()
# Replace the whole json. Cannot change original one because it's
# generated on the fly.
state.response.json = json_body
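# Illustrative only (not part of the hook): the cut-off above is a plain string split
# on the traceback marker, e.g.:
#
#   faultstring = 'Node not found\nTraceback (most recent call last):\n  File ...'
#   faultstring.split('Traceback (most recent call last):', 1)[0].rstrip()
#   # -> 'Node not found'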
class PublicUrlHook(hooks.PecanHook):
"""Attach the right public_url to the request.
Attach the right public_url to the request so resources can create
links even when the API service is behind a proxy or SSL terminator.
"""
def before(self, state):
state.request.public_url = (cfg.CONF.api.public_endpoint or
state.request.host_url)
| 34.630137 | 79 | 0.6786 |
4427f8e3c5b00cdaf23b86c33710416ae9b1a54f | 409 | py | Python | Data Structures and Algorithms/Edabit Algo Solutions/EASY PROBLEMS/SquareEveryDigit.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | ["MIT"] | 1 | 2022-01-22T18:19:07.000Z | 2022-01-22T18:19:07.000Z | Data Structures and Algorithms/Edabit Algo Solutions/EASY PROBLEMS/SquareEveryDigit.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | ["MIT"] | null | null | null | Data Structures and Algorithms/Edabit Algo Solutions/EASY PROBLEMS/SquareEveryDigit.py | akkik04/Python-DataStructures-and-Algorithms | 8db63173218e5a9205dbb325935c71fec93b695c | ["MIT"] | null | null | null |
# SQUARE EVERY DIGIT EDABIT SOLUTION:
def square_digits(n):
# creating a string to add the squares into.
l = " "
# creating a for-loop to iterate for the string version of 'n'.
for i in str(n):
# code to add the string version of the square into the created string.
l += str(int(i) ** 2)
# returning the integer version of the string.
return int(l)
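# Example: square_digits(9119) returns 811181, because 81, 1, 1 and 81 are the squares
# of the digits 9, 1, 1 and 9 concatenated in order.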
| 25.5625 | 80 | 0.613692 |
53a12fcad7e56b266b67fd42db4fce1b3858da18 | 18,557 | py | Python | gluon/packages/dal/pydal/adapters/google.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | ["BSD-3-Clause"] | 2 | 2019-10-18T23:04:22.000Z | 2019-10-24T04:03:10.000Z | gluon/packages/dal/pydal/adapters/google.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | ["BSD-3-Clause"] | 1 | 2021-06-01T22:32:25.000Z | 2021-06-01T22:32:25.000Z | gluon/packages/dal/pydal/adapters/google.py | jessicadelrio/HandyHouse | 058e8981da850790c84f990fd2a3bbcf9aa695cc | ["BSD-3-Clause"] | 3 | 2018-12-08T23:59:17.000Z | 2019-02-13T23:04:38.000Z |
import os
import re
from .._compat import pjoin
from .._globals import THREAD_LOCAL
from .._gae import gae, ndb, rdbms, namespace_manager, classobj, NDBPolyModel
from ..migrator import InDBMigrator
from ..helpers.classes import FakeDriver, SQLCustomType, SQLALL, Reference
from ..helpers.gae import NDBDecimalProperty
from ..helpers.methods import use_common_filters, xorify
from ..objects import Table, Field, Expression, Query
from .base import NoSQLAdapter
from .mysql import MySQL
from .postgres import PostgrePsyco
from . import adapters, with_connection_or_raise
class GoogleMigratorMixin(object):
migrator_cls = InDBMigrator
@adapters.register_for('google:sql')
class GoogleSQL(GoogleMigratorMixin, MySQL):
uploads_in_blob = True
REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')
def _find_work_folder(self):
super(GoogleSQL, self)._find_work_folder()
if os.path.isabs(self.folder) and self.folder.startswith(os.getcwd()):
self.folder = os.path.relpath(self.folder, os.getcwd())
def _initialize_(self, do_connect):
super(GoogleSQL, self)._initialize_(do_connect)
self.folder = self.folder or pjoin(
'$HOME', THREAD_LOCAL._pydal_folder_.split(
os.sep+'applications'+os.sep, 1)[1])
ruri = self.uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
self.driver_args['instance'] = self.credential_decoder(
m.group('instance'))
self.dbstring = self.credential_decoder(m.group('db'))
self.createdb = self.adapter_args.get('createdb', True)
if not self.createdb:
self.driver_args['database'] = self.dbstring
def find_driver(self):
self.driver = "google"
def connector(self):
return rdbms.connect(**self.driver_args)
def after_connection(self):
if self.createdb:
self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
self.execute('USE %s' % self.dbstring)
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
@with_connection_or_raise
def execute(self, *args, **kwargs):
command = self.filter_sql_command(args[0]).decode('utf8')
handlers = self._build_handlers_for_execution()
for handler in handlers:
handler.before_execute(command)
rv = self.cursor.execute(command, *args[1:], **kwargs)
for handler in handlers:
handler.after_execute(command)
return rv
def clear_cache(self):
ndb.get_context().clear_cache()
def ignore_cache_for(self, entities=None):
entities = entities or []
ndb.get_context().set_cache_policy(
lambda key: key.kind() not in entities)
# based on this: https://cloud.google.com/appengine/docs/standard/python/cloud-sql/
@adapters.register_for('google:MySQLdb')
class GoogleMySQL(GoogleMigratorMixin, MySQL):
uploads_in_blob = True
drivers = ('MySQLdb',)
def _find_work_folder(self):
super(GoogleMySQL, self)._find_work_folder()
if os.path.isabs(self.folder) and self.folder.startswith(os.getcwd()):
self.folder = os.path.relpath(self.folder, os.getcwd())
def clear_cache(self):
ndb.get_context().clear_cache()
def ignore_cache_for(self, entities=None):
entities = entities or []
ndb.get_context().set_cache_policy(
lambda key: key.kind() not in entities)
def after_connection(self):
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES,TRADITIONAL';")
@adapters.register_for('google:psycopg2')
class GooglePostgres(GoogleMigratorMixin, PostgrePsyco):
uploads_in_blob = True
drivers = ('psycopg2',)
def _find_work_folder(self):
super(GooglePostgres, self)._find_work_folder()
if os.path.isabs(self.folder) and self.folder.startswith(os.getcwd()):
self.folder = os.path.relpath(self.folder, os.getcwd())
def clear_cache(self):
ndb.get_context().clear_cache()
def ignore_cache_for(self, entities=None):
entities = entities or []
ndb.get_context().set_cache_policy(
lambda key: key.kind() not in entities)
@adapters.register_for('google:datastore', 'google:datastore+ndb')
class GoogleDatastore(NoSQLAdapter):
dbengine = "google:datastore"
REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
def _initialize_(self, do_connect):
super(GoogleDatastore, self)._initialize_(do_connect)
match = self.REGEX_NAMESPACE.match(self.uri)
if match:
namespace_manager.set_namespace(match.group('namespace'))
self.ndb_settings = self.adapter_args.get('ndb_settings')
def find_driver(self):
pass
def connector(self):
return FakeDriver()
def create_table(self, table, migrate=True, fake_migrate=False,
polymodel=None):
myfields = {}
for field in table:
if isinstance(polymodel, Table) and \
field.name in polymodel.fields():
continue
attr = {}
if isinstance(field.custom_qualifier, dict):
                # These are custom properties to add to the GAE field declaration.
attr = field.custom_qualifier
field_type = field.type
if isinstance(field_type, SQLCustomType):
ftype = self.types[
field_type.native or field_type.type](**attr)
elif isinstance(field_type, ndb.Property):
ftype = field_type
elif field_type.startswith('id'):
continue
elif field_type.startswith('decimal'):
precision, scale = field_type[7:].strip('()').split(',')
precision = int(precision)
scale = int(scale)
dec_cls = NDBDecimalProperty
ftype = dec_cls(precision, scale, **attr)
elif field_type.startswith('reference'):
if field.notnull:
attr = dict(required=True)
ftype = self.types[field_type[:9]](**attr)
elif field_type.startswith('list:reference'):
if field.notnull:
attr['required'] = True
ftype = self.types[field_type[:14]](**attr)
elif field_type.startswith('list:'):
ftype = self.types[field_type](**attr)
elif field_type not in self.types or not self.types[field_type]:
raise SyntaxError('Field: unknown field type: %s' % field_type)
else:
ftype = self.types[field_type](**attr)
myfields[field.name] = ftype
if not polymodel:
model_cls = ndb.Model
table._tableobj = classobj(
table._tablename, (model_cls, ), myfields)
# Set NDB caching variables
if self.ndb_settings and (table._tablename in self.ndb_settings):
for k, v in self.ndb_settings.iteritems():
setattr(table._tableobj, k, v)
elif polymodel == True:
pm_cls = NDBPolyModel
table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
elif isinstance(polymodel, Table):
table._tableobj = classobj(
table._tablename, (polymodel._tableobj, ), myfields)
else:
raise SyntaxError(
"polymodel must be None, True, a table or a tablename")
return None
def _expand(self, expression, field_type=None, query_env={}):
if expression is None:
return None
elif isinstance(expression, Field):
if expression.type in ('text', 'blob', 'json'):
raise SyntaxError(
'AppEngine does not index by: %s' % expression.type)
return expression.name
elif isinstance(expression, (Expression, Query)):
if expression.second is not None:
return expression.op(expression.first, expression.second,
query_env=query_env)
elif expression.first is not None:
return expression.op(expression.first, query_env=query_env)
else:
return expression.op()
elif field_type:
return self.represent(expression, field_type)
elif isinstance(expression, (list, tuple)):
return ','.join([
self.represent(item, field_type) for item in expression])
elif hasattr(expression, "_FilterNode__name"):
# check for _FilterNode__name to avoid explicit
# import of FilterNode
return expression
else:
raise NotImplementedError
def _add_operators_to_parsed_row(self, rid, table, row):
row.gae_item = rid
lid = rid.key.id()
row.id = lid
super(GoogleDatastore, self)._add_operators_to_parsed_row(
lid, table, row)
def represent(self, obj, field_type, tablename=None):
if isinstance(obj, ndb.Key):
return obj
if field_type == 'id' and tablename:
if isinstance(obj, list):
return [
self.represent(item, field_type, tablename)
for item in obj]
elif obj is None:
return None
else:
return ndb.Key(tablename, long(obj))
if isinstance(obj, (Expression, Field)):
raise SyntaxError("non supported on GAE")
if isinstance(field_type, gae.Property):
return obj
return super(GoogleDatastore, self).represent(obj, field_type)
def truncate(self, table, mode=''):
self.db(self.id_query(table)).delete()
def select_raw(self, query, fields=None, attributes=None,
count_only=False):
db = self.db
fields = fields or []
attributes = attributes or {}
args_get = attributes.get
new_fields = []
for item in fields:
if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if query:
table = self.get_table(query)
elif fields:
table = fields[0].table
query = db._adapter.id_query(fields[0].table)
else:
raise SyntaxError("Unable to determine the table")
if query:
if use_common_filters(query):
query = self.common_filter(query, [table])
#tableobj is a GAE/NDB Model class (or subclass)
tableobj = table._tableobj
filters = self.expand(query)
## DETERMINE PROJECTION
projection = None
if len(table.fields) == len(fields):
# getting all fields, not a projection query
projection = None
elif args_get('projection') == True:
projection = []
for f in fields:
if f.type in ['text', 'blob', 'json']:
raise SyntaxError(
"text and blob field types not allowed in " +
"projection queries")
else:
projection.append(f)
elif args_get('filterfields') is True:
projection = []
for f in fields:
projection.append(f)
        # Real projection queries can't include 'id';
        # it will be added to the result later.
if projection and args_get('projection') == True:
query_projection = [f.name for f in projection
if f.name != table._id.name]
else:
query_projection = None
## DONE WITH PROJECTION
cursor = args_get('reusecursor')
cursor = cursor if isinstance(cursor, str) else None
qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)
if filters == None:
items = tableobj.query(default_options=qo)
elif getattr(filters, 'filter_all', None):
items = []
elif (getattr(filters, '_FilterNode__value', None) and
getattr(filters, '_FilterNode__name', None) == '__key__' and
getattr(filters, '_FilterNode__opsymbol', None) == '='):
item = ndb.Key.from_old_key(getattr(filters, '_FilterNode__value')).get()
items = [item] if item else []
else:
items = tableobj.query(filters, default_options=qo)
if count_only:
items = [len(items) if isinstance(items, list) else items.count()]
elif not isinstance(items, list):
if args_get('left', None):
raise SyntaxError('Set: no left join in appengine')
if args_get('groupby', None):
raise SyntaxError('Set: no groupby in appengine')
orderby = args_get('orderby', False)
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
if isinstance(orderby, Expression):
orderby = self.expand(orderby)
orders = orderby.split(', ')
tbl = tableobj
for order in orders:
order = str(order)
desc = order[:1] == '-'
name = order[1 if desc else 0:].split('.')[-1]
if name == 'id':
o = -tbl._key if desc else tbl._key
else:
o = -getattr(tbl, name) if desc else getattr(tbl, name)
items = items.order(o)
if args_get('limitby', None):
(lmin, lmax) = attributes['limitby']
limit = lmax-lmin
fetch_args = {'offset': lmin, 'keys_only': True}
keys, cursor, more = items.fetch_page(limit, **fetch_args)
items = ndb.get_multi(keys)
# cursor is only useful if there was a limit and we
# didn't return all results
if args_get('reusecursor'):
db['_lastcursor'] = cursor
return (items, table, projection or [f for f in table])
def select(self, query, fields, attributes):
"""
This is the GAE version of select. Some notes to consider:
- 'nativeRef' is a magical fieldname used for self references
on GAE
- optional attribute 'projection' when set to True will trigger
use of the GAE projection queries. note that there are rules for
what is accepted imposed by GAE: each field must be indexed,
projection queries cannot contain blob or text fields, and you
cannot use == and also select that same field.
see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
- optional attribute 'filterfields' when set to True web2py will
only parse the explicitly listed fields into the Rows object,
even though all fields are returned in the query. This can be
used to reduce memory usage in cases where true projection
queries are not usable.
- optional attribute 'reusecursor' allows use of cursor with
queries that have the limitby attribute. Set the attribute to
True for the first query, set it to the value of
db['_lastcursor'] to continue a previous query. The user must
save the cursor value between requests, and the filters must be
identical. It is up to the user to follow google's limitations:
https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
"""
items, table, fields = self.select_raw(query, fields, attributes)
rows = [
[
(t.name == table._id.name and item) or
(t.name == 'nativeRef' and item) or getattr(item, t.name)
for t in fields
] for item in items]
colnames = [t.longname for t in fields]
processor = attributes.get('processor', self.parse)
return processor(rows, fields, colnames, False)
def count(self, query, distinct=None, limit=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
items, table, fields = self.select_raw(query, count_only=True)
return items[0]
def delete(self, table, query):
"""
This function was changed on 2010-05-04 because according to
http://code.google.com/p/googleappengine/issues/detail?id=3119
GAE no longer supports deleting more than 1000 records.
"""
items, table, fields = self.select_raw(query)
# items can be one item or a query
if not isinstance(items, list):
# use a keys_only query to ensure that this runs as a datastore
# small operations
leftitems = items.fetch(1000, keys_only=True)
counter = 0
while len(leftitems):
counter += len(leftitems)
ndb.delete_multi(leftitems)
leftitems = items.fetch(1000, keys_only=True)
else:
counter = len(items)
ndb.delete_multi([item.key for item in items])
return counter
def update(self, table, query, update_fields):
items, table, fields = self.select_raw(query)
counter = 0
for item in items:
for field, value in update_fields:
setattr(item, field.name, self.represent(value, field.type))
item.put()
counter += 1
self.db.logger.info(str(counter))
return counter
def insert(self, table, fields):
dfields = dict((f.name, self.represent(v, f.type)) for f, v in fields)
tmp = table._tableobj(**dfields)
tmp.put()
key = tmp.key
rid = Reference(key.id())
rid._table, rid._record, rid._gaekey = table, None, key
return rid
def bulk_insert(self, table, items):
parsed_items = []
for item in items:
dfields = dict(
(f.name, self.represent(v, f.type)) for f, v in item)
parsed_items.append(table._tableobj(**dfields))
return ndb.put_multi(parsed_items)
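# A minimal usage sketch for the datastore adapter above (table and field names are
# hypothetical, standard web2py/pyDAL style); it exercises the 'projection', 'limitby'
# and 'reusecursor' attributes documented in GoogleDatastore.select:
#
#   db = DAL('google:datastore')
#   db.define_table('person', Field('name'), Field('age', 'integer'))
#   rows = db(db.person.age > 18).select(
#       db.person.name,
#       projection=True,      # GAE projection query; 'name' must be indexed
#       limitby=(0, 20),
#       reusecursor=True)     # cursor saved in db['_lastcursor'] for the next page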
| 40.253796 | 100 | 0.592014 |
c9a0c593acbe1e50fa9ff62c2bddaab17c3b197f | 52,110 | py | Python | kuber/v1_20/rbac_v1alpha1.py | datalayer-externals/kuber | 4d577950ce7d1be2b882fbe66827dc3d7e67b350 | ["MIT"] | 1 | 2019-06-11T04:57:34.000Z | 2019-06-11T04:57:34.000Z | kuber/v1_20/rbac_v1alpha1.py | datalayer-externals/kuber | 4d577950ce7d1be2b882fbe66827dc3d7e67b350 | ["MIT"] | 1 | 2019-05-05T22:08:13.000Z | 2019-05-06T11:43:32.000Z | kuber/v1_20/rbac_v1alpha1.py | datalayer-externals/kuber | 4d577950ce7d1be2b882fbe66827dc3d7e67b350 | ["MIT"] | 2 | 2021-05-08T14:47:56.000Z | 2021-10-15T21:47:04.000Z |
import typing # noqa: F401
from kubernetes import client # noqa: F401
from kuber import kube_api as _kube_api # noqa: F401
from kuber import definitions as _kuber_definitions # noqa: F401
from kuber import _types # noqa: F401
from kuber.v1_20.meta_v1 import LabelSelector # noqa: F401
from kuber.v1_20.meta_v1 import ListMeta # noqa: F401
from kuber.v1_20.meta_v1 import ObjectMeta # noqa: F401
class AggregationRule(_kuber_definitions.Definition):
"""
AggregationRule describes how to locate ClusterRoles to
aggregate into the ClusterRole
"""
def __init__(
self,
cluster_role_selectors: typing.List["LabelSelector"] = None,
):
"""Create AggregationRule instance."""
super(AggregationRule, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="AggregationRule"
)
self._properties = {
"clusterRoleSelectors": cluster_role_selectors
if cluster_role_selectors is not None
else [],
}
self._types = {
"clusterRoleSelectors": (list, LabelSelector),
}
@property
def cluster_role_selectors(self) -> typing.List["LabelSelector"]:
"""
ClusterRoleSelectors holds a list of selectors which will be
used to find ClusterRoles and create the rules. If any of
the selectors match, then the ClusterRole's permissions will
be added
"""
return typing.cast(
typing.List["LabelSelector"],
self._properties.get("clusterRoleSelectors"),
)
@cluster_role_selectors.setter
def cluster_role_selectors(
self, value: typing.Union[typing.List["LabelSelector"], typing.List[dict]]
):
"""
ClusterRoleSelectors holds a list of selectors which will be
used to find ClusterRoles and create the rules. If any of
the selectors match, then the ClusterRole's permissions will
be added
"""
cleaned: typing.List[LabelSelector] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
LabelSelector,
LabelSelector().from_dict(item),
)
cleaned.append(typing.cast(LabelSelector, item))
self._properties["clusterRoleSelectors"] = cleaned
def __enter__(self) -> "AggregationRule":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class ClusterRole(_kuber_definitions.Resource):
"""
ClusterRole is a cluster level, logical grouping of
PolicyRules that can be referenced as a unit by a
RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in
favor of rbac.authorization.k8s.io/v1 ClusterRole, and will
no longer be served in v1.22.
"""
def __init__(
self,
aggregation_rule: "AggregationRule" = None,
metadata: "ObjectMeta" = None,
rules: typing.List["PolicyRule"] = None,
):
"""Create ClusterRole instance."""
super(ClusterRole, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="ClusterRole"
)
self._properties = {
"aggregationRule": aggregation_rule
if aggregation_rule is not None
else AggregationRule(),
"metadata": metadata if metadata is not None else ObjectMeta(),
"rules": rules if rules is not None else [],
}
self._types = {
"aggregationRule": (AggregationRule, None),
"apiVersion": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"rules": (list, PolicyRule),
}
@property
def aggregation_rule(self) -> "AggregationRule":
"""
AggregationRule is an optional field that describes how to
build the Rules for this ClusterRole. If AggregationRule is
set, then the Rules are controller managed and direct
changes to Rules will be stomped by the controller.
"""
return typing.cast(
"AggregationRule",
self._properties.get("aggregationRule"),
)
@aggregation_rule.setter
def aggregation_rule(self, value: typing.Union["AggregationRule", dict]):
"""
AggregationRule is an optional field that describes how to
build the Rules for this ClusterRole. If AggregationRule is
set, then the Rules are controller managed and direct
changes to Rules will be stomped by the controller.
"""
if isinstance(value, dict):
value = typing.cast(
AggregationRule,
AggregationRule().from_dict(value),
)
self._properties["aggregationRule"] = value
@property
def metadata(self) -> "ObjectMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def rules(self) -> typing.List["PolicyRule"]:
"""
Rules holds all the PolicyRules for this ClusterRole
"""
return typing.cast(
typing.List["PolicyRule"],
self._properties.get("rules"),
)
@rules.setter
def rules(self, value: typing.Union[typing.List["PolicyRule"], typing.List[dict]]):
"""
Rules holds all the PolicyRules for this ClusterRole
"""
cleaned: typing.List[PolicyRule] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
PolicyRule,
PolicyRule().from_dict(item),
)
cleaned.append(typing.cast(PolicyRule, item))
self._properties["rules"] = cleaned
def create_resource(self, namespace: "str" = None):
"""
Creates the ClusterRole in the currently
configured Kubernetes cluster.
"""
names = ["create_namespaced_cluster_role", "create_cluster_role"]
_kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
def replace_resource(self, namespace: "str" = None):
"""
Replaces the ClusterRole in the currently
configured Kubernetes cluster.
"""
names = ["replace_namespaced_cluster_role", "replace_cluster_role"]
_kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def patch_resource(self, namespace: "str" = None):
"""
Patches the ClusterRole in the currently
configured Kubernetes cluster.
"""
names = ["patch_namespaced_cluster_role", "patch_cluster_role"]
_kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def get_resource_status(self, namespace: "str" = None):
"""This resource does not have a status."""
pass
def read_resource(self, namespace: str = None):
"""
Reads the ClusterRole from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_cluster_role",
"read_cluster_role",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the ClusterRole from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_cluster_role",
"delete_cluster_role",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "ClusterRole":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
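# A short usage sketch for the resource class above (names and rules are hypothetical);
# kuber resources are configured in code and pushed with create_resource():
#
#   role = ClusterRole()
#   role.metadata.name = 'pod-reader'
#   role.rules = [{'apiGroups': [''], 'resources': ['pods'],
#                  'verbs': ['get', 'list', 'watch']}]
#   role.create_resource()   # talks to the cluster via RbacAuthorizationV1alpha1Api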
class ClusterRoleBinding(_kuber_definitions.Resource):
"""
ClusterRoleBinding references a ClusterRole, but not contain
it. It can reference a ClusterRole in the global namespace,
and adds who information via Subject. Deprecated in v1.17 in
favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding,
and will no longer be served in v1.22.
"""
def __init__(
self,
metadata: "ObjectMeta" = None,
role_ref: "RoleRef" = None,
subjects: typing.List["Subject"] = None,
):
"""Create ClusterRoleBinding instance."""
super(ClusterRoleBinding, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="ClusterRoleBinding"
)
self._properties = {
"metadata": metadata if metadata is not None else ObjectMeta(),
"roleRef": role_ref if role_ref is not None else RoleRef(),
"subjects": subjects if subjects is not None else [],
}
self._types = {
"apiVersion": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"roleRef": (RoleRef, None),
"subjects": (list, Subject),
}
@property
def metadata(self) -> "ObjectMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def role_ref(self) -> "RoleRef":
"""
RoleRef can only reference a ClusterRole in the global
namespace. If the RoleRef cannot be resolved, the Authorizer
must return an error.
"""
return typing.cast(
"RoleRef",
self._properties.get("roleRef"),
)
@role_ref.setter
def role_ref(self, value: typing.Union["RoleRef", dict]):
"""
RoleRef can only reference a ClusterRole in the global
namespace. If the RoleRef cannot be resolved, the Authorizer
must return an error.
"""
if isinstance(value, dict):
value = typing.cast(
RoleRef,
RoleRef().from_dict(value),
)
self._properties["roleRef"] = value
@property
def subjects(self) -> typing.List["Subject"]:
"""
Subjects holds references to the objects the role applies
to.
"""
return typing.cast(
typing.List["Subject"],
self._properties.get("subjects"),
)
@subjects.setter
def subjects(self, value: typing.Union[typing.List["Subject"], typing.List[dict]]):
"""
Subjects holds references to the objects the role applies
to.
"""
cleaned: typing.List[Subject] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
Subject,
Subject().from_dict(item),
)
cleaned.append(typing.cast(Subject, item))
self._properties["subjects"] = cleaned
def create_resource(self, namespace: "str" = None):
"""
Creates the ClusterRoleBinding in the currently
configured Kubernetes cluster.
"""
names = [
"create_namespaced_cluster_role_binding",
"create_cluster_role_binding",
]
_kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
def replace_resource(self, namespace: "str" = None):
"""
Replaces the ClusterRoleBinding in the currently
configured Kubernetes cluster.
"""
names = [
"replace_namespaced_cluster_role_binding",
"replace_cluster_role_binding",
]
_kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def patch_resource(self, namespace: "str" = None):
"""
Patches the ClusterRoleBinding in the currently
configured Kubernetes cluster.
"""
names = ["patch_namespaced_cluster_role_binding", "patch_cluster_role_binding"]
_kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def get_resource_status(self, namespace: "str" = None):
"""This resource does not have a status."""
pass
def read_resource(self, namespace: str = None):
"""
Reads the ClusterRoleBinding from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_cluster_role_binding",
"read_cluster_role_binding",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the ClusterRoleBinding from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_cluster_role_binding",
"delete_cluster_role_binding",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "ClusterRoleBinding":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class ClusterRoleBindingList(_kuber_definitions.Collection):
"""
ClusterRoleBindingList is a collection of
ClusterRoleBindings. Deprecated in v1.17 in favor of
rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will
no longer be served in v1.22.
"""
def __init__(
self,
items: typing.List["ClusterRoleBinding"] = None,
metadata: "ListMeta" = None,
):
"""Create ClusterRoleBindingList instance."""
super(ClusterRoleBindingList, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1",
kind="ClusterRoleBindingList",
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, ClusterRoleBinding),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["ClusterRoleBinding"]:
"""
Items is a list of ClusterRoleBindings
"""
return typing.cast(
typing.List["ClusterRoleBinding"],
self._properties.get("items"),
)
@items.setter
def items(
self, value: typing.Union[typing.List["ClusterRoleBinding"], typing.List[dict]]
):
"""
Items is a list of ClusterRoleBindings
"""
cleaned: typing.List[ClusterRoleBinding] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
ClusterRoleBinding,
ClusterRoleBinding().from_dict(item),
)
cleaned.append(typing.cast(ClusterRoleBinding, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "ClusterRoleBindingList":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class ClusterRoleList(_kuber_definitions.Collection):
"""
ClusterRoleList is a collection of ClusterRoles. Deprecated
in v1.17 in favor of rbac.authorization.k8s.io/v1
ClusterRoles, and will no longer be served in v1.22.
"""
def __init__(
self,
items: typing.List["ClusterRole"] = None,
metadata: "ListMeta" = None,
):
"""Create ClusterRoleList instance."""
super(ClusterRoleList, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="ClusterRoleList"
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, ClusterRole),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["ClusterRole"]:
"""
Items is a list of ClusterRoles
"""
return typing.cast(
typing.List["ClusterRole"],
self._properties.get("items"),
)
@items.setter
def items(self, value: typing.Union[typing.List["ClusterRole"], typing.List[dict]]):
"""
Items is a list of ClusterRoles
"""
cleaned: typing.List[ClusterRole] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
ClusterRole,
ClusterRole().from_dict(item),
)
cleaned.append(typing.cast(ClusterRole, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "ClusterRoleList":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class PolicyRule(_kuber_definitions.Definition):
"""
PolicyRule holds information that describes a policy rule,
but does not contain information about who the rule applies
to or which namespace the rule applies to.
"""
def __init__(
self,
api_groups: typing.List[str] = None,
non_resource_urls: typing.List[str] = None,
resource_names: typing.List[str] = None,
resources: typing.List[str] = None,
verbs: typing.List[str] = None,
):
"""Create PolicyRule instance."""
super(PolicyRule, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="PolicyRule"
)
self._properties = {
"apiGroups": api_groups if api_groups is not None else [],
"nonResourceURLs": non_resource_urls
if non_resource_urls is not None
else [],
"resourceNames": resource_names if resource_names is not None else [],
"resources": resources if resources is not None else [],
"verbs": verbs if verbs is not None else [],
}
self._types = {
"apiGroups": (list, str),
"nonResourceURLs": (list, str),
"resourceNames": (list, str),
"resources": (list, str),
"verbs": (list, str),
}
@property
def api_groups(self) -> typing.List[str]:
"""
APIGroups is the name of the APIGroup that contains the
resources. If multiple API groups are specified, any action
requested against one of the enumerated resources in any API
group will be allowed.
"""
return typing.cast(
typing.List[str],
self._properties.get("apiGroups"),
)
@api_groups.setter
def api_groups(self, value: typing.List[str]):
"""
APIGroups is the name of the APIGroup that contains the
resources. If multiple API groups are specified, any action
requested against one of the enumerated resources in any API
group will be allowed.
"""
self._properties["apiGroups"] = value
@property
def non_resource_urls(self) -> typing.List[str]:
"""
NonResourceURLs is a set of partial urls that a user should
have access to. *s are allowed, but only as the full, final
step in the path Since non-resource URLs are not namespaced,
this field is only applicable for ClusterRoles referenced
from a ClusterRoleBinding. Rules can either apply to API
resources (such as "pods" or "secrets") or non-resource URL
paths (such as "/api"), but not both.
"""
return typing.cast(
typing.List[str],
self._properties.get("nonResourceURLs"),
)
@non_resource_urls.setter
def non_resource_urls(self, value: typing.List[str]):
"""
NonResourceURLs is a set of partial urls that a user should
have access to. *s are allowed, but only as the full, final
step in the path Since non-resource URLs are not namespaced,
this field is only applicable for ClusterRoles referenced
from a ClusterRoleBinding. Rules can either apply to API
resources (such as "pods" or "secrets") or non-resource URL
paths (such as "/api"), but not both.
"""
self._properties["nonResourceURLs"] = value
@property
def resource_names(self) -> typing.List[str]:
"""
ResourceNames is an optional white list of names that the
rule applies to. An empty set means that everything is
allowed.
"""
return typing.cast(
typing.List[str],
self._properties.get("resourceNames"),
)
@resource_names.setter
def resource_names(self, value: typing.List[str]):
"""
ResourceNames is an optional white list of names that the
rule applies to. An empty set means that everything is
allowed.
"""
self._properties["resourceNames"] = value
@property
def resources(self) -> typing.List[str]:
"""
Resources is a list of resources this rule applies to.
ResourceAll represents all resources.
"""
return typing.cast(
typing.List[str],
self._properties.get("resources"),
)
@resources.setter
def resources(self, value: typing.List[str]):
"""
Resources is a list of resources this rule applies to.
ResourceAll represents all resources.
"""
self._properties["resources"] = value
@property
def verbs(self) -> typing.List[str]:
"""
Verbs is a list of Verbs that apply to ALL the ResourceKinds
and AttributeRestrictions contained in this rule. VerbAll
represents all kinds.
"""
return typing.cast(
typing.List[str],
self._properties.get("verbs"),
)
@verbs.setter
def verbs(self, value: typing.List[str]):
"""
Verbs is a list of Verbs that apply to ALL the ResourceKinds
and AttributeRestrictions contained in this rule. VerbAll
represents all kinds.
"""
self._properties["verbs"] = value
def __enter__(self) -> "PolicyRule":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class Role(_kuber_definitions.Resource):
"""
Role is a namespaced, logical grouping of PolicyRules that
can be referenced as a unit by a RoleBinding. Deprecated in
v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and
will no longer be served in v1.22.
"""
def __init__(
self,
metadata: "ObjectMeta" = None,
rules: typing.List["PolicyRule"] = None,
):
"""Create Role instance."""
super(Role, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="Role"
)
self._properties = {
"metadata": metadata if metadata is not None else ObjectMeta(),
"rules": rules if rules is not None else [],
}
self._types = {
"apiVersion": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"rules": (list, PolicyRule),
}
@property
def metadata(self) -> "ObjectMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def rules(self) -> typing.List["PolicyRule"]:
"""
Rules holds all the PolicyRules for this Role
"""
return typing.cast(
typing.List["PolicyRule"],
self._properties.get("rules"),
)
@rules.setter
def rules(self, value: typing.Union[typing.List["PolicyRule"], typing.List[dict]]):
"""
Rules holds all the PolicyRules for this Role
"""
cleaned: typing.List[PolicyRule] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
PolicyRule,
PolicyRule().from_dict(item),
)
cleaned.append(typing.cast(PolicyRule, item))
self._properties["rules"] = cleaned
def create_resource(self, namespace: "str" = None):
"""
Creates the Role in the currently
configured Kubernetes cluster.
"""
names = ["create_namespaced_role", "create_role"]
_kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
def replace_resource(self, namespace: "str" = None):
"""
Replaces the Role in the currently
configured Kubernetes cluster.
"""
names = ["replace_namespaced_role", "replace_role"]
_kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def patch_resource(self, namespace: "str" = None):
"""
Patches the Role in the currently
configured Kubernetes cluster.
"""
names = ["patch_namespaced_role", "patch_role"]
_kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def get_resource_status(self, namespace: "str" = None):
"""This resource does not have a status."""
pass
def read_resource(self, namespace: str = None):
"""
Reads the Role from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_role",
"read_role",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the Role from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_role",
"delete_role",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "Role":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
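# Illustrative usage sketch (not part of the generated module). It assumes the
# ObjectMeta and PolicyRule constructors accept the keyword arguments shown,
# mirroring the Kubernetes API fields; the resource names and verbs are made up.
#
#     pod_reader = Role(
#         metadata=ObjectMeta(name="pod-reader", namespace="default"),
#         rules=[
#             PolicyRule(
#                 api_groups=[""],          # "" selects the core API group
#                 resources=["pods"],
#                 verbs=["get", "list", "watch"],
#             )
#         ],
#     )
#     pod_reader.to_dict()  # plain dict ready to submit to the cluster
#     # pod_reader.create_resource(namespace="default")  # needs a configured client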
class RoleBinding(_kuber_definitions.Resource):
"""
RoleBinding references a role, but does not contain it. It
can reference a Role in the same namespace or a ClusterRole
in the global namespace. It adds who information via
Subjects and namespace information by which namespace it
exists in. RoleBindings in a given namespace only have
effect in that namespace. Deprecated in v1.17 in favor of
rbac.authorization.k8s.io/v1 RoleBinding, and will no longer
be served in v1.22.
"""
def __init__(
self,
metadata: "ObjectMeta" = None,
role_ref: "RoleRef" = None,
subjects: typing.List["Subject"] = None,
):
"""Create RoleBinding instance."""
super(RoleBinding, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="RoleBinding"
)
self._properties = {
"metadata": metadata if metadata is not None else ObjectMeta(),
"roleRef": role_ref if role_ref is not None else RoleRef(),
"subjects": subjects if subjects is not None else [],
}
self._types = {
"apiVersion": (str, None),
"kind": (str, None),
"metadata": (ObjectMeta, None),
"roleRef": (RoleRef, None),
"subjects": (list, Subject),
}
@property
def metadata(self) -> "ObjectMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ObjectMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ObjectMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ObjectMeta,
ObjectMeta().from_dict(value),
)
self._properties["metadata"] = value
@property
def role_ref(self) -> "RoleRef":
"""
RoleRef can reference a Role in the current namespace or a
ClusterRole in the global namespace. If the RoleRef cannot
be resolved, the Authorizer must return an error.
"""
return typing.cast(
"RoleRef",
self._properties.get("roleRef"),
)
@role_ref.setter
def role_ref(self, value: typing.Union["RoleRef", dict]):
"""
RoleRef can reference a Role in the current namespace or a
ClusterRole in the global namespace. If the RoleRef cannot
be resolved, the Authorizer must return an error.
"""
if isinstance(value, dict):
value = typing.cast(
RoleRef,
RoleRef().from_dict(value),
)
self._properties["roleRef"] = value
@property
def subjects(self) -> typing.List["Subject"]:
"""
Subjects holds references to the objects the role applies
to.
"""
return typing.cast(
typing.List["Subject"],
self._properties.get("subjects"),
)
@subjects.setter
def subjects(self, value: typing.Union[typing.List["Subject"], typing.List[dict]]):
"""
Subjects holds references to the objects the role applies
to.
"""
cleaned: typing.List[Subject] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
Subject,
Subject().from_dict(item),
)
cleaned.append(typing.cast(Subject, item))
self._properties["subjects"] = cleaned
def create_resource(self, namespace: "str" = None):
"""
Creates the RoleBinding in the currently
configured Kubernetes cluster.
"""
names = ["create_namespaced_role_binding", "create_role_binding"]
_kube_api.execute(
action="create",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict()},
)
def replace_resource(self, namespace: "str" = None):
"""
Replaces the RoleBinding in the currently
configured Kubernetes cluster.
"""
names = ["replace_namespaced_role_binding", "replace_role_binding"]
_kube_api.execute(
action="replace",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def patch_resource(self, namespace: "str" = None):
"""
Patches the RoleBinding in the currently
configured Kubernetes cluster.
"""
names = ["patch_namespaced_role_binding", "patch_role_binding"]
_kube_api.execute(
action="patch",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"body": self.to_dict(), "name": self.metadata.name},
)
def get_resource_status(self, namespace: "str" = None):
"""This resource does not have a status."""
pass
def read_resource(self, namespace: str = None):
"""
Reads the RoleBinding from the currently configured
Kubernetes cluster and returns the low-level definition object.
"""
names = [
"read_namespaced_role_binding",
"read_role_binding",
]
return _kube_api.execute(
action="read",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name},
)
def delete_resource(
self,
namespace: str = None,
propagation_policy: str = "Foreground",
grace_period_seconds: int = 10,
):
"""
Deletes the RoleBinding from the currently configured
Kubernetes cluster.
"""
names = [
"delete_namespaced_role_binding",
"delete_role_binding",
]
body = client.V1DeleteOptions(
propagation_policy=propagation_policy,
grace_period_seconds=grace_period_seconds,
)
_kube_api.execute(
action="delete",
resource=self,
names=names,
namespace=namespace,
api_client=None,
api_args={"name": self.metadata.name, "body": body},
)
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "RoleBinding":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RoleBindingList(_kuber_definitions.Collection):
"""
RoleBindingList is a collection of RoleBindings Deprecated
in v1.17 in favor of rbac.authorization.k8s.io/v1
RoleBindingList, and will no longer be served in v1.22.
"""
def __init__(
self,
items: typing.List["RoleBinding"] = None,
metadata: "ListMeta" = None,
):
"""Create RoleBindingList instance."""
super(RoleBindingList, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="RoleBindingList"
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, RoleBinding),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["RoleBinding"]:
"""
Items is a list of RoleBindings
"""
return typing.cast(
typing.List["RoleBinding"],
self._properties.get("items"),
)
@items.setter
def items(self, value: typing.Union[typing.List["RoleBinding"], typing.List[dict]]):
"""
Items is a list of RoleBindings
"""
cleaned: typing.List[RoleBinding] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
RoleBinding,
RoleBinding().from_dict(item),
)
cleaned.append(typing.cast(RoleBinding, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "RoleBindingList":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RoleList(_kuber_definitions.Collection):
"""
RoleList is a collection of Roles. Deprecated in v1.17 in
favor of rbac.authorization.k8s.io/v1 RoleList, and will no
longer be served in v1.22.
"""
def __init__(
self,
items: typing.List["Role"] = None,
metadata: "ListMeta" = None,
):
"""Create RoleList instance."""
super(RoleList, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="RoleList"
)
self._properties = {
"items": items if items is not None else [],
"metadata": metadata if metadata is not None else ListMeta(),
}
self._types = {
"apiVersion": (str, None),
"items": (list, Role),
"kind": (str, None),
"metadata": (ListMeta, None),
}
@property
def items(self) -> typing.List["Role"]:
"""
Items is a list of Roles
"""
return typing.cast(
typing.List["Role"],
self._properties.get("items"),
)
@items.setter
def items(self, value: typing.Union[typing.List["Role"], typing.List[dict]]):
"""
Items is a list of Roles
"""
cleaned: typing.List[Role] = []
for item in value:
if isinstance(item, dict):
item = typing.cast(
Role,
Role().from_dict(item),
)
cleaned.append(typing.cast(Role, item))
self._properties["items"] = cleaned
@property
def metadata(self) -> "ListMeta":
"""
Standard object's metadata.
"""
return typing.cast(
"ListMeta",
self._properties.get("metadata"),
)
@metadata.setter
def metadata(self, value: typing.Union["ListMeta", dict]):
"""
Standard object's metadata.
"""
if isinstance(value, dict):
value = typing.cast(
ListMeta,
ListMeta().from_dict(value),
)
self._properties["metadata"] = value
@staticmethod
def get_resource_api(
api_client: client.ApiClient = None, **kwargs
) -> "client.RbacAuthorizationV1alpha1Api":
"""
Returns an instance of the kubernetes API client associated with
this object.
"""
if api_client:
kwargs["apl_client"] = api_client
return client.RbacAuthorizationV1alpha1Api(**kwargs)
def __enter__(self) -> "RoleList":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class RoleRef(_kuber_definitions.Definition):
"""
RoleRef contains information that points to the role being
used
"""
def __init__(
self,
api_group: str = None,
kind: str = None,
name: str = None,
):
"""Create RoleRef instance."""
super(RoleRef, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="RoleRef"
)
self._properties = {
"apiGroup": api_group if api_group is not None else "",
"kind": kind if kind is not None else "",
"name": name if name is not None else "",
}
self._types = {
"apiGroup": (str, None),
"kind": (str, None),
"name": (str, None),
}
@property
def api_group(self) -> str:
"""
APIGroup is the group for the resource being referenced
"""
return typing.cast(
str,
self._properties.get("apiGroup"),
)
@api_group.setter
def api_group(self, value: str):
"""
APIGroup is the group for the resource being referenced
"""
self._properties["apiGroup"] = value
@property
def kind(self) -> str:
"""
Kind is the type of resource being referenced
"""
return typing.cast(
str,
self._properties.get("kind"),
)
@kind.setter
def kind(self, value: str):
"""
Kind is the type of resource being referenced
"""
self._properties["kind"] = value
@property
def name(self) -> str:
"""
Name is the name of resource being referenced
"""
return typing.cast(
str,
self._properties.get("name"),
)
@name.setter
def name(self, value: str):
"""
Name is the name of resource being referenced
"""
self._properties["name"] = value
def __enter__(self) -> "RoleRef":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
class Subject(_kuber_definitions.Definition):
"""
Subject contains a reference to the object or user
identities a role binding applies to. This can either hold
a direct API object reference, or a value for non-objects
such as user and group names.
"""
def __init__(
self,
api_version: str = None,
kind: str = None,
name: str = None,
namespace: str = None,
):
"""Create Subject instance."""
super(Subject, self).__init__(
api_version="rbac.authorization.k8s.io/v1alpha1", kind="Subject"
)
self._properties = {
"apiVersion": api_version if api_version is not None else "",
"kind": kind if kind is not None else "",
"name": name if name is not None else "",
"namespace": namespace if namespace is not None else "",
}
self._types = {
"apiVersion": (str, None),
"kind": (str, None),
"name": (str, None),
"namespace": (str, None),
}
@property
def api_version(self) -> str:
"""
APIVersion holds the API group and version of the referenced
subject. Defaults to "v1" for ServiceAccount subjects.
Defaults to "rbac.authorization.k8s.io/v1alpha1" for User
and Group subjects.
"""
return typing.cast(
str,
self._properties.get("apiVersion"),
)
@api_version.setter
def api_version(self, value: str):
"""
APIVersion holds the API group and version of the referenced
subject. Defaults to "v1" for ServiceAccount subjects.
Defaults to "rbac.authorization.k8s.io/v1alpha1" for User
and Group subjects.
"""
self._properties["apiVersion"] = value
@property
def kind(self) -> str:
"""
Kind of object being referenced. Values defined by this API
group are "User", "Group", and "ServiceAccount". If the
        Authorizer does not recognize the kind value, the
Authorizer should report an error.
"""
return typing.cast(
str,
self._properties.get("kind"),
)
@kind.setter
def kind(self, value: str):
"""
Kind of object being referenced. Values defined by this API
group are "User", "Group", and "ServiceAccount". If the
        Authorizer does not recognize the kind value, the
Authorizer should report an error.
"""
self._properties["kind"] = value
@property
def name(self) -> str:
"""
Name of the object being referenced.
"""
return typing.cast(
str,
self._properties.get("name"),
)
@name.setter
def name(self, value: str):
"""
Name of the object being referenced.
"""
self._properties["name"] = value
@property
def namespace(self) -> str:
"""
        Namespace of the referenced object. If the object kind is
        non-namespaced, such as "User" or "Group", and this value is
        not empty, the Authorizer should report an error.
"""
return typing.cast(
str,
self._properties.get("namespace"),
)
@namespace.setter
def namespace(self, value: str):
"""
        Namespace of the referenced object. If the object kind is
        non-namespaced, such as "User" or "Group", and this value is
        not empty, the Authorizer should report an error.
"""
self._properties["namespace"] = value
def __enter__(self) -> "Subject":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
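# Illustrative usage sketch (not part of the generated module): binding the
# hypothetical "pod-reader" Role above to a user. RoleRef and Subject are the
# definitions in this module; the ObjectMeta keyword arguments are assumptions.
#
#     binding = RoleBinding(
#         metadata=ObjectMeta(name="read-pods", namespace="default"),
#         role_ref=RoleRef(
#             api_group="rbac.authorization.k8s.io",
#             kind="Role",
#             name="pod-reader",
#         ),
#         subjects=[Subject(kind="User", name="jane")],
#     )
#     binding.to_dict()  # plain dict ready to submit to the cluster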
| 30.779681
| 88
| 0.566878
|
a8f7e211b9b0dfb88b31b0c8ecb7c6e746972713
| 2,488
|
py
|
Python
|
doc/conf.py
|
teapot9/fand
|
1bfcbf92ad8536b926d3f6076574437ea55c7406
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
teapot9/fand
|
1bfcbf92ad8536b926d3f6076574437ea55c7406
|
[
"MIT"
] | null | null | null |
doc/conf.py
|
teapot9/fand
|
1bfcbf92ad8536b926d3f6076574437ea55c7406
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import fand
# -- Project information -----------------------------------------------------
project = fand.__name__
copyright = fand.__copyright__
author = fand.__author__
# The full version, including alpha/beta/rc tags
release = fand.__version__
# -- General configuration ---------------------------------------------------
# Use index.rst
master_doc = 'index'
# Mock external libraries not needed for doc build
autodoc_mock_imports = [
'pySMART',
'gpiozero',
'psutil',
]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
# External documentations
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'psutil': ('https://psutil.readthedocs.io/en/latest', None),
'gpiozero': ('https://gpiozero.readthedocs.io/en/latest', None),
}
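# Illustrative note (assumed usage, not required by this config): with the
# mapping above, docstring cross-references such as :class:`psutil.Process` or
# :class:`gpiozero.LED` resolve to the external documentation when building.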
# Use type hints in description
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 30.716049
| 79
| 0.672428
|
be3bc65b5d5cf3e638f1af9d2bcf78dd185877bb
| 1,866
|
py
|
Python
|
test/hummingbot/connector/connector/uniswap/test_uniswap_in_flight_order.py
|
pecuniafinance/hummingbot
|
2cbb19c187a429d3e6000dc938617ca2a1f9f357
|
[
"Apache-2.0"
] | 1
|
2022-03-10T17:24:38.000Z
|
2022-03-10T17:24:38.000Z
|
test/hummingbot/connector/connector/uniswap/test_uniswap_in_flight_order.py
|
pecuniafinance/hummingbot
|
2cbb19c187a429d3e6000dc938617ca2a1f9f357
|
[
"Apache-2.0"
] | null | null | null |
test/hummingbot/connector/connector/uniswap/test_uniswap_in_flight_order.py
|
pecuniafinance/hummingbot
|
2cbb19c187a429d3e6000dc938617ca2a1f9f357
|
[
"Apache-2.0"
] | null | null | null |
from decimal import Decimal
from unittest import TestCase
from hummingbot.connector.connector.uniswap.uniswap_in_flight_order import UniswapInFlightOrder
from hummingbot.core.event.events import OrderType, TradeType
class UniswapInFlightOrderTests(TestCase):
def test_deserialize_order_from_json(self):
json = {
"client_order_id": "OID1",
"exchange_order_id": "EOID",
"trading_pair": "COINALPHA-HBOT",
"order_type": OrderType.LIMIT.name,
"trade_type": TradeType.BUY.name,
"price": "1000.0",
"amount": "1.0",
"executed_amount_base": "0.5",
"executed_amount_quote": "510.0",
"fee_asset": "BNB",
"fee_paid": "10.0",
"last_state": "OPEN",
"creation_timestamp": 1640001112
}
order = UniswapInFlightOrder.from_json(json)
self.assertEqual(json["client_order_id"], order.client_order_id)
self.assertEqual(json["exchange_order_id"], order.exchange_order_id)
self.assertEqual(json["trading_pair"], order.trading_pair)
self.assertEqual(OrderType.LIMIT, order.order_type)
self.assertEqual(TradeType.BUY, order.trade_type)
self.assertEqual(Decimal(json["price"]), order.price)
self.assertEqual(Decimal(json["amount"]), order.amount)
self.assertEqual(Decimal(json["executed_amount_base"]), order.executed_amount_base)
self.assertEqual(Decimal(json["executed_amount_quote"]), order.executed_amount_quote)
self.assertEqual(json["fee_asset"], order.fee_asset)
self.assertEqual(Decimal(json["fee_paid"]), order.fee_paid)
self.assertEqual(json["last_state"], order.last_state)
self.assertEqual(json["creation_timestamp"], order.creation_timestamp)
self.assertIsNone(order.gas_price)
| 43.395349
| 95
| 0.673098
|
4c533fca9a82a1f31cb7842ca1006ed423d1fe60
| 125
|
py
|
Python
|
HomeAutomation/thingUtils.py
|
huvermann/MyPiHomeAutomation
|
dcda589e82456bb34d3bbbfdcb45ec1066f3d2f8
|
[
"MIT"
] | null | null | null |
HomeAutomation/thingUtils.py
|
huvermann/MyPiHomeAutomation
|
dcda589e82456bb34d3bbbfdcb45ec1066f3d2f8
|
[
"MIT"
] | null | null | null |
HomeAutomation/thingUtils.py
|
huvermann/MyPiHomeAutomation
|
dcda589e82456bb34d3bbbfdcb45ec1066f3d2f8
|
[
"MIT"
] | null | null | null |
import platform
def is_windows():
"""Returns true if current platform is windows"""
return any(platform.win32_ver())
| 25
| 53
| 0.72
|
b7d79805286b3686af8384046ec7bfac7369f0c8
| 3,446
|
py
|
Python
|
semantic_segmentation/src/models/decoders/segmentor_head.py
|
no-name-xiaosheng/PaddleViT
|
50226a3be5095b3727d3c62d2eab23ef1e9612ec
|
[
"Apache-2.0"
] | 2
|
2021-11-23T02:01:52.000Z
|
2021-11-23T02:02:03.000Z
|
semantic_segmentation/src/models/decoders/segmentor_head.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | null | null | null |
semantic_segmentation/src/models/decoders/segmentor_head.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | null | null | null |
import copy
import paddle
import paddle.nn as nn
from src.models.backbones.vit import EncoderLayer
class MaskTransformer(nn.Layer):
"""
Segmenter decoder use transformer as decoder for segmentation,
performs better than the linear layer.
the decoder has the same embedding dimensions as the encoder
Attributes:
layers: nn.LayerList contains multiple EncoderLayers
mask_tokens: several tokens added for segmentation, each for a certain class.
"""
def __init__(self, config):
super().__init__()
hidden_size = config.MODEL.TRANS.HIDDEN_SIZE
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
self.cls_num = config.DATA.NUM_CLASSES
self.layers = nn.LayerList([
copy.deepcopy(EncoderLayer(config)) for _ in range(config.MODEL.SEGMENTER.NUM_LAYERS)])
self.mask_tokens = self.create_parameter(shape=(1, self.cls_num, hidden_size))
self.proj_decoder = nn.Linear(hidden_size, hidden_size)
weight_attr_patch = paddle.ParamAttr(
initializer=nn.initializer.Normal(std=hidden_size ** -0.5)
)
self.proj_patch = nn.Linear(
hidden_size,
hidden_size,
weight_attr=weight_attr_patch,
bias_attr=False
)
weight_attr_class = paddle.ParamAttr(
initializer=nn.initializer.Normal(std=hidden_size ** -0.5)
)
self.proj_class = nn.Linear(
hidden_size,
hidden_size,
weight_attr=weight_attr_class,
bias_attr=False
)
self.decoder_norm = nn.LayerNorm(hidden_size)
self.mask_norm = nn.LayerNorm(self.cls_num)
def forward(self, x):
H, W = self.feature_size
x = self.proj_decoder(x)
mask_tokens = self.mask_tokens.expand((x.shape[0], -1, -1))
x = paddle.concat([x, mask_tokens], axis=1)
for layer in self.layers:
x, _ = layer(x)
x = self.decoder_norm(x)
patches, masks = x[:, :-self.cls_num], x[:, -self.cls_num:]
patches = self.proj_patch(patches)
masks = self.proj_class(masks)
patches = patches / paddle.norm(patches, axis=-1, keepdim=True)
masks = masks / paddle.norm(masks, axis=-1, keepdim=True)
masks = patches @ masks.transpose((0, 2, 1))
masks = self.mask_norm(masks)
#[b, (h w), n] -> [b, n, h, w]
masks = masks.reshape((masks.shape[0], H, W, masks.shape[-1]))
masks = masks.transpose((0, 3, 1, 2))
return masks
class LinearDecoder(nn.Layer):
"""
simple linear decoder with only one linear layer and the step to
resize the one-dimensional vectors to two-dimensional masks.
"""
def __init__(self, config):
super().__init__()
self.feature_size = (config.DATA.CROP_SIZE[0] // config.MODEL.TRANS.PATCH_SIZE,
config.DATA.CROP_SIZE[1] // config.MODEL.TRANS.PATCH_SIZE)
self.head = nn.Linear(config.MODEL.TRANS.HIDDEN_SIZE, config.DATA.NUM_CLASSES)
def forward(self, x):
H, W = self.feature_size
masks = self.head(x)
#[b, (h w), n] -> [b, n, h, w]
masks = masks.reshape((masks.shape[0], H, W, masks.shape[-1]))
masks = masks.transpose((0, 3, 1, 2))
return masks
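# Shape sketch (illustrative, assuming these config values): with
# DATA.CROP_SIZE = (512, 512) and MODEL.TRANS.PATCH_SIZE = 16, feature_size is
# (32, 32); both decoders map patch embeddings of shape [B, 32 * 32, hidden]
# to masks of shape [B, NUM_CLASSES, 32, 32].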
| 37.868132
| 99
| 0.616657
|
eafc9eb874c1690ab6ac6f680820f7174f5f0450
| 34,647
|
py
|
Python
|
dask/optimization.py
|
jsargsyan/dask
|
71113eacbf83437f59984d053c10e52f6ce97f70
|
[
"BSD-3-Clause"
] | null | null | null |
dask/optimization.py
|
jsargsyan/dask
|
71113eacbf83437f59984d053c10e52f6ce97f70
|
[
"BSD-3-Clause"
] | null | null | null |
dask/optimization.py
|
jsargsyan/dask
|
71113eacbf83437f59984d053c10e52f6ce97f70
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import numbers
import re
from . import config, core
from .core import (
istask,
get_dependencies,
subs,
toposort,
flatten,
reverse_dict,
ishashable,
)
from .utils_test import add, inc # noqa: F401
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> dsk, dependencies = cull(d, 'out') # doctest: +SKIP
>>> dsk # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
>>> dependencies # doctest: +SKIP
{'x': set(), 'out': set(['x'])}
Returns
-------
dsk: culled dask graph
dependencies: Dict mapping {key: [deps]}. Useful side effect to accelerate
other optimizations, notably fuse.
"""
if not isinstance(keys, (list, set)):
keys = [keys]
out_keys = []
seen = set()
dependencies = dict()
work = list(set(flatten(keys)))
while work:
new_work = []
out_keys += work
deps = [
(k, get_dependencies(dsk, k, as_list=True)) # fuse needs lists
for k in work
]
dependencies.update(deps)
for _, deplist in deps:
for d in deplist:
if d not in seen:
seen.add(d)
new_work.append(d)
work = new_work
out = {k: dsk[k] for k in out_keys}
return out, dependencies
def default_fused_linear_keys_renamer(keys):
"""Create new keys for fused tasks"""
typ = type(keys[0])
if typ is str:
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0])
return "-".join(names)
elif typ is tuple and len(keys[0]) > 0 and isinstance(keys[0][0], str):
names = [key_split(x) for x in keys[:0:-1]]
names.append(keys[0][0])
return ("-".join(names),) + keys[0][1:]
else:
return None
def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True):
""" Return new dask graph with linear sequence of tasks fused together.
If specified, the keys in ``keys`` keyword argument are *not* fused.
Supply ``dependencies`` from output of ``cull`` if available to avoid
recomputing dependencies.
**This function is mostly superseded by ``fuse``**
Parameters
----------
dsk: dict
keys: list
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
rename_keys: bool or func, optional
Whether to rename fused keys with ``default_fused_linear_keys_renamer``
or not. Renaming fused keys can keep the graph more understandable
and comprehensive, but it comes at the cost of additional processing.
If False, then the top-most key will be used. For advanced usage, a
func is also accepted, ``new_key = rename_keys(fused_key_list)``.
Examples
--------
>>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> dsk, dependencies = fuse(d)
>>> dsk # doctest: +SKIP
{'a-b-c': (inc, (inc, 1)), 'c': 'a-b-c'}
>>> dsk, dependencies = fuse(d, rename_keys=False)
>>> dsk # doctest: +SKIP
{'c': (inc, (inc, 1))}
>>> dsk, dependencies = fuse(d, keys=['b'], rename_keys=False)
>>> dsk # doctest: +SKIP
{'b': (inc, 1), 'c': (inc, 'b')}
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}
# locate all members of linear chains
child2parent = {}
unfusible = set()
for parent in dsk:
deps = dependencies[parent]
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = dict(map(reversed, child2parent.items()))
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
chains.append(chain)
dependencies = {k: set(v) for k, v in dependencies.items()}
if rename_keys is True:
key_renamer = default_fused_linear_keys_renamer
elif rename_keys is False:
key_renamer = None
else:
key_renamer = rename_keys
# create a new dask with fused chains
rv = {}
fused = set()
aliases = set()
is_renamed = False
for chain in chains:
if key_renamer is not None:
new_key = key_renamer(chain)
is_renamed = (
new_key is not None and new_key not in dsk and new_key not in rv
)
child = chain.pop()
val = dsk[child]
while chain:
parent = chain.pop()
dependencies[parent].update(dependencies.pop(child))
dependencies[parent].remove(child)
val = subs(dsk[parent], child, val)
fused.add(child)
child = parent
fused.add(child)
if is_renamed:
rv[new_key] = val
rv[child] = new_key
dependencies[new_key] = dependencies[child]
dependencies[child] = {new_key}
aliases.add(child)
else:
rv[child] = val
for key, val in dsk.items():
if key not in fused:
rv[key] = val
if aliases:
for key, deps in dependencies.items():
for old_key in deps & aliases:
new_key = rv[old_key]
deps.remove(old_key)
deps.add(new_key)
rv[key] = subs(rv[key], old_key, new_key)
if keys is not None:
for key in aliases - keys:
del rv[key]
del dependencies[key]
return rv, dependencies
def _flat_set(x):
if x is None:
return set()
elif isinstance(x, set):
return x
elif not isinstance(x, (list, set)):
x = [x]
return set(x)
def inline(dsk, keys=None, inline_constants=True, dependencies=None):
""" Return new dask with the given keys inlined with their values.
Inlines all constants if ``inline_constants`` keyword is True. Note that
the constant keys will remain in the graph, to remove them follow
``inline`` with ``cull``.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
>>> inline(d) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, 'y')}
>>> inline(d, keys='y') # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 1, (inc, 1))}
>>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP
{'x': 1, 'y': (inc, 1), 'z': (add, 'x', (inc, 'x'))}
"""
if dependencies and isinstance(next(iter(dependencies.values())), list):
dependencies = {k: set(v) for k, v in dependencies.items()}
keys = _flat_set(keys)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
if inline_constants:
keys.update(
k
for k, v in dsk.items()
if (ishashable(v) and v in dsk) or (not dependencies[k] and not istask(v))
)
# Keys may depend on other keys, so determine replace order with toposort.
# The values stored in `keysubs` do not include other keys.
replaceorder = toposort(
dict((k, dsk[k]) for k in keys if k in dsk), dependencies=dependencies
)
keysubs = {}
for key in replaceorder:
val = dsk[key]
for dep in keys & dependencies[key]:
if dep in keysubs:
replace = keysubs[dep]
else:
replace = dsk[dep]
val = subs(val, dep, replace)
keysubs[key] = val
# Make new dask with substitutions
dsk2 = keysubs.copy()
for key, val in dsk.items():
if key not in dsk2:
for item in keys & dependencies[key]:
val = subs(val, item, keysubs[item])
dsk2[key] = val
return dsk2
def inline_functions(
dsk, output, fast_functions=None, inline_constants=False, dependencies=None
):
""" Inline cheap functions into larger operations
Examples
--------
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline_functions(dsk, [], [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
Protect output keys. In the example below ``i`` is not inlined because it
is marked as an output key.
>>> inline_functions(dsk, ['i', 'out'], [inc, double]) # doctest: +SKIP
{'out': (add, 'i', (double, 'y')),
'i': (inc, 'x'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
output = set(output)
fast_functions = set(fast_functions)
if dependencies is None:
dependencies = {k: get_dependencies(dsk, k) for k in dsk}
dependents = reverse_dict(dependencies)
def inlinable(v):
try:
return functions_of(v).issubset(fast_functions)
except TypeError:
return False
keys = [
k
for k, v in dsk.items()
if istask(v) and dependents[k] and k not in output and inlinable(v)
]
if keys:
dsk = inline(
dsk, keys, inline_constants=inline_constants, dependencies=dependencies
)
for k in keys:
del dsk[k]
return dsk
def unwrap_partial(func):
while hasattr(func, "func"):
func = func.func
return func
def functions_of(task):
""" Set of functions contained within nested task
Examples
--------
>>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP
>>> functions_of(task) # doctest: +SKIP
set([add, mul, inc])
"""
funcs = set()
work = [task]
sequence_types = {list, tuple}
while work:
new_work = []
for task in work:
if type(task) in sequence_types:
if istask(task):
funcs.add(unwrap_partial(task[0]))
new_work += task[1:]
else:
new_work += task
work = new_work
return funcs
def default_fused_keys_renamer(keys, max_fused_key_length=120):
"""Create new keys for ``fuse`` tasks.
The optional parameter `max_fused_key_length` is used to limit the maximum string length for each renamed key.
If this parameter is set to `None`, there is no limit.
"""
it = reversed(keys)
first_key = next(it)
typ = type(first_key)
if max_fused_key_length: # Take into account size of hash suffix
max_fused_key_length -= 5
def _enforce_max_key_limit(key_name):
if max_fused_key_length and len(key_name) > max_fused_key_length:
name_hash = f"{hash(key_name):x}"[:4]
key_name = f"{key_name[:max_fused_key_length]}-{name_hash}"
return key_name
if typ is str:
first_name = key_split(first_key)
names = {key_split(k) for k in it}
names.discard(first_name)
names = sorted(names)
names.append(first_key)
concatenated_name = "-".join(names)
return _enforce_max_key_limit(concatenated_name)
elif typ is tuple and len(first_key) > 0 and isinstance(first_key[0], str):
first_name = key_split(first_key)
names = {key_split(k) for k in it}
names.discard(first_name)
names = sorted(names)
names.append(first_key[0])
concatenated_name = "-".join(names)
return (_enforce_max_key_limit(concatenated_name),) + first_key[1:]
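# Illustrative example (sketch): fused keys are ordered from the deepest task to
# the top-level key, so the top key keeps its suffix in the renamed result.
#
#     default_fused_keys_renamer(['add-1', 'inc-2', 'sum-3'])  # -> 'add-inc-sum-3'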
def fuse(
dsk,
keys=None,
dependencies=None,
ave_width=None,
max_width=None,
max_height=None,
max_depth_new_edges=None,
rename_keys=None,
fuse_subgraphs=None,
):
""" Fuse tasks that form reductions; more advanced than ``fuse_linear``
This trades parallelism opportunities for faster scheduling by making tasks
less granular. It can replace ``fuse_linear`` in optimization passes.
This optimization applies to all reductions--tasks that have at most one
dependent--so it may be viewed as fusing "multiple input, single output"
groups of tasks into a single task. There are many parameters to fine
tune the behavior, which are described below. ``ave_width`` is the
natural parameter with which to compare parallelism to granularity, so
it should always be specified. Reasonable values for other parameters
will be determined using ``ave_width`` if necessary.
Parameters
----------
dsk: dict
dask graph
keys: list or set, optional
Keys that must remain in the returned dask graph
dependencies: dict, optional
{key: [list-of-keys]}. Must be a list to provide count of each key
This optional input often comes from ``cull``
ave_width: float (default 2)
Upper limit for ``width = num_nodes / height``, a good measure of
parallelizability
max_width: int
Don't fuse if total width is greater than this
max_height: int
Don't fuse more than this many levels
max_depth_new_edges: int
Don't fuse if new dependencies are added after this many levels
rename_keys: bool or func, optional
Whether to rename the fused keys with ``default_fused_keys_renamer``
or not. Renaming fused keys can keep the graph more understandable
and comprehensive, but it comes at the cost of additional processing.
If False, then the top-most key will be used. For advanced usage, a
function to create the new name is also accepted.
fuse_subgraphs : bool, optional
Whether to fuse multiple tasks into ``SubgraphCallable`` objects.
Returns
-------
dsk: output graph with keys fused
dependencies: dict mapping dependencies after fusion. Useful side effect
to accelerate other downstream optimizations.
"""
if not config.get("optimization.fuse.active", True):
return dsk, dependencies
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
# Assign reasonable, not too restrictive defaults
if ave_width is None:
ave_width = config.get("optimization.fuse.ave-width", 1)
if max_height is None:
max_height = config.get("optimization.fuse.max-height", None) or len(dsk)
max_depth_new_edges = (
max_depth_new_edges
or config.get("optimization.fuse.max-depth-new-edges", None)
or ave_width * 1.5
)
max_width = (
max_width
or config.get("optimization.fuse.max-width", None)
or 1.5 + ave_width * math.log(ave_width + 1)
)
fuse_subgraphs = fuse_subgraphs or config.get("optimization.fuse.subgraphs", False)
if not ave_width or not max_height:
return dsk, dependencies
if rename_keys is None:
rename_keys = config.get("optimization.fuse.rename-keys", True)
if rename_keys is True:
key_renamer = default_fused_keys_renamer
elif rename_keys is False:
key_renamer = None
else:
key_renamer = rename_keys
rename_keys = key_renamer is not None
if dependencies is None:
deps = {k: get_dependencies(dsk, k, as_list=True) for k in dsk}
else:
deps = dict(dependencies)
rdeps = {}
for k, vals in deps.items():
for v in vals:
if v not in rdeps:
rdeps[v] = [k]
else:
rdeps[v].append(k)
deps[k] = set(vals)
reducible = {k for k, vals in rdeps.items() if len(vals) == 1}
if keys:
reducible -= keys
for k, v in dsk.items():
if type(v) is not tuple and not isinstance(v, (numbers.Number, str)):
reducible.discard(k)
if not reducible and (
not fuse_subgraphs or all(len(set(v)) != 1 for v in rdeps.values())
):
# Quick return if there's nothing to do. Only progress if there's tasks
# fusible by the main `fuse`, or by `fuse_subgraphs` if enabled.
return dsk, deps
rv = dsk.copy()
fused_trees = {}
# These are the stacks we use to store data as we traverse the graph
info_stack = []
children_stack = []
# For speed
deps_pop = deps.pop
reducible_add = reducible.add
reducible_pop = reducible.pop
reducible_remove = reducible.remove
fused_trees_pop = fused_trees.pop
info_stack_append = info_stack.append
info_stack_pop = info_stack.pop
children_stack_append = children_stack.append
children_stack_extend = children_stack.extend
children_stack_pop = children_stack.pop
while reducible:
parent = reducible_pop()
reducible_add(parent)
while parent in reducible:
# Go to the top
parent = rdeps[parent][0]
children_stack_append(parent)
children_stack_extend(reducible & deps[parent])
while True:
child = children_stack[-1]
if child != parent:
children = reducible & deps[child]
while children:
# Depth-first search
children_stack_extend(children)
parent = child
child = children_stack[-1]
children = reducible & deps[child]
children_stack_pop()
# This is a leaf node in the reduction region
# key, task, fused_keys, height, width, number of nodes, fudge, set of edges
info_stack_append(
(
child,
rv[child],
[child] if rename_keys else None,
1,
1,
1,
0,
deps[child] - reducible,
)
)
else:
children_stack_pop()
# Calculate metrics and fuse as appropriate
deps_parent = deps[parent]
edges = deps_parent - reducible
children = deps_parent - edges
num_children = len(children)
if num_children == 1:
(
child_key,
child_task,
child_keys,
height,
width,
num_nodes,
fudge,
children_edges,
) = info_stack_pop()
num_children_edges = len(children_edges)
if fudge > num_children_edges - 1 >= 0:
fudge = num_children_edges - 1
edges |= children_edges
no_new_edges = len(edges) == num_children_edges
if not no_new_edges:
fudge += 1
if (
(num_nodes + fudge) / height <= ave_width
and
# Sanity check; don't go too deep if new levels introduce new edge dependencies
(no_new_edges or height < max_depth_new_edges)
):
# Perform substitutions as we go
val = subs(dsk[parent], child_key, child_task)
deps_parent.remove(child_key)
deps_parent |= deps_pop(child_key)
del rv[child_key]
reducible_remove(child_key)
if rename_keys:
child_keys.append(parent)
fused_trees[parent] = child_keys
fused_trees_pop(child_key, None)
if children_stack:
if no_new_edges:
# Linear fuse
info_stack_append(
(
parent,
val,
child_keys,
height,
width,
num_nodes,
fudge,
edges,
)
)
else:
info_stack_append(
(
parent,
val,
child_keys,
height + 1,
width,
num_nodes + 1,
fudge,
edges,
)
)
else:
rv[parent] = val
break
else:
rv[child_key] = child_task
reducible_remove(child_key)
if children_stack:
# Allow the parent to be fused, but only under strict circumstances.
# Ensure that linear chains may still be fused.
if fudge > int(ave_width - 1):
fudge = int(ave_width - 1)
# This task *implicitly* depends on `edges`
info_stack_append(
(
parent,
rv[parent],
[parent] if rename_keys else None,
1,
width,
1,
fudge,
edges,
)
)
else:
break
else:
child_keys = []
height = 1
width = 0
num_single_nodes = 0
num_nodes = 0
fudge = 0
children_edges = set()
max_num_edges = 0
children_info = info_stack[-num_children:]
del info_stack[-num_children:]
for (
cur_key,
cur_task,
cur_keys,
cur_height,
cur_width,
cur_num_nodes,
cur_fudge,
cur_edges,
) in children_info:
if cur_height == 1:
num_single_nodes += 1
elif cur_height > height:
height = cur_height
width += cur_width
num_nodes += cur_num_nodes
fudge += cur_fudge
if len(cur_edges) > max_num_edges:
max_num_edges = len(cur_edges)
children_edges |= cur_edges
# Fudge factor to account for possible parallelism with the boundaries
num_children_edges = len(children_edges)
fudge += min(
num_children - 1, max(0, num_children_edges - max_num_edges)
)
if fudge > num_children_edges - 1 >= 0:
fudge = num_children_edges - 1
edges |= children_edges
no_new_edges = len(edges) == num_children_edges
if not no_new_edges:
fudge += 1
if (
(num_nodes + fudge) / height <= ave_width
and num_single_nodes <= ave_width
and width <= max_width
and height <= max_height
and
# Sanity check; don't go too deep if new levels introduce new edge dependencies
(no_new_edges or height < max_depth_new_edges)
):
# Perform substitutions as we go
val = dsk[parent]
children_deps = set()
for child_info in children_info:
cur_child = child_info[0]
val = subs(val, cur_child, child_info[1])
del rv[cur_child]
children_deps |= deps_pop(cur_child)
reducible_remove(cur_child)
if rename_keys:
fused_trees_pop(cur_child, None)
child_keys.extend(child_info[2])
deps_parent -= children
deps_parent |= children_deps
if rename_keys:
child_keys.append(parent)
fused_trees[parent] = child_keys
if children_stack:
info_stack_append(
(
parent,
val,
child_keys,
height + 1,
width,
num_nodes + 1,
fudge,
edges,
)
)
else:
rv[parent] = val
break
else:
for child_info in children_info:
rv[child_info[0]] = child_info[1]
reducible_remove(child_info[0])
if children_stack:
# Allow the parent to be fused, but only under strict circumstances.
# Ensure that linear chains may still be fused.
if width > max_width:
width = max_width
if fudge > int(ave_width - 1):
fudge = int(ave_width - 1)
# key, task, height, width, number of nodes, fudge, set of edges
# This task *implicitly* depends on `edges`
info_stack_append(
(
parent,
rv[parent],
[parent] if rename_keys else None,
1,
width,
1,
fudge,
edges,
)
)
else:
break
# Traverse upwards
parent = rdeps[parent][0]
if fuse_subgraphs:
_inplace_fuse_subgraphs(rv, keys, deps, fused_trees, rename_keys)
if rename_keys:
for root_key, fused_keys in fused_trees.items():
alias = key_renamer(fused_keys)
if alias is not None and alias not in rv:
rv[alias] = rv[root_key]
rv[root_key] = alias
deps[alias] = deps[root_key]
deps[root_key] = {alias}
return rv, deps
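# Illustrative usage (sketch, assuming default fuse configuration): in a small
# diamond graph both `inc` tasks have a single dependent, so they are folded
# into the final task while the shared input 'a' is kept.
#
#     >>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'a'), 'd': (add, 'b', 'c')}
#     >>> dsk2, deps = fuse(d, ave_width=2, rename_keys=False)  # doctest: +SKIP
#     >>> dsk2  # doctest: +SKIP
#     {'a': 1, 'd': (add, (inc, 'a'), (inc, 'a'))}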
def _inplace_fuse_subgraphs(dsk, keys, dependencies, fused_trees, rename_keys):
"""Subroutine of fuse.
    Mutates dsk, dependencies, and fused_trees in place."""
# locate all members of linear chains
child2parent = {}
unfusible = set()
for parent in dsk:
deps = dependencies[parent]
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = {v: k for k, v in child2parent.items()}
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
# Skip chains with < 2 executable tasks
ntasks = 0
for key in chain:
ntasks += istask(dsk[key])
if ntasks > 1:
chains.append(chain)
break
# Mutate dsk fusing chains into subgraphs
for chain in chains:
subgraph = {k: dsk[k] for k in chain}
outkey = chain[0]
# Update dependencies and graph
inkeys_set = dependencies[outkey] = dependencies[chain[-1]]
for k in chain[1:]:
del dependencies[k]
del dsk[k]
# Create new task
inkeys = tuple(inkeys_set)
dsk[outkey] = (SubgraphCallable(subgraph, outkey, inkeys),) + inkeys
# Mutate `fused_trees` if key renaming is needed (renaming done in fuse)
if rename_keys:
chain2 = []
for k in chain:
subchain = fused_trees.pop(k, False)
if subchain:
chain2.extend(subchain)
else:
chain2.append(k)
fused_trees[outkey] = chain2
# Defining `key_split` (used by key renamers in `fuse`) in utils.py
# results in messy circular imports, so define it here instead.
hex_pattern = re.compile("[a-f]+")
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
>>> key_split('_(x)') # strips unpleasant characters
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].strip("_'()\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
class SubgraphCallable(object):
"""Create a callable object from a dask graph.
Parameters
----------
dsk : dict
A dask graph
outkey : hashable
The output key from the graph
inkeys : list
A list of keys to be used as arguments to the callable.
name : str, optional
The name to use for the function.
"""
__slots__ = ("dsk", "outkey", "inkeys", "name")
def __init__(self, dsk, outkey, inkeys, name="subgraph_callable"):
self.dsk = dsk
self.outkey = outkey
self.inkeys = inkeys
self.name = name
def __repr__(self):
return self.name
def __eq__(self, other):
return (
type(self) is type(other)
and self.dsk == other.dsk
and self.outkey == other.outkey
and set(self.inkeys) == set(other.inkeys)
and self.name == other.name
)
def __ne__(self, other):
return not (self == other)
def __call__(self, *args):
if not len(args) == len(self.inkeys):
raise ValueError("Expected %d args, got %d" % (len(self.inkeys), len(args)))
return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
def __reduce__(self):
return (SubgraphCallable, (self.dsk, self.outkey, self.inkeys, self.name))
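# Illustrative usage (sketch): wrap a two-task graph as a callable whose single
# argument is bound to the 'in' key at call time.
#
#     >>> sub = SubgraphCallable({'x': 1, 'out': (add, 'x', 'in')}, 'out', ['in'])  # doctest: +SKIP
#     >>> sub(10)  # doctest: +SKIP
#     11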
| 34.821106
| 114
| 0.504171
|
dbd01bcd38875e8543c252b30231bed40ac62f05
| 1,549
|
py
|
Python
|
PhotoBooth.py
|
alan412/GeekPi
|
4333ac19efdd230ae2ff8367a3b6f23639250462
|
[
"MIT"
] | null | null | null |
PhotoBooth.py
|
alan412/GeekPi
|
4333ac19efdd230ae2ff8367a3b6f23639250462
|
[
"MIT"
] | null | null | null |
PhotoBooth.py
|
alan412/GeekPi
|
4333ac19efdd230ae2ff8367a3b6f23639250462
|
[
"MIT"
] | null | null | null |
from datetime import date
import calendar
import sys
import tweet
import RPi.GPIO as GPIO
import time
from picamera import PiCamera, Color
message = " 2016 FLL Razorback \
447: It's All Geek to Me's pit"
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP)
camera = PiCamera()
camera.resolution = (800, 480)
camera.start_preview()
camera.annotate_text = str(message)
print "Twitter turned off!"
twitter = tweet.Twitter(True);
dayStr = calendar.day_name[date.today().weekday()]
numVisitor = 1
tweetStr = dayStr + " Visitor "
#hashtags = " #photobooth #fll @fllrazorbackinv"
hashtags =" #photobooth #fll"
try:
while True:
input_state = GPIO.input(21);
while input_state == True:
input_state = GPIO.input(21);
camera.annotate_text = ""
camera.annotate_text_size = 160
for x in range(1, 4):
camera.annotate_text = str(4 - x)
time.sleep(1)
camera.annotate_text_size = 32
camera.annotate_text = str(message)
filename = '/home/pi/Projects/capture' + str(numVisitor) + dayStr + '.jpg'
camera.capture(filename, use_video_port = True)
camera.stop_preview()
camera.start_preview()
twitter.tweet(tweetStr + str(numVisitor) + hashtags, 36.068880, -94.175885, filename)
numVisitor = numVisitor + 1
#Ctrl C
except KeyboardInterrupt:
print "User cancelled"
#Error
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
camera.stop_preview()
| 26.254237
| 93
| 0.657844
|
7147b1a9a2fcf02eabcf9a18f7e35de6db8ccbb7
| 12,985
|
py
|
Python
|
experiments/thesis/framework/evaluation.py
|
mtanti/mtanti-phd
|
d915b6f96f1bae1a7f517eb1dbd9d4a88ca56576
|
[
"MIT"
] | 6
|
2019-05-20T06:48:37.000Z
|
2021-01-03T05:43:47.000Z
|
experiments/thesis/framework/evaluation.py
|
mtanti/mtanti-phd
|
d915b6f96f1bae1a7f517eb1dbd9d4a88ca56576
|
[
"MIT"
] | 1
|
2019-01-17T03:17:10.000Z
|
2019-02-23T17:31:41.000Z
|
experiments/thesis/framework/evaluation.py
|
mtanti/mtanti-phd
|
d915b6f96f1bae1a7f517eb1dbd9d4a88ca56576
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import nltk
import collections
import json
from . import config
sys.path.append(config.mscoco_eval_dir)
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.spice.spice import Spice
from pycocoevalcap.wmd.wmd import WMD
_meteor_scorer = Meteor()
_cider_scorer = Cider()
_spice_scorer = Spice()
_wmd_scorer = WMD()
########################################################################################
def get_logperplexities(logprobs, sent_lens):
# Let P = probability of a sentence with L words
# Let pi = probability of word i in sentence
# P = p1*...*pL
# log P = (log p1) + ... + (log pL)
# pplx = 2^(-1/L log P) = 2^(-(log p1 + ... + log pL)/L)
# log pplx = -(log p1 + ... + log pL)/L = -(log P)/L
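    # Numeric example (illustrative): a 4-word sentence with log2-probability
    # -8.0 has log perplexity -(-8.0)/4 = 2.0, i.e. a perplexity of 2**2 = 4.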
return [ -logprob/sent_len for (logprob, sent_len) in zip(logprobs, sent_lens) ]
########################################################################################
def get_loggeomean_perplexity(logprobs, sent_lens):
# Let pplxi = perplexity of sentence i out of N sentences
# geomean = (pplx1*...*pplxN)**(1/N)
# log geomean = (1/N) log (pplx1*...*pplxN) = (log pplx1 + ... + log pplxN)/N
logpplxs = get_logperplexities(logprobs, sent_lens)
return (
        sum(logpplx for logpplx in logpplxs if not np.isinf(logpplx))/len(logpplxs),
sum(np.isinf(logpplx) for logpplx in logpplxs)
)
########################################################################################
def get_probability_stats(logprobs, sent_lens, num_unknowns_per_sent=None, num_out_of_vocab_tokens=None):
if num_unknowns_per_sent is not None:
#Since the unknown token stands in place of every out of vocabulary word, the more out of vocabulary words in the sentences the greater the unknown token's probability (in the limiting case if every word is an out of vocabulary word then all words have a probability of 1).
#To compensate for this we shall assume that each different out of vocabulary word has an equal share of the unknown token's probability by dividing the unknown token's probability by the number of out of vocabulary words.
# P = p1*...*pUNK*...pi*...*pUNK*...*pL
# P' = p1*...*(pUNK/#oov)*...pi*...*(pUNK/#oov)*...*pL
# P' = (p1*...*pUNK*...pi*...*pUNK*...*pL)/(#oov^#unk)
# P' = P/(#oov^#unk)
# log P' = log P - #unk*(log #oov)
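        # Numeric example (illustrative): with 2 unknown tokens in a sentence and
        # 50 distinct out-of-vocabulary words, log P' = log P - 2*log2(50).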
logprobs = [ logprob - num_unknowns*np.log2(num_out_of_vocab_tokens) for (logprob, num_unknowns) in zip(logprobs, num_unknowns_per_sent) ]
probs = [ 2**logprob for logprob in logprobs ]
logpplxs = get_logperplexities(logprobs, sent_lens)
pplxs = [ 2**logpplx for logpplx in logpplxs ]
return {
'mean_prob': np.mean(probs),
'median_prob': np.median(probs),
'geomean_prob': 2**np.mean(logprobs),
'mean_pplx': np.mean(pplxs),
'median_pplx': np.median(pplxs),
'geomean_pplx': 2**np.mean(logpplxs)
}
########################################################################################
def get_meteor_score(test_tokenized_grouped_sents, generated):
return _meteor_scorer.compute_score(
{i: [ ' '.join(t) for t in ts ] for (i, ts) in enumerate(test_tokenized_grouped_sents)},
{i: [ ' '.join(g) ] for (i, g) in enumerate(generated)}
)
########################################################################################
def get_cider_score(test_tokenized_grouped_sents, generated):
return _cider_scorer.compute_score(
{i: [ ' '.join(t) for t in ts ] for (i, ts) in enumerate(test_tokenized_grouped_sents)},
{i: [ ' '.join(g) ] for (i, g) in enumerate(generated)}
)
########################################################################################
def get_spice_score(test_tokenized_grouped_sents, generated):
return _spice_scorer.compute_score(
{i: [ ' '.join(t) for t in ts ] for (i, ts) in enumerate(test_tokenized_grouped_sents)},
{i: [ ' '.join(g) ] for (i, g) in enumerate(generated)}
)
########################################################################################
def get_wmd_score(test_tokenized_grouped_sents, generated):
return _wmd_scorer.compute_score(
{i: [ ' '.join(t) for t in ts ] for (i, ts) in enumerate(test_tokenized_grouped_sents)},
{i: [ ' '.join(g) ] for (i, g) in enumerate(generated)}
)
########################################################################################
def mscoco_eval(test_tokenized_sent_groups, generated_sents_tokenized):
with open(config.mscoco_eval_dir+'/annotations/references.json', 'w', encoding='utf-8') as f:
json.dump({
'info': {'description': None, 'url': None, 'version': None, 'year': None, 'contributor': None, 'date_created': None},
'images': [
{'license': None, 'url': None, 'file_name': None, 'id': image_id, 'width': None, 'date_captured': None, 'height': None}
for image_id in range(len(test_tokenized_sent_groups))
],
'licenses': [],
'type': 'captions',
'annotations': [
{
'image_id': image_id,
'id': caption_id,
'caption': ' '.join(sent)
}
for (caption_id, (image_id, sent)) in enumerate(
(image_id, sent)
for (image_id, sent_group) in enumerate(test_tokenized_sent_groups)
for sent in sent_group
)
]
}, f)
with open(config.mscoco_eval_dir+'/results/generated.json', 'w', encoding='utf-8') as f:
json.dump([
{
'image_id': image_id,
'caption': ' '.join(sent)
}
for (image_id, sent) in enumerate(generated_sents_tokenized)
], f)
coco = COCO(config.mscoco_eval_dir+'/annotations/references.json')
cocoRes = coco.loadRes(config.mscoco_eval_dir+'/results/generated.json')
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.evaluate()
return {
'Bleu_1': cocoEval.eval['Bleu_1'],
'Bleu_2': cocoEval.eval['Bleu_2'],
'Bleu_3': cocoEval.eval['Bleu_3'],
'Bleu_4': cocoEval.eval['Bleu_4'],
'METEOR': cocoEval.eval['METEOR'],
'ROUGE_L': cocoEval.eval['ROUGE_L'],
'CIDEr': cocoEval.eval['CIDEr'],
'SPICE': cocoEval.eval['SPICE'],
'WMD': cocoEval.eval['WMD'],
'Bleu_1_all': [ item['Bleu_1'] for item in cocoEval.evalImgs ],
'Bleu_2_all': [ item['Bleu_2'] for item in cocoEval.evalImgs ],
'Bleu_3_all': [ item['Bleu_3'] for item in cocoEval.evalImgs ],
'Bleu_4_all': [ item['Bleu_4'] for item in cocoEval.evalImgs ],
'METEOR_all': [ item['METEOR'] for item in cocoEval.evalImgs ],
'ROUGE_L_all': [ item['ROUGE_L'] for item in cocoEval.evalImgs ],
'CIDEr_all': [ item['CIDEr'] for item in cocoEval.evalImgs ],
'SPICE_all': [ item['SPICE']['All']['f'] for item in cocoEval.evalImgs ],
'WMD_all': [ item['WMD'] for item in cocoEval.evalImgs ],
}
########################################################################################
def diversity_eval(train_tokenized_sents, test_tokenized_grouped_sents, vocab, train_token_freqs, tokenized_generated_sents):
    known_train_sents_full = { ' '.join(sent) for sent in train_tokenized_sents }
known_train_sents_3grams = { ' '.join(sent[i:i+3]) for sent in train_tokenized_sents for i in range(len(sent)-3+1) }
known_train_sents_4grams = { ' '.join(sent[i:i+4]) for sent in train_tokenized_sents for i in range(len(sent)-4+1) }
known_train_sents_5grams = { ' '.join(sent[i:i+5]) for sent in train_tokenized_sents for i in range(len(sent)-5+1) }
test_grouped_sents = [ [ ' '.join(sent) for sent in group ] for group in test_tokenized_grouped_sents ]
unique_sents = set()
num_reused_sents = 0
reused_sents = list()
reused_sent_test_sents = list()
num_reused_3grams = 0
num_reused_4grams = 0
num_reused_5grams = 0
sent_lens = list()
tagged_tokens = collections.defaultdict(set)
token_unigrams = set()
token_bigrams = set()
token_trigrams = set()
for (generated_sent_tokens, test_sents) in zip(tokenized_generated_sents, test_grouped_sents):
generated_sent = ' '.join(generated_sent_tokens)
token_tags = nltk.pos_tag(generated_sent_tokens, tagset='universal')
tags = [tag for (token, tag) in token_tags]
sent_len = len(generated_sent_tokens)
sent_lens.append(sent_len)
unique_sents.add(generated_sent)
if generated_sent in known_train_sents_full:
num_reused_sents += 1
reused_sents.append(generated_sent)
reused_sent_test_sents.append(test_sents)
for (token, tag) in token_tags:
tagged_tokens[tag].add(token)
for i in range(len(generated_sent_tokens)):
if i < sent_len-0:
token_unigrams.add(tuple(generated_sent_tokens[i:i+1]))
if i < sent_len-1:
token_bigrams.add(tuple(generated_sent_tokens[i:i+2]))
if i < sent_len-2:
token_trigrams.add(tuple(generated_sent_tokens[i:i+3]))
for i in range(len(generated_sent_tokens)):
if i < sent_len-2:
if ' '.join(generated_sent_tokens[i:i+3]) in known_train_sents_3grams:
num_reused_3grams += 1
if i < sent_len-3:
if ' '.join(generated_sent_tokens[i:i+4]) in known_train_sents_4grams:
num_reused_4grams += 1
if i < sent_len-4:
if ' '.join(generated_sent_tokens[i:i+5]) in known_train_sents_5grams:
num_reused_5grams += 1
num_vocab_used = sum((token in vocab.vocab_set) for (token,) in token_unigrams) #Filtered for the ceiling model sentences which would have all words in the test set rather than words in the vocabulary
min_freq_vocab_used = min(train_token_freqs[token] for (token,) in token_unigrams if token in vocab.vocab_set)
reused_sents_wmd = get_wmd_score(reused_sent_test_sents, reused_sents)[0] if num_reused_sents > 0 else None
return {
'vocab_used': num_vocab_used,
'vocab_used_frac': num_vocab_used/vocab.size,
'min_freq_vocab_used': min_freq_vocab_used,
'min_sent_len': min(sent_lens),
'mean_sent_len': np.mean(sent_lens),
'max_sent_len': max(sent_lens),
'num_reused_sents': len(reused_sents),
'num_reused_sents_frac': len(reused_sents)/len(tokenized_generated_sents),
'reused_sents_WMD': reused_sents_wmd,
'num_reused_3grams': num_reused_3grams,
'num_reused_4grams': num_reused_4grams,
'num_reused_5grams': num_reused_5grams,
'num_unique_sents': len(unique_sents),
'num_unique_sents_frac': len(unique_sents)/len(tokenized_generated_sents),
'num_types_nouns': len(tagged_tokens['NOUN']),
'num_types_adjectives': len(tagged_tokens['ADJ']),
'num_types_verbs': len(tagged_tokens['VERB']),
'num_types_adverbs': len(tagged_tokens['ADV']),
'num_types_unigrams': len(token_unigrams),
'num_types_bigrams': len(token_bigrams),
'num_types_trigrams': len(token_trigrams),
}
########################################################################################
def retrieval_eval(image_caption_logprobs_matrix):
r1 = 0
r5 = 0
r10 = 0
ranks = list()
for (correct_index, logprobs) in enumerate(image_caption_logprobs_matrix):
retrieved_indexes = np.argsort(logprobs)
correct_index_pos = len(retrieved_indexes) - retrieved_indexes.tolist().index(correct_index)
if correct_index_pos == 1:
r1 += 1
if correct_index_pos <= 5:
r5 += 1
if correct_index_pos <= 10:
r10 += 1
ranks.append(correct_index_pos)
median_rank = np.median(ranks)
return {
'R@1': r1,
'R@5': r5,
'R@10': r10,
'median_rank': median_rank,
'R@1_frac': r1/len(ranks),
'R@5_frac': r5/len(ranks),
'R@10_frac': r10/len(ranks),
'median_rank_frac': median_rank/len(ranks)
}
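# --- Hedged usage sketch (assumption, not part of the original module) ---
# A tiny check of retrieval_eval's rank convention, assuming numpy is
# available as np exactly as it is used above.  Higher log-probabilities are
# better, so the rank is the 1-based position of the correct caption in the
# descending ordering of each row.
if __name__ == '__main__':
    demo_matrix = [
        [-1.0, -5.0, -3.0],  # correct caption 0 scores highest -> rank 1
        [-4.0, -2.0, -6.0],  # correct caption 1 scores highest -> rank 1
        [-2.0, -1.0, -3.0],  # correct caption 2 scores lowest  -> rank 3
    ]
    print(retrieval_eval(demo_matrix))  # expect R@1 == 2 and median_rank == 1.0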
| 48.815789
| 281
| 0.56881
|
793c18c80259c896db46c0faf9b9642b77bf5222
| 18,547
|
py
|
Python
|
osctiny/tests/test_packages.py
|
bbrunner/osc-tiny
|
3a9e320745e593a7cefdab96ac4e98b8008256b1
|
[
"MIT"
] | null | null | null |
osctiny/tests/test_packages.py
|
bbrunner/osc-tiny
|
3a9e320745e593a7cefdab96ac4e98b8008256b1
|
[
"MIT"
] | null | null | null |
osctiny/tests/test_packages.py
|
bbrunner/osc-tiny
|
3a9e320745e593a7cefdab96ac4e98b8008256b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from io import StringIO, BytesIO, IOBase
import re
from unittest import skip
from six import text_type
import responses
from .base import OscTest, CallbackFactory
from ..utils.errors import OscError
class TestPackage(OscTest):
@responses.activate
def test_get_files(self):
def callback(headers, params, request):
status = 200
body = """
<directory name="python.8549" rev="1" vrev="28.17"
srcmd5="9498d242f91b858372705d1bb4e26e1a">
<entry name="CVE-2017-18207.patch"
md5="d1e5e39cfdf5087933cabf7d7b541158" size="900"
mtime="1528391213" />
<entry name="Python-2.7.13.tar.xz"
md5="53b43534153bb2a0363f08bae8b9d990" size="12495628"
mtime="1484132166" />
<entry name="Python-2.7.13.tar.xz.asc"
md5="115f2de1793fa46a382f2a9f4e11b285" size="801"
mtime="1484132166" />
<entry name="README.SUSE"
md5="4a5a6c13a5b163d2e763b0d45f64c051" size="735"
mtime="1216853740" />
<entry name="python-2.7.13-docs-pdf-a4.tar.bz2"
md5="ccc87ad010f28d926ca09b1d5a55f4b0" size="10712181"
mtime="1484132167" />
<entry name="python-base.spec"
md5="af86457b494a667420e971e2e0febfcf" size="18454"
mtime="1537948380" />
<entry name="python-doc.changes"
md5="d5b75815a9455e231cc12d0aac045f9c" size="7290"
mtime="1537948380" />
<entry name="python-doc.spec"
md5="832b21401d9951d3b6e717e80944bd2b" size="6522"
mtime="1537948381" />
<entry name="python.changes"
md5="d479c6cd83559b1eed7d909087cf4785" size="54144"
mtime="1537948381" />
<entry name="python.spec"
md5="812c70b56ffd92d060bcf8b02ba479ff" size="20189"
mtime="1537948382" />
</directory>
"""
headers['request-id'] = '728d329e-0e86-11e4-a748-0c84dc037c13'
return status, headers, body
self.mock_request(
method=responses.GET,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549',
callback=CallbackFactory(callback)
)
response = self.osc.packages.get_files(
"SUSE:SLE-12-SP1:Update", "python.8549"
)
self.assertEqual(response.tag, "directory")
self.assertEqual(response.countchildren(), 10)
@responses.activate
def test_get_list(self):
def callback(headers, params, request):
status = 200
body = """
<directory count="14">
<entry name="SAPHanaSR"/>
<entry name="SAPHanaSR.4926"/>
<entry name="SAPHanaSR.7820"/>
<entry name="SUSEConnect"/>
<entry name="SUSEConnect.1732"/>
<entry name="SUSEConnect.1892"/>
<entry name="SUSEConnect.2196"/>
<entry name="SUSEConnect.2374"/>
<entry name="SUSEConnect.4293"/>
<entry name="SUSEConnect.4515"/>
<entry name="SUSEConnect.4773"/>
<entry name="SUSEConnect.7260"/>
<entry name="SUSEConnect.8868"/>
<entry name="SUSEConnect.9195"/>
</directory>
"""
headers['request-id'] = '728d329e-0e86-11e4-a748-0c84dc037c13'
return status, headers, body
self.mock_request(
method=responses.GET,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549',
callback=CallbackFactory(callback)
)
response = self.osc.packages.get_files(
"SUSE:SLE-12-SP1:Update", "python.8549"
)
self.assertEqual(response.tag, "directory")
self.assertEqual(response.countchildren(), 14)
@responses.activate
def test_get_meta(self):
def callback(headers, params, request):
status = 200
body = """
<package name="python.8549" project="SUSE:SLE-12-SP1:Update">
<title>Python Interpreter</title>
<description>Python is an interpreted, object-oriented programming
language, and is often compared to Tcl, Perl, Scheme, or Java. You
can find an overview of Python in the documentation and tutorials
included in the python-doc (HTML) or python-doc-pdf (PDF)
packages.
If you want to install third party modules using distutils, you
need to install python-devel package.</description>
<releasename>python</releasename>
</package>
"""
if params.get("view", None) == "blame":
body = """
1 (foo__bar 2018-10-29 16:28:55 1) <package name="python.8549" project="SUSE:SLE-12-SP1:Update">
1 (foo__bar 2018-10-29 16:28:55 2) <title>Python Interpreter</title>
1 (foo__bar 2018-10-29 16:28:55 3) <description>Python is an interpreted, object-oriented programming language, and is
1 (foo__bar 2018-10-29 16:28:55 4) often compared to Tcl, Perl, Scheme, or Java. You can find an overview
1 (foo__bar 2018-10-29 16:28:55 5) of Python in the documentation and tutorials included in the python-doc
1 (foo__bar 2018-10-29 16:28:55 6) (HTML) or python-doc-pdf (PDF) packages.
1 (foo__bar 2018-10-29 16:28:55 7)
1 (foo__bar 2018-10-29 16:28:55 8) If you want to install third party modules using distutils, you need to
1 (foo__bar 2018-10-29 16:28:55 9) install python-devel package.</description>
1 (foo__bar 2018-10-29 16:28:55 10) <releasename>python</releasename>
1 (foo__bar 2018-10-29 16:28:55 11) </package>
"""
return status, headers, body
self.mock_request(
method=responses.GET,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549/'
'_meta',
callback=CallbackFactory(callback)
)
with self.subTest("without blame"):
response = self.osc.packages.get_meta(
"SUSE:SLE-12-SP1:Update", "python.8549"
)
self.assertEqual(response.tag, "package")
self.assertEqual(
response.xpath("./title")[0].text,
"Python Interpreter"
)
with self.subTest("with blame"):
response = self.osc.packages.get_meta(
"SUSE:SLE-12-SP1:Update", "python.8549", blame=True
)
self.assertTrue(isinstance(response, text_type))
@skip("No test data available")
@responses.activate
def test_get_attribute(self):
def callback(headers, params, request):
status = 200
body = """</attributes>"""
headers['request-id'] = '728d329e-0e86-11e4-a748-0c84dc037c13'
return status, headers, body
self.mock_request(
method=responses.GET,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549/'
'_attribute',
callback=CallbackFactory(callback)
)
@responses.activate
def test_get_history(self):
self.mock_request(
method=responses.GET,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549/'
'_history',
body="""
<revisionlist>
<revision rev="1" vrev="1">
<srcmd5>b9b258599bb67a2a3d396b1515cabeab</srcmd5>
<version>unknown</version>
<time>1514367595</time>
<user>Fȱȱ Bar</user>
<comment>
Set link tȱ grub2.5745 via maintenance_release request
</comment>
<requestid>148865</requestid>
</revision>
<revision rev="2" vrev="2">
<srcmd5>9f5e43584f67e2a301b71b63bdf8e2e1</srcmd5>
<version>unknown</version>
<time>1520336862</time>
<user>HȨllȱ Wȱrld</user>
<comment>
Set link tȱ grub2.6584 via maintenance_release request
</comment>
<requestid>154349</requestid>
</revision>
</revisionlist>
"""
)
response = self.osc.packages.get_history(
"SUSE:SLE-12-SP1:Update", "python.8549"
)
self.assertEqual(response.tag, "revisionlist")
self.assertEqual(
len(response.xpath("./revision")), 2
)
@responses.activate
def test_cmd(self):
self.mock_request(
method=responses.POST,
url=self.osc.url + '/source/SUSE:SLE-12-SP1:Update/python.8549',
body="""
+==== //tools/python/2.6.2/src/base/Modules/_ctypes/libffi/src/sparc/ffi.c#1 - /home/build/clifford/gpdb/tools/python/2.6.2/src/base/Modules/_ctypes/libffi/src/sparc/ffi.c ====
+---
+ Modules/_ctypes/libffi/src/sparc/ffi.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/Modules/_ctypes/libffi/src/sparc/ffi.c
++++ b/Modules/_ctypes/libffi/src/sparc/ffi.c
+@@ -652,6 +652,11 @@
+ }
+ else
+ {
++#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
++ /* SparcV9 long double is 16-byte aligned; skip arg if necessary */
++ if (arg_types[i]->type == FFI_TYPE_LONGDOUBLE && (argn & 1))
++ argn++;
++#endif
+ /* Right-justify. */
+ argn += ALIGN(arg_types[i]->size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG;
+
"""
)
response = self.osc.packages.cmd(
"SUSE:SLE-12-SP1:Update", "python.8549", "diff"
)
self.assertTrue(isinstance(response, text_type))
self.assertIn(
"++#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE", response
)
@responses.activate
def test_set_meta(self):
bodies = []
def callback(headers, params, request):
bodies.append(request.body)
status, body = 200, ""
return status, headers, body
self.mock_request(
method=responses.PUT,
url=re.compile(self.osc.url + '/source/(?P<project>[^/]+)/'
'(?P<package>[^/]+)/_meta'),
callback=CallbackFactory(callback)
)
data = (
(
{
'project': "test:project",
'package': "test.package",
'title': "test",
'description': "foo"
},
b'<package><title>test</title><description>foo</description>'
b'</package>'
),
(
{
'project': "test:project",
'package': "test.package",
},
b'<package><title/><description/></package>'
),
(
{
'project': "test:project",
'package': "test.package",
'meta': """
<package name="test.package" project="test:project">
<title/>
<description/>
<build>
<enable repository="openSUSE_Leap_15.0"/>
<disable arch="i586"/>
</build>
</package>
"""
},
b'<package name="test.package" project="test:project"><title/>'
b'<description/><build>'
b'<enable repository="openSUSE_Leap_15.0"/>'
b'<disable arch="i586"/></build></package>'
),
(
{
'project': "test:project",
'package': "test.package",
'title': 'foo',
'description': 'bar',
'meta': """
<package name="test.package" project="test:project">
<title/>
<description/>
<build>
<enable repository="openSUSE_Leap_15.0"/>
<disable arch="i586"/>
</build>
</package>
"""
},
b'<package name="test.package" project="test:project">'
b'<title>foo</title><description>bar</description><build>'
b'<enable repository="openSUSE_Leap_15.0"/>'
b'<disable arch="i586"/></build></package>'
),
)
for params, expected in data:
with self.subTest():
self.osc.packages.set_meta(**params)
self.assertEqual(bodies[-1], expected)
@responses.activate
def test_push_file(self):
content = """
ლ(ಠ益ಠ)ლ ლ(ಠ益ಠ)ლ
Lorem ipsum dolor sit amet,
consectetur adipiscing elit.
Vestibulum id enim
fermentum, lobortis urna
quis, convallis justo.
ლ(ಠ益ಠ)ლ ლ(ಠ益ಠ)ლ
"""
bodies = []
def callback(headers, params, request):
if isinstance(request.body, IOBase):
request.body.seek(0)
bodies.append(request.body.read())
else:
bodies.append(request.body)
status, body = 200, ""
return status, headers, body
self.mock_request(
method=responses.PUT,
url=re.compile(
self.osc.url + '/source/(?P<project>[^/]+)/'
'(?P<package>[^/]+)/(?P<filename>.+)'
),
callback=CallbackFactory(callback)
)
with self.subTest("as unicode"):
self.osc.packages.push_file("prj", "pkg", "readme.txt", content)
self.assertEqual(bodies[-1], content.encode('utf-8'))
with self.subTest("as bytes"):
self.osc.packages.push_file("prj", "pkg", "readme.txt",
content.encode('utf-8'))
self.assertEqual(bodies[-1], content.encode('utf-8'))
with self.subTest("as StringIO"):
self.osc.packages.push_file("prj", "pkg", "readme.txt",
StringIO(content))
self.assertEqual(bodies[-1], content.encode('utf-8'))
with self.subTest("as BytesIO"):
self.osc.packages.push_file("prj", "pkg", "readme.txt",
BytesIO(content.encode('utf-8')))
self.assertEqual(bodies[-1], content.encode('utf-8'))
@responses.activate
def test_aggregate(self):
put_called = []
def exists_callback(headers, params, request):
status, body = 404, ""
if "exists" in request.url:
if "_aggregate" in request.url and "already.agged" not in request.url:
status = 404
else:
status = 200
return status, headers, body
def put_callback(headers, params, request):
put_called.append({'request': request, 'params': params})
status, body = 200, ""
return status, headers, body
def meta_callback(headers, params, request):
status = 200
body = """<package><title/><description/></package>"""
return status, headers, body
self.mock_request(
method=responses.HEAD,
url=re.compile(
self.osc.url + '/source/.*'
),
callback=CallbackFactory(exists_callback)
)
self.mock_request(
method=responses.PUT,
url=re.compile(
self.osc.url + '/source/.*'
),
callback=CallbackFactory(put_callback)
)
self.mock_request(
method=responses.GET,
url=re.compile(
self.osc.url + '/source/.+/_meta'
),
callback=CallbackFactory(meta_callback)
)
with self.subTest("identical package"):
self.assertRaises(
OscError,
self.osc.packages.aggregate,
"test:project", "test.package",
"test:project", "test.package",
)
with self.subTest("non-existing package"):
self.assertRaises(
OscError,
self.osc.packages.aggregate,
"test:project", "test.package",
"test:project:2", "test.package",
)
with self.subTest("already existing aggregate"):
self.assertRaises(
OscError,
self.osc.packages.aggregate,
"test:project:exists", "test.package",
"test:project2:exists", "already.agged",
)
with self.subTest("non-existing target package"):
old_len = len(put_called)
self.osc.packages.aggregate(
"test:project:exists", "test.package",
"test:project2:foo", "test.pkg",
)
self.assertEqual(len(put_called), old_len + 2)
with self.subTest("existing target package"):
old_len = len(put_called)
self.osc.packages.aggregate(
"test:project:exists", "test.package",
"test:project2:exists", "test.pkg",
)
self.assertEqual(len(put_called), old_len + 1)
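# --- Hedged usage note (assumption, not part of the original test module) ---
# The cases above are plain unittest tests driven by the `responses` mocks,
# so under that assumption they can also be run with the standard loader,
# e.g. `python -m unittest osctiny.tests.test_packages` or, because of the
# relative imports, `python -m osctiny.tests.test_packages`:
if __name__ == '__main__':
    import unittest
    unittest.main()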
| 39.046316
| 192
| 0.498086
|
6b71abf068fe6022ea118032c97e0789f39a7f0a
| 4,901
|
py
|
Python
|
external-deps/spyder-kernels/spyder_kernels/customize/umr.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 7,956
|
2015-02-17T01:19:09.000Z
|
2022-03-31T21:52:15.000Z
|
external-deps/spyder-kernels/spyder_kernels/customize/umr.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 16,326
|
2015-02-16T23:15:21.000Z
|
2022-03-31T23:34:34.000Z
|
external-deps/spyder-kernels/spyder_kernels/customize/umr.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 1,918
|
2015-02-20T19:26:26.000Z
|
2022-03-31T19:03:25.000Z
|
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
"""User module reloader."""
import os
import sys
from spyder_kernels.customize.utils import path_is_library
from spyder_kernels.py3compat import PY2, _print
class UserModuleReloader(object):
"""
User Module Reloader (UMR) aims at deleting user modules
to force Python to deeply reload them during import
pathlist [list]: blacklist in terms of module path
namelist [list]: blacklist in terms of module name
"""
def __init__(self, namelist=None, pathlist=None):
if namelist is None:
namelist = []
else:
try:
namelist = namelist.split(',')
except Exception:
namelist = []
# Spyder modules
spy_modules = ['spyder_kernels']
# Matplotlib modules
mpl_modules = ['matplotlib', 'tkinter', 'Tkinter']
# Add other, necessary modules to the UMR blacklist
# astropy: See spyder-ide/spyder#6962
# pytorch: See spyder-ide/spyder#7041
# fastmat: See spyder-ide/spyder#7190
# pythoncom: See spyder-ide/spyder#7190
# tensorflow: See spyder-ide/spyder#8697
other_modules = ['pytorch', 'pythoncom', 'tensorflow']
if PY2:
py2_modules = ['astropy', 'fastmat']
other_modules = other_modules + py2_modules
self.namelist = namelist + spy_modules + mpl_modules + other_modules
self.pathlist = pathlist
# List of previously loaded modules
self.previous_modules = list(sys.modules.keys())
# List of module names to reload
self.modnames_to_reload = []
# Activate Cython support
self.has_cython = False
self.activate_cython()
# Check if the UMR is enabled or not
enabled = os.environ.get("SPY_UMR_ENABLED", "")
self.enabled = enabled.lower() == "true"
# Check if the UMR should print the list of reloaded modules or not
verbose = os.environ.get("SPY_UMR_VERBOSE", "")
self.verbose = verbose.lower() == "true"
def is_module_reloadable(self, module, modname):
"""Decide if a module is reloadable or not."""
if self.has_cython:
# Don't return cached inline compiled .PYX files
return False
else:
if (path_is_library(getattr(module, '__file__', None),
self.pathlist) or
self.is_module_in_namelist(modname)):
return False
else:
return True
def is_module_in_namelist(self, modname):
"""Decide if a module can be reloaded or not according to its name."""
return set(modname.split('.')) & set(self.namelist)
def activate_cython(self):
"""
Activate Cython support.
We need to run this here because if the support is
        active, we don't want to run the UMR at all.
"""
run_cython = os.environ.get("SPY_RUN_CYTHON") == "True"
if run_cython:
try:
__import__('Cython')
self.has_cython = True
except Exception:
pass
if self.has_cython:
# Import pyximport to enable Cython files support for
# import statement
import pyximport
pyx_setup_args = {}
# Add Numpy include dir to pyximport/distutils
try:
import numpy
pyx_setup_args['include_dirs'] = numpy.get_include()
except Exception:
pass
# Setup pyximport and enable Cython files reload
pyximport.install(setup_args=pyx_setup_args,
reload_support=True)
def run(self):
"""
Delete user modules to force Python to deeply reload them
        Do not delete modules that are considered system modules, i.e.
        modules installed in subdirectories of the Python interpreter's binary.
        Do not delete C modules.
"""
self.modnames_to_reload = []
for modname, module in list(sys.modules.items()):
if modname not in self.previous_modules:
# Decide if a module can be reloaded or not
if self.is_module_reloadable(module, modname):
self.modnames_to_reload.append(modname)
del sys.modules[modname]
else:
continue
# Report reloaded modules
if self.verbose and self.modnames_to_reload:
modnames = self.modnames_to_reload
_print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"
% ("Reloaded modules", ": "+", ".join(modnames)))
| 34.272727
| 78
| 0.582942
|
e4a27f995ac49ea85b027d0747f65ced66639cb8
| 263
|
py
|
Python
|
myapp/models.py
|
purveshmakode24/django-react
|
4e75e73646c7debe18b85422c8db6d148d1390c7
|
[
"MIT"
] | 1
|
2020-06-02T19:23:59.000Z
|
2020-06-02T19:23:59.000Z
|
myapp/models.py
|
purveshmakode24/django-react
|
4e75e73646c7debe18b85422c8db6d148d1390c7
|
[
"MIT"
] | 3
|
2021-03-30T13:08:41.000Z
|
2021-09-22T18:56:16.000Z
|
myapp/models.py
|
purveshmakode24/django-react
|
4e75e73646c7debe18b85422c8db6d148d1390c7
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Post(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
message = models.CharField(max_length=300)
created_at = models.DateTimeField(auto_now_add=True)
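# Hedged usage sketch (assumption, not part of the original app code): from
# the Django shell or a view, a row could be created with e.g.
#   Post.objects.create(name="Ada", email="ada@example.com", message="Hello")
# created_at is filled in automatically because of auto_now_add=True.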
| 29.222222
| 56
| 0.749049
|
866118ea8c8a639f4d41f9fa1c66cc8f77cf8e29
| 27
|
py
|
Python
|
test_python_import_issue/pacx/j.py
|
zengmeng1094/test-python
|
79aa30789c2bb8700f660a4d6b13f06960e169e5
|
[
"MIT"
] | null | null | null |
test_python_import_issue/pacx/j.py
|
zengmeng1094/test-python
|
79aa30789c2bb8700f660a4d6b13f06960e169e5
|
[
"MIT"
] | null | null | null |
test_python_import_issue/pacx/j.py
|
zengmeng1094/test-python
|
79aa30789c2bb8700f660a4d6b13f06960e169e5
|
[
"MIT"
] | null | null | null |
def add():
print('add')
| 13.5
| 16
| 0.518519
|
e0041febc8bb3f9a03b4863266a8bbf3bf713fc3
| 2,501
|
py
|
Python
|
dateparser/data/date_translation_data/bem.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 8
|
2019-11-15T21:00:15.000Z
|
2021-12-21T22:09:42.000Z
|
dateparser/data/date_translation_data/bem.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 9
|
2020-06-05T21:28:57.000Z
|
2022-02-12T12:30:39.000Z
|
dateparser/data/date_translation_data/bem.py
|
bazingarj/dateparser
|
48c4563fb7f6ce685fbd6d27e9e83257521d2203
|
[
"BSD-3-Clause"
] | 21
|
2019-03-11T04:25:23.000Z
|
2022-02-03T08:54:33.000Z
|
# -*- coding: utf-8 -*-
info = {
"name": "bem",
"date_order": "DMY",
"january": [
"januari",
"jan"
],
"february": [
"februari",
"feb"
],
"march": [
"machi",
"mac"
],
"april": [
"epreo",
"epr"
],
"may": [
"mei"
],
"june": [
"juni",
"jun"
],
"july": [
"julai",
"jul"
],
"august": [
"ogasti",
"oga"
],
"september": [
"septemba",
"sep"
],
"october": [
"oktoba",
"okt"
],
"november": [
"novemba",
"nov"
],
"december": [
"disemba",
"dis"
],
"monday": [
"palichimo"
],
"tuesday": [
"palichibuli"
],
"wednesday": [
"palichitatu"
],
"thursday": [
"palichine"
],
"friday": [
"palichisano"
],
"saturday": [
"pachibelushi"
],
"sunday": [
"pa mulungu"
],
"am": [
"uluchelo"
],
"pm": [
"akasuba"
],
"year": [
"umwaka"
],
"month": [
"umweshi"
],
"week": [
"umulungu"
],
"day": [
"ubushiku"
],
"hour": [
"insa"
],
"minute": [
"mineti"
],
"second": [
"sekondi"
],
"relative-type": {
"1 year ago": [
"last year"
],
"0 year ago": [
"this year"
],
"in 1 year": [
"next year"
],
"1 month ago": [
"last month"
],
"0 month ago": [
"this month"
],
"in 1 month": [
"next month"
],
"1 week ago": [
"last week"
],
"0 week ago": [
"this week"
],
"in 1 week": [
"next week"
],
"1 day ago": [
"yesterday"
],
"0 day ago": [
"lelo"
],
"in 1 day": [
"tomorrow"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 second ago": [
"now"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
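# Hedged note (assumption, not part of the generated data file): dateparser
# picks this mapping up through its date_translation_data package, so a quick
# sanity check of the Bemba aliases could be
#   assert "epreo" in info["april"] and info["date_order"] == "DMY"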
| 15.438272
| 26
| 0.279488
|
902ffcf72797c47cda953d92bb77b974566d2344
| 11,550
|
py
|
Python
|
maple_debugger/m_bp_cmds.py
|
lvyitian/maple_engine
|
d423c2b205bcc6d977bd91689493dc7237258fae
|
[
"MulanPSL-1.0"
] | null | null | null |
maple_debugger/m_bp_cmds.py
|
lvyitian/maple_engine
|
d423c2b205bcc6d977bd91689493dc7237258fae
|
[
"MulanPSL-1.0"
] | null | null | null |
maple_debugger/m_bp_cmds.py
|
lvyitian/maple_engine
|
d423c2b205bcc6d977bd91689493dc7237258fae
|
[
"MulanPSL-1.0"
] | null | null | null |
#
# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reserved.
#
# Licensed under the Mulan Permissive Software License v2.
# You can use this software according to the terms and conditions of the MulanPSL - 2.0.
# You may obtain a copy of MulanPSL - 2.0 at:
#
# https://opensource.org/licenses/MulanPSL-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the MulanPSL - 2.0 for more details.
#
import gdb
import m_block
import m_frame
import m_breakpoint
import m_symbol
import m_util
from m_util import MColors
from m_util import gdb_print
import m_debug
class MapleBreakpointCmd(gdb.Command):
"""Set and manage Maple breakpoints
mbreak <symbol>: Set a new Maple breakpoint at symbol
    mbreak -set <symbol>: Same as 'mbreak <symbol>'
mbreak -enable <symbol|index>: Enable an existing Maple breakpoint at symbol
mbreak -disable <symbol|index>: Disable an existing Maple breakpoint at symbol
mbreak -clear <symbol|index>: Delete an existing Maple breakpoint at symbol
mbreak -clearall : Delete all existing Maple breakpoints
mbreak -listall : List all existing Maple breakpoints
mbreak -ignore <symbol | index> <count>: Set ignore count for specified Maple breakpoints
"""
def __init__(self):
gdb.Command.__init__ (self,
"mbreak",
gdb.COMMAND_BREAKPOINTS,
gdb.COMPLETE_NONE)
"""
mbp_table:
mbp_table is a runtime table mbreak command keeps.
mbp_table is a dict, key of mbp_table item is the symbol.
value of each item in mbp_table is also a dict.
item dict defined as
{
            'count'   : int, a countdown of how many times this Maple symbol should be ignored
'disabled': True|False,
'object' : Instance object of class MapleBreakpoint,
'address' : Breakpoint reported by 'info b' command + 0x4
}
"""
self.mbp_object = None
self.mbp_id = None
self.symbol_index = 1
self.initialized_gdb_bp = False
# create alias mb to mbreak
m_util.gdb_exec('alias mb = mbreak')
def init_gdb_breakpoint(self):
# create a gdb.Breakpoint object
self.mbp_object = m_breakpoint.MapleBreakpoint()
self.mbp_id = m_breakpoint.get_mbp_id()
def invoke(self, args, from_tty):
self.mbp_func(args, from_tty)
def usage(self):
gdb_print(" mbreak <symbol>: Sets a new Maple breakpoint at symbol\n"
" mbreak -set <symbol>: An alias for 'mbreak <symbol>'\n"
" mbreak -enable <symbol|index>: Enables an existing Maple breakpoint at symbol\n"
" mbreak -disable <symbol|index>: Disables an existing Maple breakpoint at symbol\n"
" mbreak -clear <symbol|index>: Deletes an existing Maple breakpoint at symbol\n"
" mbreak -clearall : Deletes all existing Maple breakpoints\n"
" mbreak -listall : Lists all existing Maple breakpoints\n"
" mbreak -ignore <symbol | index> <count> : Sets ignore count for specified Maple breakpoints\n"
" mbreak : Displays usage and syntax")
def mbp_func(self, args, from_tty):
'''
mbreak cmd syntax:
# mbreak <symbol>
# mbreak -set <symbol>
# mbreak -disable <symbol | index>
# mbreak -enable <symbol | index>
# mbreak -clear <symbol | index>
# mbreak -clearall
# mbreak -listall
# mbreak -ignore <symbol | index> <count>
'''
s = str(args)
        if len(s) == 0:  # nothing specified
self.usage()
return
x = s.replace('_mirbin_info','').replace('_mirbin_code', '').split()
if len(x) == 1: #symbol or clearall or listall
if x[0] == '-clearall':
self.clearall_breakpoint()
elif x[0] == '-listall' or x[0] == '-list' or x[0] == '-ls':
self.listall_breakpoint()
elif x[0] == '-debug' :
self.debug()
elif x[0] == '-restart' :
self.clearup_bp_symbol_data()
elif x[0][0] != '-' : # a symbol is passed in
self.set_breakpoint(x[0])
else:
self.usage()
return
        elif len(x) == 2:  # disable/enable/clear/set
if x[0] == '-disable':
self.disable_breakpoint(x[1])
elif x[0] == '-enable':
self.enable_breakpoint(x[1])
elif x[0] == '-clear':
self.clear_breakpoint(x[1])
elif x[0] == '-set':
self.set_breakpoint(x[1])
else:
self.usage()
return
        elif len(x) == 3:  # ignore
if x[0] == '-ignore':
self.ignore_breakpoint(x[1],x[2])
else:
self.usage()
return
else:
self.usage()
return
def set_breakpoint(self, symbol):
buf = "set breakpoint " + str(symbol)
gdb_print(buf)
mbp_exist, mbp_id = m_breakpoint.is_mbp_existed()
if mbp_exist:
if mbp_id != self.mbp_id:
gdb_print("There are one or more breakpints already created at maple::maple_invoke_method.")
gdb_print("In order to use mbreakpoint command, please delete those breakpoints first")
gdb_print("")
return
        if not self.initialized_gdb_bp:
self.init_gdb_breakpoint()
self.initialized_gdb_bp = True
if symbol in self.mbp_object.mbp_table:
self.mbp_object.mbp_table[symbol]['disabled'] = False
else:
self.mbp_object.mbp_table[symbol] = {}
self.mbp_object.mbp_table[symbol]['disabled'] = False
self.mbp_object.mbp_table[symbol]['hit_count'] = 0
self.mbp_object.mbp_table[symbol]['ignore_count'] = 0
self.mbp_object.mbp_table[symbol]['index'] = self.symbol_index
# NOTE!!! symbol we pass in here is NOT a mirbin_info symbol.
addr = m_symbol.get_symbol_address(symbol)
if not addr:
self.mbp_object.mbp_table[symbol]['address'] = 0
self.mbp_object.mbp_table[symbol]['hex_addr'] = None
else:
self.mbp_object.mbp_table[symbol]['address'] = int(addr, 16)
self.mbp_object.mbp_table[symbol]['hex_addr'] = addr
self.mbp_object.add_known_addr_symbol_into_addr_sym_table(symbol)
self.symbol_index += 1
self.mbp_object.update_mbp()
def disable_breakpoint(self, s):
if not self.mbp_object:
return
buf = "disable breakpoint " + str(s)
gdb_print(buf)
if s.isdigit():
symbol = self.lookup_symbol_by_index(int(s))
else:
symbol = s
if symbol is None:
buf = "no symbol is found for index " + str(s)
gdb_print(buf)
return
if symbol in self.mbp_object.mbp_table:
self.mbp_object.mbp_table[symbol]['disabled'] = True
else:
buf = "disable symbol: " + str(symbol) + " not found"
gdb_print(buf)
self.mbp_object.update_mbp()
def enable_breakpoint(self, s):
if not self.mbp_object:
return
buf = "enable breakpoint " + str(s)
gdb_print(buf)
if s.isdigit():
symbol = self.lookup_symbol_by_index(int(s))
else:
symbol = s
if symbol is None:
buf = "no symbol is found for index " + str(s)
gdb_print(buf)
return
if symbol in self.mbp_object.mbp_table:
self.mbp_object.mbp_table[symbol]['disabled'] = False
else:
buf = "enable symbol: " + str(symbol) + " not found"
gdb_print(buf)
self.mbp_object.update_mbp()
def clear_breakpoint(self, s):
if not self.mbp_object:
return
buf = "clear breakpoint " + str(s)
gdb_print(buf)
if s.isdigit():
symbol = self.lookup_symbol_by_index(int(s))
else:
symbol = s
if symbol is None:
buf = "no symbol is found for index " + str(s)
gdb_print(buf)
return
if symbol in self.mbp_object.mbp_table:
self.mbp_object.clear_one_symbol(symbol)
else:
buf = "clear symbol: " + str(symbol) + " not found"
gdb_print(buf)
self.mbp_object.update_mbp()
def ignore_breakpoint(self, s, c):
if not self.mbp_object:
return
buf = "ignore breakpoint " + str(s) + ' ' + str(c)
gdb_print(buf)
if not c.isdigit():
gdb_print ("ignore count must be a number")
return
if s.isdigit():
symbol = self.lookup_symbol_by_index(int(s))
else:
symbol = s
if symbol is None:
buf = "no symbol is found for index " + str(s)
gdb_print(buf)
return
count = int(c)
if count < 0:
count = 0
self.mbp_object.mbp_table[symbol]['ignore_count'] = count
def clearall_breakpoint(self):
if not self.mbp_object:
return
gdb_print ("clear all breakpoint")
self.mbp_object.clear_all_symbol()
self.mbp_object.update_mbp()
def listall_breakpoint(self):
if not self.mbp_object:
return
gdb_print ("list all Maple breakpoints")
# sort the dict with the index, so that we can display in index order
blist = [{k:v} for k, v in sorted(self.mbp_object.mbp_table.items(), key=(lambda x:x[1]['index']))]
bp_info = self.mbp_object.bp_info if self.mbp_object.bp_info else "in maple::maple_invoke_method()"
bp_addr = hex(self.mbp_object.bp_addr) if self.mbp_object.bp_addr else "pending address"
for v in blist:
key = [*v][0]
state = MColors.BP_STATE_RD + 'disabled' + MColors.ENDC if v[key]['disabled'] else MColors.BP_STATE_GR + 'enabled' + MColors.ENDC
gdb_print('#%i %s %s %s %d %s %d at %s %s' % \
(v[key]['index'], m_util.color_symbol(MColors.BP_SYMBOL, key), state, MColors.BP_ATTR + 'hit_count' + MColors.ENDC, v[key]['hit_count'],\
MColors.BP_ATTR + 'ignore_count' + MColors.ENDC, v[key]['ignore_count'],\
MColors.BP_ADDR + bp_addr + MColors.ENDC, bp_info))
def debug(self):
if not self.mbp_object:
return
gdb_print(" ======= Maple breakpoint mbp table: ===========")
self.mbp_object.display_mbp_table()
gdb_print(" ======= Maple breakpoint addr_sym_table: ===========")
self.mbp_object.display_mbp_addr_sym_table()
def lookup_symbol_by_index(self,index):
for k,v in self.mbp_object.mbp_table.items():
if v['index'] == index:
return k
return None
def clearup_bp_symbol_data(self):
if self.mbp_object:
self.mbp_object.clearup_mbp_symbol_related_data()
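# --- Hedged usage note (assumption, not part of the original module) ---
# Inside a gdb session that has sourced the Maple debugger scripts, the
# command is registered by instantiating the class, e.g.
#   MapleBreakpointCmd()
# after which `mbreak <symbol>` (alias `mb`) manages the table shown by
# `mbreak -listall`.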
| 37.62215
| 157
| 0.574026
|
a828be016c49fb394892ae757da10ccf856de148
| 9,523
|
py
|
Python
|
code/utils/config.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | 12
|
2020-10-22T13:04:36.000Z
|
2021-12-30T10:34:28.000Z
|
code/utils/config.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | null | null | null |
code/utils/config.py
|
btaille/sincere
|
dd1c34916ddcdc5ceb2799d64b17e80cdf1a5b31
|
[
"Apache-2.0"
] | 2
|
2020-11-27T12:23:22.000Z
|
2021-11-16T09:17:06.000Z
|
import os
import torch
import json
import argparse
from global_vars import RUN_DIR
from global_vars import EMBEDDINGS_DIR
def config_from_args(arg_list=None):
""" Argument Parser """
parser = argparse.ArgumentParser()
# Parameter in json to reload an already define config
parser.add_argument("-pf", "--parameter_file", help="parameter json file", default=None)
# From command_line
# Training hyperparameters
parser.add_argument("-ds", "--dataset", help="dataset", default="conll04")
parser.add_argument("-t", "--tasks", help="tasks", nargs="+", default=["ner", "re"])
parser.add_argument("-m", "--train_mode", help="'train' or 'train+dev'", default="train")
parser.add_argument("-s", "--seed", type=int, help="torch manual random seed", default=0)
parser.add_argument("-ep", "--epochs", type=int, help="max number of epochs", default=100)
parser.add_argument("-p", "--patience", type=int, help="patience", default=5)
parser.add_argument("-min", "--min_epochs", type=int, help="min number of epochs", default=10)
parser.add_argument("-tb", "--tensorboard", type=int, help="whether to log a tensorboard summary", default=1)
parser.add_argument("-lr", "--learning_rate", type=float, help="learning rate", default=1e-3)
parser.add_argument("-bs", "--batch_size", type=int, help="batch size", default=1)
parser.add_argument("-d", "--dropout", type=float, help="dropout", default=0.1)
parser.add_argument("-dev", "--device", help="pytorch device", default="cuda")
# Model Architecture
parser.add_argument("-emb", "--embedder", help="embedder list", nargs="+", default=["bert-base"])
parser.add_argument("-enc", "--encoder", help="encoder", default=None)
parser.add_argument("-ner_dec", "--ner_decoder", help="ner decoder", default="iobes")
#### Embedders
# Char Embedder
parser.add_argument("-ce", "--char_dim", type=int, help="dimension of char embeddings", default=100)
parser.add_argument("-ch", "--char_hidden", type=int, help="dimension of char hidden layer", default=25)
parser.add_argument("-cp", "--char_pool", help="pooling for the char-level encoder", default="last")
# Word Embedder
parser.add_argument("-wp", "--word_path", help="path of pretrained word embeddings",
default=EMBEDDINGS_DIR + "glove.840B/glove.840B.300d.txt")
parser.add_argument("-we", "--word_dim", type=int, help="dimension of word embeddings", default=300)
parser.add_argument("-wd", "--word_dropout", type=float, help="word_dropout", default=0.1)
parser.add_argument("-f", "--freeze", type=int, help="freeze embeddings", default=0)
# BERT Embedder
parser.add_argument("-bft", "--bert_finetune", help="finetune BERT", type=int, default=1)
parser.add_argument("-bpool", "--bert_pool", help="pooling of subwords", default="max")
#### BiLSTM Encoder
parser.add_argument("-lh", "--bilstm_hidden", type=int, help="dimension of bilstm hidden layer", default=384)
parser.add_argument("-ll", "--bilstm_layers", type=int, help="num bilstm layers", default=1)
### NER Decoder
parser.add_argument("-ner_max_span", "--ner_max_span", type=int, help="Max considered span length in Span NER",
default=10)
parser.add_argument("-ner_ns", "--ner_negative_sampling", type=int, help="Negative sampling in Span NER",
default=100)
parser.add_argument("-ner_span_emb", "--ner_span_embedding_dim", type=int,
help="Span length embedding dim in Span NER", default=25)
parser.add_argument("-pool", "--pool_fn", help="Pooling function for Span Representations and Context",
default="max")
#### RE Decoder
parser.add_argument("-re_ns", "--re_negative_sampling", type=int, help="Negative sampling in Span RE",
default=100)
parser.add_argument("-re_biaffine", "--re_biaffine", type=int, help="Add bilinear term in RE", default=0)
parser.add_argument("-re_context", "--re_context", type=int, help="Use middle context max pooling in RE", default=1)
### Joint decoding
parser.add_argument("-crt", "--criterion", help="early stopping criterion", default="re")
### Run dir Prefix and Suffix
parser.add_argument("-pfx", "--prefix", help="add a prefix to run dir", default="")
parser.add_argument("-sfx", "--suffix", help="add a suffix to run dir", default="")
""" Convert arg_list into dictionary """
if arg_list is None:
args = parser.parse_args()
else:
args = parser.parse_args(arg_list)
if args.parameter_file is not None:
        return Config.from_json(args.parameter_file)
else:
return Config(args)
class Config:
def __init__(self, args):
if args is not None:
self.dataset = args.dataset
self.embedder = args.embedder
self.encoder = args.encoder
self.ner_decoder = args.ner_decoder
self.seed = args.seed
self.epochs = args.epochs
self.patience = args.patience
self.min_epochs = args.min_epochs
self.train_mode = args.train_mode
self.tasks = args.tasks
self.tensorboard = args.tensorboard
self.learning_rate = args.learning_rate
self.batch_size = args.batch_size
self.dropout = args.dropout
self.device = args.device
# Embedders
if "char" in self.embedder:
self.char_embedding_dim = args.char_dim
self.char_hidden_dim = args.char_hidden
self.char_pool = args.char_pool
if "word" in self.embedder:
self.word_embedding_path = args.word_path
self.word_embedding_dim = args.word_dim
self.word_dropout = args.word_dropout
self.word_freeze = args.freeze
if "bert-base" in self.embedder or "bert-large" in self.embedder:
self.bert_finetune = args.bert_finetune
self.bert_pool = args.bert_pool
# Encoders
if self.encoder == "bilstm":
self.bilstm_layers = args.bilstm_layers
self.bilstm_hidden = args.bilstm_hidden
self.pool_fn = args.pool_fn
# NER Decoder
if self.ner_decoder == "span":
self.ner_negative_sampling = args.ner_negative_sampling
self.ner_max_span = args.ner_max_span
self.ner_span_emb = args.ner_span_embedding_dim
# RE Decoder
if "re" in self.tasks:
self.re_biaffine = args.re_biaffine
self.re_context = args.re_context
self.re_negative_sampling = args.re_negative_sampling
# Joint Decoder
self.criterion = args.criterion
assert self.criterion in self.tasks
# RUN DIR
self.run_dir = format_run_dir(self, args.prefix, args.suffix)
# Check validity of config
check_config(self)
# Dump config to json
if not os.path.exists(self.run_dir):
os.makedirs(self.run_dir)
self.to_json(os.path.join(self.run_dir, "config.json"))
def to_json(self, json_path):
with open(json_path, "w") as file:
json.dump(self.__dict__, file)
@classmethod
def from_json(cls, json_path):
config = cls(None)
with open(json_path, "r") as file:
            config.__dict__ = json.load(file)
        return config
def format_run_dir(config, prefix="", suffix=""):
# Format run directory
if not "run_dir" in config.__dict__:
run_dir = f"{config.dataset}_{'-'.join(sorted(config.embedder))}"
if config.encoder == "bilstm":
run_dir += f"_bilstm-{config.bilstm_layers}-{config.bilstm_hidden}"
if "ner" in config.tasks:
run_dir += f"_ner-{config.ner_decoder}"
if "re" in config.tasks:
run_dir += "_re"
if config.re_biaffine:
run_dir += "-biaff"
if config.re_context:
run_dir += "-ctxt"
run_dir += f"_lr-{config.learning_rate}_bs-{config.batch_size}_d-{config.dropout}"
if config.device == "cpu":
run_dir += "_cpu"
if config.criterion != "re" and len(config.tasks) >= 2:
run_dir += f"_crt-{config.criterion}"
if len(prefix):
run_dir = f"{prefix}_" + run_dir
if len(suffix):
run_dir += f"_{suffix}"
run_dir += f"/seed_{config.seed}/"
if config.train_mode == "train+dev":
run_dir += "train+dev/"
return os.path.join(RUN_DIR, run_dir)
def check_config(config):
"""Assert parameters are a valid set and format training folder."""
# Check config
assert config.dataset in ["conll04", "ace05"]
assert config.train_mode in ["train", "train+dev"]
for emb in config.embedder:
assert emb in ["word", "char", "bert-base", "bert-large"], emb
if "char" in config.embedder:
assert config.char_pool in ["last", "avg", "max"]
if config.encoder is not None:
assert config.encoder == "bilstm"
for task in config.tasks:
assert task in ["ner", "re"]
assert config.ner_decoder in ["iobes", "span"]
if "cuda" in config.device:
assert torch.cuda.is_available(), "CUDA not available"
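# --- Hedged usage sketch (assumption, not part of the original module) ---
# config_from_args accepts an explicit argument list, so a small CPU config
# can be built without touching sys.argv.  Note that constructing Config also
# creates the run directory and dumps config.json, so this assumes RUN_DIR
# from global_vars points at a writable location.
if __name__ == '__main__':
    cfg = config_from_args(["-ds", "conll04", "-dev", "cpu", "-emb", "bert-base"])
    print(cfg.run_dir)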
| 38.711382
| 120
| 0.618503
|
418b2826a094e669a767200d31a70e96d8d26d65
| 2,332
|
py
|
Python
|
src/robot/htmldata/testdata/create_jsdata.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 7
|
2015-02-25T10:55:02.000Z
|
2015-11-04T03:20:05.000Z
|
src/robot/htmldata/testdata/create_jsdata.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 12
|
2015-02-24T17:00:06.000Z
|
2015-07-31T08:32:07.000Z
|
src/robot/htmldata/testdata/create_jsdata.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 2
|
2015-12-15T11:00:35.000Z
|
2018-02-24T18:11:24.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from os.path import abspath, dirname, normpath, join
import os
import sys
BASEDIR = dirname(abspath(__file__))
LOG = normpath(join(BASEDIR, '..', 'log.html'))
TESTDATA = join(BASEDIR, 'dir.suite')
OUTPUT = join(BASEDIR, 'output.xml')
TARGET = join(BASEDIR, 'data.js')
SRC = normpath(join(BASEDIR, '..', '..', '..'))
sys.path.insert(0, SRC)
import robot
from robot.conf.settings import RebotSettings
from robot.reporting.resultwriter import Results
from robot.reporting.jswriter import JsResultWriter
def run_robot(testdata, outxml):
robot.run(testdata, loglevel='DEBUG', log='NONE', report='NONE', output=outxml)
def create_jsdata(outxml, target):
settings = RebotSettings({
'name': '<Suite.Name>',
'critical': ['i?'],
'noncritical': ['*kek*kone*'],
'tagstatlink': ['force:http://google.com:<kuukkeli>',
'i*:http://%1/?foo=bar&zap=%1:Title of i%1',
'?1:http://%1/<&>:Title',
'</script>:<url>:<title>'],
'tagdoc': ['test:this_is_*my_bold*_test',
'IX:*Combined* and escaped << tag doc',
'i*:Me, myself, and I.',
'</script>:<doc>'],
'tagstatcombine': ['fooANDi*:No Match',
'long1ORcollections',
'i?:IX',
'<*>:<any>']
})
result = Results(settings, outxml).js_result
config = {'logURL': 'log.html',
'title': 'This is a long long title. A very long title indeed. '
'And it even contains some stuff to <esc&ape>. '
'Yet it should still look good.',
'minLevel': 'DEBUG',
'defaultLevel': 'DEBUG',
'reportURL': 'report.html',
'background': {'fail': 'DeepPink'}}
with open(target, 'wb') as output:
writer = JsResultWriter(output, start_block='', end_block='')
writer.write(result, config)
    print('Log:    ', normpath(join(BASEDIR, '..', 'rebot', 'log.html')))
    print('Report: ', normpath(join(BASEDIR, '..', 'rebot', 'report.html')))
if __name__ == '__main__':
run_robot(TESTDATA, OUTPUT)
create_jsdata(OUTPUT, TARGET)
os.remove(OUTPUT)
| 35.876923
| 83
| 0.55789
|
3d4bbb5b1e72fe6ae57a3b8ad643dbccdf5ab233
| 23,702
|
py
|
Python
|
docmaker/_version.py
|
HurricaneLabs/docmaker
|
4c1ce0c967ddca0a6113225c2041cfe0770bc93e
|
[
"MIT"
] | null | null | null |
docmaker/_version.py
|
HurricaneLabs/docmaker
|
4c1ce0c967ddca0a6113225c2041cfe0770bc93e
|
[
"MIT"
] | null | null | null |
docmaker/_version.py
|
HurricaneLabs/docmaker
|
4c1ce0c967ddca0a6113225c2041cfe0770bc93e
|
[
"MIT"
] | null | null | null |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.22 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "docmaker/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
MATCH_ARGS = ["--match", "%s*" % tag_prefix] if tag_prefix else []
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long", *MATCH_ARGS],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
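# Illustrative parse (hypothetical values, not taken from a real checkout):
# with tag_prefix "v" and a describe output of "v1.2.0-3-gabc1234-dirty",
# the returned pieces would include
#   closest-tag = "1.2.0", distance = 3, short = "abc1234", dirty = True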
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or None if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
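# For example (illustrative): pep440_split_post("1.2.0.post2") returns
# ("1.2.0", 2), while pep440_split_post("1.2.0") returns ("1.2.0", None).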
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| 35.966616
| 84
| 0.586026
|
5586452b459eecf3a6cbdeda8aef62b421379512
| 1,732
|
py
|
Python
|
web/service/github/api/v3/miscellaneous/Licenses.py
|
trysrv/GitHub.Upload.UserRegister.Update.SshKey.201704081527
|
b0a40f95b39ea2bb243715702eecbe5d04d12e99
|
[
"CC0-1.0"
] | null | null | null |
web/service/github/api/v3/miscellaneous/Licenses.py
|
trysrv/GitHub.Upload.UserRegister.Update.SshKey.201704081527
|
b0a40f95b39ea2bb243715702eecbe5d04d12e99
|
[
"CC0-1.0"
] | null | null | null |
web/service/github/api/v3/miscellaneous/Licenses.py
|
trysrv/GitHub.Upload.UserRegister.Update.SshKey.201704081527
|
b0a40f95b39ea2bb243715702eecbe5d04d12e99
|
[
"CC0-1.0"
] | null | null | null |
#!python3
#encoding:utf-8
import time
import pytz
import requests
import json
import datetime
class Licenses:
def __init__(self, reqp, response):
self.__reqp = reqp
self.__response = response
"""
Get all license information.
In practice only a subset of licenses is returned; CC0 could not be retrieved.
@return {array} license information
"""
def GetLicenses(self):
licenses = []
url = 'https://api.github.com/licenses'
params = self.__reqp.get('GET', 'licenses')
params['headers']['Accept'] = 'application/vnd.github.drax-preview+json'
while url is not None:
r = requests.get(url, headers=params['headers'])
licenses += self.__response.Get(r)
url = self.__response.Headers.Link.Next(r)
return licenses
"""
Get information about the specified license.
@param {string} key is the key identifying the license on GitHub.
@return {dict} result (JSON)
"""
def GetLicense(self, key):
url = 'https://api.github.com/licenses/' + key
params = self.__reqp.get('GET', 'licenses/:license')
params['headers']['Accept'] = 'application/vnd.github.drax-preview+json'
r = requests.get(url, headers=params['headers'])
return self.__response.Get(r)
"""
Get the license of a repository.
@param {string} username is the user name
@param {string} repo_name is the target repository name
@return {dict} result (JSON)
"""
def GetRepositoryLicense(self, username, repo_name):
url = 'https://api.github.com/repos/{0}/{1}'.format(username, repo_name)
params = self.__reqp.get('GET', 'repos/:owner/:repo')
params['headers']['Accept'] = 'application/vnd.github.drax-preview+json'
r = requests.get(url, headers=params['headers'])
return self.__response.Get(r)
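# Illustrative usage sketch (commented out; not part of the original module).
# `reqp` and `response` stand for the request-parameter and response helper
# objects this project passes to the constructor; 'octocat'/'Hello-World' are
# hypothetical inputs, and the 'key'/'name' fields follow the GitHub licenses API.
# api = Licenses(reqp, response)
# for lic in api.GetLicenses():
#     print(lic['key'], lic['name'])
# print(api.GetRepositoryLicense('octocat', 'Hello-World'))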
| 32.074074
| 80
| 0.624711
|
bf0022dede92b488824831401ba8f6727d12acf4
| 408
|
py
|
Python
|
Configuration/Geometry/python/GeometryExtended2015FlatPlus05Percent_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 2
|
2020-05-09T16:03:43.000Z
|
2020-05-09T16:03:50.000Z
|
Configuration/Geometry/python/GeometryExtended2015FlatPlus05Percent_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 8
|
2020-03-20T23:18:36.000Z
|
2020-05-27T11:00:06.000Z
|
Configuration/Geometry/python/GeometryExtended2015FlatPlus05Percent_cff.py
|
gputtley/cmssw
|
c1ef8454804e4ebea8b65f59c4a952a6c94fde3b
|
[
"Apache-2.0"
] | 3
|
2017-06-07T15:22:28.000Z
|
2019-02-28T20:48:30.000Z
|
import FWCore.ParameterSet.Config as cms
#
# Geometry master configuration
#
# Ideal geometry, needed for simulation
from Geometry.CMSCommonData.cmsExtendedGeometry2015FlatPlus05PercentXML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
| 37.090909
| 84
| 0.872549
|
38c7598218effc764bbbce6f722ef15e3debe03c
| 2,152
|
py
|
Python
|
HLS/nngen/tests/onnx_matrix_conv2d/test_onnx_matrix_conv2d_leaky_relu_int8_3x3_stride1.py
|
kmichikura/aiedge_contest
|
909d08049d81f9da56e6f0b92df68065ed8da19d
|
[
"MIT"
] | 7
|
2020-06-08T13:36:13.000Z
|
2021-12-24T06:55:30.000Z
|
HLS/nngen/tests/onnx_matrix_conv2d/test_onnx_matrix_conv2d_leaky_relu_int8_3x3_stride1.py
|
kmichikura/aiedge_contest
|
909d08049d81f9da56e6f0b92df68065ed8da19d
|
[
"MIT"
] | null | null | null |
HLS/nngen/tests/onnx_matrix_conv2d/test_onnx_matrix_conv2d_leaky_relu_int8_3x3_stride1.py
|
kmichikura/aiedge_contest
|
909d08049d81f9da56e6f0b92df68065ed8da19d
|
[
"MIT"
] | 1
|
2021-03-12T03:51:56.000Z
|
2021-03-12T03:51:56.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))))
import nngen as ng
import veriloggen
import onnx_matrix_conv2d
act_shape = (1, 7, 7, 3)
weight_shape = (9, 3, 3, 3)
act_dtype = ng.int8
weight_dtype = ng.int8
stride = 1
padding = 0
with_batchnorm = False
act_func = 'leaky_relu'
disable_fusion = False
par_ich = 1
par_och = 1
par_col = 1
par_row = 1
concur_och = None
stationary = 'filter'
chunk_size = 64
axi_datawidth = 32
def test(request, silent=True):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = onnx_matrix_conv2d.run(act_shape, weight_shape,
act_dtype, weight_dtype,
stride, padding,
with_batchnorm, act_func, disable_fusion,
par_ich, par_och, par_col, par_row,
concur_och, stationary,
chunk_size,
axi_datawidth, silent,
filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
if __name__ == '__main__':
rslt = onnx_matrix_conv2d.run(act_shape, weight_shape,
act_dtype, weight_dtype,
stride, padding,
with_batchnorm, act_func, disable_fusion,
par_ich, par_och, par_col, par_row,
concur_och, stationary,
chunk_size,
axi_datawidth, silent=False,
filename='tmp.v',
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
print(rslt)
| 31.647059
| 102
| 0.538569
|
1c9b0124105916bff12afc7c9f8cf6840d38f1bf
| 686
|
py
|
Python
|
app/hooks/enforcement.py
|
BrettPowell/real-time-enforcer
|
a1f7a7436975c16e74e6b5396312271ccb3b8ec5
|
[
"Apache-2.0"
] | 11
|
2019-04-12T21:23:49.000Z
|
2020-09-02T11:16:49.000Z
|
app/hooks/enforcement.py
|
BrettPowell/real-time-enforcer
|
a1f7a7436975c16e74e6b5396312271ccb3b8ec5
|
[
"Apache-2.0"
] | 18
|
2019-04-09T16:23:03.000Z
|
2021-04-26T14:25:17.000Z
|
app/hooks/enforcement.py
|
forseti-security/forseti-policy-enforcer
|
11eca7e7012759be2730297ef362708695885da7
|
[
"Apache-2.0"
] | 11
|
2019-05-08T09:08:08.000Z
|
2021-04-26T19:23:24.000Z
|
# Copyright 2020 The Forseti Real Time Enforcer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def process_enforcement_decision(decision, trigger):
pass
| 40.352941
| 77
| 0.771137
|
1add6757e659b7548c67df6b828cfcbb545d9e95
| 409
|
py
|
Python
|
mojezeby/asgi.py
|
pythonsway/mojezebypl-tools
|
710cc8ca704855b6ffbe8f7e8febac64a9d53708
|
[
"MIT"
] | null | null | null |
mojezeby/asgi.py
|
pythonsway/mojezebypl-tools
|
710cc8ca704855b6ffbe8f7e8febac64a9d53708
|
[
"MIT"
] | null | null | null |
mojezeby/asgi.py
|
pythonsway/mojezebypl-tools
|
710cc8ca704855b6ffbe8f7e8febac64a9d53708
|
[
"MIT"
] | null | null | null |
"""
ASGI config for mojezeby project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mojezeby.settings')
application = get_asgi_application()
| 24.058824
| 79
| 0.755501
|
f47ed9e53aae3fdc9ba0c72b4913df27eee8f784
| 385
|
py
|
Python
|
MOCC/src/configurations/Get-Descendant/src/DescendantHelper.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 25
|
2016-07-20T04:49:14.000Z
|
2021-08-25T09:05:04.000Z
|
MOCC/src/configurations/Get-Descendant/src/DescendantHelper.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 16
|
2016-12-27T08:30:27.000Z
|
2018-06-18T08:51:44.000Z
|
MOCC/src/configurations/Get-Descendant/src/DescendantHelper.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 49
|
2016-07-20T13:08:27.000Z
|
2020-06-02T18:26:12.000Z
|
#!/usr/bin/env python
from __future__ import division, print_function
def getDescendants(device_id, dev_names):
devices = []
for dev in dev_names:
if dev != device_id:
devices.append(dev)
descendants = []
for dev in devices:
if device_id in dev:
descendants.append(dev)
return descendants
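# Illustrative call (hypothetical device names):
# getDescendants('lab/a', ['lab/a', 'lab/a/b', 'lab/a/b/c', 'lab/d'])
# returns ['lab/a/b', 'lab/a/b/c'], since descendants are simply the other
# names that contain the given device_id as a substring.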
| 19.25
| 47
| 0.605195
|
3491d64f19b951d9158137c64a61f17e3ff616ef
| 5,178
|
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/panos_set.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/panos_set.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/panos_set.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2018, Jasper Mackenzie <jasper.mackenzie@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: panos_set
short_description: Execute arbitrary commands on a PAN-OS device using XPath and element
description:
- Run an arbitrary 'xapi' command taking an XPath (i.e. get) or XPath and element (i.e. set).
- See https://github.com/kevinsteves/pan-python/blob/master/doc/pan.xapi.rst for details
- Runs a 'set' command by default
- This should support _all_ commands that your PAN-OS device accepts via its CLI
- CLI commands are found as follows
- Once logged in issue 'debug cli on'
- Enter configuration mode by issuing 'configure'
- Enter your set (or other) command, for example 'set deviceconfig system timezone Australia/Melbourne'
- returns
- >
"<request cmd="set"
obj="/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system"
cookie=XXXX><timezone>Australia/Melbourne</timezone></request>
- The 'xpath' is "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system"
- The 'element' is "<timezone>Australia/Melbourne</timezone>"
author: "Jasper Mackenzie (@spmp)"
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: 2.0.0 # was Ansible 2.12
why: Consolidating code base.
requirements:
- pan-python
options:
ip_address:
description:
- IP address or host FQDN of the target PAN-OS NVA
required: true
username:
description:
- User name for a user with admin rights on the PAN-OS NVA
default: admin
password:
description:
- Password for the given 'username'
required: true
command:
description:
- Xapi method name which supports 'xpath' or 'xpath' and 'element'
choices:
- set
- edit
- delete
- get
- show
- override
default: set
xpath:
description:
- The 'xpath' for the commands configurable
required: true
element:
description:
- The 'element' for the 'xpath' if required
extends_documentation_fragment:
- community.network.panos
'''
EXAMPLES = '''
- name: Set timezone on PA NVA
community.network.panos_set:
ip_address: "192.168.1.1"
username: "my-random-admin"
password: "admin1234"
xpath: "/config/devices/entry/deviceconfig/system"
element: "<timezone>Australia/Melbourne</timezone>"
- name: Commit configuration
panos_commit:
ip_address: "192.168.1.1"
username: "my-random-admin"
password: "admin1234"
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
command=dict(default='set', choices=['set', 'edit', 'delete', 'get', 'show', 'override']),
xpath=dict(required=True),
element=dict(default=None)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xpath = module.params['xpath']
element = module.params['element']
xcommand = module.params['command']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password,
timeout=60
)
if element is None:
# Issue command with no `element`
try:
getattr(xapi, xcommand)(xpath=xpath)
except Exception as e:
raise Exception("Failed to run '%s' with xpath: '%s' with the following error: %s" %
(xcommand, xpath, e))
else:
# Issue command with `element`
try:
getattr(xapi, xcommand)(xpath=xpath, element=element)
except Exception as e:
raise Exception("Failed to run '%s' with xpath: '%s' and element '%s' with the following error: %s" %
(xcommand, xpath, element, e))
module.exit_json(
status="success"
)
if __name__ == '__main__':
main()
| 31.005988
| 113
| 0.675164
|
54b1c995455bd815ff9ed53723e6a62841ba2571
| 2,831
|
py
|
Python
|
code/prepare_for_lamda.py
|
liwenran/OpenCausal
|
0522b838562d65fdf3bf3ccdb6b7a986c7c675bb
|
[
"MIT"
] | 2
|
2020-10-12T08:55:53.000Z
|
2022-02-04T01:08:15.000Z
|
code/prepare_for_lamda.py
|
liwenran/OpenCausal
|
0522b838562d65fdf3bf3ccdb6b7a986c7c675bb
|
[
"MIT"
] | 1
|
2021-10-06T02:50:24.000Z
|
2021-10-06T02:50:24.000Z
|
code/prepare_for_lamda.py
|
liwenran/OpenCausal
|
0522b838562d65fdf3bf3ccdb6b7a986c7c675bb
|
[
"MIT"
] | 3
|
2020-05-09T02:43:52.000Z
|
2021-07-17T23:33:27.000Z
|
import os,sys,gzip
import numpy as np
def tissue_index_info():
donorsID = open('GTEx_WGS_635Ind_PASSQC_donorID.txt').readline().strip().split('\t')[9:]
file = open('donors_for_lamda.txt')
samplesID = {}
for line in file:
line = line.strip().split('\t')
samplesID[line[0]]=line[3]
file.close()
index = []
selected_donors_emum = []
num = 0
for ID in donorsID:
if ID in samplesID:
index.append(num)
selected_donors_emum.append(samplesID[ID])
num += 1
return index, selected_donors_emum
def vars_line_info(filename):
file = open(filename+'.lamda_donor_info')
vars_nline = {}
count = 0
for line in file:
line = line.strip().split('\t')
count += 1
vars_nline['\t'.join(line[:8])] = str(count)
file.close()
return vars_nline
def extract_vars_for_each_donor(filename):
file = open(filename+'.lamda_donor_info')
donor_vars_list = [[] for i in range(len(selected_donors_emum))]
for line in file:
line = line.strip().split('\t')
i = 0
for t in line[8:]:
if t!='0':
donor_vars_list[i].append(line[:8])
i += 1
return donor_vars_list
def get_index_from_vardict(filename):
donor_vars_list = extract_vars_for_each_donor(filename)
vars_nline = vars_line_info(filename)
donor_vars_nline = [{} for i in range(len(selected_donors_emum))]
for i in range(len(selected_donors_emum)):
file = open('var_binding_count.donor'+selected_donors_emum[i])
count = 0
nline = 0
print len(donor_vars_list[i])
for line in file:
line = line.strip().split('\t')
nline += 1
if line[:8] in donor_vars_list[i]:
count += 1
donor_vars_nline[i]['\t'.join(line[:8])] = str(nline)
if count==len(donor_vars_list[i]):
break
print i
fout = open('temp/'+filename+'.lamda_donorvar_index.donor'+selected_donors_emum[i],'w')
for var in donor_vars_nline[i]:
fout.write(var+'\t'+donor_vars_nline[i][var]+'\t'+vars_nline[var]+'\n')
fout.close()
return
def match_var_to_RE(filename):
file = open('RE_index_alldonors/'+filename+'.RE_index')
RE_count_dict = {}
count = 0
for line in file:
count += 1
line = line.strip().split('\t')
RE_count_dict['\t'.join(line[:3])] = str(count)
file.close()
file = open(filename+'.lamda_donor_info')
fout = open(filename+'.match_RE_var', 'w')
for line in file:
line = line.strip().split('\t')
fout.write('\t'.join(line[:8])+'\t'+RE_count_dict['\t'.join(line[:3])]+'\n')
file.close()
fout.close()
return
##
index,selected_donors_emum = tissue_index_info()
filelist = os.listdir('riskloci/') #given the filenames of riskloci
NUM = 635 #total number of donors
for i in range(NUM):
filename = filelist[i]
index,selected_donors_emum = tissue_index_info()
get_index_from_vardict(filename)
match_var_to_RE(filename)
print 'Done'
| 25.972477
| 92
| 0.671141
|
a8d6b2c5b63b6edaf946a68410789ea5a8695425
| 4,680
|
py
|
Python
|
lastfmtimeline/TimelineData.py
|
D3r3k23/LastFmTimeline
|
3843539c2787d5e85d8ac4509783e05bf19b9787
|
[
"MIT"
] | 2
|
2020-12-17T07:10:54.000Z
|
2022-02-10T04:42:21.000Z
|
lastfmtimeline/TimelineData.py
|
D3r3k23/LastFmTimeline
|
3843539c2787d5e85d8ac4509783e05bf19b9787
|
[
"MIT"
] | null | null | null |
lastfmtimeline/TimelineData.py
|
D3r3k23/LastFmTimeline
|
3843539c2787d5e85d8ac4509783e05bf19b9787
|
[
"MIT"
] | null | null | null |
"""
TimelineData.data:
{
item1: {
chart1: playcount
chart2: playcount
...
}
item2: {
chart1: playcount
chart2: playcount
...
}
...
}
TimelineData.ranking:
{
item1: {
chart1: rank
chart2: rank
...
}
item2: {
chart1: rank
chart2: rank
...
}
...
}
"""
from collections import namedtuple
import lastfmget
from Util import *
class TimelineData:
"""
Contains the data needed by LastFmTimeline to create a timeline
"""
def __init__(self, username, itemtype, numitems):
self.username = username
self.numitems = numitems
self.itemtype = itemtype
self.items = get_items(self.username, self.itemtype, self.numitems)
self.charts = get_target_charts(self.username)
self.init_data()
def init_data(self):
"""
Initialize self.data to 0
"""
self.data = {}
for item in self.items:
self.data[item] = { chart: 0 for chart in self.charts }
def load(self, mode):
"""
For each of the user's top $numitems items ($itemtype), store cumulative scrobbles
for each chart since the user's first scrobble.
"""
print('-' * 40)  # 40-column progress ruler
prevchart = None
for i, chart in enumerate(self.charts):
if prevchart is not None:
# Copy each item's playount from the previous chart
for item in self.data.values():
item[chart] = item[prevchart]
# Add new playcounts from weekly chart
weeklychart = get_user_weekly_item_chart(self.itemtype, self.username, chart.start, chart.end)
for item in weeklychart['chart']:
itemname = item['name']
if itemname in self.data:
self.data[itemname][chart] += item['playcount']
prevchart = chart
if i % (len(self.charts) / 40) == 0:
print(',', end='')
print()
print('-' * 40)  # 40-column progress ruler
if (mode is Mode.Rank):
self.convert_to_rank()
def convert_to_rank(self):
RankItem = namedtuple('RankItem', ['name', 'playcount'])
rankdata = { item: {} for item in self.data.keys() }
for chart in self.charts:
ranking = [ RankItem(itemname, itemdata[chart]) for itemname, itemdata in self.data.items() ]  # (name, playcount) order matches the RankItem fields
ranking.sort(key=lambda item: item.name)
ranking.sort(key=lambda item: item.playcount, reverse=True)
for rank, item in enumerate(ranking, 1):
rankdata[item.name][chart] = rank
self.data = rankdata
def get(self):
return self.data
def dump(self, fn):
dump_pickle(fn, self.data)
def print(self, fn):
dump_yaml(self.data, fn)
def get_items(username, itemtype, numitems):
"""
Returns a list of items from the user's profile
"""
if itemtype is Item.Artists:
topartists = lastfmget.user_top_artists(username, numitems)
return [ artist['name'] for artist in topartists ]
if itemtype is Item.Albums:
topalbums = lastfmget.user_top_albums(username, numitems)
return [ f"{album['artist']} - {album['name']}" for album in topalbums ]
if itemtype is Item.Tracks:
toptracks = lastfmget.user_top_tracks(username, numitems)
return [ f"{track['artist']} - {track['name']}" for track in toptracks ]
def get_user_top_items(itemtype, username, *params): # params: limit, page
method = getattr(lastfmget, f'user_top_{itemtype}') # Ex. Item.Artists -> lastfmget.user_top_artists
topitems = method(username, *params)
return topitems
def get_target_charts(username):
"""
Returns list of charts available in the user's history
"""
firstpage = lastfmget.user_recent_tracks_raw(username, page=1)['recenttracks']
totalpages = int(firstpage['@attr']['totalPages'])
lastpage = lastfmget.user_recent_tracks_raw(username, page=totalpages)['recenttracks']
firstscrobble = int(lastpage['track'][-1]['date']['uts'])
charts = []
for chart in lastfmget.user_weekly_chart_list(username):
start = int(chart['start']) # int() -- Remove
end = int(chart['end']) # int() -- Remove
if end >= firstscrobble:
charts.append(Chart(start, end))
return charts
def get_user_weekly_item_chart(itemtype, username, *params): # params: start, end
method = getattr(lastfmget, f'user_weekly_{str(itemtype).rstrip("s")}_chart') # Ex. Item.Artists -> lastfmget.user_weekly_artist_chart
weeklyitemchart = method(username, *params)
return weeklyitemchart
| 30
| 138
| 0.609402
|
75351f257dca0c084875a82177fbde1c75efe557
| 4,214
|
py
|
Python
|
Tests/test_binary.py
|
jugmac00/Pyjion
|
62038c73f1d622099ced04aeddbc5bc11c2756df
|
[
"MIT"
] | null | null | null |
Tests/test_binary.py
|
jugmac00/Pyjion
|
62038c73f1d622099ced04aeddbc5bc11c2756df
|
[
"MIT"
] | null | null | null |
Tests/test_binary.py
|
jugmac00/Pyjion
|
62038c73f1d622099ced04aeddbc5bc11c2756df
|
[
"MIT"
] | null | null | null |
import gc
import sys
import unittest
import pyjion
class BinaryOperationTestCase(unittest.TestCase):
def setUp(self) -> None:
pyjion.enable()
pyjion.disable_pgc()
def tearDown(self) -> None:
pyjion.disable()
gc.collect()
def test_addition(self):
a = 987654
b = 123456
c = 192837
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a + b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), before_ref_c - 2)
self.assertEqual(c, 1111110)
def test_subtraction(self):
a = 987654
b = 123456
c = 192837
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a - b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), before_ref_c - 2)
self.assertEqual(c, 864198)
def test_multiplication(self):
a = 987
b = 1001
c = 1234321
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a * b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), before_ref_c - 1)
self.assertEqual(c, 987987)
def test_division(self):
a = 12341234
b = 10001
c = 98789
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a / b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), before_ref_c - 1)
self.assertEqual(c, 1234)
def test_floor_division(self):
a = 7777777
b = 55555
c = 10040
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a // b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(c, 140)
def test_power(self):
a = 0.5
b = -8
c = 8401
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a ** b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), before_ref_c - 1)
self.assertEqual(c, 256)
def test_or(self):
a = 1999
b = 2999
c = 1234
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a | b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), 2)
self.assertEqual(c, 4095)
def test_and(self):
a = 1999
b = 2999
c = 1234
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a & b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(sys.getrefcount(c), 2)
self.assertEqual(c, 903)
class CPythonComparison(unittest.TestCase):
def tearDown(self) -> None:
gc.collect()
def test_floor_division(self):
a = 7777777
b = 55555
c = 10040
before_ref_a = sys.getrefcount(a)
before_ref_b = sys.getrefcount(b)
before_ref_c = sys.getrefcount(c)
c = a // b
self.assertEqual(sys.getrefcount(a), before_ref_a)
self.assertEqual(sys.getrefcount(b), before_ref_b)
self.assertEqual(c, 140)
| 30.536232
| 62
| 0.607024
|
fdae204697aab3cb1feae4dedcb14abc9d6c966c
| 744
|
py
|
Python
|
test/fs/integration/ide_write/test.py
|
paulyc/IncludeOS
|
5c82bad4a22838bc2219fbadef57d94f006b4760
|
[
"Apache-2.0"
] | null | null | null |
test/fs/integration/ide_write/test.py
|
paulyc/IncludeOS
|
5c82bad4a22838bc2219fbadef57d94f006b4760
|
[
"Apache-2.0"
] | null | null | null |
test/fs/integration/ide_write/test.py
|
paulyc/IncludeOS
|
5c82bad4a22838bc2219fbadef57d94f006b4760
|
[
"Apache-2.0"
] | 1
|
2021-06-16T22:48:53.000Z
|
2021-06-16T22:48:53.000Z
|
#! /usr/bin/env python
import sys
import os
import subprocess
import subprocess32
thread_timeout = 30
includeos_src = os.environ.get('INCLUDEOS_SRC',
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src)
from vmrunner import vmrunner
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
def cleanup():
# Call the cleanup script - let python do the printing to get it synced
print subprocess.check_output(["./fat32_disk.sh", "clean"])
# Setup disk
subprocess32.call(["./fat32_disk.sh"], shell=True, timeout = thread_timeout)
# Clean up on exit
vm.on_exit(cleanup)
# Boot the VM
vm.cmake().boot(thread_timeout).clean()
| 24.8
| 120
| 0.708333
|
d355d34f90650cdbafc8c377a24c9e99fed0d6c7
| 1,733
|
py
|
Python
|
radar.py
|
Jean1dev/FlightRadar
|
5652ebb7edb245be1d2607faa64632e05b457700
|
[
"MIT"
] | null | null | null |
radar.py
|
Jean1dev/FlightRadar
|
5652ebb7edb245be1d2607faa64632e05b457700
|
[
"MIT"
] | null | null | null |
radar.py
|
Jean1dev/FlightRadar
|
5652ebb7edb245be1d2607faa64632e05b457700
|
[
"MIT"
] | null | null | null |
from Tkinter import *
import math
#master = Tk()
class radar(Frame):
def __init__(self,master,size=200,dot='blue',**kw):
Frame.__init__(self,master,width=size,height=size)
self.master = master
self.configure(**kw)
self._c = Canvas(self,width=self['width'],height=self['height'],bg="black")
self._c.bind("<Button-1>",self.kill)
self.rad = 5
self.size = size
#self._point = self.drawcircle(size/2,size/2,self.rad)
self.drawradar()
self._c.grid()
def kill(self,event):
self.master.destroy()
def drawcircle(self,x,y,rad):
return self._c.create_oval(x-rad,y-rad,x+rad,y+rad,width=0,fill='blue')
def draw_trace(self,angle,dist):
dx = dist * math.sin(math.radians(angle))
dy = dist * math.cos(math.radians(angle))
return self.draw_spoke(dx,dy)
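# Illustrative: draw_trace(90, 100) gives dx = 100*sin(90 deg) = 100.0 and
# dy = 100*cos(90 deg) ~= 0.0, i.e. a spoke drawn horizontally from the centre.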
def draw_spoke(self,x,y):
centre_x = self.size/2
centre_y = self.size/2
return self._c.create_line(centre_x,centre_y,centre_x+x,centre_y+y,fill="green")
def drawradar(self):
x = self.size/2
y = self.size/2
maxrad = self.size/2
rad = 2
self._c.create_oval(x-rad,y-rad,x+rad,y+rad,width=1,outline="green")
rad = maxrad / 4
self._c.create_oval(x-rad,y-rad,x+rad,y+rad,width=1,outline="green")
rad = maxrad / 2
self._c.create_oval(x-rad,y-rad,x+rad,y+rad,width=1,outline="green")
rad = (maxrad / 4)*3
self._c.create_oval(x-rad,y-rad,x+rad,y+rad,width=1,outline="green")
def main():
root = Tk()
xy = radar(root,300)
xy.grid(row=0)
xy.draw_spoke(50,-10)
xy.draw_trace(90,100)
line = xy.draw_trace(200,100)
root.mainloop()
if __name__ == '__main__':
main()
| 27.951613
| 83
| 0.619158
|
19b40411b5504c722cc3a9c7da4ae06ac582bcd5
| 2,806
|
py
|
Python
|
deprecated/gui/src/widgets/util/navigation.py
|
magnusoy/Sparkie
|
428b716a50cd0c274670971ee007e82571a04a80
|
[
"MIT"
] | 15
|
2020-03-07T12:25:50.000Z
|
2021-11-27T03:13:47.000Z
|
gui/src/widgets/util/navigation.py
|
magnusoy/Sparkie
|
428b716a50cd0c274670971ee007e82571a04a80
|
[
"MIT"
] | null | null | null |
gui/src/widgets/util/navigation.py
|
magnusoy/Sparkie
|
428b716a50cd0c274670971ee007e82571a04a80
|
[
"MIT"
] | 9
|
2020-04-08T04:37:34.000Z
|
2021-11-27T03:13:50.000Z
|
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module ...
__author__ = "Magnus Kvendseth Øye"
__copyright__ = "Copyright 2019, Sparkie Quadruped Robot"
__credits__ = ["Magnus Kvendseth Øye", "Petter Drønnen", "Vegard Solheim"]
__version__ = "1.0.0"
__license__ = "MIT"
__maintainer__ = "Magnus Kvendseth Øye"
__email__ = "magnus.oye@gmail.com"
__status__ = "Development"
"""
# Importing package
from os import path
class Waypoint(object):
def __init__(self, x=0, z=0, action=0):
if type(x) is str:
self.x = float(x)
self.z = float(z)
else:
self.x = x
self.z = z
self.action = action
def __repr__(self):
return f'{self.x},{self.z},{self.action}'
class Path(object):
def __init__(self):
self.waypoints = []
self.index = 0
def __repr__(self):
return f'Size: {len(self.waypoints)} | Waypoints: {self.waypoints}'
def increment(self, n=1):
self.index += n
def decrement(self, n=1):
self.index -= n
def add_waypoint(self, waypoint):
self.waypoints.append(waypoint)
self.increment()
def remove_waypoint(self, index):
try:
self.waypoints.pop(index)
self.decrement()
return True;
except IndexError:
return False;
def clear_waypoints(self):
self.waypoints.clear()
def copy_waypoints(self):
return self.waypoints.copy()
def reverse_waypoints(self):
self.waypoints.reverse()
def get_current_waypoint(self):
return self.waypoints[self.index]
def get_previous_waypoint(self):
if self.index > 0:
return self.waypoints[self.index-1]
else:
return None
def get_next_waypoint(self):
if self.index < len(self.waypoints):
return self.waypoints[self.index+1]
else:
return None
def save_path(self, filename, overwrite=False):
with open(filename, "w") as f:
for waypoint in self.waypoints:
f.write(f'{waypoint}\n')
def load_path(self, filename):
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
x, z, action = line.strip().split(',')
self.waypoints.append(Waypoint(x, z, action))
if __name__ == "__main__":
path = Path()
path.add_waypoint(Waypoint(0, 0))
path.add_waypoint(Waypoint(10, 10))
path.add_waypoint(Waypoint(30, 12))
path.save_path("path.txt")
new_path = Path()
new_path.load_path("path.txt")
new_path.add_waypoint(Waypoint(20, 10))
#print(new_path)
print(new_path.get_current_waypoint())
| 25.053571
| 75
| 0.575196
|
ac64af3e2eb707ed4debd35bc8501994dcfb7b58
| 15,977
|
py
|
Python
|
tests/test_auth.py
|
uwcirg/true_nth_usa_portal
|
e2434731aed86f1c43f15d428dde8ffc28ac7e5f
|
[
"BSD-3-Clause"
] | 3
|
2017-01-15T10:11:57.000Z
|
2018-10-02T23:46:44.000Z
|
tests/test_auth.py
|
uwcirg/true_nth_usa_portal
|
e2434731aed86f1c43f15d428dde8ffc28ac7e5f
|
[
"BSD-3-Clause"
] | 876
|
2016-04-04T20:45:11.000Z
|
2019-02-28T00:10:36.000Z
|
tests/test_auth.py
|
uwcirg/truenth-portal
|
459a0d157982f010175c50b9cccd860a61790370
|
[
"BSD-3-Clause"
] | 9
|
2016-04-13T01:18:55.000Z
|
2018-09-19T20:44:23.000Z
|
"""Unit test module for auth"""
from collections import namedtuple
import datetime
from flask import url_for
from flask_webtest import SessionScope
import pytest
from werkzeug.exceptions import Unauthorized
from portal.extensions import db
from portal.models.auth import AuthProvider, Token, create_service_token
from portal.models.client import Client, validate_origin
from portal.models.intervention import INTERVENTION
from portal.models.role import ROLE
from portal.models.user import (
RoleError,
User,
UserRelationship,
add_role,
add_user,
)
from tests import OAUTH_INFO_PROVIDER_LOGIN, TEST_USER_ID
@pytest.fixture
def test_auth_user(add_user):
# Create a user
email = 'localuser@test.com'
password = 'Password1'
user = add_user(
username='username',
email=email,
password=password
)
return user
def test_nouser_logout(client, initialized_db):
"""Confirm logout works without a valid user"""
response = client.get('/logout')
assert 302 == response.status_code
def test_local_user_add(client):
"""Add a local user via flask_user forms"""
data = {
'password': 'one2Three',
'retype_password': 'one2Three',
'email': 'otu@example.com'}
response = client.post('/user/register', data=data)
assert response.status_code == 302
new_user = User.query.filter_by(username=data['email']).first()
assert new_user.active
def test_local_login_valid_username_and_password(test_auth_user, local_login):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Attempt to login with valid creds
response = local_login(test_auth_user.email, 'Password1')
# Validate login was successful
assert response.status_code == 200
assert test_auth_user.password_verification_failures == 0
def test_local_login_failure_increments_lockout(test_auth_user, local_login):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Attempt to login with an invalid password
response = local_login(test_auth_user.email, 'invalidpassword')
# Verify there was a password failure
db.session.refresh(test_auth_user)
assert test_auth_user.password_verification_failures == 1
def test_local_login_valid_username_and_password_resets_lockout(
test_auth_user, local_login):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Mock a failed password attempt
test_auth_user.add_password_verification_failure()
assert test_auth_user.password_verification_failures == 1
# Attempt to login with valid creds
response = local_login(test_auth_user.email, 'Password1')
# Verify lockout was reset
db.session.refresh(test_auth_user)
assert test_auth_user.password_verification_failures == 0
def test_local_login_lockout_after_unsuccessful_attempts(
test_auth_user, local_login):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Use up all of the permitted login attempts
attempts = test_auth_user.failed_login_attempts_before_lockout - 1
for failureIndex in range(0, attempts):
response = local_login(test_auth_user.email, 'invalidpassword')
assert response.status_code == 200
db.session.refresh(test_auth_user)
assert test_auth_user.password_verification_failures == (
failureIndex + 1)
assert not test_auth_user.is_locked_out
# Validate that after using up all permitted attempts
# the next is locked out
response = local_login(test_auth_user.email, 'invalidpassword')
db.session.refresh(test_auth_user)
assert test_auth_user.is_locked_out
def test_local_login_verify_lockout_resets_after_lockout_period(
test_auth_user):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Lock the user out
attempts = test_auth_user.failed_login_attempts_before_lockout
for failureIndex in range(0, attempts):
test_auth_user.add_password_verification_failure()
# Verify the user is locked out
assert test_auth_user.is_locked_out
# Move time to the end of the lockout period
test_auth_user.last_password_verification_failure = \
datetime.datetime.utcnow() - test_auth_user.lockout_period_timedelta
# Verify we are no longer locked out
assert not test_auth_user.is_locked_out
def test_local_login_verify_cant_login_when_locked_out(
test_auth_user, local_login):
test_auth_user = db.session.merge(test_auth_user)
"""login through the login form"""
# Lock the user out
attempts = test_auth_user.failed_login_attempts_before_lockout
for failureIndex in range(0, attempts):
test_auth_user.add_password_verification_failure()
assert test_auth_user.is_locked_out
# Attempt to login with valid creds
response = local_login(test_auth_user.email, 'Password1')
# Verify the user is still locked out
assert test_auth_user.is_locked_out
def test_register_now(
app, promote_user, login, assert_redirects, client, test_user):
"""Initiate process to register exiting account"""
app.config['NO_CHALLENGE_WO_DATA'] = False
# added to avoid detached instance error
test_user = db.session.merge(test_user)
test_user.password = None
test_user.birthdate = '1998-01-31'
promote_user(role_name=ROLE.ACCESS_ON_VERIFY.value)
user = db.session.merge(test_user)
email = user.email
login()
response = client.get('/api/user/register-now')
assert_redirects(response, url_for('user.register', email=email))
def test_client_add(promote_user, login, client):
"""Test adding a client application"""
origins = "https://test.com https://two.com"
promote_user(role_name=ROLE.APPLICATION_DEVELOPER.value)
login()
response = client.post('/client', data=dict(
application_origins=origins))
assert 302 == response.status_code
client = Client.query.filter_by(user_id=TEST_USER_ID).first()
assert client.application_origins == origins
def test_client_bad_add(
promote_user, login, client, initialized_db):
"""Test adding a bad client application"""
promote_user(role_name=ROLE.APPLICATION_DEVELOPER.value)
login()
response = client.post(
'/client',
data=dict(application_origins="bad data in")).get_data(
as_text=True)
assert "Invalid URL" in response
def test_client_edit(client, test_user_login, test_client):
"""Test editing a client application"""
test_url = 'http://tryme.com'
origins = "{} {}".format(test_client.application_origins, test_url)
response = client.post(
'/client/{0}'.format(test_client.client_id),
data=dict(
callback_url=test_url, application_origins=origins,
application_role=INTERVENTION.DEFAULT.name))
assert 302 == response.status_code
test_client = Client.query.get('test_client')
assert test_client.callback_url == test_url
invalid_url = "http://invalid.org"
response2 = client.post(
'/client/{0}'.format(test_client.client_id),
data=dict(
callback_url=invalid_url, application_origins=origins,
application_role=INTERVENTION.DEFAULT.name))
# 200 response, because page is reloaded with validation errors
assert 200 == response2.status_code
error_text = 'URL host must match a provided Application Origin URL'
assert error_text in response2.get_data(as_text=True)
test_client = Client.query.get('test_client')
assert test_client.callback_url != invalid_url
def test_callback_validation(client, test_user_login, test_client):
"""Confirm only valid urls can be set"""
response = client.post(
'/client/{0}'.format(test_client.client_id),
data=dict(
callback_url='badprotocol.com',
application_origins=test_client.application_origins))
assert 200 == response.status_code
test_client = Client.query.get('test_client')
assert test_client.callback_url is None
def test_service_account_creation(test_client):
"""Confirm we can create a service account and token"""
test_user = User.query.get(TEST_USER_ID)
service_user = test_user.add_service_account()
with SessionScope(db):
db.session.add(service_user)
db.session.add(test_client)
db.session.commit()
service_user = db.session.merge(service_user)
test_client = db.session.merge(test_client)
# Did we get a service account with the correct roles and relationships
assert len(service_user.roles) == 1
assert 'service' == service_user.roles[0].name
sponsorship = UserRelationship.query.filter_by(
other_user_id=service_user.id).first()
assert sponsorship.user_id == TEST_USER_ID
assert sponsorship.relationship.name == 'sponsor'
# Can we get a usable Bearer Token
create_service_token(client=test_client, user=service_user)
token = Token.query.filter_by(user_id=service_user.id).first()
assert token
# The token should have a very long life
assert (token.expires > datetime.datetime.utcnow()
+ datetime.timedelta(days=364))
def test_service_account_promotion(test_client):
"""Confirm we can not promote a service account """
test_user = User.query.get(TEST_USER_ID)
service_user = test_user.add_service_account()
with SessionScope(db):
db.session.add(service_user)
db.session.commit()
service_user = db.session.merge(service_user)
# try to promote - which should fail
assert pytest.raises(RoleError, add_role, service_user,
ROLE.APPLICATION_DEVELOPER.value)
assert len(service_user.roles) == 1
def test_token_status(client, test_user):
with SessionScope(db):
test_client = Client(
client_id='test-id', client_secret='test-secret',
user_id=TEST_USER_ID)
token = Token(
access_token='test-token',
client=test_client,
user_id=TEST_USER_ID,
token_type='bearer',
expires=(datetime.datetime.utcnow() +
datetime.timedelta(seconds=30)))
db.session.add(test_client)
db.session.add(token)
db.session.commit()
token = db.session.merge(token)
response = client.get(
"/oauth/token-status",
headers={'Authorization': 'Bearer {}'.format(token.access_token)})
assert 200 == response.status_code
data = response.json
assert pytest.approx(30, 5) == data['expires_in']
def test_token_status_wo_header(client):
"""Call for token_status w/o token should return 401"""
response = client.get("/oauth/token-status")
assert 401 == response.status_code
def test_origin_validation(app, test_client):
client_url = test_client._redirect_uris
local_url = "http://{}/home?test".format(
app.config.get('SERVER_NAME'))
invalid_url = 'http://invalid.org'
assert validate_origin(client_url)
assert validate_origin(local_url)
assert pytest.raises(Unauthorized, validate_origin, invalid_url)
def test_origin_validation_origin_not_in_whitelist(app):
valid_origin = 'www.domain.com'
app.config['CORS_WHITELIST'] = [valid_origin]
invalid_origin = 'www.invaliddomain.com'
url = 'http://{}/'.format(invalid_origin)
assert pytest.raises(Unauthorized, validate_origin, url)
def test_origin_validation_origin_in_whitelist(app):
valid_origin = 'www.domain.com'
app.config['CORS_WHITELIST'] = [valid_origin]
url = 'http://{}/'.format(valid_origin)
assert validate_origin(url)
def test_oauth_with_new_auth_provider_and_new_user(login):
# Login using the test backdoor
response = login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN)
# Verify a new user was created
user = User.query.filter_by(
email=OAUTH_INFO_PROVIDER_LOGIN['email']
).first()
assert user
# Verify a new auth provider was created
assert AuthProvider.query.filter_by(
provider=OAUTH_INFO_PROVIDER_LOGIN['provider_name'],
provider_id=OAUTH_INFO_PROVIDER_LOGIN['provider_id'],
user_id=user.id,
).first()
def test_oauth_with_new_auth_provider_and_new_user_unicode_name(login):
# Set a unicode name
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
oauth_info['last_name'] = 'Bugn\xed'
# Login using the test backdoor
response = login(oauth_info=oauth_info)
# Verify a new user was created
user = User.query.filter_by(
last_name=oauth_info['last_name']
).first()
assert user
# Verify a new auth provider was created
assert AuthProvider.query.filter_by(
provider=OAUTH_INFO_PROVIDER_LOGIN['provider_name'],
provider_id=OAUTH_INFO_PROVIDER_LOGIN['provider_id'],
user_id=user.id,
).first()
pass
def test_oauth_with_new_auth_provider_and_existing_user(login):
# Create the user
user = add_user_from_oauth_info(OAUTH_INFO_PROVIDER_LOGIN)
# Login through the test backdoor
response = login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN)
# Verify the response returned successfully
assert response.status_code == 200
# Verify a new auth provider was created
assert AuthProvider.query.filter_by(
provider=OAUTH_INFO_PROVIDER_LOGIN['provider_name'],
provider_id=OAUTH_INFO_PROVIDER_LOGIN['provider_id'],
user_id=user.id,
).first()
def test_oauth_with_existing_auth_provider_and_existing_user(login):
# Create the user
user = add_user_from_oauth_info(OAUTH_INFO_PROVIDER_LOGIN)
# Create the auth provider
add_auth_provider(OAUTH_INFO_PROVIDER_LOGIN, user)
# Login through the test backdoor
response = login(oauth_info=OAUTH_INFO_PROVIDER_LOGIN)
# Verify the response returned successfully
assert response.status_code == 200
def test_oauth_when_mock_provider_fails_to_get_user_json(login):
# Make the mock provider fail to get user json
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
oauth_info['fail_to_get_user_json'] = True
# Attempt to login through the test backdoor
response = login(oauth_info=oauth_info)
# Verify 500
assert response.status_code == 500
def test_oauth_when_non_required_value_undefined(login):
# Make the mock provider fail to get user json
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
del oauth_info['birthdate']
# Attempt to login through the test backdoor
response = login(oauth_info=oauth_info)
# Verify the response returned successfully
assert response.status_code == 200
def test_oauth_when_required_value_undefined(login):
# Make the mock provider fail to get user json
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
del oauth_info['provider_id']
# Attempt to login through the test backdoor
response = login(oauth_info=oauth_info)
# Verify 500
assert response.status_code == 500
def test_oauth_with_invalid_token(login, assert_redirects):
# Set an invalid token
oauth_info = dict(OAUTH_INFO_PROVIDER_LOGIN)
oauth_info.pop('token', None)
# Attempt to login through the test backdoor
response = login(oauth_info=oauth_info, follow_redirects=False)
# Verify force reload
assert_redirects(response, oauth_info['next'])
def add_user_from_oauth_info(oauth_info):
user_to_add = namedtuple('Mock', oauth_info.keys())(*oauth_info.values())
user = add_user(user_to_add)
db.session.commit()
return user
def add_auth_provider(oauth_info, user):
auth_provider = AuthProvider(
provider=oauth_info['provider_name'],
provider_id=oauth_info['provider_id'],
user=user,
)
db.session.add(auth_provider)
db.session.commit()
return auth_provider
| 33.078675
| 78
| 0.724229
|
0749ac94162bc8d4bd64c79998b547d6c474de6e
| 1,994
|
py
|
Python
|
lite/tests/unittest_py/op/common/test_elementwise_mul_op_base.py
|
laiou/Paddle-Lite
|
a99080a48186ec7df546d77d39db58d84d1dda3e
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/op/common/test_elementwise_mul_op_base.py
|
laiou/Paddle-Lite
|
a99080a48186ec7df546d77d39db58d84d1dda3e
|
[
"Apache-2.0"
] | null | null | null |
lite/tests/unittest_py/op/common/test_elementwise_mul_op_base.py
|
laiou/Paddle-Lite
|
a99080a48186ec7df546d77d39db58d84d1dda3e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
import hypothesis.strategies as st
def sample_program_configs(draw):
input_data_x_shape = draw(st.lists(st.integers(min_value=1, max_value=8), min_size = 1, max_size = 8))
axis = draw(st.integers(min_value=-1, max_value=(len(input_data_x_shape) - 1)))
input_data_y_shape = input_data_x_shape[axis:]
def gen_input_data_x():
return np.random.randint(1, 3, size=(input_data_x_shape)).astype(np.int64)
def gen_input_data_y():
return np.random.randint(1, 3, size=(input_data_y_shape)).astype(np.int64)
elementwise_mul_op = OpConfig(
type = "elementwise_mul",
inputs = {"X" : ["input_data_x"],
"Y": ["input_data_y"]},
outputs = {"Out": ["output_data"]},
attrs = {"axis" : axis})
program_config = ProgramConfig(
ops=[elementwise_mul_op],
weights={},
inputs={
"input_data_x": TensorConfig(data_gen=gen_input_data_x),
"input_data_y": TensorConfig(data_gen=gen_input_data_y)
},
outputs=["output_data"])
return program_config
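# Worked example of the shapes drawn above (illustrative only): if the sampled
# X shape is [2, 3, 4] and axis is drawn as 1, then Y gets the trailing shape
# [3, 4], so elementwise_mul broadcasts Y along X's leading dimension; with
# axis == 0 the two shapes coincide and no broadcasting is involved.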
| 38.346154
| 125
| 0.706118
|
b97e86f7d4d2ecfc050be3707bfed4b37d296bbc
| 14,504
|
py
|
Python
|
automol/tests/test_instab.py
|
sjklipp/automol
|
ba87f4443ebe2ceb5929d4269c4be93fd28f68ca
|
[
"Apache-2.0"
] | null | null | null |
automol/tests/test_instab.py
|
sjklipp/automol
|
ba87f4443ebe2ceb5929d4269c4be93fd28f68ca
|
[
"Apache-2.0"
] | null | null | null |
automol/tests/test_instab.py
|
sjklipp/automol
|
ba87f4443ebe2ceb5929d4269c4be93fd28f68ca
|
[
"Apache-2.0"
] | 7
|
2019-12-18T20:11:06.000Z
|
2020-10-14T08:54:16.000Z
|
"""
test automol.rotor
"""
import automol
# CH2OOH
ZMA1 = (
('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.638561158117497, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0966833415410435, 1.9181232230723193, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0974908160702483, 1.9169003262790676, 2.132511227259572)),
('O', (1, 0, 2), ('R4', 'A4', 'D4'),
(2.4914580601748972, 1.8299443073232646, 1.797247663068249)),
('H', (4, 1, 0), ('R5', 'A5', 'D5'),
(1.8737560578471502, 1.8329273093157175, 1.8571985995692384)))
PROD_ZMAS1 = (
# OH
(('O', (None, None, None), (None, None, None),
(None, None, None)),
('H', (0, None, None), ('R1', None, None),
(1.847788877632985, None, None))),
# CH2O
(('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.3142184670910955, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.081910294028233, 2.1334159256991865, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0819105104503435, 2.1334159293636286, 3.141592653589793)))
)
BAD_PROD_ZMAS = (
# O
(('O', (None, None, None), (None, None, None),
(None, None, None)),),
# CH2OH
(('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.5598206437904127, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0350667091575714, 2.008340705389423, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0327423747809856, 2.0493848092212823, 3.141588744001008)),
('H', (1, 0, 2), ('R4', 'A4', 'D4'),
(1.840826770401138, 1.9676117503333632, 3.141594966632109)))
)
INSTAB_ZRXN_STR = """
reaction class: beta scission
forward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
forward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
2-5: {order: 0.9, stereo_parity: null}
5-6: {order: 1, stereo_parity: null}
reactants keys:
- [1, 2, 3, 4, 5, 6]
backward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
backward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
2-5: {order: 0.1, stereo_parity: null}
5-6: {order: 1, stereo_parity: null}
products keys:
- [1, 2, 3, 4]
- [5, 6]
"""
INSTAB_ZRXN_ZMA = (
('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.6086764535623344, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0966833415410435, 1.9070806550803825, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0974908160702483, 1.8832236285512667, 2.104638003820193)),
('O', (1, 0, 2), ('R4', 'A4', 'D4'),
(2.7476437083330785, 1.7824634600747093, 1.8136801951838313)),
('H', (4, 1, 0), ('R5', 'A5', 'D5'),
(1.8447154354308446, 1.7658112135783564, 1.8571985995692388)))
# CH2ONO2
ZMA2 = (
('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.602130817471561, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0967335093840496, 1.9102333367742663, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0963463040538937, 1.9113187477402638, 2.0944804672275423)),
('N', (1, 0, 2), ('R4', 'A4', 'D4'),
(2.4982729475739274, 2.1411283443347364, 2.1041435884947064)),
('O', (4, 1, 0), ('R5', 'A5', 'D5'),
(2.2538855452609683, 2.0850904561190813, 3.1497888417306883)),
('O', (4, 1, 5), ('R6', 'A6', 'D6'),
(2.2586314858549517, 2.1117278699480875, 3.1466204524158154)))
PROD_ZMAS2 = (
# NO2
(('O', (None, None, None), (None, None, None),
(None, None, None)),
('N', (0, None, None), ('R1', None, None),
(2.6908226786788956, None, None)),
('O', (1, 0, None), ('R2', 'A2', None),
(2.690824057320017, 1.8901908487803016, None))),
# CH2O
(('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.314218121713856, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.081910657025832, 2.133415907619935, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.081910076584056, 2.133416208019412, 3.141594388471524)))
)
# CH3OO
ZMA3 = (
('C', (None, None, None), (None, None, None),
(None, None, None)),
('O', (0, None, None), ('R1', None, None),
(2.599568669917405, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0720614763737584, 2.0034137221826973, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.1033532434977693, 1.96846586552775, 2.1568237825269154)),
('H', (0, 1, 2), ('R4', 'A4', 'D4'),
(2.102894293957403, 1.8826666760922017, 4.18922576686547)),
('O', (1, 0, 2), ('R5', 'A5', 'D5'),
(2.4633037728539113, 1.9322884438952062, 5.722447660200159)))
# STE
STE_ZMA = (
('C', (None, None, None), (None, None, None),
(None, None, None)),
('C', (0, None, None), ('R1', None, None),
(2.879329461537935, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.0685853010041964, 1.936203621625536, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.0681998385698437, 1.9437991078452497, 2.1080659988667785)),
('H', (0, 1, 2), ('R4', 'A4', 'D4'),
(2.068569230988053, 1.9221145780688327, 4.196383631606691)),
('C', (1, 0, 2), ('R5', 'A5', 'D5'),
(2.8951853965121304, 1.9564542588074854, 5.252745506231529)),
('H', (1, 0, 5), ('R6', 'A6', 'D6'),
(2.0727280210037247, 1.8998644928532522, 2.130145952014642)),
('H', (1, 0, 5), ('R7', 'A7', 'D7'),
(2.0740453471105984, 1.8851505787673934, 4.161901847872003)),
('C', (5, 1, 0), ('R8', 'A8', 'D8'),
(2.815645112515456, 1.92909277095781, 3.1002400398684897)),
('O', (5, 1, 8), ('R9', 'A9', 'D9'),
(2.710554414591679, 1.8963614225749115, 2.0628190070391694)),
('H', (5, 1, 8), ('R10', 'A10', 'D10'),
(2.0738054178739627, 1.9061743903687773, 4.152460126257847)),
('O', (8, 5, 1), ('R11', 'A11', 'D11'),
(2.57957319683146, 2.0630955394242707, 3.9898373226106236)),
('H', (8, 5, 11), ('R12', 'A12', 'D12'),
(2.037337829840982, 2.161046773185505, 3.342493863112154)),
('O', (11, 8, 5), ('R13', 'A13', 'D13'),
(2.7396264771381125, 1.8404255371622773, 3.200314265734847)),
('H', (13, 11, 8), ('R14', 'A14', 'D14'),
(1.8396656471039372, 1.6762185407776191, 4.304713884757464)),
('O', (9, 5, 1), ('R15', 'A15', 'D15'),
(2.759974384617291, 1.8588964031436905, 2.682937554897634)),
('H', (15, 9, 5), ('R16', 'A16', 'D16'),
(1.8427096812519266, 1.6838436964284405, 1.7402981108559878))
)
STE_PROD_ZMAS = (
# OH
(('O', (None, None, None), (None, None, None),
(None, None, None)),
('H', (0, None, None), ('R1', None, None),
(1.8477888298795644, None, None))),
# stereo CHO
(('C', (None, None, None), (None, None, None),
(None, None, None)),
('C', (0, None, None), ('R1', None, None),
(2.874942617600433, None, None)),
('H', (0, 1, None), ('R2', 'A2', None),
(2.068486178501242, 1.9368605648050443, None)),
('H', (0, 1, 2), ('R3', 'A3', 'D3'),
(2.068867675123655, 1.920464980301257, 2.084268267429751)),
('H', (0, 1, 2), ('R4', 'A4', 'D4'),
(2.06670996549009, 1.9490219370816204, 4.169403811281169)),
('C', (1, 0, 2), ('R5', 'A5', 'D5'),
(2.892054198885529, 1.978109466699197, 0.9931015869119763)),
('H', (1, 0, 5), ('R6', 'A6', 'D6'),
(2.0731098941442894, 1.895113111124007, 4.156030631730728)),
('H', (1, 0, 5), ('R7', 'A7', 'D7'),
(2.0735983863560614, 1.9083973887518564, 2.148304203029353)),
('C', (5, 1, 0), ('R8', 'A8', 'D8'),
(2.8781935635948996, 1.9361198955628287, 1.2783798744140946)),
('O', (5, 1, 8), ('R9', 'A9', 'D9'),
(2.6986014630716118, 1.8964799675479287, 2.0837938021682145)),
('H', (5, 1, 8), ('R10', 'A10', 'D10'),
(2.07270883018819, 1.9463407836628417, 4.2023952224973335)),
('O', (8, 5, 1), ('R11', 'A11', 'D11'),
(2.3208847722366746, 2.189986883591254, 2.1757813432302395)),
('H', (8, 5, 11), ('R12', 'A12', 'D12'),
(2.0841000255210202, 2.0077189801794337, 3.0900070546866)),
('O', (9, 5, 1), ('R13', 'A13', 'D13'),
(2.7550592578760584, 1.8564133553322275, 1.5438089821509182)),
('H', (13, 9, 5), ('R14', 'A14', 'D14'),
(1.8433348334954938, 1.6767577456458034, 2.2296868763866917)))
)
STE_INSTAB_ZRXN_STR = """
reaction class: beta scission
forward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: false}
7: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
13: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
14: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
15: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
16: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
17: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
forward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
1-5: {order: 1, stereo_parity: null}
2-6: {order: 1, stereo_parity: null}
2-7: {order: 1, stereo_parity: null}
2-8: {order: 1, stereo_parity: null}
6-9: {order: 1, stereo_parity: null}
6-10: {order: 1, stereo_parity: null}
6-11: {order: 1, stereo_parity: null}
9-12: {order: 1, stereo_parity: null}
9-13: {order: 1, stereo_parity: null}
10-16: {order: 1, stereo_parity: null}
12-14: {order: 0.9, stereo_parity: null}
14-15: {order: 1, stereo_parity: null}
16-17: {order: 1, stereo_parity: null}
reactants keys:
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
backward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: false}
7: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
13: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
14: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
15: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
16: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
17: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
backward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
1-5: {order: 1, stereo_parity: null}
2-6: {order: 1, stereo_parity: null}
2-7: {order: 1, stereo_parity: null}
2-8: {order: 1, stereo_parity: null}
6-9: {order: 1, stereo_parity: null}
6-10: {order: 1, stereo_parity: null}
6-11: {order: 1, stereo_parity: null}
9-12: {order: 1, stereo_parity: null}
9-13: {order: 1, stereo_parity: null}
10-14: {order: 1, stereo_parity: null}
12-16: {order: 0.1, stereo_parity: null}
14-15: {order: 1, stereo_parity: null}
16-17: {order: 1, stereo_parity: null}
products keys:
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
- [16, 17]
"""
def test__prod_zmas():
""" test.automol.reac.instability_product_zmas
"""
instab_zmas1 = automol.reac.instability_product_zmas(ZMA1)
instab_zmas2 = automol.reac.instability_product_zmas(ZMA2)
instab_zmas3 = automol.reac.instability_product_zmas(ZMA3)
for zma, ref_zma in zip(instab_zmas1, PROD_ZMAS1):
assert automol.zmat.almost_equal(zma, ref_zma)
for zma, ref_zma in zip(instab_zmas2, PROD_ZMAS2):
assert automol.zmat.almost_equal(zma, ref_zma)
assert not instab_zmas3
def test__transformation():
""" test automol.zmat
"""
ref_zrxn = automol.reac.from_string(INSTAB_ZRXN_STR)
zrxn, zma = automol.reac.instability_transformation(ZMA1, PROD_ZMAS1)
assert zrxn == ref_zrxn
assert automol.zmat.almost_equal(zma, INSTAB_ZRXN_ZMA)
def test__stereo():
""" test the stereo functions
"""
ref_zrxn = automol.reac.from_string(STE_INSTAB_ZRXN_STR)
zrxn, zma = automol.reac.instability_transformation(STE_ZMA, STE_PROD_ZMAS)
assert zrxn == ref_zrxn
| 41.919075
| 79
| 0.60866
|
dff9a0312ae88b1272174022746fac3ffb46839e
| 6,084
|
py
|
Python
|
wimp/xsec/InelasticCS.py
|
lopez86/WimpSim
|
096cf9f3e3e3d8e4552418a9f4f690593d099160
|
[
"MIT"
] | 2
|
2020-09-25T10:48:24.000Z
|
2020-12-29T19:56:04.000Z
|
wimp/xsec/InelasticCS.py
|
lopez86/WimpSim
|
096cf9f3e3e3d8e4552418a9f4f690593d099160
|
[
"MIT"
] | 1
|
2017-06-12T01:12:15.000Z
|
2017-06-17T06:33:16.000Z
|
wimp/xsec/InelasticCS.py
|
lopez86/PyWIMPs
|
096cf9f3e3e3d8e4552418a9f4f690593d099160
|
[
"MIT"
] | null | null | null |
""" InelasticCrossSection.py
Basic inelastic cross section model: Isotropic in center of mass frame
with a small mass splitting between two dark matter states
"""
__author__ = "Jeremy P. Lopez"
__date__ = "June 2017"
__copyright__ = "(c) 2017, Jeremy P. Lopez"
from . import CrossSection
from .. import units
import numpy as np
class InelasticCS(CrossSection):
""" Cross section for a basic inelastic dark matter model.
This model assumes that there is some small mass splitting
between dark matter WIMPs and a second type of WIMP. For
conservation of energy, there will be a non-zero velocity
threshold for the interaction to happen.
"""
def __init__(self):
"""
Initialize to some default values:
100 GeV masses, 10 keV splitting, 1 cm^2 cross section
"""
super(InelasticCS,self).__init__()
self.deltaM = 10*units.keV
def set_params(self,pars):
""" Set parameters from a dictionary.
Args:
pars: {string}
Parameters:
MassSplitting: Mass difference between the two states
Other CrossSection parameters
"""
super(InelasticCS,self).set_params(pars)
if 'MassSplitting' in pars:
self.deltaM = pars['MassSplitting']
@property
def MxPrime(self):
""" Final state WIMP mass"""
return self.Mx + self.deltaM
@property
def PthresholdCM(self):
""" Momentum threshold in CM frame"""
return np.sqrt(2*self.MuPrime * self.deltaM)
@property
def EthresholdCM(self):
""" WIMP energy threshold in CM frame"""
return self.MuPrime * self.deltaM / self.Mx
@property
def VthresholdCM(self):
""" WIMP velocity threshold in CM frame"""
return self.PthresholdCM / self.Mx
@property
def Pthreshold(self):
""" WIMP momentum threshold in lab frame"""
return self.Mx *np.sqrt(2*self.deltaM/self.Mu)
@property
def Ethreshold(self):
""" WIMP kinetic energy threshold in lab frame"""
return self.Mx * self.deltaM /self.Mu
@property
def Vthreshold(self):
""" WIMP velocity threshold in lab frame"""
return units.speed_of_light \
* np.sqrt(2*self.deltaM/self.Mu)
@property
def Mu(self):
""" Initial state reduced mass"""
return self.Mx*self.Mt/(self.Mx+self.Mt)
@property
def MuPrime(self):
""" Final state reduced mass"""
        return self.MxPrime*self.Mt/(self.MxPrime+self.Mt)
def MinEr(self,Ex):
""" Minimum recoil energy given a WIMP energy
Args:
Ex: WIMP kinetic energy
"""
vprime = self.PfCM(Ex) / self.Mt
u = self.PiCM(Ex)/self.Mt
return 0.5*self.Mt*(vprime*vprime-u*u+2*u*vprime)
def MaxEr(self,Ex):
""" Maximum recoil energy given a WIMP energy
Args:
Ex: WIMP kinetic energy
"""
vprime = self.PfCM(Ex) / self.Mt
u = self.PiCM(Ex)/self.Mt
return 0.5*self.Mt*(vprime*vprime+u*u+2*u*vprime)
def dSdErLab(self,Ex,Er):
""" Single differential cross section with respect to
recoil energy
Args:
Ex: WIMP kinetic energy
Er: WIMP recoil energy
"""
if Ex <= self.Ethreshold: return 0
        minE = self.MinEr(Ex)
        maxE = self.MaxEr(Ex)
        if Er < minE or Er > maxE: return 0
return self.totalxs / (maxE - minE)
def PfCM(self,Ex):
""" Center of mass momentum in final state
Args:
Ex: WIMP kinetic energy
"""
return np.sqrt(2*self.Mx*Ex*self.Mu*self.MuPrime
-2*self.MuPrime*self.deltaM)
def PiCM(self,Ex):
""" Center of mass momentum in initial state
Args:
Ex: WIMP kinetic energy
"""
return self.Mu * np.sqrt(2*self.Mx*Ex)
def ErLab(self,Ex,cosTh):
""" Not implemented at this point. Recoil energy as
function of recoil angle
Args:
Ex: WIMP kinetic energy
cosTh: Recoil cos(theta) in lab frame
"""
print("InelasticCS::ErLab(Ex,cosTh_lab) not yet "
"implemented\n")
return -1
def cosThetaCMFromCosTheta(self,Ex,cosTh):
""" Not implemented at this point. CM recoil angle as
function of recoil angle
Args:
Ex: WIMP kinetic energy
cosTh: Recoil cos(theta) in lab frame
"""
print("InelasticCS::cosThetaCMFromCosTheta "
"not yet implemented\n")
return -2
def cosThetaLab(self,Ex,Er):
""" Lab frame recoil angle as function of recoil energy
Args:
Ex: WIMP kinetic energy
Er: Recoil kinetic energy
"""
cosThCM = self.cosThetaCMFromEr(Ex,Er)
return self.cosThetaFromCosThetaCM(Ex,cosThCM)
def cosThetaCMFromEr(self,Ex,Er):
""" CM recoil angle as function of lab frame recoil energy
Args:
Ex: WIMP kinetic energy
Er: Recoil kinetic energy
"""
vprime = self.PfCM(Ex) / self.Mt
u = self.PiCM(Ex)/self.Mt
return (2*Er/self.Mt - vprime*vprime-u*u)/(2*vprime*u)
def ErFromCosThetaCM(self,Ex,cosTh):
""" Lab frame recoil energy as function of CM recoil angle
Args:
Ex: WIMP kinetic energy
cosTh: Recoil cos(theta) in CM frame
"""
vprime = self.PfCM(Ex) / self.Mt
u = self.PiCM(Ex)/self.Mt
return 0.5*self.Mt*(vprime*vprime+u*u+2*u*vprime*cosTh)
def cosThetaFromCosThetaCM(self,Ex,cosTh):
""" Lab frame recoil angle as function of CM recoil angle
Args:
Ex: WIMP kinetic energy
cosTh: Recoil cos(theta) in CM frame
"""
pf = self.PfCM(Ex)
pi = self.PiCM(Ex)
return (pf*cosTh+pi) / np.sqrt(pf*pf+pi*pi+2*pf*pi*cosTh)
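
# Usage sketch (illustrative, not part of the original module). It assumes the
# CrossSection base class supplies the default masses mentioned in __init__'s
# docstring; only attributes defined or referenced above are used.
#
#     xs = InelasticCS()
#     xs.set_params({'MassSplitting': 50 * units.keV})
#     splitting = xs.deltaM      # mass splitting between the two WIMP states
#     v_min = xs.Vthreshold      # lab-frame velocity threshold for scattering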
| 28.429907
| 70
| 0.58021
|
12cb697a67a23ba7055053727a670cf472a39dc0
| 2,480
|
py
|
Python
|
docs/blueprints/make_blueprints_table.py
|
Niehaus/kytos
|
9e220e7207dfe292dddb20dc7296f53fe467213d
|
[
"MIT"
] | null | null | null |
docs/blueprints/make_blueprints_table.py
|
Niehaus/kytos
|
9e220e7207dfe292dddb20dc7296f53fe467213d
|
[
"MIT"
] | null | null | null |
docs/blueprints/make_blueprints_table.py
|
Niehaus/kytos
|
9e220e7207dfe292dddb20dc7296f53fe467213d
|
[
"MIT"
] | null | null | null |
import re
import os
import glob
def create_table(directory):
# Create the table header and cells
table_header = ''
table_cell = ''
bps_rst = []
bps_titles = []
bps_status = []
max_len_title = -1
max_len_status = -1
max_len_bps = -1
for fp in glob.glob(f'{directory}/EP*.rst'):
split_dir = ''.join(fp.split('./blueprints/'))
bp_rst = ''.join(split_dir.split('.rst'))
bps_rst.append(f" :doc:`{bp_rst}<{bp_rst}/>` ")
if max_len_bps < len(bps_rst[-1]): max_len_bps = len(bps_rst[-1])
with open(fp) as origin_file:
title = ''
status = ''
for line in origin_file:
if re.findall(r':Title:', line):
title = ''.join(line.split(':Title:'))
bps_titles.append(''.join(title.split("\n")))
if max_len_title < len(title): max_len_title = len(title)
if re.findall(r':Status:', line):
status = ''.join(line.split(':Status:'))
bps_status.append(''.join(status.split("\n")))
if max_len_status < len(status): max_len_status = len(status)
break
th_title_len = (max_len_title - len(' Title'))
th_status_len = (max_len_status - len(' Status'))
th_bps_len = (max_len_bps - len(' Blueprint'))
table_header += f"+{'-' * max_len_bps}+{'-' * max_len_title}+{'-' * max_len_status}+\n"
table_header += f"|{' Blueprint'}{' ' * th_bps_len}|{' Title'}{' ' * th_title_len}|{' Status'}{' ' * th_status_len}|\n"
table_header += f"+{'=' * max_len_bps}+{'=' * max_len_title}+{'=' * max_len_status}+\n"
for i in range(len(bps_rst)):
title_space = max_len_title - len(bps_titles[i])
status_space = max_len_status - len(bps_status[i])
bps_space = max_len_bps - len(bps_rst[i])
table_cell += f"|{bps_rst[i]}{' ' * bps_space}|{bps_titles[i]}{' ' * title_space}|{bps_status[i]}{' ' * status_space}|\n"
table_cell += f"+{'-' * max_len_bps}+{'-' * max_len_title}+{'-' * max_len_status}+\n"
return table_header + table_cell
def write_new_index_rst(directory):
blueprints_table = create_table(directory)
with open(f'{directory}/bp_table.rst', 'w') as fp:
fp.write(blueprints_table)
if __name__ == '__main__':
write_new_index_rst('./blueprints')
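
# Illustrative output (hypothetical blueprint files EP001.rst / EP002.rst): the
# table written to bp_table.rst is an RST grid table shaped roughly like
#
#     +-----------------------+------------+----------+
#     | Blueprint             | Title      | Status   |
#     +=======================+============+==========+
#     | :doc:`EP001<EP001/>`  |  My title  |  Draft   |
#     +-----------------------+------------+----------+
#
# with every column padded to the widest link, title and status encountered.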
| 37.014925
| 129
| 0.55
|
37f956f62af149e1c8f42c81d2432237e90dc6bc
| 3,958
|
py
|
Python
|
Scrapy_zzuliacgn/pipelines.py
|
DeSireFire/zzuliacgnSyders
|
0e4d6b9663771d8ddc65598bae58a5b4b8c22e88
|
[
"MIT"
] | 2
|
2019-03-23T16:05:16.000Z
|
2021-04-19T02:14:09.000Z
|
Scrapy_zzuliacgn/pipelines.py
|
DeSireFire/zzuliacgnSyders
|
0e4d6b9663771d8ddc65598bae58a5b4b8c22e88
|
[
"MIT"
] | null | null | null |
Scrapy_zzuliacgn/pipelines.py
|
DeSireFire/zzuliacgnSyders
|
0e4d6b9663771d8ddc65598bae58a5b4b8c22e88
|
[
"MIT"
] | 1
|
2020-10-11T15:33:31.000Z
|
2020-10-11T15:33:31.000Z
|
# -*- coding: utf-8 -*-
import json,pymysql
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class mysqlPipeline(object):
pass
# class mysqlPipeline(object):
# def __init__(self, host, database, user, password, port):
# self.host = host
# self.database = database
# self.user = user
# self.password = password
# self.port = port
#
# @classmethod
# def from_crawler(cls, crawler):
# return cls(
# host=crawler.settings.get('MYSQL_HOST'),
# database=crawler.settings.get('MYSQL_DATABASE'),
# user=crawler.settings.get('MYSQL_USER'),
# password=crawler.settings.get('MYSQL_PASSWORD'),
# port=crawler.settings.get('MYSQL_PORT'),
# )
#
# def open_spider(self,spider):
# self.db = pymysql.connect(self.host,self.user,self.password,self.database,self.port,charset='utf8',)
# self.cursor = self.db.cursor()
#
# def close_spider(self,spider):
# self.db.close()
#
#     def process_item(self,item,spider):
#         # return item
#         # If the spider name contains 'dmhy'
#         if 'dmhy' in spider.name :
#             print('dmhy pipeline engaged, storing item')
#             # print(item)
#             # print(type(item))
#             self.mysql_insert_update(item,'ZA_BT_items')
#         elif spider.name == 'nyaa':
#             print('nyaa pipeline engaged, storing item')
#             self.mysql_insert_IGNORE(item, 'ZA_BT_items')
#         elif spider.name == 'wenku8':
#             print('wenku8 pipeline engaged, storing item')
#             if 'writer' in dict(item).keys():
#                 print('novel master table')
#                 # todo: an upsert may not be the right choice here, to be decided
#                 self.mysql_insert_update(item, 'ZA_Novel_info')
#             else:
#                 print('novel chapter table')
#                 self.mysql_insert_update(item, 'ZA_Novel_detail')
#         else:
#             print('unrecognised spider, nothing to store')
#         return item
#
#     def mysql_insert_update(self,item,tableName):
#         '''
#         Reusable MySQL pipeline helper: update the row if it already exists,
#         otherwise insert a new one.
#         Note: the target table must contain a column with a unique constraint.
#         :param item: the item passed in by the framework
#         :param tableName: name of the table to store into
#         :return:
#         '''
#         data = dict(item)
#         # print(type(data))
#         mykeys = ",".join(data.keys())
#         myvalues = ",".join(['%s'] * len(data))
#         myUpdate = ",".join([" {key} = %s".format(key=key) for key in data])+ ";"
#         sql = "INSERT INTO {table}({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE".format(table=tableName,keys=mykeys,values=myvalues)
#         # sql = "alter table {table} AUTO_INCREMENT=1;INSERT INTO {table}({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE".format(table=tableName,keys=mykeys,values=myvalues)
#         sql += myUpdate
#         try:
#             if self.cursor.execute(sql, tuple(data.values()) * 2):
#                 print("upsert succeeded!")
#             self.db.commit()
#         except Exception as e:
#             print("error while upserting data: %s" % e)
#
#     def mysql_insert_IGNORE(self,item,tableName):
#         '''
#         Reusable MySQL pipeline helper: insert the row only if it does not
#         already exist; rows hitting a unique-key conflict are skipped.
#         Note: the target table must contain a column with a unique constraint.
#         :param item: the item passed in by the framework
#         :param tableName: name of the table to store into
#         :return:
#         '''
#         data = dict(item)
#         # # print(type(data))
#         mykeys = ",".join(data.keys())
#         myvalues = ",".join(['%s'] * len(data))
#         # myUpdate = ",".join([" {key} = %s".format(key=key) for key in data])
#         sql = "INSERT IGNORE INTO {table}({keys}) VALUES ({values})".format(table=tableName, keys=mykeys,values=myvalues)
#         try:
#             if self.cursor.execute(sql, tuple(data.values())):
#                 print("insert-ignore succeeded!")
#             self.db.commit()
#         except Exception as e:
#             print("error while inserting (existing rows ignored): %s" % e)
#             self.db.rollback()
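
# Illustrative example (hypothetical item with fields `title` and `link`): the
# commented-out mysql_insert_update helper above would assemble SQL of the form
#
#     INSERT INTO ZA_BT_items(title,link) VALUES (%s,%s)
#     ON DUPLICATE KEY UPDATE title = %s, link = %s;
#
# and execute it with the item's values passed twice, once for the INSERT
# placeholders and once for the UPDATE assignments.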
| 38.057692
| 178
| 0.549267
|
da8572896cebc29eca9e2fc4afbc56ea07c7a22d
| 1,099
|
py
|
Python
|
mfcauto/event_emitter.py
|
asperities/mfcauto.py
|
275f4e7e09af3c869c8a746f85e0b40f5dfe791a
|
[
"MIT"
] | 2
|
2020-08-16T03:09:50.000Z
|
2021-08-30T00:40:23.000Z
|
mfcauto/event_emitter.py
|
asperities/mfcauto.py
|
275f4e7e09af3c869c8a746f85e0b40f5dfe791a
|
[
"MIT"
] | null | null | null |
mfcauto/event_emitter.py
|
asperities/mfcauto.py
|
275f4e7e09af3c869c8a746f85e0b40f5dfe791a
|
[
"MIT"
] | 11
|
2018-06-10T08:17:20.000Z
|
2021-09-28T05:11:42.000Z
|
class EventEmitter:
"""Rudimentary EventEmitter class that somewhat mimics the NodeJS EventEmitter class"""
def __init__(self):
self.listeners = dict()
def add_listener(self, event, func):
"""Adds func as a listener for event"""
self.listeners.setdefault(event, set()).add(func)
def on(self, event, func):
"""Adds func as a listener for event"""
self.add_listener(event, func)
def remove_listener(self, event, func):
"""Removes func as a listener for event"""
if event in self.listeners and func in self.listeners[event]:
self.listeners[event].remove(func)
def remove_all_listeners(self, event):
"""Removes all listeners from event"""
if event in self.listeners:
del self.listeners[event]
def emit(self, event, *args):
"""Emits event causing all listeners to be called with *args"""
if event in self.listeners:
listener_copy = self.listeners[event].copy()
for func in listener_copy:
func(*args)
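
# Minimal usage sketch (added for illustration; the event name and listener
# below are made up):
if __name__ == "__main__":
    emitter = EventEmitter()

    def greet(name):
        # Called every time the 'greet' event is emitted.
        print("hello,", name)

    emitter.on("greet", greet)
    emitter.emit("greet", "world")        # prints: hello, world
    emitter.remove_listener("greet", greet)
    emitter.emit("greet", "nobody")       # no listeners left, prints nothing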
| 43.96
| 92
| 0.617834
|
9a4de2afdce8e894646a3ab8151827a925a12ec4
| 9,031
|
py
|
Python
|
mapgen.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
mapgen.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
mapgen.py
|
ghlmtz/airline-sim
|
5899e0390aaa5792e0bc6b1673ad2f0b3dd11d1d
|
[
"MIT"
] | null | null | null |
import random
import math
import noise
import json
from multiprocessing import Pool
from enums import *
from util import *
from fileio import *
import ginit as g
from time import process_time
class TileGroup:
def __init__(self):
self.tiles = []
self.towns = []
self.area = 0
class Tile:
def __init__(self,x,y):
self.floodFilled = False
self.country = None
self.isLand = True
self.X = x
self.Y = y
self.town = None
def updateBitmask(self):
self.count = 0
if self.Top.heightType == self.heightType:
self.count += 1
if self.Right.heightType == self.heightType:
self.count += 2
if self.Bottom.heightType == self.heightType:
self.count += 4
if self.Left.heightType == self.heightType:
self.count += 8
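        # Worked example: a GRASS tile whose Top and Left neighbours are also
        # GRASS but whose Right and Bottom neighbours are not ends up with
        # count = 1 (top) + 8 (left) = 9.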
class MapData:
def __init__(self,width,height):
self.data = [ [ 0 for y in range(g.mapy) ] for x in range(g.mapx)]
self.maxValue = -1000
self.minValue = 1000
def getBiomeType(t):
biomeTable = [[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.BOREAL, BiomeType.TEMPERATE, BiomeType.RAINFOREST, BiomeType.RAINFOREST],
[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.BOREAL, BiomeType.SEASONAL, BiomeType.RAINFOREST, BiomeType.RAINFOREST],
[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.BOREAL, BiomeType.WOODLAND, BiomeType.SAVANNA, BiomeType.SAVANNA],
[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.WOODLAND, BiomeType.WOODLAND, BiomeType.SAVANNA, BiomeType.SAVANNA],
[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.GRASSLAND, BiomeType.DESERT, BiomeType.DESERT, BiomeType.DESERT],
[BiomeType.ICE, BiomeType.TUNDRA, BiomeType.GRASSLAND, BiomeType.DESERT, BiomeType.DESERT, BiomeType.DESERT]]
return biomeTable[t.moistureType.value-1][t.heatType.value-1]
def setHeightType(t):
value = t.heightValue
if value < 0.9*g.sea_level:
t.heightType = HType.DEEPWATER
ival = (0,0,128)
elif 0.9*g.sea_level <= value < g.sea_level:
t.heightType = HType.SHALLOW
ival = (25,25,150)
elif g.sea_level <= value < 1.08*g.sea_level:
t.heightType = HType.SAND
ival = (240,240,64)
elif 1.08*g.sea_level <= value < 1.25*g.sea_level:
t.heightType = HType.GRASS
ival = (50,220,20)
elif 1.25*g.sea_level <= value < 1.4*g.sea_level:
t.heightType = HType.FOREST
ival = (16,160,0)
elif 1.4*g.sea_level <= value < 1.55*g.sea_level:
t.heightType = HType.HILLS
ival = (128,128,128)
else:
t.heightType = HType.MOUNTAIN
ival = (255,255,255)
if t.heightType in [HType.DEEPWATER,HType.SHALLOW]:
t.isLand = False
return ival
def setHeatType(t):
heatval = t.heatVal
if heatval < 0.03:
t.heatType = HeatType.COLDEST
elif heatval < 0.18:
t.heatType = HeatType.COLDER
elif heatval < 0.37:
t.heatType = HeatType.COLD
elif heatval < 0.55:
t.heatType = HeatType.WARM
elif heatval < 0.8:
t.heatType = HeatType.WARMER
else:
t.heatType = HeatType.WARMEST
def setMoistureType(t):
moistval = t.moistVal
if moistval < 0.27:
t.moistureType = MoistureType.DRYEST
elif moistval < 0.4:
t.moistureType = MoistureType.DRYER
elif moistval < 0.6:
t.moistureType = MoistureType.DRY
elif moistval < 0.75:
t.moistureType = MoistureType.WET
elif moistval < 0.85:
t.moistureType = MoistureType.WETTER
else:
t.moistureType = MoistureType.WETTEST
def heightNoise(x, y, z,r_offset):
multiplier = 10
return (noise.snoise3(multiplier*(x+r_offset)*0.5,multiplier*(y+r_offset)*0.5,multiplier*(z+r_offset)*0.5,octaves=8, persistence=0.5, lacunarity=2.0))
def heatNoise(x, y, z,r_offset):
multiplier = 10
return (noise.snoise3(multiplier*(x+r_offset)*0.5,multiplier*(y+r_offset)*0.5,multiplier*(z+r_offset)*0.5,octaves=6, persistence=0.5, lacunarity=2.0)+1)/2
def moistureNoise(x, y, z,r_offset):
multiplier = 10
return (noise.snoise3(multiplier*(x+r_offset)*0.5,multiplier*(y+r_offset)*0.5,multiplier*(z+r_offset)*0.5,octaves=6, persistence=0.5, lacunarity=2.0)+1)/2
def heatGradient(y):
return 1 - abs(g.mapy/2 - y)/(g.mapy/2)
def getTop(t):
return g.tiles[t.X][wrap(t.Y-1,g.mapy)]
def getBottom(t):
return g.tiles[t.X][wrap(t.Y+1,g.mapy)]
def getLeft(t):
return g.tiles[wrap(t.X-1,g.mapx)][t.Y]
def getRight(t):
return g.tiles[wrap(t.X+1,g.mapx)][t.Y]
def setMapDataXY(arandom,hrandom,mrandom,xy,yin=-1):
if yin != -1:
x = xy
y = yin
else:
x,y = xy
s = x/g.mapx
t = y/g.mapy
nx = math.cos(s*2*math.pi) / (2*math.pi)
ny = math.sin(s*2*math.pi) / (2*math.pi)
nz = t/(g.mapx/g.mapy)
value = heightNoise(nx,ny,nz,arandom)
heatval = heatNoise(nx,ny,nz,hrandom)
wetval = moistureNoise(nx,ny,nz,mrandom)
return (value,heatval,wetval)
def updateNeighbours():
for x in range(g.mapx):
for y in range(g.mapy):
t = g.tiles[x][y]
t.Top = getTop(t)
t.Bottom = getBottom(t)
t.Left = getLeft(t)
t.Right = getRight(t)
def updateBitmasks():
for x in range(g.mapx):
for y in range(g.mapy):
g.tiles[x][y].updateBitmask()
def setTile(aval,hval,mval,xy,yin=-1):
if yin != -1:
x = xy
y = yin
else:
x,y = xy
t = Tile(x,y)
t.heightValue = aval
setHeightType(t)
heatval = hval*0.9
lat,lon = lat_long((x,y))
coldness = (abs(lat) / 90)**1.1
heat = 1 - (abs(lat) / 90)**1
heatval += heat
heatval -= coldness
if t.heightType == HType.GRASS:
heatval -= 0.1 * aval
elif t.heightType == HType.FOREST:
heatval -= 0.25 * aval
elif t.heightType == HType.HILLS:
heatval -= 0.4 * aval
elif t.heightType == HType.MOUNTAIN:
heatval -= 0.75 * aval
t.heatVal = heatval
setHeatType(t)
moistval = mval
if t.heightType == HType.DEEPWATER:
moistval += 8 * t.heightValue
elif t.heightType == HType.SHALLOW:
moistval += 3 * t.heightValue
elif t.heightType == HType.SAND:
moistval += 0.5 * t.heightValue
elif t.heightType == HType.GRASS:
moistval += 0.25 * t.heightValue
t.moistVal = moistval
setMoistureType(t)
t.biomeType = getBiomeType(t)
return t
def _floodFill(tile,group,stack):
if tile.floodFilled:
return
if not(tile.isLand):
return
tile.floodFilled = True
group.tiles.append((tile.X,tile.Y))
lat,lon = lat_long((tile.X,tile.Y))
group.area += (360/g.mapx*math.cos(math.radians(lat)))**2
    # Push unvisited land neighbours onto the flood-fill stack; water tiles are
    # rejected by the early return above in any case.
    for t in (getTop(tile), getBottom(tile), getLeft(tile), getRight(tile)):
        if not t.floodFilled and t.isLand:
            stack.append(t)
def floodFill():
stack = []
for x in range(g.mapx):
for y in range(g.mapy):
t = g.tiles[x][y]
if t.floodFilled:
continue
if t.isLand:
stack.append(t)
group = TileGroup()
while len(stack) > 0:
_floodFill(stack.pop(),group,stack)
if len(group.tiles) > 0:
g.lands.append(group)
else:
t.floodFilled = True
def prepareTilemap():
global mapData
global heatFractal
global moistureFractal
global r_offset
global heat_random
global moisture_random
r_offset = random.random()*1234
heat_random = random.random()*1234
moisture_random = random.random()*1234
mapData = MapData(g.mapx,g.mapy)
heatFractal = [[0 for _ in range(g.mapy)] for _ in range(g.mapx)]
moistureFractal = [[0 for _ in range(g.mapy)] for _ in range(g.mapx)]
tile_list = []
for x in range(g.mapx):
for y in range(g.mapy):
tile_list.append((r_offset,heat_random,moisture_random,x,y))
with Pool() as p:
squares = p.starmap(setMapDataXY,tile_list)
for N,sq in enumerate(squares):
        x = N // g.mapy
        y = N % g.mapy
mapData.data[x][y] = sq[0]
heatFractal[x][y] = sq[1]
moistureFractal[x][y] = sq[2]
if sq[0] > mapData.maxValue:
mapData.maxValue = sq[0]
if sq[0] < mapData.minValue:
mapData.minValue = sq[0]
timepunch("Initial map data: ")
tile_list = []
for x in range(g.mapx):
for y in range(g.mapy):
hval = (mapData.data[x][y] - mapData.minValue) / (mapData.maxValue - mapData.minValue)
tile_list.append((hval,heatFractal[x][y],moistureFractal[x][y],x,y))
with Pool() as p:
tiles = p.starmap(setTile,tile_list)
for N,tile in enumerate(tiles):
        x = N // g.mapy
        y = N % g.mapy
g.tiles[x][y] = tile
updateNeighbours()
timepunch("Tile stuff: ")
if g.have_savefile:
f = get_tar_data('lands.dat')
json_str = f.decode('utf-8')
json_lands = json.loads(json_str)
for land in json_lands:
our_land = TileGroup()
our_land.tiles = land['tiles']
our_land.area = float(land['area'])
g.lands.append(our_land)
maxmin = json.loads(get_tar_data('map.dat').decode('utf-8'))
mapData.maxValue = maxmin['max']
mapData.minValue = maxmin['min']
#updateBitmasks()
else:
#updateBitmasks()
floodFill()
timepunch("Flood filling: ")
json_lands = []
for land in g.lands:
our_dict = {}
our_dict['tiles'] = land.tiles
our_dict['area'] = land.area
json_lands.append(our_dict)
json_str = json.dumps(json_lands)
json_bytes = json_str.encode('utf-8')
add_to_tarfile((json_bytes,"lands.dat"))
json_str = json.dumps({'max':mapData.maxValue,'min':mapData.minValue})
json_bytes = json_str.encode('utf-8')
add_to_tarfile((json_bytes,"map.dat"))
| 26.176812
| 155
| 0.685971
|
c7e0610f1fe147a583541a20e0c5410cfe81c1c0
| 67
|
py
|
Python
|
pages/themes/basicIOFormatedStrings/examples/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
pages/themes/basicIOFormatedStrings/examples/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
pages/themes/basicIOFormatedStrings/examples/tmp.py
|
WWWCourses/ProgressBG-Python-UniCredit-Slides
|
87539aa2f73738370ac8df865cf3a1adac447391
|
[
"MIT"
] | null | null | null |
print('|Jhoh\'s pub|')
print('|line1\nline2|')
print('|abc\t123|')
| 16.75
| 23
| 0.61194
|
ee19f0e82dcbc510f54843419f121334c5ab2734
| 4,277
|
py
|
Python
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_show_security_compliance_by_option_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_show_security_compliance_by_option_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/keystone_show_security_compliance_by_option_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class KeystoneShowSecurityComplianceByOptionRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domain_id': 'str',
'option': 'str'
}
attribute_map = {
'domain_id': 'domain_id',
'option': 'option'
}
def __init__(self, domain_id=None, option=None):
"""KeystoneShowSecurityComplianceByOptionRequest - a model defined in huaweicloud sdk"""
self._domain_id = None
self._option = None
self.discriminator = None
self.domain_id = domain_id
self.option = option
@property
def domain_id(self):
"""Gets the domain_id of this KeystoneShowSecurityComplianceByOptionRequest.
        ID of the account to be queried. For how to obtain it, see [Obtaining the names and IDs of an account, IAM users, projects, user groups, and agencies](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:return: The domain_id of this KeystoneShowSecurityComplianceByOptionRequest.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this KeystoneShowSecurityComplianceByOptionRequest.
        ID of the account to be queried. For how to obtain it, see [Obtaining the names and IDs of an account, IAM users, projects, user groups, and agencies](https://support.huaweicloud.com/api-iam/iam_17_0002.html).
:param domain_id: The domain_id of this KeystoneShowSecurityComplianceByOptionRequest.
:type: str
"""
self._domain_id = domain_id
@property
def option(self):
"""Gets the option of this KeystoneShowSecurityComplianceByOptionRequest.
        Query condition. The value of this field is either password_regex or password_regex_description. password_regex: the regular expression of the password strength policy; password_regex_description: the description of the password strength policy.
:return: The option of this KeystoneShowSecurityComplianceByOptionRequest.
:rtype: str
"""
return self._option
@option.setter
def option(self, option):
"""Sets the option of this KeystoneShowSecurityComplianceByOptionRequest.
        Query condition. The value of this field is either password_regex or password_regex_description. password_regex: the regular expression of the password strength policy; password_regex_description: the description of the password strength policy.
:param option: The option of this KeystoneShowSecurityComplianceByOptionRequest.
:type: str
"""
self._option = option
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, KeystoneShowSecurityComplianceByOptionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
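
# Usage sketch (illustrative only; the account ID below is made up):
#
#     request = KeystoneShowSecurityComplianceByOptionRequest(
#         domain_id="d78cbac186b744899480f25bd022f468", option="password_regex")
#     request.to_dict()
#     # -> {'domain_id': 'd78cbac186b744899480f25bd022f468',
#     #     'option': 'password_regex'}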
| 30.55
| 131
| 0.610942
|
4de4225bbfda6ae17c3617448b324227f9d9a081
| 165
|
py
|
Python
|
posts/admin.py
|
yyyyyyyan/esquer.dev
|
eb43dc3a8e2db2bba42f3d487c721a69d55ff361
|
[
"MIT"
] | 9
|
2020-03-02T17:31:07.000Z
|
2022-01-03T03:36:02.000Z
|
posts/admin.py
|
yyyyyyyan/esquer.dev
|
eb43dc3a8e2db2bba42f3d487c721a69d55ff361
|
[
"MIT"
] | 6
|
2021-04-03T21:45:57.000Z
|
2022-02-10T08:13:13.000Z
|
posts/admin.py
|
yyyyyyyan/esquer.dev
|
eb43dc3a8e2db2bba42f3d487c721a69d55ff361
|
[
"MIT"
] | 1
|
2020-06-08T10:53:35.000Z
|
2020-06-08T10:53:35.000Z
|
from django.contrib import admin
from .models import Post
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
| 18.333333
| 46
| 0.739394
|
e495b11f078ea1d71d002df6d172c2f7397bb08e
| 15,082
|
py
|
Python
|
pyscf/prop/polarizability/uhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 2
|
2021-06-30T22:33:35.000Z
|
2021-11-22T18:02:36.000Z
|
pyscf/prop/polarizability/uhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/prop/polarizability/uhf.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 2
|
2021-09-16T23:37:42.000Z
|
2021-10-14T23:00:39.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic static and dynamic polarizability and hyper-polarizability tensor
(In testing)
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import ucphf
from pyscf.scf import _response_functions
from pyscf.prop.polarizability import rhf as rhf_polarizability
def dipole(mf):
return mf.dip_moment(mf.mol, mf.make_rdm1())
# Note: polarizability and relevant properties are demanding on basis sets.
# ORCA recommends to use Sadlej basis for these properties.
def polarizability(polobj, with_cphf=True):
from pyscf.prop.nmr import uhf as uhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, mo0a.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, mo0b.conj(), orbob)
s1a = numpy.zeros_like(h1a)
s1b = numpy.zeros_like(h1b)
vind = polobj.gen_vind(mf, mo_coeff, mo_occ)
if with_cphf:
mo1 = ucphf.solve(vind, mo_energy, mo_occ, (h1a,h1b), (s1a,s1b),
polobj.max_cycle_cphf, polobj.conv_tol,
verbose=log)[0]
else:
mo1 = uhf_nmr._solve_mo1_uncoupled(mo_energy, mo_occ, (h1a,h1b),
(s1a,s1b))[0]
e2 = numpy.einsum('xpi,ypi->xy', h1a, mo1[0])
e2+= numpy.einsum('xpi,ypi->xy', h1b, mo1[1])
e2 = -(e2 + e2.T)
if mf.verbose >= logger.INFO:
xx, yy, zz = e2.diagonal()
log.note('Isotropic polarizability %.12g', (xx+yy+zz)/3)
log.note('Polarizability anisotropy %.12g',
(.5 * ((xx-yy)**2 + (yy-zz)**2 + (zz-xx)**2))**.5)
log.debug('Static polarizability tensor\n%s', e2)
return e2
def hyper_polarizability(polobj, with_cphf=True):
from pyscf.prop.nmr import uhf as uhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, mo0a.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, mo0b.conj(), orbob)
s1a = numpy.zeros_like(h1a)
s1b = numpy.zeros_like(h1b)
vind = polobj.gen_vind(mf, mo_coeff, mo_occ)
if with_cphf:
mo1, e1 = ucphf.solve(vind, mo_energy, mo_occ, (h1a,h1b), (s1a,s1b),
polobj.max_cycle_cphf, polobj.conv_tol, verbose=log)
else:
mo1, e1 = uhf_nmr._solve_mo1_uncoupled(mo_energy, mo_occ, (h1a,h1b),
(s1a,s1b))
mo1a = lib.einsum('xqi,pq->xpi', mo1[0], mo0a)
mo1b = lib.einsum('xqi,pq->xpi', mo1[1], mo0b)
dm1a = lib.einsum('xpi,qi->xpq', mo1a, orboa)
dm1b = lib.einsum('xpi,qi->xpq', mo1b, orbob)
dm1a = dm1a + dm1a.transpose(0,2,1)
dm1b = dm1b + dm1b.transpose(0,2,1)
vresp = mf.gen_response(hermi=1)
h1ao = int_r + vresp(numpy.stack((dm1a, dm1b)))
s0 = mf.get_ovlp()
e3 = lib.einsum('xpq,ypi,zqi->xyz', h1ao[0], mo1a, mo1a)
e3 += lib.einsum('xpq,ypi,zqi->xyz', h1ao[1], mo1b, mo1b)
e3 -= lib.einsum('pq,xpi,yqj,zij->xyz', s0, mo1a, mo1a, e1[0])
e3 -= lib.einsum('pq,xpi,yqj,zij->xyz', s0, mo1b, mo1b, e1[1])
e3 = (e3 + e3.transpose(1,2,0) + e3.transpose(2,0,1) +
e3.transpose(0,2,1) + e3.transpose(1,0,2) + e3.transpose(2,1,0))
e3 = -e3
log.debug('Static hyper polarizability tensor\n%s', e3)
return e3
# Solve the frequency-dependent CPHF problem
#   [A-wI,  B  ] [X]   [h1]   [0]
#   [B,    A+wI] [Y] + [h1] = [0]
def ucphf_with_freq(mf, mo_energy, mo_occ, h1, freq=0,
max_cycle=20, tol=1e-9, hermi=False, verbose=logger.WARN):
log = logger.new_logger(verbose=verbose)
t0 = (time.clock(), time.time())
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
mo_ea, mo_eb = mo_energy
# e_ai - freq may produce very small elements which can cause numerical
# issue in krylov solver
LEVEL_SHIF = 0.1
e_ai_a = lib.direct_sum('a-i->ai', mo_ea[viridxa], mo_ea[occidxa]).ravel()
e_ai_b = lib.direct_sum('a-i->ai', mo_eb[viridxb], mo_eb[occidxb]).ravel()
diag = (e_ai_a - freq,
e_ai_b - freq,
e_ai_a + freq,
e_ai_b + freq)
diag[0][diag[0] < LEVEL_SHIF] += LEVEL_SHIF
diag[1][diag[1] < LEVEL_SHIF] += LEVEL_SHIF
diag[2][diag[2] < LEVEL_SHIF] += LEVEL_SHIF
diag[3][diag[3] < LEVEL_SHIF] += LEVEL_SHIF
mo0a, mo0b = mf.mo_coeff
nao, nmoa = mo0a.shape
    nmob = mo0b.shape[1]
orbva = mo0a[:,viridxa]
orbvb = mo0b[:,viridxb]
orboa = mo0a[:,occidxa]
orbob = mo0b[:,occidxb]
nvira = orbva.shape[1]
nvirb = orbvb.shape[1]
nocca = orboa.shape[1]
noccb = orbob.shape[1]
h1a = h1[0].reshape(-1,nvira*nocca)
h1b = h1[1].reshape(-1,nvirb*noccb)
ncomp = h1a.shape[0]
mo1base = numpy.hstack((-h1a/diag[0],
-h1b/diag[1],
-h1a/diag[2],
-h1b/diag[3]))
offsets = numpy.cumsum((nocca*nvira, noccb*nvirb, nocca*nvira))
vresp = mf.gen_response(hermi=0)
def vind(xys):
nz = len(xys)
dm1a = numpy.empty((nz,nao,nao))
dm1b = numpy.empty((nz,nao,nao))
for i in range(nz):
xa, xb, ya, yb = numpy.split(xys[i], offsets)
dmx = reduce(numpy.dot, (orbva, xa.reshape(nvira,nocca) , orboa.T))
dmy = reduce(numpy.dot, (orboa, ya.reshape(nvira,nocca).T, orbva.T))
dm1a[i] = dmx + dmy # AX + BY
dmx = reduce(numpy.dot, (orbvb, xb.reshape(nvirb,noccb) , orbob.T))
dmy = reduce(numpy.dot, (orbob, yb.reshape(nvirb,noccb).T, orbvb.T))
dm1b[i] = dmx + dmy # AX + BY
v1ao = vresp(numpy.stack((dm1a,dm1b)))
v1voa = lib.einsum('xpq,pi,qj->xij', v1ao[0], orbva, orboa).reshape(nz,-1)
v1vob = lib.einsum('xpq,pi,qj->xij', v1ao[1], orbvb, orbob).reshape(nz,-1)
v1ova = lib.einsum('xpq,pi,qj->xji', v1ao[0], orboa, orbva).reshape(nz,-1)
v1ovb = lib.einsum('xpq,pi,qj->xji', v1ao[1], orbob, orbvb).reshape(nz,-1)
for i in range(nz):
xa, xb, ya, yb = numpy.split(xys[i], offsets)
v1voa[i] += (e_ai_a - freq - diag[0]) * xa
v1voa[i] /= diag[0]
v1vob[i] += (e_ai_b - freq - diag[1]) * xb
v1vob[i] /= diag[1]
v1ova[i] += (e_ai_a + freq - diag[2]) * ya
v1ova[i] /= diag[2]
v1ovb[i] += (e_ai_b + freq - diag[3]) * yb
v1ovb[i] /= diag[3]
v = numpy.hstack((v1voa, v1vob, v1ova, v1ovb))
return v
# FIXME: krylov solver is not accurate enough for many freqs. Using tight
# tol and lindep could offer small help. A better linear equation solver
# is needed.
mo1 = lib.krylov(vind, mo1base, tol=tol, max_cycle=max_cycle,
hermi=hermi, lindep=1e-18, verbose=log)
log.timer('krylov solver in CPHF', *t0)
dm1a = numpy.empty((ncomp,nao,nao))
dm1b = numpy.empty((ncomp,nao,nao))
for i in range(ncomp):
xa, xb, ya, yb = numpy.split(mo1[i], offsets)
dmx = reduce(numpy.dot, (orbva, xa.reshape(nvira,nocca) *2, orboa.T))
dmy = reduce(numpy.dot, (orboa, ya.reshape(nvira,nocca).T*2, orbva.T))
dm1a[i] = dmx + dmy
dmx = reduce(numpy.dot, (orbvb, xb.reshape(nvirb,noccb) *2, orbob.T))
dmy = reduce(numpy.dot, (orbob, yb.reshape(nvirb,noccb).T*2, orbvb.T))
dm1b[i] = dmx + dmy
v1ao = vresp(numpy.stack((dm1a,dm1b)))
mo_e1_a = lib.einsum('xpq,pi,qj->xij', v1ao[0], orboa, orboa)
mo_e1_b = lib.einsum('xpq,pi,qj->xij', v1ao[1], orbob, orbob)
mo_e1 = (mo_e1_a, mo_e1_b)
xa, xb, ya, yb = numpy.split(mo1, offsets, axis=1)
mo1 = (xa.reshape(ncomp,nvira,nocca),
xb.reshape(ncomp,nvirb,noccb),
ya.reshape(ncomp,nvira,nocca),
yb.reshape(ncomp,nvirb,noccb))
return mo1, mo_e1
def polarizability_with_freq(polobj, freq=None):
from pyscf.prop.nmr import rhf as rhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, orbva.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, orbvb.conj(), orbob)
mo1 = ucphf_with_freq(mf, mo_energy, mo_occ, (h1a,h1b), freq,
polobj.max_cycle_cphf, polobj.conv_tol,
verbose=log)[0]
# *-1 from the definition of dipole moment.
e2 = -numpy.einsum('xpi,ypi->xy', h1a, mo1[0])
e2 -= numpy.einsum('xpi,ypi->xy', h1b, mo1[1])
e2 -= numpy.einsum('xpi,ypi->xy', h1a, mo1[2])
e2 -= numpy.einsum('xpi,ypi->xy', h1b, mo1[3])
log.debug('Polarizability tensor with freq %s', freq)
log.debug('%s', e2)
return e2
class Polarizability(lib.StreamObject):
def __init__(self, mf):
mol = mf.mol
self.mol = mol
self.verbose = mol.verbose
self.stdout = mol.stdout
self._scf = mf
self.cphf = True
self.max_cycle_cphf = 20
self.conv_tol = 1e-9
self._keys = set(self.__dict__.keys())
def gen_vind(self, mf, mo_coeff, mo_occ):
'''Induced potential'''
vresp = mf.gen_response(hermi=1)
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbob = mo0b[:, occidxb]
nocca = orboa.shape[1]
noccb = orbob.shape[1]
nmoa = mo0a.shape[1]
nmob = mo0b.shape[1]
def vind(mo1):
mo1 = mo1.reshape(-1,nmoa*nocca+nmob*noccb)
mo1a = mo1[:,:nmoa*nocca].reshape(-1,nmoa,nocca)
mo1b = mo1[:,nmoa*nocca:].reshape(-1,nmob,noccb)
dm1a = lib.einsum('xai,pa,qi->xpq', mo1a, mo0a, orboa.conj())
dm1b = lib.einsum('xai,pa,qi->xpq', mo1b, mo0b, orbob.conj())
dm1a = dm1a + dm1a.transpose(0,2,1).conj()
dm1b = dm1b + dm1b.transpose(0,2,1).conj()
v1ao = vresp(numpy.stack((dm1a,dm1b)))
v1a = lib.einsum('xpq,pi,qj->xij', v1ao[0], mo0a.conj(), orboa)
v1b = lib.einsum('xpq,pi,qj->xij', v1ao[1], mo0b.conj(), orbob)
v1mo = numpy.hstack((v1a.reshape(-1,nmoa*nocca),
v1b.reshape(-1,nmob*noccb)))
return v1mo.ravel()
return vind
polarizability = polarizability
polarizability_with_freq = polarizability_with_freq
hyper_polarizability = hyper_polarizability
from pyscf import scf
scf.uhf.UHF.Polarizability = lib.class_as_method(Polarizability)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
# Disagreement between analytical results and finite difference found for
# linear molecule
#mol.atom = '''h , 0. 0. 0.
# F , 0. 0. .917'''
mol.atom='''O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587'''
mol.spin = 2
mol.basis = '631g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
polar = mf.Polarizability().polarizability()
hpol = mf.Polarizability().hyper_polarizability()
print(polar)
mf.verbose = 0
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
ao_dip = mol.intor_symmetric('int1e_r', comp=3)
h1 = mf.get_hcore()
def apply_E(E):
mf.get_hcore = lambda *args, **kwargs: h1 + numpy.einsum('x,xij->ij', E, ao_dip)
mf.run(conv_tol=1e-14)
return mf.dip_moment(mol, mf.make_rdm1(), unit='AU', verbose=0)
e1 = apply_E([ 0.0001, 0, 0])
e2 = apply_E([-0.0001, 0, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0.0001, 0])
e2 = apply_E([0,-0.0001, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0, 0.0001])
e2 = apply_E([0, 0,-0.0001])
print((e1 - e2) / 0.0002)
# Small discrepancy found between analytical derivatives and finite
# differences
print(hpol)
def apply_E(E):
mf.get_hcore = lambda *args, **kwargs: h1 + numpy.einsum('x,xij->ij', E, ao_dip)
mf.run(conv_tol=1e-14)
return Polarizability(mf).polarizability()
e1 = apply_E([ 0.0001, 0, 0])
e2 = apply_E([-0.0001, 0, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0.0001, 0])
e2 = apply_E([0,-0.0001, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0, 0.0001])
e2 = apply_E([0, 0,-0.0001])
print((e1 - e2) / 0.0002)
print(Polarizability(mf).polarizability())
print(Polarizability(mf).polarizability_with_freq(freq= 0.))
print(Polarizability(mf).polarizability_with_freq(freq= 0.1))
print(Polarizability(mf).polarizability_with_freq(freq=-0.1))
| 36.606796
| 88
| 0.59276
|
6c9922ec6e84367876ea19738168bd429db0fb77
| 2,618
|
py
|
Python
|
flexget/plugins/cli/perf_tests.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | null | null | null |
flexget/plugins/cli/perf_tests.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | null | null | null |
flexget/plugins/cli/perf_tests.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import options
from flexget.event import event
from flexget.logger import console
from flexget.manager import Session
log = logging.getLogger('perftests')
TESTS = ['imdb_query']
def cli_perf_test(manager, options):
if options.test_name not in TESTS:
console('Unknown performance test %s' % options.test_name)
return
session = Session()
try:
if options.test_name == 'imdb_query':
imdb_query(session)
finally:
session.close()
def imdb_query(session):
import time
from flexget.plugins.metainfo.imdb_lookup import Movie
from flexget.plugins.cli.performance import log_query_count
from sqlalchemy.sql.expression import select
from progressbar import ProgressBar, Percentage, Bar, ETA
from sqlalchemy.orm import joinedload_all
imdb_urls = []
log.info('Getting imdb_urls ...')
# query so that we avoid loading whole object (maybe cached?)
for _, url in session.execute(select([Movie.id, Movie.url])):
imdb_urls.append(url)
log.info('Got %i urls from database' % len(imdb_urls))
if not imdb_urls:
        log.info('no imdb urls in database, aborting')
return
# commence testing
widgets = ['Benchmarking - ', ETA(), ' ', Percentage(), ' ', Bar(left='[', right=']')]
bar = ProgressBar(widgets=widgets, maxval=len(imdb_urls)).start()
log_query_count('test')
start_time = time.time()
for index, url in enumerate(imdb_urls):
bar.update(index)
# movie = session.query(Movie).filter(Movie.url == url).first()
# movie = session.query(Movie).options(subqueryload(Movie.genres)).filter(Movie.url == url).one()
movie = session.query(Movie). \
options(joinedload_all(Movie.genres, Movie.languages,
Movie.actors, Movie.directors)). \
filter(Movie.url == url).first()
        # access its members so they're loaded
[x.name for x in movie.genres]
[x.name for x in movie.directors]
[x.name for x in movie.actors]
[x.name for x in movie.languages]
log_query_count('test')
took = time.time() - start_time
log.debug('Took %.2f seconds to query %i movies' % (took, len(imdb_urls)))
@event('options.register')
def register_parser_arguments():
perf_parser = options.register_command('perf-test', cli_perf_test)
perf_parser.add_argument('test_name', metavar='<test name>', choices=TESTS)
| 32.725
| 105
| 0.668449
|
24d3f632b5aeb8356d9dfaaca8ceb0bf8dc6197e
| 3,591
|
py
|
Python
|
recognition/vpl/dataset.py
|
qaz734913414/insightface
|
4101fe608ca1d38604a23d53f32314ce8a28fe79
|
[
"MIT"
] | 12,377
|
2017-12-04T02:46:57.000Z
|
2022-03-31T16:48:31.000Z
|
recognition/vpl/dataset.py
|
qaz734913414/insightface
|
4101fe608ca1d38604a23d53f32314ce8a28fe79
|
[
"MIT"
] | 1,851
|
2017-12-05T05:41:23.000Z
|
2022-03-30T13:06:22.000Z
|
recognition/vpl/dataset.py
|
qaz734913414/insightface
|
4101fe608ca1d38604a23d53f32314ce8a28fe79
|
[
"MIT"
] | 4,198
|
2017-12-05T02:57:19.000Z
|
2022-03-30T10:29:37.000Z
|
import numbers
import os
import queue as Queue
import threading
import mxnet as mx
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank,
non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class MXFaceDataset(Dataset):
def __init__(self, root_dir, local_rank):
super(MXFaceDataset, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = root_dir
self.local_rank = local_rank
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
#print('ddd1')
if header.flag > 0:
if len(header.label)==2:
self.header0 = (int(header.label[0]), int(header.label[1]))
self.imgidx = np.array(range(1, int(header.label[0])))
else:
self.imgidx = np.array(list(self.imgrec.keys))
else:
self.imgidx = np.array(list(self.imgrec.keys))
#print('ddd2')
def __getitem__(self, index):
idx = self.imgidx[index]
s = self.imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
sample = mx.image.imdecode(img).asnumpy()
label = torch.tensor(label, dtype=torch.long)
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx)
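# Minimal usage sketch (paths and batch settings are illustrative):
#   dataset = MXFaceDataset(root_dir="/data/faces_emore", local_rank=0)
#   loader = DataLoaderX(local_rank=0, dataset=dataset, batch_size=128,
#                        shuffle=True, num_workers=2, drop_last=True)
#   for img, label in loader:
#       ...  # img is already on cuda:0 and normalized to [-1, 1]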
| 31.5
| 82
| 0.601504
|
a1b323e37910e437217b7d8c64189731e175698a
| 1,579
|
py
|
Python
|
setup.py
|
parth115/APIv3-python-library
|
cba7cb905d8b2206ee51b051eee63bbe3fb68522
|
[
"MIT"
] | null | null | null |
setup.py
|
parth115/APIv3-python-library
|
cba7cb905d8b2206ee51b051eee63bbe3fb68522
|
[
"MIT"
] | null | null | null |
setup.py
|
parth115/APIv3-python-library
|
cba7cb905d8b2206ee51b051eee63bbe3fb68522
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
SendinBlue API
    SendinBlue provides a RESTful API that can be used with any language. With this API, you will be able to: - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed |
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import sys
from setuptools import setup, find_packages
NAME = "sib-api-v3-sdk"
VERSION = "4.0.1"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="SendinBlue API",
author_email="contact@sendinblue.com",
url="",
keywords=["Swagger", "SendinBlue API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True
)
| 40.487179
| 806
| 0.662445
|
a9c5b6f235a1505928c031aceb1974d233d6d52b
| 16,924
|
py
|
Python
|
tensorflow/compiler/tests/depthwise_conv_op_test.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/compiler/tests/depthwise_conv_op_test.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/compiler/tests/depthwise_conv_op_test.py
|
fraudies/tensorflow
|
a42423e302b71893bbd24aa896869941013c07fb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# Reference implementation of depthwise_conv2d
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
data_format=None):
# Reference implementation of depthwise convolution that uses regular
# convolution.
convs = []
in_channels = filter_tensor.shape[2]
# Use a custom implementation of depthwise conv2d using slicing.
for channel in xrange(in_channels):
# Slice the input along channel
if data_format == "NCHW":
input_slice = input_tensor[:, channel:channel+1, :, :]
else:
input_slice = input_tensor[:, :, :, channel:channel+1]
# Slice the filters. Filters are H, W, InC, DepthMultiplier
filter_slice = filter_tensor[:, :, channel:channel+1, :]
# Do conv
convs.append(nn_ops.conv2d(input_slice, filter_slice,
strides, padding,
data_format=data_format,
name="depthwise_slice_%d" % channel))
  # Concat along the channel dimension.
if data_format == "NCHW":
return array_ops.concat(convs, 1)
else:
return array_ops.concat(convs, 3)
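# Note on shapes: for an input with C channels and a depth multiplier M, each
# per-channel conv2d above yields M output channels, so the concatenated
# result has C * M channels in the same order that depthwise_conv2d_native
# produces (input channel k maps to output channels k*M .. k*M + M - 1).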
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
[3, 299, 299, 3], [5, 183, 183, 1]]
filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
[3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3,
8], [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
[4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
[3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
compute_gradient_error() is very expensive. So the configs should be
relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(xla_test.XLATestCase):
  # This is testing that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that the NCHW and NHWC
  # formats agree, by checking that depthwise_conv2d_native with the
  # 'NCHW' format (with transposition) matches the 'NHWC' format using
  # the higher level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=data_type).reshape(filter_in_sizes)
with self.cached_session() as sess:
if data_type == np.float32:
tolerance = 1e-4
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-8
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device("CPU"):
conv_interface = ReferenceDepthwiseConv2D(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print("data_type:", data_type, "max diff = ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type)
def testDepthwiseConv2DFormat(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=np.float32).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=np.float32).reshape(filter_in_sizes)
with self.cached_session() as sess:
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
with self.test_scope():
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv, {t1: x1, t2: x2})
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.cached_session():
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
else:
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval({t1: x1, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding)
def _CompareBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
stride,
padding,
data_format="NHWC"):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.cached_session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
native_t0 = t0
native_t2 = t2
strides = [1, stride, stride, 1]
if use_xla:
if data_format == "NCHW":
            # Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0,
t1,
native_t2,
strides=strides,
padding=padding,
data_format=data_format)
else:
# For CPU, the format NCHW is not supported. Therefore we always use
# NHWC here.
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0, t1, native_t2, strides=strides, padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropFilter(input_size, filter_size, output_size,
stride, padding)
def testDepthwiseConv2DFilterGradFormatNCHWCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradFormatNCHWCompare,", index,
"th config:", input_size, "*", filter_size, "producing output",
output_size, "stride:", stride, "padding:", padding)
self._CompareBackpropFilter(
input_size,
filter_size,
output_size,
stride,
padding,
data_format="NCHW")
if __name__ == "__main__":
test.main()
| 39.821176
| 80
| 0.598026
|
ad988ecf19daa7983d1674c821908aaf4ae2419f
| 937
|
py
|
Python
|
django_mock_rest/admin.py
|
mverleg/django_mock_rest
|
dd6468c1d966353dc2a2fed77b299261ceb76a14
|
[
"BSD-3-Clause"
] | 1
|
2018-09-26T19:33:36.000Z
|
2018-09-26T19:33:36.000Z
|
django_mock_rest/admin.py
|
mverleg/django_mock_rest
|
dd6468c1d966353dc2a2fed77b299261ceb76a14
|
[
"BSD-3-Clause"
] | 12
|
2018-09-25T19:38:37.000Z
|
2018-09-26T19:29:48.000Z
|
django_mock_rest/admin.py
|
mverleg/django_mock_rest
|
dd6468c1d966353dc2a2fed77b299261ceb76a14
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django import forms
from django_mock_rest.models import Endpoint, Response
class ResponseInline(admin.StackedInline):
model = Response
fields = ('weight', 'status', 'data',)
extra = 1
class EndpointAdminForm(forms.ModelForm):
def clean(self):
path = self.cleaned_data['path']
if not path.startswith('/'):
path = '/' + path
if not path.endswith('/'):
path = path + '/'
self.cleaned_data['path'] = path
return self.cleaned_data
class EndpointAdmin(admin.ModelAdmin):
model = Endpoint
fields = ('method', 'path', 'explanation', 'require_authentication',)
# fields = ('method', 'path', 'parameters', 'explanation', 'require_authentication',)
list_display = ('method', 'path_pattern', 'response_count',)
list_display_links = list_display
list_filter = ('method',)
form = EndpointAdminForm
inlines = [
ResponseInline,
]
admin.site.register(Endpoint, EndpointAdmin)
| 24.657895
| 86
| 0.715048
|
98184ca11a82e915a672af7d8462f036fcd1521b
| 2,790
|
py
|
Python
|
recog_with_components/train.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | 2
|
2020-04-12T08:33:50.000Z
|
2020-07-03T09:15:56.000Z
|
recog_with_components/train.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | null | null | null |
recog_with_components/train.py
|
hushukai/Chinese-ancient-book-recognition-HSK
|
de5b6474dc4346524d95b405223c721aae5b500b
|
[
"Apache-2.0"
] | 4
|
2020-07-03T09:15:58.000Z
|
2020-07-17T09:24:08.000Z
|
# -*- coding: utf-8 -*-
# Author: hushukai
import os
import tensorflow as tf
from tensorflow.keras import backend as K
from .model import work_net, compile, add_metrics
from .callback import tf_config, get_callbacks
from .data_pipeline import data_generator
from config import CHAR_IMG_SIZE
from config import CHAR_RECOG_CKPT_DIR, CHAR_RECOG_LOGS_DIR
from config import CHAR_IMAGE_PATHS_FILE, CHAR_TFRECORDS_PATHS_FILE
def train(data_file, src_type, epochs, init_epochs=0, model_struc="densenet_gru", weights_path=""):
tf_config()
K.set_learning_phase(True)
    # Load the model
train_model = work_net(stage="train", img_size=CHAR_IMG_SIZE, model_struc=model_struc)
compile(train_model, loss_names=["char_struc_loss", "sc_char_loss", "lr_compo_loss"])
    # Add metric summaries
metrics_summary = train_model.get_layer('summary_fn').output
add_metrics(train_model,
metric_name_list=["char_struc_acc", "sc_acc", "sc_top3", "sc_top5",
"lr_acc", "lr_top3", "lr_top5",
"correct_lr_acc", "correct_lr_top3", "correct_lr_top5",
"total_acc", "total_top3", "total_top5"],
metric_val_list=metrics_summary)
train_model.summary()
# for layer in train_model.layers:
# print(layer.name, " trainable: ", layer.trainable)
# load model
load_path = os.path.join(CHAR_RECOG_CKPT_DIR, "char_recog_with_compo_" + model_struc + "_{:04d}.h5".format(init_epochs))
weights_path = weights_path if os.path.exists(weights_path) else load_path
if os.path.exists(weights_path):
train_model.load_weights(weights_path, by_name=True)
print("\nLoad model weights from %s\n" % weights_path)
training_generator, validation_generator = data_generator(data_file=data_file, src_type=src_type)
    # Start training
train_model.fit_generator(generator=training_generator,
steps_per_epoch=500,
epochs=epochs + init_epochs,
initial_epoch=init_epochs,
verbose=1,
validation_data=validation_generator,
validation_steps=20,
callbacks=get_callbacks(model_struc),
max_queue_size=100)
    # Save the final model weights
train_model.save_weights(os.path.join(CHAR_RECOG_CKPT_DIR, "char_recog_with_compo_" + model_struc + "_finished.h5"))
def main():
train(data_file=CHAR_TFRECORDS_PATHS_FILE,
src_type="tfrecords",
epochs=50*10,
init_epochs=121,
model_struc="densenet_gru",
weights_path="")
if __name__ == "__main__":
print("Done !")
| 38.219178
| 124
| 0.63871
|
d0dbcf5b116ca8ebbe3a8081cb95667540f76dde
| 2,026
|
py
|
Python
|
single_tech_samples/databricks/sample4_ci_cd/notebook_jobs/tests/main_notebook_sql_test.py
|
swbabberz/modern-data-warehouse-dataops
|
9ea71cfcc1e6df9b6c850c115169817105c6242e
|
[
"MIT"
] | null | null | null |
single_tech_samples/databricks/sample4_ci_cd/notebook_jobs/tests/main_notebook_sql_test.py
|
swbabberz/modern-data-warehouse-dataops
|
9ea71cfcc1e6df9b6c850c115169817105c6242e
|
[
"MIT"
] | null | null | null |
single_tech_samples/databricks/sample4_ci_cd/notebook_jobs/tests/main_notebook_sql_test.py
|
swbabberz/modern-data-warehouse-dataops
|
9ea71cfcc1e6df9b6c850c115169817105c6242e
|
[
"MIT"
] | null | null | null |
# Databricks notebook source
# MAGIC %pip install nutter
# COMMAND ----------
from runtime.nutterfixture import NutterFixture, tag
class Test1Fixture(NutterFixture):
total = 0
first_year = 0
# Arrange All
def before_all(self):
sqlContext.sql('CREATE TABLE US_POPULATION (date STRING, value BIGINT)')
sqlContext.sql('INSERT INTO US_POPULATION VALUES ("1960", 100)')
dbutils.notebook.run('../main_notebook_sql', 600)
# ************** Test Case 1 ********************
# Act
def run_Records_Exist_Returns_Positive_Number(self):
temp_result = sqlContext.sql('SELECT TOTAL FROM POPULATION_COUNT LIMIT 1')
Test1Fixture.total = temp_result.first()[0]
#Assert
def assertion_Records_Exist_Returns_Positive_Number(self):
assert (Test1Fixture.total > 0)
#Clean
def after_Records_Exist_Returns_Positive_Number(self):
sqlContext.sql('DROP TABLE IF EXISTS POPULATION_COUNT;')
# ************** Test Case 2 ********************
# Act
def run_First_Year_Returns_One_Record(self):
temp_result = sqlContext.sql('SELECT COUNT(*) AS TOTAL FROM FIRST_YEAR_POPULATION WHERE YEAR = "1960"')
Test1Fixture.first_year = temp_result.first()[0]
#Assert
def assertion_First_Year_Returns_One_Record(self):
assert (Test1Fixture.first_year > 0)
#Clean
def after_First_Year_Returns_One_Record(self):
sqlContext.sql('DROP TABLE IF EXISTS FIRST_YEAR_POPULATION;')
# ************** Clean All ********************
def after_all(self):
sqlContext.sql('DROP TABLE IF EXISTS US_POPULATION;')
# COMMAND ----------
result = Test1Fixture().execute_tests()
print(result.to_string())
# Comment out the next line (result.exit(dbutils)) to see the test result report from within the notebook
is_job = dbutils.notebook.entry_point.getDbutils().notebook().getContext().currentRunId().isDefined()
if is_job:
result.exit(dbutils)
| 34.338983
| 110
| 0.652024
|
36dcc8358fa00c4c4f9465a1b396aeea445cdfd8
| 1,101
|
py
|
Python
|
web2py-appliances-master/CustomerRelationshipManagement/models/plugin_tagging.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/CustomerRelationshipManagement/models/plugin_tagging.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/CustomerRelationshipManagement/models/plugin_tagging.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
if not 'db' in globals():
raise HTTP(500,"plugin _tagging requires 'db' and 'auth'")
db.define_table('plugin_tagging_tag',
Field('name'),
Field('counter','integer',default=0,writable=False),
Field('created_by',db.auth_user,writable=False,readable=False),
Field('created_on','datetime',default=request.now,writable=False,readable=False))
db.plugin_tagging_tag.created_by.default=(auth.user and auth.user.id) or 0
db.define_table('plugin_tagging_tag_link',
Field('tag',db.plugin_tagging_tag),
Field('table_name'),
Field('record_id','integer'))
db.plugin_tagging_tag.name.requires = IS_NOT_EMPTY()
db.plugin_tagging_tag_link.tag.requires = IS_IN_DB(db,'plugin_tagging_tag.id','%(name)s')
def tag(table_name=None,record_id=0):
"""
You can tag a record of a table by embedding this::
{{=tag('mytable',45)}}
where 'mytable' is a table name and 45 is a record id.
It will display a tagging widget.
"""
return LOAD('plugin_tagging','tag',args=(table_name,record_id),ajax=True)
def tag_cloud():
return LOAD('plugin_tagging','tag_cloud')
| 32.382353
| 89
| 0.713896
|
6488cbed190c4d98404cc8ad370a7ff3a80c83bb
| 5,293
|
py
|
Python
|
ecosante/api/blueprint.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | 3
|
2021-09-24T14:07:51.000Z
|
2021-12-14T13:48:34.000Z
|
ecosante/api/blueprint.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | 187
|
2021-03-25T16:43:49.000Z
|
2022-03-23T14:40:31.000Z
|
ecosante/api/blueprint.py
|
betagouv/recosante-api
|
4560b2cf2ff4dc19597792fe15a3805f6259201d
|
[
"MIT"
] | null | null | null |
from datetime import date
import json
from indice_pollution.history.models.commune import Commune
from indice_pollution.history.models.episode_pollution import EpisodePollution
from sqlalchemy.orm import joinedload
from ecosante.extensions import rebar, db
from .schemas import ResponseSchema, QuerySchema
from indice_pollution import forecast, raep, episodes as get_episodes
from indice_pollution.history.models import PotentielRadon, IndiceATMO, VigilanceMeteo, vigilance_meteo
from ecosante.recommandations.models import Recommandation
from flask.wrappers import Response
from flask import stream_with_context
from flask_rebar import SwaggerV2Generator
registry = rebar.create_handler_registry(
prefix='/v1',
swagger_generator=SwaggerV2Generator(
title="API recosante.beta.gouv.fr",
description='Toutes les données sont diffusées sous la licence <a href="https://opendatacommons.org/licenses/odbl/1-0/">ODbL v1.0</a>'
)
)
def get_advice(advices, type_, **kwargs):
kwargs['types'] = [type_]
kwargs['media'] = 'dashboard'
try:
return next(filter(
lambda r: r.is_relevant(**kwargs),
advices
))
except StopIteration:
return None
@registry.handles(
rule='/',
method='GET',
query_string_schema=QuerySchema(),
response_body_schema=ResponseSchema()
)
def index():
advices = Recommandation.published_query().all()
insee = rebar.validated_args.get('insee')
date_ = rebar.validated_args.get('date')
time_ = rebar.validated_args.get('time')
show_raep = rebar.validated_args.get('show_raep')
commune = Commune.get(insee)
indice_atmo = forecast(insee, date_=date_, use_make_resp=False)
indice_raep = raep(insee, date_=date_) if show_raep else None
potentiel_radon = PotentielRadon.get(insee)
episodes = get_episodes(insee, date_=date_, use_make_resp=False)
vigilance_meteo = VigilanceMeteo.get(insee=insee, date_=date_, time_=time_)
advice_atmo = get_advice(advices, "indice_atmo", qualif=indice_atmo.indice) if indice_atmo and not hasattr(indice_atmo, "error") else None
advice_raep = get_advice(advices, "pollens", raep=int(indice_raep["data"]["total"])) if indice_raep and indice_raep.get('data') else None
advice_radon = get_advice(advices, "radon", potentiel_radon=potentiel_radon.classe_potentiel)
advice_episode = get_advice(advices, "episode_pollution", polluants=[e.lib_pol_normalized for e in EpisodePollution.filter_etat_haut(episodes)])
resp = {
"commune": commune,
"indice_atmo": {
"indice": indice_atmo,
"advice": advice_atmo
},
"potentiel_radon": {
"indice": potentiel_radon,
"advice": advice_radon,
"sources": [{
"label": "Institut de radioprotection et de sûreté nucléaire (IRSN)",
"url": "https://www.irsn.fr/FR/connaissances/Environnement/expertises-radioactivite-naturelle/radon/Pages/5-cartographie-potentiel-radon-commune.aspx#.YUyf32aA6dY"
}],
"validity": {
"area": commune.nom
}
},
"episodes_pollution": {
"indice": episodes,
"advice": advice_episode
},
"vigilance_meteo": {
"indice": {
"details": vigilance_meteo,
},
"sources": [{
"label": "Météo France",
"url": "https://donneespubliques.meteofrance.fr/?fond=produit&id_produit=299&id_rubrique=50"
}],
"validity": {
"area": commune.departement_nom
}
}
}
if show_raep:
resp['raep'] = {
"indice": indice_raep,
"advice": advice_raep,
"sources": [{
"label": "Le Réseau national de surveillance aérobiologique (RNSA)",
"url": "https://www.pollens.fr/"
}]
}
return resp
@registry.handles(
rule='/_batch',
method='GET',
query_string_schema=QuerySchema(),
)
def batch():
date_ = rebar.validated_args.get('date', date.today())
def iter():
indices = IndiceATMO.get_all_query(
date_
).options(joinedload(IndiceATMO.zone)
).yield_per(100)
schema = ResponseSchema()
all_episodes = EpisodePollution.get_all(date_)
yield "["
first = True
for commune_id, indice in indices:
if not first:
yield ","
commune = Commune.get_from_id(commune_id)
indice.region = commune.departement.region
indice.commune = commune
episodes = all_episodes.get(commune.zone_pollution_id)
if episodes:
for e in episodes:
e.commune = commune
value = {
"commune": commune,
"indice_atmo": {
"indice": indice
},
"episodes_pollution": {
"indice": episodes or []
}
}
r = schema.dump(value)
yield json.dumps(r)
first = False
yield ']'
return Response(stream_with_context(iter()))
| 35.763514
| 179
| 0.614396
|
6df860dbf291877fbef0bd5d9cb03ede7caa99c1
| 3,486
|
py
|
Python
|
ClientsManagementSystemDjangoPlugin/ClientsManagementSystem/settings.py
|
CiganOliviu/ClientsManagementSystemDjangoPlugin
|
58b628f8f4eed1ccf86bb4d4150456dad54795b9
|
[
"MIT"
] | 1
|
2021-04-02T16:45:52.000Z
|
2021-04-02T16:45:52.000Z
|
ClientsManagementSystem/ClientsManagementSystem/settings.py
|
CiganOliviu/ClientsManagementSystem
|
6271dd007e549fd0369c4df7c017980b915a91b5
|
[
"MIT"
] | null | null | null |
ClientsManagementSystem/ClientsManagementSystem/settings.py
|
CiganOliviu/ClientsManagementSystem
|
6271dd007e549fd0369c4df7c017980b915a91b5
|
[
"MIT"
] | null | null | null |
"""
Django settings for ClientsManagementSystem project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ClientsManagement',
'ProjectsManagement',
'ProductsManagement',
'Index',
'django.contrib.admin',
'django.contrib.auth',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ClientsManagementSystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ClientsManagementSystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/static/'
LOGIN_REDIRECT_URL = '/client/home'
LOGOUT_REDIRECT_URL = '/accounts/login'
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "sent_emails")
MEDIA_URL = '/MEDIA/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'MEDIA')
| 25.26087
| 91
| 0.703672
|
e17d6ea3cf8d4d0783995651eac52a24758645b3
| 581
|
py
|
Python
|
fermat_v1.py
|
Filipe-Barbos/Teste
|
105831ddf2767ec16f7e9f4f4f11fdcd3d6512b0
|
[
"Apache-2.0"
] | null | null | null |
fermat_v1.py
|
Filipe-Barbos/Teste
|
105831ddf2767ec16f7e9f4f4f11fdcd3d6512b0
|
[
"Apache-2.0"
] | null | null | null |
fermat_v1.py
|
Filipe-Barbos/Teste
|
105831ddf2767ec16f7e9f4f4f11fdcd3d6512b0
|
[
"Apache-2.0"
] | null | null | null |
# Run this script online at
# https://trinket.io/features/python3
a = int(input('Digite um número qualquer: '))
p = int(input('Digite um número primo: '))
result = 0
if (a % p) == 0:
print('Primeira Fórmula')
result = ((a**p) - a) / p
if (((a**p) - a) % p) == 0:
print(result)
print('a e p são congruentes')
else:
print(result)
print('a e p não são congruentes')
else:
print('Segunda Fórmula')
p2 = p -1
result = (((a**p2)-1) / p)
if (((a**p2)-1) % p) == 0:
print(result)
print('a e p são congruentes')
else:
print(result)
print('a e p não são congruentes')
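# Worked examples (Fermat's little theorem: a**p ≡ a (mod p) for prime p, and
# a**(p-1) ≡ 1 (mod p) when p does not divide a):
#   first branch,  p divides a:         a = 10, p = 5 -> (10**5 - 10) / 5 = 19998, remainder 0
#   second branch, p does not divide a: a = 3,  p = 5 -> (3**4 - 1) / 5 = 16, remainder 0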
| 20.75
| 46
| 0.595525
|
00b79303a21e9550b76e676ebaa0ae105f19f0d1
| 2,424
|
py
|
Python
|
src/main/python/infra_buddy/deploy/s3_deploy.py
|
AlienVault-Engineering/infra-buddry
|
841fea588720ca755a60c19cccfdf49adfcb6976
|
[
"Apache-2.0"
] | 3
|
2018-03-31T09:09:40.000Z
|
2021-11-08T10:26:46.000Z
|
src/main/python/infra_buddy/deploy/s3_deploy.py
|
AlienVault-Engineering/infra-buddry
|
841fea588720ca755a60c19cccfdf49adfcb6976
|
[
"Apache-2.0"
] | 2
|
2021-05-21T04:17:02.000Z
|
2021-06-01T18:49:26.000Z
|
src/main/python/infra_buddy/deploy/s3_deploy.py
|
AlienVault-Engineering/infra-buddry
|
841fea588720ca755a60c19cccfdf49adfcb6976
|
[
"Apache-2.0"
] | 3
|
2020-10-12T23:00:58.000Z
|
2021-07-29T19:03:50.000Z
|
import os
import tempfile
from infra_buddy.aws import s3 as s3util
from infra_buddy.aws.cloudformation import CloudFormationBuddy
from infra_buddy.aws.s3 import S3Buddy
from infra_buddy.deploy.deploy import Deploy
from infra_buddy.utility import print_utility
class S3Deploy(Deploy):
def __init__(self, artifact_id, location, ctx):
super(S3Deploy, self).__init__(ctx)
self.location = location
self.artifact_id = artifact_id
self.cloud_formation_buddy = CloudFormationBuddy(self.deploy_ctx)
def _internal_deploy(self, dry_run):
mkdtemp = tempfile.mkdtemp()
if not self.artifact_id.endswith(".zip" ):
self.artifact_id = "{}.zip".format(self.artifact_id)
artifact_download = "s3://{location}/{artifact_id}".format(location=self.location,artifact_id=self.artifact_id)
destination_bucket = self.cloud_formation_buddy.get_export_value(param="WWW-Files")
s3util.download_zip_from_s3_url(artifact_download,destination=mkdtemp)
to_upload = self.get_filepaths(mkdtemp)
if dry_run:
print_utility.banner_warn("Dry Run: Uploading files to - {}".format(destination_bucket),
str(to_upload))
else:
split = destination_bucket.split("/")
if len(split)>1:
path = "/".join(split[1:])
else:
path = ''
s3 = S3Buddy(self.deploy_ctx, path, split[0])
print_utility.progress("S3 Deploy: Uploading files to - {}".format(destination_bucket))
for s3_key, path in to_upload.items():
print_utility.info("{} - {}".format(destination_bucket, s3_key))
s3.upload(key_name=s3_key, file=path)
def get_filepaths(self, local_directory):
rel_paths = {}
for root, dirs, files in os.walk(local_directory):
for filename in files:
# construct the full local path
local_path = os.path.join(root, filename)
                # construct the relative path, used as the S3 key
relative_path = os.path.relpath(local_path, local_directory)
# s3_path = os.path.join(destination, relative_path)
rel_paths[relative_path] = local_path
return rel_paths
def __str__(self):
return "{} - {}:{}".format(self.__class__.__name__,self.location,self.artifact_id)
| 41.793103
| 119
| 0.642739
|
e5f1c382c1ab9bf483a5ce3be285cb31c3009ab5
| 7,187
|
py
|
Python
|
visualize/generate_figures.py
|
dojitza/ddnn
|
adbe5c20f2f6b2c3d875af8b651cce22138928ff
|
[
"Unlicense"
] | 110
|
2017-09-07T03:29:34.000Z
|
2022-03-28T12:41:00.000Z
|
visualize/generate_figures.py
|
dojitza/ddnn
|
adbe5c20f2f6b2c3d875af8b651cce22138928ff
|
[
"Unlicense"
] | 9
|
2017-10-11T12:12:44.000Z
|
2021-04-02T01:40:49.000Z
|
visualize/generate_figures.py
|
dojitza/ddnn
|
adbe5c20f2f6b2c3d875af8b651cce22138928ff
|
[
"Unlicense"
] | 58
|
2017-07-30T12:48:40.000Z
|
2022-03-07T01:18:24.000Z
|
import os
import matplotlib
matplotlib.rcParams['font.size'] = 20.0
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def align_y_axis(ax1, ax2, minresax1, minresax2, ticks):
""" Sets tick marks of twinx axes to line up with 7 total tick marks
ax1 and ax2 are matplotlib axes
Spacing between tick marks will be a factor of minresax1 and minresax2"""
ax1ylims = ax1.get_ybound()
ax2ylims = ax2.get_ybound()
ax1factor = minresax1 * (ticks - 1)
ax2factor = minresax2 * (ticks - 1)
ax1.set_yticks(np.linspace(ax1ylims[0],
ax1ylims[1]+(ax1factor -
(ax1ylims[1]-ax1ylims[0]) % ax1factor) %
ax1factor,
ticks))
ax2.set_yticks(np.linspace(ax2ylims[0],
ax2ylims[1]+(ax2factor -
(ax2ylims[1]-ax2ylims[0]) % ax2factor) %
ax2factor,
ticks))
data_dir = 'data/'
save_dir = 'figures/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
img_type = '.pdf'
linewidth = 4
ms = 8
colors = {'cam': '#FF944D', 'local': '#FF8F80', 'cloud': '#98D1F1', 'overall': '#84618D'}
styles = {'cam': ':o', 'local': '-.o', 'cloud': '--o', 'overall': '-o'}
legend = {'cam': 'Individual', 'local': 'Local', 'cloud': 'Cloud', 'overall': 'Overall'}
names = ['overall', 'cloud', 'local', 'cam']
acc_df = pd.read_csv(data_dir + 'add_cam.csv', delimiter=' ')
mem_df = pd.read_csv(data_dir + 'mem.csv', delimiter=' ')
miss_df = pd.read_csv(data_dir + 'missing_cam.csv', delimiter=' ')
comm_df = pd.read_csv(data_dir + 'comm.csv', delimiter=' ')
ent_df = pd.read_csv(data_dir + 'entropy.csv', delimiter=' ')
# thres_df = comm_df[comm_df['T'] == 0.8]
idxs = np.arange(1, 7)
#Accuracy figure
plt.figure(figsize=(8, 6.5))
for name in names:
plt.plot(idxs, acc_df[name]*100., styles[name], linewidth=linewidth, ms=ms,
color=colors[name], label=legend[name])
plt.xticks(idxs)
plt.xlim(0.5, 6.5)
plt.title('Scaling End Devices')
plt.xlabel('Number of End Devices')
plt.ylabel('Classification Accuracy')
plt.legend(loc=0, prop={'size': 14})
plt.tight_layout()
plt.grid()
plt.savefig(save_dir + 'increasing' + img_type)
plt.clf()
names = ['overall', 'cloud', 'local', 'cam']
#Impact of missing camera
plt.figure(figsize=(8, 6.5))
for name in names:
plt.plot(idxs, miss_df[name]*100., styles[name], linewidth=linewidth, ms=ms,
color=colors[name], label=legend[name])
plt.xticks(idxs)
plt.xlim(0.5, 6.5)
# plt.ylim(90, 100)
plt.title('DDNN Fault Tolerance')
plt.xlabel('Device Failure')
plt.ylabel('Classification Accuracy')
plt.legend(loc=0, prop={'size': 14})
plt.tight_layout()
plt.grid()
plt.savefig(save_dir + 'fault' + img_type)
plt.clf()
colors = {'overall': '#84618D', 'local': '#FF8F80', 'cloud': '#98D1F1'}
styles = {'overall': '-o', 'local': '-.o', 'cloud': '--o'}
legend = {'overall': 'Overall', 'local': 'Local', 'cloud': 'Cloud'}
names = ['overall', 'cloud', 'local']
#Impact of local exit
plt.figure(figsize=(8, 6.5))
for name in names:
plt.plot(comm_df['device_size'], comm_df[name]*100., styles[name], linewidth=linewidth, ms=ms,
color=colors[name], label=legend[name])
# plt.xticks(idxs)
# plt.xlim(0.5, 6.5)
plt.xlabel('Exit')
plt.ylabel('Classification Accuracy')
plt.legend(loc=0, prop={'size': 14})
plt.tight_layout()
plt.grid()
plt.savefig(save_dir + 'local_exit' + img_type)
plt.clf()
#comm cost
# plt.figure(figsize=(8,6.5))
# for name in names:
# idxs = np.argsort(comm_df['comm'])
# plt.plot(comm_df['comm'][idxs], comm_df[name][idxs]*100., styles[name],
# linewidth=linewidth, ms=ms, color=colors[name],
# label=legend[name])
# # plt.xticks(comm_df['filters'].values)
# # plt.xlim(comm_df['filters'].values[0]-0.5, comm_df['filters'].values[-1]+0.5)
# plt.xlabel('Communication (B)')
# plt.ylabel('Classification Accuracy')
# plt.ylim(80, 100)
# plt.xlim(12, 32)
# plt.grid()
# plt.legend(loc='lower right', prop={'size': 14})
# plt.tight_layout()
# plt.savefig(save_dir + 'commvsacc' + img_type)
# plt.clf()
fig, ax1 = plt.subplots(figsize=(8, 6.5))
for name in names:
ax1.plot(comm_df['comm'], comm_df[name]*100., styles[name],
linewidth=linewidth, ms=ms, color=colors[name],
label=legend[name] + ' Acc.')
ax2 = ax1.twinx()
ax2.plot(comm_df['comm'], comm_df['device_size']/1000., 'o--k',
linewidth=linewidth, ms=ms, label='Device Memory')
ax1.set_xlabel('Communication (B)')
ax1.set_ylabel('Classification Accuracy')
ax1.set_xlim(12, 32)
ax1.set_ylim(80, 100)
ax2.set_ylabel('End Device Memory (KB)')
ax2.set_ylim(0, 4)
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
leg = ax1.legend(h1+h2, l1+l2, loc='lower right', prop={'size': 14})
for l in leg.legendHandles:
l._sizes = [6]
align_y_axis(ax1, ax2, 1, 1, 6)
ax1.grid(zorder=0)
leg.set_zorder(102)
# ax2.grid(None)
plt.tight_layout()
plt.savefig(save_dir + 'commvsacc' + img_type)
plt.clf()
#entropy
fig, ax1 = plt.subplots(figsize=(8, 6.5))
name = 'overall'
ax1.plot(ent_df['T'], ent_df[name]*100., styles[name],
linewidth=linewidth, ms=ms, color=colors[name],
label=legend[name] + ' Acc.')
ax2 = ax1.twinx()
ax2.plot(ent_df['T'], ent_df['exit'], 'o--k',
linewidth=linewidth, ms=ms, label='Local Exit (%)')
ax1.set_title('Impact of Exit Threshold')
ax1.set_xlabel(r'Exit Threshold')
ax1.set_yticks([75, 80, 85, 90, 95, 100, 100])
ax2.set_yticks([0, 20, 40, 60, 80, 100])
ax1.set_ylabel('Classification Accuracy')
ax1.set_xlim(0, 1.05)
ax1.set_ylim(73.75, 101.25)
ax2.set_ylabel('Local Exit (%)')
ax2.set_ylim(-5, 105)
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
leg = ax1.legend(h1+h2, l1+l2, loc='upper left', prop={'size': 14})
for l in leg.legendHandles:
l._sizes = [6]
# align_y_axis(ax1, ax2, 1, 1, 8)
# ax2.set_ylim(-5, 100)
ax1.grid(zorder=0)
# ax2.grid(None)
leg.set_zorder(102)
plt.tight_layout()
plt.savefig(save_dir + 'thresholdvsacc' + img_type)
plt.clf()
#exit thres
plt.figure(figsize=(8, 6.5))
plt.plot(ent_df['exit'], ent_df['overall']*100., '-o', color=colors['overall'],
linewidth=linewidth, ms=ms, label='Local Exit (%)')
plt.xlim(-2, 105)
plt.xlabel('Percentage Locally Exited')
plt.ylim(90, 100)
plt.ylabel('Classification Accuracy')
# plt.legend(loc=0)
plt.tight_layout()
plt.grid()
plt.savefig(save_dir + 'exitvsacc' + img_type)
plt.clf()
plt.figure(figsize=(8, 6.5))
plt.plot(mem_df['mem']/1000., mem_df['acc']*100., '-o', color=colors['overall'],
linewidth=linewidth, ms=ms, label='Local Exit (%)')
plt.xlim(0.5, 0.86)
plt.xlabel('End Device Memory (KB)')
plt.ylim(65, 100)
plt.ylabel('Classification Accuracy')
# plt.legend(loc=0)
plt.tight_layout()
plt.grid()
plt.savefig(save_dir + 'memvsacc' + img_type)
plt.clf()
| 32.817352
| 99
| 0.623348
|
8b809e575d515181faebec13d218fb10e7048cd3
| 839
|
py
|
Python
|
stage/forms.py
|
hasithsen/sayit
|
c4de556a4f8cc2f7585b553046285dc6ac0107f4
|
[
"MIT"
] | null | null | null |
stage/forms.py
|
hasithsen/sayit
|
c4de556a4f8cc2f7585b553046285dc6ac0107f4
|
[
"MIT"
] | null | null | null |
stage/forms.py
|
hasithsen/sayit
|
c4de556a4f8cc2f7585b553046285dc6ac0107f4
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm, TextInput, Textarea
from .models import Message
class MessageCreateForm(ModelForm):
class Meta:
model = Message
fields = ['content', 'sender', 'receiver']
# initial = {'content': 'Your say here'}
widgets = {
'sender': TextInput(attrs={'placeholder': 'Enter name of sayer'}),
'receiver': TextInput(attrs={'placeholder': 'Enter name of sayee'}),
'content': Textarea(attrs={'placeholder': 'Enter content to say', 'rows': 5}),
}
labels = {
'sender': 'Said by (optional)',
'receiver': 'Said to (optional)',
'content': 'Message',
}
def __init__(self, *args, **kwargs):
super(MessageCreateForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control shadow'
| 33.56
| 84
| 0.638856
|
890f6db3fc0da3506da6dfad282dafe0ea772c0f
| 73
|
py
|
Python
|
logical_backup/__init__.py
|
ammesonb/logical-backup
|
0d52c5a05c7d3eaa267c79799582e4a607b9c35f
|
[
"MIT"
] | null | null | null |
logical_backup/__init__.py
|
ammesonb/logical-backup
|
0d52c5a05c7d3eaa267c79799582e4a607b9c35f
|
[
"MIT"
] | 36
|
2020-06-06T13:40:19.000Z
|
2021-06-14T14:02:04.000Z
|
logical_backup/__init__.py
|
ammesonb/logical-backup
|
0d52c5a05c7d3eaa267c79799582e4a607b9c35f
|
[
"MIT"
] | null | null | null |
"""
Wrapper for files
"""
__all__ = ["main", "db", "library", "utility"]
| 14.6
| 46
| 0.575342
|
4ecb2803844b9c8091db742a8971b6ac92badd52
| 1,764
|
py
|
Python
|
main/main.py
|
salamander-mh/PyQrCode
|
76749b49402b570d7b8b1cfa2bb947db3cb2fa11
|
[
"MIT"
] | 2
|
2020-04-15T09:07:05.000Z
|
2020-12-30T02:24:54.000Z
|
main/main.py
|
salamander-mh/PyQrCode
|
76749b49402b570d7b8b1cfa2bb947db3cb2fa11
|
[
"MIT"
] | 1
|
2020-04-27T03:06:19.000Z
|
2020-04-27T03:06:19.000Z
|
main/main.py
|
salamander-mh/PyQrCode
|
76749b49402b570d7b8b1cfa2bb947db3cb2fa11
|
[
"MIT"
] | 1
|
2020-05-14T02:17:52.000Z
|
2020-05-14T02:17:52.000Z
|
from pyzbar import pyzbar
import cv2
from typing import Callable
import time
import numpy as np
from fastapi import FastAPI, File, UploadFile
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
app = FastAPI()
# Static files directory
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, exc: RequestValidationError):
return JSONResponse(
status_code=200,
content={"errno": 1, "errmsg": str(exc)},
)
@app.get("/")
def read_root():
htmlContent = ""
with open('index.html', 'r') as file:
htmlContent = file.read()
return HTMLResponse(content=htmlContent, status_code=200)
@app.post("/decode")
def decodeImage(file: UploadFile = File(...)):
res = detectImage(file)
data = []
if len(res) <= 0:
return {"errno": 1, "errmsg": "识别失败"}
else:
for barcode in res:
if barcode.type == 'QRCODE':
data.append(barcode.data.decode("utf-8"))
if len(data) <= 0:
return {"errno": 1, "errmsg": "没有识别到二维码"}
else:
return {"errno": 0, "errmsg": "识别成功", "data": data}
# Decode the image with OpenCV and pyzbar
def detectImage(imageFile):
imageFile.seek(0)
img_array = np.asarray(bytearray(imageFile.file.read()), dtype=np.uint8)
img = cv2.imdecode(img_array, cv2.IMREAD_GRAYSCALE)
barcodes = pyzbar.decode(img)
return barcodes
# Measure a function's execution time
def timeExec(name: str, func: Callable[[], None]) -> None:
time_start = time.time()
func()
time_end = time.time()
print(f"{name} used {time_end - time_start} second(s)")
| 27.138462
| 77
| 0.671202
|
43a66e0d4848430d37cecb21387fa89ddac71ea8
| 1,949
|
py
|
Python
|
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
models/create_message_response.py
|
ajrice6713/bw-messaging-emulator
|
d1be4976e2486ec91b419597afc8411c78ebfda7
|
[
"MIT"
] | null | null | null |
import datetime
import json
import random
import string
from typing import Dict
from sms_counter import SMSCounter
class CreateMessageResponse:
def __init__(self, request):
self.id = self.generate_id()
self.owner = request['from']
self.applicationId = request['applicationId']
self.time = str(datetime.datetime.utcnow().isoformat())
self.segmentCount = 1
self.direction = 'out'
if type(request['to']) is str:
self.to = [request['to']]
else:
self.to = request['to']
self.mfrom = request['from']
if 'media' in request:
self.media = request['media']
if 'text' in request:
self.text = request['text']
if 'tag' in request:
self.tag = request['tag']
if 'priority' in request:
self.priority = request['priority']
def calculate_segments(self, message) -> int:
count = SMSCounter.count(message)
return count['messages']
def generate_id(self) -> str:
pre = random.randint(1400000000000,1799999999999)
return str(pre) + ''.join(random.choice(string.ascii_lowercase) for x in range(16))
def to_json(self) -> str:
dict_response = {
'id': self.id,
'owner': self.owner,
'applicationId': self.applicationId,
'time': self.time,
'direction': self.direction,
'to': self.to,
'from': self.mfrom
}
if hasattr(self, 'media'): dict_response['media'] = self.media
if hasattr(self, 'text'):
dict_response['text'] = self.text
dict_response['segmentCount'] = self.calculate_segments(self.text)
if hasattr(self, 'tag'): dict_response['tag'] = self.tag
if hasattr(self, 'priority'): dict_response['priority'] = self.priority
return json.dumps(dict_response)
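# Illustrative request payload (field values are placeholders):
#   CreateMessageResponse({"from": "+15551230000", "to": "+15554560000",
#                          "applicationId": "app-123", "text": "hello"}).to_json()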
| 30.936508
| 91
| 0.578758
|
2dda361fbb59e09ae5d91c88e2d90f0d5c8b11e6
| 8,052
|
py
|
Python
|
Methods/RL/run_deepaffinity.py
|
wengelearning/Drug-Combo-Generator
|
82f9f9adb22de6e8e8f0a198708a1c277d72c802
|
[
"Apache-2.0"
] | 24
|
2020-03-29T00:06:52.000Z
|
2022-03-14T02:01:29.000Z
|
Methods/RL/run_deepaffinity.py
|
wengelearning/Drug-Combo-Generator
|
82f9f9adb22de6e8e8f0a198708a1c277d72c802
|
[
"Apache-2.0"
] | 4
|
2020-09-26T00:51:42.000Z
|
2022-02-10T01:50:04.000Z
|
Methods/RL/run_deepaffinity.py
|
Shen-Lab/Drug-Combo-Generator
|
82f9f9adb22de6e8e8f0a198708a1c277d72c802
|
[
"Apache-2.0"
] | 10
|
2020-03-29T04:12:52.000Z
|
2021-11-05T02:32:15.000Z
|
#!/usr/bin/env python3
from mpi4py import MPI
from baselines.common import set_global_seeds
from baselines import logger
from tensorboardX import SummaryWriter
import os
import tensorflow as tf
import gym
from gym_molecule.envs.molecule import GraphEnv,get_disease_name_info
def train(args,seed,writer=None):
from baselines.ppo1 import just_oracle, gcn_policy
import baselines.common.tf_util as U
vocab_comp,disease_feat,disease_1hop,disease_1hop_name,disease_gene_list = get_disease_name_info('../../Data/')
rank = MPI.COMM_WORLD.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
args.path = os.getcwd()
print(args.path)
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
set_global_seeds(workerseed)
if args.env=='molecule':
env1 = gym.make('molecule-v0')
env1.init(args.path,vocab_comp,disease_feat,disease_1hop,disease_gene_list,data_type=args.dataset,logp_ratio=args.logp_ratio,qed_ratio=args.qed_ratio,sa_ratio=args.sa_ratio,reward_step_total=args.reward_step_total,is_normalize=args.normalize_adj,reward_type=args.reward_type,reward_target=args.reward_target,has_feature=bool(args.has_feature),is_conditional=True,conditional="d1",max_action=args.max_action,min_action=args.min_action) # remember call this after gym.make!!
elif args.env=='graph':
env1 = GraphEnv()
env1.init(reward_step_total=args.reward_step_total,is_normalize=args.normalize_adj,dataset=args.dataset) # remember call this after gym.make!!
print(env1.observation_space)
def policy_fn(name, ob_space, ac_space,disease_dim):
return gcn_policy.GCNPolicy(name=name, ob_space=ob_space, ac_space=ac_space, disease_dim=disease_dim,atom_type_num=env1.atom_type_num,args=args)
env1.seed(workerseed+1)
if args.env=='molecule':
env2 = gym.make('molecule-v0')
env2.init(args.path,vocab_comp,disease_feat,disease_1hop,disease_gene_list,data_type=args.dataset,logp_ratio=args.logp_ratio,qed_ratio=args.qed_ratio,sa_ratio=args.sa_ratio,reward_step_total=args.reward_step_total,is_normalize=args.normalize_adj,reward_type=args.reward_type,reward_target=args.reward_target,has_feature=bool(args.has_feature),is_conditional=True,conditional="d2",max_action=args.max_action,min_action=args.min_action) # remember call this after gym.make!!
elif args.env=='graph':
env2 = GraphEnv()
env2.init(reward_step_total=args.reward_step_total,is_normalize=args.normalize_adj,dataset=args.dataset) # remember call this after gym.make!!
print(env2.observation_space)
env2.seed(workerseed+2)
just_oracle.deepaffinity(args,env1, env2,policy_fn,
vocab_comp,disease_feat.shape[0],args.disease_id,disease_1hop_name[args.disease_id],
max_timesteps=args.num_steps,
timesteps_per_actorbatch=256,
clip_param=0.2, entcoeff=0.01,
optim_epochs=8, optim_stepsize=args.lr, optim_batchsize=32,
gamma=1, lam=0.95,
schedule='linear', writer=writer
)
env1.close()
env2.close()
def arg_parser():
"""
Create an empty argparse.ArgumentParser.
"""
import argparse
return argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def molecule_arg_parser():
parser = arg_parser()
parser.add_argument('--env', type=str, help='environment name: molecule; graph',
default='molecule')
parser.add_argument('--seed', help='RNG seed', type=int, default=666)
parser.add_argument('--num_steps', type=int, default=int(5e7))
parser.add_argument('--name', type=str, default='test_conditional')
parser.add_argument('--name_load', type=str, default='test_conditional')
# parser.add_argument('--name_load', type=str, default='test')
parser.add_argument('--dataset', type=str, default='zinc',help='caveman; grid; ba; zinc; gdb')
parser.add_argument('--dataset_load', type=str, default='zinc')
parser.add_argument('--reward_type', type=str, default='gan',help='logppen;logp_target;qed;qedsa;qed_target;mw_target;gan')
parser.add_argument('--reward_target', type=float, default=0.5,help='target reward value')
parser.add_argument('--logp_ratio', type=float, default=1)
parser.add_argument('--qed_ratio', type=float, default=1)
parser.add_argument('--sa_ratio', type=float, default=1)
parser.add_argument('--gan_step_ratio', type=float, default=1)
parser.add_argument('--gan_final_ratio', type=float, default=1)
parser.add_argument('--reward_step_total', type=float, default=0.5)
parser.add_argument('--lr', type=float, default=1e-3)
# parser.add_argument('--has_rl', type=int, default=1)
# parser.add_argument('--has_expert', type=int, default=1)
parser.add_argument('--has_d_step', type=int, default=1)
parser.add_argument('--has_d_final', type=int, default=1)
parser.add_argument('--has_ppo', type=int, default=1)
parser.add_argument('--rl_start', type=int, default=250)
parser.add_argument('--rl_end', type=int, default=int(1e6))
parser.add_argument('--expert_start', type=int, default=0)
parser.add_argument('--expert_end', type=int, default=int(1e6))
parser.add_argument('--save_every', type=int, default=50)
parser.add_argument('--load', type=int, default=1)
parser.add_argument('--load_step', type=int, default=6800)
# parser.add_argument('--load_step', type=int, default=0)
parser.add_argument('--curriculum', type=int, default=0)
parser.add_argument('--curriculum_num', type=int, default=6)
parser.add_argument('--curriculum_step', type=int, default=200)
parser.add_argument('--supervise_time', type=int, default=4)
parser.add_argument('--normalize_adj', type=int, default=0)
parser.add_argument('--layer_num_g', type=int, default=3)
parser.add_argument('--layer_num_d', type=int, default=3)
parser.add_argument('--graph_emb', type=int, default=0)
parser.add_argument('--stop_shift', type=int, default=-3)
parser.add_argument('--has_residual', type=int, default=0)
parser.add_argument('--has_concat', type=int, default=0)
parser.add_argument('--has_feature', type=int, default=1)
parser.add_argument('--emb_size', type=int, default=64) # default 64
parser.add_argument('--gcn_aggregate', type=str, default='mean')# sum, mean, concat
parser.add_argument('--gan_type', type=str, default='wgan')
parser.add_argument('--gate_sum_d', type=int, default=0)
parser.add_argument('--mask_null', type=int, default=0)
parser.add_argument('--is_conditional', type=int, default=1) # default 0
parser.add_argument('--conditional', type=str, default='d1') # default 0
parser.add_argument('--max_action', type=int, default=128) # default 0
parser.add_argument('--min_action', type=int, default=20) # default 0
parser.add_argument('--bn', type=int, default=0)
parser.add_argument('--name_full',type=str,default='')
parser.add_argument('--name_full_load',type=str,default='')
parser.add_argument('--disease_id',type=int,default=200)
parser.add_argument('--network_weight',type=int,default=10)
parser.add_argument('--deepaffinity_thr',type=int,default=6)
parser.add_argument('--others_weight',type=int,default=1)
parser.add_argument('--path',type=str,default=os.getcwd())
return parser
def main():
args = molecule_arg_parser().parse_args()
args.name_full = args.env + '_' + args.dataset + '_' + args.name
args.name_full_load1 = args.env + '_' + args.dataset_load + '_' + args.name_load + '_' + str(args.load_step) + '_1'
args.name_full_load2 = args.env + '_' + args.dataset_load + '_' + args.name_load + '_' + str(args.load_step) + '_2'
print(args)
train(args,seed=args.seed,writer=None)
if __name__ == '__main__':
main()
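# Example invocation (hypothetical values; data paths, checkpoints and the disease id
# depend on the local setup):
#     python run_deepaffinity.py --env molecule --dataset zinc --disease_id 200 \
#         --reward_type gan --lr 1e-3 --num_steps 50000000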
| 56.307692
| 481
| 0.710755
|
e60973576ab5ffbe97cdc0acc96e226ea03db28d
| 211
|
py
|
Python
|
python/pygimli/solver/__init__.py
|
mjziebarth/gimli
|
196ac4d6dd67e0326cccc44a87b367f64051e490
|
[
"Apache-2.0"
] | 3
|
2021-07-10T00:56:59.000Z
|
2022-02-17T12:43:38.000Z
|
python/pygimli/solver/__init__.py
|
ivek1312/gimli
|
5fafebb7c96dd0e04e2616df402fa27a01609d63
|
[
"Apache-2.0"
] | null | null | null |
python/pygimli/solver/__init__.py
|
ivek1312/gimli
|
5fafebb7c96dd0e04e2616df402fa27a01609d63
|
[
"Apache-2.0"
] | 1
|
2022-03-29T04:28:40.000Z
|
2022-03-29T04:28:40.000Z
|
# -*- coding: utf-8 -*-
"""General physics independent solver interface."""
from .green import greenDiffusion1D
from .solver import *
from .solverFiniteVolume import *
__all__ = []
class WorkSpace:
pass
| 16.230769
| 51
| 0.7109
|
8c23a2b92e26212ef1a9b32f3123d6e0f9857007
| 39,093
|
py
|
Python
|
SapGuiLibrary/SapGuiLibrary.py
|
jduncan8142/robotframework-sapguilibrary
|
006fbf4be69e9fcd35c334751dfb07c8e4a1447d
|
[
"Apache-2.0"
] | null | null | null |
SapGuiLibrary/SapGuiLibrary.py
|
jduncan8142/robotframework-sapguilibrary
|
006fbf4be69e9fcd35c334751dfb07c8e4a1447d
|
[
"Apache-2.0"
] | null | null | null |
SapGuiLibrary/SapGuiLibrary.py
|
jduncan8142/robotframework-sapguilibrary
|
006fbf4be69e9fcd35c334751dfb07c8e4a1447d
|
[
"Apache-2.0"
] | null | null | null |
import pythoncom
import win32com.client
import time
from pythoncom import com_error
import robot.libraries.Screenshot as screenshot
import os
from robot.api import logger
import datetime
class SapGuiLibrary:
"""The SapGuiLibrary is a library that enables users to create tests for the Sap Gui application
The library uses the Sap Scripting Engine, therefore Scripting must be enabled in Sap in order for this library to work.
= Opening a connection / Before running tests =
First of all, you have to *make sure the Sap Logon Pad is started*. You can automate this process by using the
AutoIT library or the Process Library.
After the Sap Login Pad is started, you can connect to the Sap Session using the keyword `connect to session`.
If you have a successful connection you can use `Open Connection` to open a new connection from the Sap Logon Pad
or `Connect To Existing Connection` to connect to a connection that is already open.
= Locating or specifying elements =
You need to specify elements starting from the window ID, for example, wnd[0]/tbar[1]/btn[8]. In some cases the SAP
ID contains backslashes. Make sure you escape these backslashes by adding another backslash in front of it.
= Screenshots (on error) =
The SapGUILibrary offers an option for automatic screenshots on error.
Default this option is enabled, use keyword `disable screenshots on error` to skip the screenshot functionality.
Alternatively, this option can be set at import.
"""
__version__ = '1.1'
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def __init__(self, screenshots_on_error=True, screenshot_directory=None):
"""Sets default variables for the library
"""
self.explicit_wait = float(0.0)
self.sapapp = -1
self.session = -1
self.connection = -1
self.take_screenshots = screenshots_on_error
self.screenshot = screenshot.Screenshot()
if screenshot_directory is not None:
if not os.path.exists(screenshot_directory):
os.makedirs(screenshot_directory)
self.screenshot.set_screenshot_directory(screenshot_directory)
def click_element(self, element_id):
"""Performs a single click on a given element. Used only for buttons, tabs and menu items.
In case you want to change a value of an element like checkboxes of selecting an option in dropdown lists,
use `select checkbox` or `select from list by label` instead.
"""
# Performing the correct method on an element, depending on the type of element
element_type = self.get_element_type(element_id)
if (element_type == "GuiTab"
or element_type == "GuiMenu"):
self.session.findById(element_id).select()
elif element_type == "GuiButton":
self.session.findById(element_id).press()
else:
self.take_screenshot()
message = "You cannot use 'click_element' on element type '%s', maybe use 'select checkbox' instead?" % element_type
raise Warning(message)
time.sleep(self.explicit_wait)
def click_toolbar_button(self, table_id, button_id):
"""Clicks a button of a toolbar within a GridView 'table_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'button_id' of the button to click
"""
self.element_should_be_present(table_id)
try:
self.session.findById(table_id).pressToolbarButton(button_id)
except AttributeError:
self.take_screenshot()
self.session.findById(table_id).pressButton(button_id)
except com_error:
self.take_screenshot()
message = "Cannot find Button_id '%s'." % button_id
raise ValueError(message)
time.sleep(self.explicit_wait)
def connect_to_existing_connection(self, connection_name):
"""Connects to an open connection. If the connection matches the given connection_name, the session is connected
to this connection.
"""
self.connection = self.sapapp.Children(0)
if self.connection.Description == connection_name:
self.session = self.connection.children(0)
else:
self.take_screenshot()
message = "No existing connection for '%s' found." % connection_name
raise ValueError(message)
def connect_to_session(self, explicit_wait=0):
"""Connects to an open session SAP.
See `Opening a connection / Before running tests` for details about requirements before connecting to a session.
Optionally `set explicit wait` can be used to set the explicit wait time.
*Examples*:
| *Keyword* | *Attributes* |
| connect to session | |
| connect to session | 3 |
| connect to session | explicit_wait=500ms |
"""
lenstr = len("SAPGUI")
rot = pythoncom.GetRunningObjectTable()
rotenum = rot.EnumRunning()
while True:
monikers = rotenum.Next()
if not monikers:
break
ctx = pythoncom.CreateBindCtx(0)
            name = monikers[0].GetDisplayName(ctx, None)
if name[-lenstr:] == "SAPGUI":
obj = rot.GetObject(monikers[0])
sapgui = win32com.client.Dispatch(obj.QueryInterface(pythoncom.IID_IDispatch))
self.sapapp = sapgui.GetScriptingEngine
# Set explicit_wait after connection succeed
self.set_explicit_wait(explicit_wait)
if hasattr(self.sapapp, "OpenConnection") == False:
self.take_screenshot()
message = "Could not connect to Session, is Sap Logon Pad open?"
raise Warning(message)
# run explicit wait last
time.sleep(self.explicit_wait)
def disable_screenshots_on_error(self):
"""Disables automatic screenshots on error.
"""
self.take_screenshots = False
def doubleclick_element(self, element_id, item_id, column_id):
"""Performs a double-click on a given element. Used only for shell objects.
"""
# Performing the correct method on an element, depending on the type of element
element_type = self.get_element_type(element_id)
if element_type == "GuiShell":
self.session.findById(element_id).doubleClickItem(item_id, column_id)
else:
self.take_screenshot()
message = "You cannot use 'doubleclick element' on element type '%s', maybe use 'click element' instead?" % element_type
raise Warning(message)
time.sleep(self.explicit_wait)
def element_should_be_present(self, element_id, message=None):
"""Checks whether an element is present on the screen.
"""
try:
self.session.findById(element_id)
except com_error:
self.take_screenshot()
if message is None:
message = "Cannot find Element '%s'." % element_id
raise ValueError(message)
def element_value_should_be(self, element_id, expected_value, message=None):
"""Checks whether the element value is the same as the expected value.
The possible expected values depend on the type of element (see usage).
Usage:
| *Element type* | *possible values* |
| textfield | text |
| label | text |
| checkbox | checked / unchecked |
| radiobutton | checked / unchecked |
| combobox | text of the option to be expected |
"""
element_type = self.get_element_type(element_id)
actual_value = self.get_value(element_id)
# Breaking up the different element types so we can check the value the correct way
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiComboBox"
or element_type == "GuiLabel"):
self.session.findById(element_id).setfocus()
time.sleep(self.explicit_wait)
# In these cases we can simply check the text value against the value of the element
if expected_value != actual_value:
if message is None:
message = "Element value of '%s' should be '%s', but was '%s'" % (
element_id, expected_value, actual_value)
self.take_screenshot()
raise AssertionError(message)
elif element_type == "GuiStatusPane":
if expected_value != actual_value:
if message is None:
message = "Element value of '%s' should be '%s', but was '%s'" % (
element_id, expected_value, actual_value)
self.take_screenshot()
raise AssertionError(message)
elif (element_type == "GuiCheckBox"
or element_type == "GuiRadioButton"):
# First check if there is a correct value given, otherwise raise an assertion error
self.session.findById(element_id).setfocus()
if (expected_value.lower() != "checked"
and expected_value.lower() != "unchecked"):
# Raise an AsertionError when no correct expected_value is given
self.take_screenshot()
if message is None:
message = "Incorrect value for element type '%s', provide checked or unchecked" % element_type
raise AssertionError(message)
# Check whether the expected value matches the actual value. If not, raise an assertion error
if expected_value.lower() != actual_value:
self.take_screenshot()
if message is None:
message = "Element value of '%s' didn't match the expected value" % element_id
raise AssertionError(message)
else:
# When the type of element can't be checked, raise an assertion error
self.take_screenshot()
message = "Cannot use keyword 'element value should be' for element type '%s'" % element_type
raise Warning(message)
# Run explicit wait as last
time.sleep(self.explicit_wait)
def element_value_should_contain(self, element_id, expected_value, message=None):
"""Checks whether the element value contains the expected value.
The possible expected values depend on the type of element (see usage).
Usage:
| *Element type* | *possible values* |
| textfield | text |
| label | text |
| combobox | text of the option to be expected |
"""
element_type = self.get_element_type(element_id)
# Breaking up the different element types so we can check the value the correct way
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiComboBox"
or element_type == "GuiLabel"):
self.session.findById(element_id).setfocus()
actual_value = self.get_value(element_id)
time.sleep(self.explicit_wait)
# In these cases we can simply check the text value against the value of the element
if expected_value not in actual_value:
self.take_screenshot()
if message is None:
message = "Element value '%s' does not contain '%s', (but was '%s')" % (
element_id, expected_value, actual_value)
raise AssertionError(message)
else:
# When the element content can't be checked, raise an assertion error
self.take_screenshot()
message = "Cannot use keyword 'element value should contain' for element type '%s'" % element_type
raise Warning(message)
# Run explicit wait as last
time.sleep(self.explicit_wait)
def enable_screenshots_on_error(self):
"""Enables automatic screenshots on error.
"""
self.take_screenshots = True
def get_cell_value(self, table_id, row_num, col_id):
"""Returns the cell value for the specified cell.
"""
self.element_should_be_present(table_id)
try:
cellValue = self.session.findById(table_id).getCellValue(row_num, col_id)
return cellValue
except com_error:
self.take_screenshot()
message = "Cannot find Column_id '%s'." % col_id
raise ValueError(message)
def set_combobox(self, element_id, key):
"""Sets the value of a SAP GuiComboBox element
"""
element_type = self.get_element_type(element_id)
if element_type == "GuiComboBox":
self.session.findById(element_id).key = key
logger.info(f"ComboBox value {key} selected from {element_id}")
time.sleep(self.explicit_wait)
else:
self.take_screenshot()
message = "Element type '%s' has no set key method." % element_type
raise ValueError(message)
def get_element_location(self, element_id):
"""Returns the Sap element location for the given element.
"""
self.element_should_be_present(element_id)
screenleft = self.session.findById(element_id).screenLeft
screentop = self.session.findById(element_id).screenTop
return screenleft, screentop
def get_element_type(self, element_id):
"""Returns the Sap element type for the given element.
"""
try:
type = self.session.findById(element_id).type
return type
except com_error:
self.take_screenshot()
message = "Cannot find element with id '%s'" % element_id
raise ValueError(message)
def get_row_count(self, table_id):
"""Returns the number of rows found in the specified table.
"""
self.element_should_be_present(table_id)
rowCount = self.session.findById(table_id).rowCount
return rowCount
def get_scroll_position(self, element_id):
"""Returns the scroll position of the scrollbar of an element 'element_id' that is contained within a shell object.
"""
self.element_should_be_present(element_id)
currentPosition = self.session.findById(element_id).verticalScrollbar.position
return currentPosition
def get_value(self, element_id):
"""Gets the value of the given element. The possible return values depend on the type of element (see Return values).
Return values:
| *Element type* | *Return values* |
| textfield | text |
| label | text |
| checkbox | checked / unchecked |
| radiobutton | checked / unchecked |
| combobox | text of the selected option |
| guibutton | text |
| guititlebar | text |
| guistatusbar | text |
| guitab | text |
"""
element_type = self.get_element_type(element_id)
return_value = ""
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiLabel"
or element_type == "GuiTitlebar"
or element_type == "GuiStatusbar"
or element_type == "GuiButton"
or element_type == "GuiTab"
or element_type == "GuiShell"):
self.set_focus(element_id)
return_value = self.session.findById(element_id).text
elif element_type == "GuiStatusPane":
return_value = self.session.findById(element_id).text
elif (element_type == "GuiCheckBox"
or element_type == "GuiRadioButton"):
actual_value = self.session.findById(element_id).selected
# In these situations we return check / unchecked, so we change these values here
if actual_value == True:
return_value = "checked"
elif actual_value == False:
return_value = "unchecked"
elif element_type == "GuiComboBox":
return_value = self.session.findById(element_id).text
# In comboboxes there are many spaces after the value. In order to check the value, we strip them away.
return_value = return_value.strip()
else:
# If we can't return the value for this element type, raise an assertion error
self.take_screenshot()
message = "Cannot get value for element type '%s'" % element_type
raise Warning(message)
return return_value
def get_window_title(self, locator):
"""Retrieves the window title of the given window.
"""
return_value = ""
try:
return_value = self.session.findById(locator).text
except com_error:
self.take_screenshot()
message = "Cannot find window with locator '%s'" % locator
raise ValueError(message)
return return_value
def input_password(self, element_id, password):
"""Inserts the given password into the text field identified by locator.
The password is not recorded in the log.
"""
element_type = self.get_element_type(element_id)
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiShell"
or element_type == "GuiPasswordField"):
self.session.findById(element_id).text = password
logger.info("Typing password into text field '%s'." % element_id)
time.sleep(self.explicit_wait)
else:
self.take_screenshot()
message = "Cannot use keyword 'input password' for element type '%s'" % element_type
raise ValueError(message)
def input_text(self, element_id, text):
"""Inserts the given text into the text field identified by locator.
Use keyword `input password` to insert a password in a text field.
"""
element_type = self.get_element_type(element_id)
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiShell"
or element_type == "GuiPasswordField"):
self.session.findById(element_id).text = text
logger.info("Typing text '%s' into text field '%s'." % (text, element_id))
time.sleep(self.explicit_wait)
else:
self.take_screenshot()
message = "Cannot use keyword 'input text' for element type '%s'" % element_type
raise ValueError(message)
    def input_date(self, element_id, date=None):
        """Inserts a date into the text field identified by locator.
        Defaults to the current date from datetime.datetime.now() in MM/DD/YYYY format,
        evaluated at call time.
        """
        if date is None:
            date = datetime.datetime.now().strftime("%m/%d/%Y")
element_type = self.get_element_type(element_id)
if (element_type == "GuiTextField"
or element_type == "GuiCTextField"
or element_type == "GuiShell"
or element_type == "GuiPasswordField"):
self.session.findById(element_id).text = date
logger.info("Typing date '%s' into date field '%s'." % (date, element_id))
time.sleep(self.explicit_wait)
else:
self.take_screenshot()
message = "Cannot use keyword 'input date' for element type '%s'" % element_type
raise ValueError(message)
def maximize_window(self, window=0):
"""Maximizes the SapGui window.
"""
try:
self.session.findById("wnd[%s]" % window).maximize()
time.sleep(self.explicit_wait)
except com_error:
self.take_screenshot()
message = "Cannot maximize window wnd[% s], is the window actually open?" % window
raise ValueError(message)
# run explicit wait last
time.sleep(self.explicit_wait)
def open_connection(self, connection_name):
"""Opens a connection to the given connection name. Be sure to provide the full connection name, including the bracket part.
"""
# First check if the sapapp is set and OpenConnection method exists
if hasattr(self.sapapp, "OpenConnection") == False:
self.take_screenshot()
message = "Cannot find an open Sap Login Pad, is Sap Logon Pad open?"
raise Warning(message)
try:
self.connection = self.sapapp.OpenConnection(connection_name, True)
except com_error:
self.take_screenshot()
message = "Cannot open connection '%s', please check connection name." % connection_name
raise ValueError(message)
self.session = self.connection.children(0)
# run explicit wait last
time.sleep(self.explicit_wait)
def run_transaction(self, transaction):
"""Runs a Sap transaction. An error is given when an unknown transaction is specified.
"""
self.session.findById("wnd[0]/tbar[0]/okcd").text = transaction
time.sleep(self.explicit_wait)
self.send_vkey(0)
if transaction == '/nex':
return
pane_value = self.session.findById("wnd[0]/sbar/pane[0]").text
if pane_value in ("Transactie %s bestaat niet" % transaction.upper(),
"Transaction %s does not exist" % transaction.upper(),
"Transaktion %s existiert nicht" % transaction.upper()):
self.take_screenshot()
message = "Unknown transaction: '%s'" % transaction
raise ValueError(message)
def scroll(self, element_id, position):
"""Scrolls the scrollbar of an element 'element_id' that is contained within a shell object.
'Position' is the number of rows to scroll.
"""
self.element_should_be_present(element_id)
self.session.findById(element_id).verticalScrollbar.position = position
time.sleep(self.explicit_wait)
def select_checkbox(self, element_id):
"""Selects checkbox identified by locator.
Does nothing if the checkbox is already selected.
"""
element_type = self.get_element_type(element_id)
if element_type == "GuiCheckBox":
self.session.findById(element_id).selected = True
else:
self.take_screenshot()
message = "Cannot use keyword 'select checkbox' for element type '%s'" % element_type
raise ValueError(message)
time.sleep(self.explicit_wait)
def select_context_menu_item(self, element_id, menu_or_button_id, item_id):
"""Selects an item from the context menu by clicking a button or right-clicking in the node context menu.
"""
self.element_should_be_present(element_id)
# The function checks if the element has an attribute "nodeContextMenu" or "pressContextButton"
if hasattr(self.session.findById(element_id), "nodeContextMenu"):
self.session.findById(element_id).nodeContextMenu(menu_or_button_id)
elif hasattr(self.session.findById(element_id), "pressContextButton"):
self.session.findById(element_id).pressContextButton(menu_or_button_id)
# The element has neither attributes, give an error message
else:
self.take_screenshot()
element_type = self.get_element_type(element_id)
message = "Cannot use keyword 'select context menu item' for element type '%s'" % element_type
raise ValueError(message)
self.session.findById(element_id).selectContextMenuItem(item_id)
time.sleep(self.explicit_wait)
def select_from_list_by_label(self, element_id, value):
"""Selects the specified option from the selection list.
"""
element_type = self.get_element_type(element_id)
if element_type == "GuiComboBox":
self.session.findById(element_id).key = value
time.sleep(self.explicit_wait)
else:
self.take_screenshot()
message = "Cannot use keyword 'select from list by label' for element type '%s'" % element_type
raise ValueError(message)
def select_node(self, tree_id, node_id, expand=False):
"""Selects a node of a TableTreeControl 'tree_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'node_id' of the node.
Expand can be set to True to expand the node. If the node cannot be expanded, no error is given.
"""
self.element_should_be_present(tree_id)
self.session.findById(tree_id).selectedNode = node_id
if expand:
            # TODO: find a more elegant way to handle this
try:
self.session.findById(tree_id).expandNode(node_id)
except com_error:
pass
time.sleep(self.explicit_wait)
def select_node_link(self, tree_id, link_id1, link_id2):
"""Selects a link of a TableTreeControl 'tree_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'link_id1' and 'link_id2' of the link to select.
"""
self.element_should_be_present(tree_id)
self.session.findById(tree_id).selectItem(link_id1, link_id2)
self.session.findById(tree_id).clickLink(link_id1, link_id2)
time.sleep(self.explicit_wait)
def select_radio_button(self, element_id):
"""Sets radio button to the specified value.
"""
element_type = self.get_element_type(element_id)
if element_type == "GuiRadioButton":
self.session.findById(element_id).selected = True
else:
self.take_screenshot()
message = "Cannot use keyword 'select radio button' for element type '%s'" % element_type
raise ValueError(message)
time.sleep(self.explicit_wait)
def select_table_column(self, table_id, column_id):
"""Selects an entire column of a GridView 'table_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'column_id' of the column to select.
"""
self.element_should_be_present(table_id)
try:
self.session.findById(table_id).selectColumn(column_id)
except com_error:
self.take_screenshot()
message = "Cannot find Column_id '%s'." % column_id
raise ValueError(message)
time.sleep(self.explicit_wait)
def select_table_row(self, table_id, row_num):
"""Selects an entire row of a table. This can either be a TableControl or a GridView 'table_id'
which is contained within a shell object. The row is an index to select the row, starting from 0.
"""
element_type = self.get_element_type(table_id)
if (element_type == "GuiTableControl"):
id = self.session.findById(table_id).getAbsoluteRow(row_num)
id.selected = -1
else:
try:
self.session.findById(table_id).selectedRows = row_num
except com_error:
self.take_screenshot()
message = "Cannot use keyword 'select table row' for element type '%s'" % element_type
raise ValueError(message)
time.sleep(self.explicit_wait)
def send_vkey(self, vkey_id, window=0):
"""Sends a SAP virtual key combination to the window, not into an element.
If you want to send a value to a text field, use `input text` instead.
To send a vkey, you can either use te *VKey ID* or the *Key combination*.
Sap Virtual Keys (on Windows)
| *VKey ID* | *Key combination* | *VKey ID* | *Key combination* | *VKey ID* | *Key combination* |
| *0* | Enter | *26* | Ctrl + F2 | *72* | Ctrl + A |
| *1* | F1 | *27* | Ctrl + F3 | *73* | Ctrl + D |
| *2* | F2 | *28* | Ctrl + F4 | *74* | Ctrl + N |
| *3* | F3 | *29* | Ctrl + F5 | *75* | Ctrl + O |
| *4* | F4 | *30* | Ctrl + F6 | *76* | Shift + Del |
| *5* | F5 | *31* | Ctrl + F7 | *77* | Ctrl + Ins |
| *6* | F6 | *32* | Ctrl + F8 | *78* | Shift + Ins |
| *7* | F7 | *33* | Ctrl + F9 | *79* | Alt + Backspace |
| *8* | F8 | *34* | Ctrl + F10 | *80* | Ctrl + Page Up |
| *9* | F9 | *35* | Ctrl + F11 | *81* | Page Up |
| *10* | F10 | *36* | Ctrl + F12 | *82* | Page Down |
| *11* | F11 or Ctrl + S | *37* | Ctrl + Shift + F1 | *83* | Ctrl + Page Down |
| *12* | F12 or ESC | *38* | Ctrl + Shift + F2 | *84* | Ctrl + G |
| *14* | Shift + F2 | *39* | Ctrl + Shift + F3 | *85* | Ctrl + R |
| *15* | Shift + F3 | *40* | Ctrl + Shift + F4 | *86* | Ctrl + P |
| *16* | Shift + F4 | *41* | Ctrl + Shift + F5 | *87* | Ctrl + B |
| *17* | Shift + F5 | *42* | Ctrl + Shift + F6 | *88* | Ctrl + K |
| *18* | Shift + F6 | *43* | Ctrl + Shift + F7 | *89* | Ctrl + T |
| *19* | Shift + F7 | *44* | Ctrl + Shift + F8 | *90* | Ctrl + Y |
| *20* | Shift + F8 | *45* | Ctrl + Shift + F9 | *91* | Ctrl + X |
| *21* | Shift + F9 | *46* | Ctrl + Shift + F10 | *92* | Ctrl + C |
| *22* | Ctrl + Shift + 0 | *47* | Ctrl + Shift + F11 | *93* | Ctrl + V |
| *23* | Shift + F11 | *48* | Ctrl + Shift + F12 | *94* | Shift + F10 |
| *24* | Shift + F12 | *70* | Ctrl + E | *97* | Ctrl + # |
| *25* | Ctrl + F1 | *71* | Ctrl + F | | |
Examples:
| *Keyword* | *Attributes* | |
| send_vkey | 8 | |
| send_vkey | Ctrl + Shift + F1 | |
| send_vkey | Ctrl + F7 | window=1 |
"""
vkey_id = str(vkey_id)
vkeys_array = ["ENTER", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12",
None, "SHIFT+F2", "SHIFT+F3", "SHIFT+F4", "SHIFT+F5", "SHIFT+F6", "SHIFT+F7", "SHIFT+F8",
"SHIFT+F9", "CTRL+SHIFT+0", "SHIFT+F11", "SHIFT+F12", "CTRL+F1", "CTRL+F2", "CTRL+F3", "CTRL+F4",
"CTRL+F5", "CTRL+F6", "CTRL+F7", "CTRL+F8", "CTRL+F9", "CTRL+F10", "CTRL+F11", "CTRL+F12",
"CTRL+SHIFT+F1", "CTRL+SHIFT+F2", "CTRL+SHIFT+F3", "CTRL+SHIFT+F4", "CTRL+SHIFT+F5",
"CTRL+SHIFT+F6", "CTRL+SHIFT+F7", "CTRL+SHIFT+F8", "CTRL+SHIFT+F9", "CTRL+SHIFT+F10",
"CTRL+SHIFT+F11", "CTRL+SHIFT+F12", None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, "CTRL+E", "CTRL+F", "CTRL+A",
"CTRL+D", "CTRL+N", "CTRL+O", "SHIFT+DEL", "CTRL+INS", "SHIFT+INS", "ALT+BACKSPACE",
"CTRL+PAGEUP", "PAGEUP",
"PAGEDOWN", "CTRL+PAGEDOWN", "CTRL+G", "CTRL+R", "CTRL+P", "CTRL+B", "CTRL+K", "CTRL+T",
"CTRL+Y",
"CTRL+X", "CTRL+C", "CTRL+V", "SHIFT+F10", None, None, "CTRL+#"]
# If a key combi is given, replace vkey_id by correct id based on given combination
if not vkey_id.isdigit():
search_comb = vkey_id.upper()
search_comb = search_comb.replace(" ", "")
search_comb = search_comb.replace("CONTROL", "CTRL")
search_comb = search_comb.replace("DELETE", "DEL")
search_comb = search_comb.replace("INSERT", "INS")
try:
vkey_id = vkeys_array.index(search_comb)
except ValueError:
if search_comb == "CTRL+S":
vkey_id = 11
elif search_comb == "ESC":
vkey_id = 12
else:
message = "Cannot find given Vkey, provide a valid Vkey number or combination"
raise ValueError(message)
try:
self.session.findById("wnd[% s]" % window).sendVKey(vkey_id)
except com_error:
self.take_screenshot()
message = "Cannot send Vkey to given window, is window wnd[% s] actually open?" % window
raise ValueError(message)
time.sleep(self.explicit_wait)
def set_cell_value(self, table_id, row_num, col_id, text):
"""Sets the cell value for the specified cell of a GridView 'table_id' which is contained within a shell object.
Use the Scripting tracker recorder to find the 'col_id' of the cell to set.
"""
self.element_should_be_present(table_id)
try:
self.session.findById(table_id).modifyCell(row_num, col_id, text)
logger.info("Typing text '%s' into cell '%s', '%s'" % (text, row_num, col_id))
time.sleep(self.explicit_wait)
except com_error:
self.take_screenshot()
message = "Cannot type text '%s' into cell '%s', '%s'" % (text, row_num, col_id)
raise ValueError(message)
def set_explicit_wait(self, speed):
"""Sets the delay time that is waited after each SapGui keyword.
The value can be given as a number that is considered to be seconds or as a human-readable string like 1 second
or 700 ms.
This functionality is designed to be used for demonstration and debugging purposes. It is not advised to use
this keyword to wait for an element to appear or function to finish.
*Possible time formats:*
| miliseconds | milliseconds, millisecond, millis, ms |
| seconds | seconds, second, secs, sec, s |
| minutes | minutes, minute, mins, min, m |
*Example:*
| *Keyword* | *Attributes* |
| Set explicit wait | 1 |
| Set explicit wait | 3 seconds |
| Set explicit wait | 500 ms |
"""
speed = str(speed)
if not speed.isdigit():
speed_elements = speed.split()
if not speed_elements[0].isdigit():
message = "The given speed %s doesn't begin with an numeric value, but it should" % speed
raise ValueError(message)
else:
speed_elements[0] = float(speed_elements[0])
speed_elements[1] = speed_elements[1].lower()
if (speed_elements[1] == "seconds"
or speed_elements[1] == "second"
or speed_elements[1] == "s"
or speed_elements[1] == "secs"
or speed_elements[1] == "sec"):
self.explicit_wait = speed_elements[0]
elif (speed_elements[1] == "minutes"
or speed_elements[1] == "minute"
or speed_elements[1] == "mins"
or speed_elements[1] == "min"
or speed_elements[1] == "m"):
self.explicit_wait = speed_elements[0] * 60
elif (speed_elements[1] == "milliseconds"
or speed_elements[1] == "millisecond"
or speed_elements[1] == "millis"
or speed_elements[1] == "ms"):
self.explicit_wait = speed_elements[0] / 1000
else:
self.take_screenshot()
message = "%s is a unknown time format" % speed_elements[1]
raise ValueError(message)
else:
# No timeformat given, so time is expected to be given in seconds
self.explicit_wait = float(speed)
def set_focus(self, element_id):
"""Sets the focus to the given element.
"""
element_type = self.get_element_type(element_id)
if element_type != "GuiStatusPane":
self.session.findById(element_id).setFocus()
time.sleep(self.explicit_wait)
def take_screenshot(self, screenshot_name="sap-screenshot"):
"""Takes a screenshot, only if 'screenshots on error' has been enabled,
either at import of with keyword `enable screenshots on error`.
This keyword uses Robots' internal `Screenshot` library.
"""
if self.take_screenshots == True:
self.screenshot.take_screenshot(screenshot_name)
def unselect_checkbox(self, element_id):
"""Removes selection of checkbox identified by locator.
Does nothing if the checkbox is not selected.
"""
element_type = self.get_element_type(element_id)
if element_type == "GuiCheckBox":
self.session.findById(element_id).selected = False
else:
self.take_screenshot()
message = "Cannot use keyword 'unselect checkbox' for element type '%s'" % element_type
raise ValueError(message)
time.sleep(self.explicit_wait)
    def goto_menu(self):
        """Returns to the SAP main menu by running the /n command.
        """
        self.run_transaction("/n")
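if __name__ == "__main__":
    # Minimal sketch of the intended workflow (not runnable without an open SAP Logon
    # Pad and a scripting-enabled SAP GUI session); the connection name, transaction
    # and element id below are placeholders.
    sap = SapGuiLibrary(screenshots_on_error=False)
    sap.connect_to_session()
    sap.open_connection("My SAP System [PUBLIC]")
    sap.run_transaction("VA03")
    sap.input_text("wnd[0]/usr/ctxtVBAK-VBELN", "1234567890")
    sap.send_vkey(0)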
| 48.623134
| 132
| 0.570844
|
2f3dfc935e0431e2aa6395e00b948ff4f3fa6083
| 7,384
|
py
|
Python
|
synthetic_linear_programming/calc.py
|
PredOptwithSoftConstraint/PredOptwithSoftConstraint
|
c0ec41a8c2c48034851cf04cd848013ceba1dd40
|
[
"MIT"
] | 7
|
2021-12-12T15:23:28.000Z
|
2022-01-15T23:24:02.000Z
|
synthetic_linear_programming/calc.py
|
PredOptwithSoftConstraint/PredOptwithSoftConstraint
|
c0ec41a8c2c48034851cf04cd848013ceba1dd40
|
[
"MIT"
] | null | null | null |
synthetic_linear_programming/calc.py
|
PredOptwithSoftConstraint/PredOptwithSoftConstraint
|
c0ec41a8c2c48034851cf04cd848013ceba1dd40
|
[
"MIT"
] | 3
|
2021-12-23T01:02:59.000Z
|
2022-01-14T02:12:40.000Z
|
import numpy as np
from config import get_K, device, CLIP
import gurobipy
from scipy.optimize import minimize, LinearConstraint
from util import merge_constraints
import torch
from torch.optim import SGD
from torch.autograd import Variable
import gurobipy as gp
from gurobipy import GRB
import torch.nn as nn
import cvxpy as cp
from cvxpylayers.torch import CvxpyLayer
class quad_surro_tensor(torch.autograd.Function): # with gradient.
@staticmethod
def forward(ctx, *args):
QUAD_SOFT_K = get_K()
theta, truetheta, inver, diaga, alpha, C, d, delta, gamma = args
inver, diaga, alpha, C, d, delta, gamma = torch.from_numpy(inver).double().to(device), diaga.double().to(device), torch.from_numpy(
alpha).double().to(device), torch.from_numpy(C).double().to(device), torch.from_numpy(d).double().to(device), delta.double().to(device), gamma.double().to(device)
# x = (C^T * delta * diaga * C)^(-1) * (theta + C^T * delta * diaga * d - C^T * (1/4k * delta + gamma) * alpha)
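        # Here delta is diagonal with 2*K on the soft constraints currently in the
        # quadratic region of the penalty, gamma marks constraints in the linear
        # region, diaga = diag(alpha) holds the penalty weights and C x <= d are the
        # merged soft constraints (see getopt_surro); x below is the closed-form
        # maximiser of the smoothed surrogate objective.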
x = inver @ (theta + C.t() @ delta @ diaga @ d - C.t() @ (1.0 / (4 * QUAD_SOFT_K) * delta + gamma) @ alpha)
z = C @ x - d
result = truetheta.t() @ x - alpha.t() @ (delta / 2.0 @ ((z + 1.0 / (4.0 * QUAD_SOFT_K)) ** 2) + gamma @ z)
ctx.save_for_backward(truetheta, x, inver, diaga, alpha, C, d, delta, gamma)
return result
@staticmethod
def backward(ctx, *grad_output):
QUAD_SOFT_K = get_K()
torch.set_printoptions(precision=8) # for debugging.
truetheta, x, inver, diaga, alpha, C, d, delta, gamma = ctx.saved_tensors
dx_dtheta = inver
z = C @ x - d
df_dx = truetheta - C.t() @ delta @ diaga @ z - C.t() @ (delta * 1 / (4 * QUAD_SOFT_K) + gamma) @ alpha
grd = grad_output[0] * dx_dtheta @ df_dx
if CLIP == "CLIP" and grd.abs().max() > 0.0001: grd = grd / grd.abs().max() * 0.0001 # normalizing gradients
elif CLIP == "NORMALIZE" and grd.abs().max() > 0: grd = grd / grd.abs().max() * 0.0001
return grd, None, None, None, None, None, None, None, None
# np_dx_dtheta = np.linalg.inv(np.matmul(C.T, np.matmul(delta, np.matmul(diaga, C)))).T
# np_df_dx = ground_truth_theta.numpy() - np.matmul(C.T, np.matmul(delta, np.matmul(diaga, np.matmul(C, x.reshape(-1, 1)) - d))) - np.matmul(np.matmul(C.T, delta * 1 / (4 * QUAD_SOFT_K) + gamma), alpha)
buffer_C, buffer_d, buffer_alpha = None, None, None
def getopt(theta, alpha0, A0, b0, C0, d0): # get optimal true value.
# SURROVAL is only not zero at melding, i.e. soft=False, naked=False.
QUAD_SOFT_K = get_K()
global buffer_C, buffer_d, buffer_alpha
x0 = np.zeros(A0.shape[1])
if buffer_C is None: buffer_C, buffer_d, buffer_alpha = merge_constraints(A0, b0, C0, d0, alpha0, theta) # TODO: optimization and cut duplicated code.
C, d, alpha = buffer_C, buffer_d, buffer_alpha
ev = gp.Env(empty=True)
ev.setParam('OutputFlag', 0)
ev.start()
m = gp.Model("matrix1", env=ev)
# solve twice: first solve the naked problem, then solve it again to get the surrogate optimal.
x = m.addMVar(shape=theta.shape[0], vtype=GRB.CONTINUOUS, name='x')
z = m.addMVar(shape=d0.shape[0], vtype=GRB.CONTINUOUS, name='z')
m.setObjective(theta.T @ x - alpha0.T @ z, GRB.MAXIMIZE)
m.addConstr(z >= 0, name="c1")
m.addConstr(z >= C0 @ x - d0.squeeze(), name='c2')
m.addConstr(x >= 0, name="c3")
m.addConstr(A0 @ x <= b0.squeeze(), name='c4')
# print(A0, b0, C0, d0, alpha0)
m.optimize()
idx_none, idx_quad, idx_linear = [], [], []
naked_x = x.getAttr('x')
soft_constr = np.matmul(C, naked_x.reshape(-1, 1)) - d
for i in range(d.shape[0]):
# print(i, soft_constr[i])
if -1.0 / (4 * QUAD_SOFT_K) <= soft_constr[i] and soft_constr[i] <= 1.0 / (4 * QUAD_SOFT_K):
idx_quad.append(i)
elif soft_constr[i] > 1.0 / (4 * QUAD_SOFT_K):
idx_linear.append(i)
else:
idx_none.append(i)
diaga = np.zeros((alpha.shape[0], alpha.shape[0]))
gamma, delta = np.zeros((C.shape[0], C.shape[0])), np.zeros((C.shape[0], C.shape[0]))
for i in range(len(idx_linear)):
gamma[idx_linear[i], idx_linear[i]] = 1
#print("idx_none soft:", list(filter(lambda x: x < alpha0.shape[0], idx_none)))
#print("idx_linear soft:", list(filter(lambda x: x < alpha0.shape[0], idx_linear)))
#print("idx_quad soft:", list(filter(lambda x: x < alpha0.shape[0], idx_quad)))
for i in range(len(idx_quad)):
delta[idx_quad[i], idx_quad[i]] = 2 * QUAD_SOFT_K
for i in range(alpha.shape[0]):
diaga[i, i] = alpha[i]
return getval(theta, naked_x, alpha0, A0, b0, C0, d0), naked_x# m.objVal, x.getAttr('x')
def resetbuffer():
global buffer_C, buffer_d, buffer_alpha
buffer_C, buffer_d, buffer_alpha = None, None, None
def getopt_surro(ground_truth_theta, thetatensor, alpha0, A0, b0, C0, d0, nograd=False, backlog=None): # the surrogate function
QUAD_SOFT_K = get_K()
# print("K:", QUAD_SOFT_K)
global buffer_C, buffer_d, buffer_alpha
theta = thetatensor.cpu().detach().numpy()
if buffer_C is None: buffer_C, buffer_d, buffer_alpha = merge_constraints(A0, b0, C0, d0,
alpha0, torch.max(ground_truth_theta, thetatensor).cpu().detach().numpy()) # TODO: optimization and cut duplicated code.
C, d, alpha = buffer_C, buffer_d, buffer_alpha
_, x = getopt(theta, alpha0, A0, b0, C0, d0)
idx_none, idx_quad, idx_linear = [], [], []
soft_constr = np.matmul(C, x.reshape(-1, 1)) - d
for i in range(C.shape[0]):
if -1.0 / (4 * QUAD_SOFT_K) < soft_constr[i] and soft_constr[i] <= 1.0 / (4 * QUAD_SOFT_K):
idx_quad.append(i)
elif soft_constr[i] > 1.0 / (4 * QUAD_SOFT_K):
idx_linear.append(i)
else: idx_none.append(i)
L = d.shape[0]
delta, gamma = torch.zeros((L, L)), torch.zeros((L, L))
diagz = torch.zeros((L, L))
diaga = torch.zeros((L, L))
for i in idx_quad: delta[i, i] = 2 * QUAD_SOFT_K
for i in idx_linear: gamma[i, i] = 1
for i in range(L): diagz[i, i], diaga[i, i] = soft_constr[i, 0], alpha[i, 0]
    if len(idx_quad) < C.shape[1]: # potential error: the inverse is not full rank!
if backlog is not None:
backlog.write("potential singular! " + str(soft_constr) + " " + str(len(idx_quad)) + "\n\n")
backlog.flush()
val = getval(ground_truth_theta.cpu().numpy(), x, alpha0, A0, b0, C0, d0)
return val, None
ctr = 0
while True: # if is not full rank
try:
inver = np.linalg.inv(np.matmul(np.matmul(C.T, np.matmul(delta.cpu().numpy(), diaga.cpu().numpy())), C)) + ctr * 1e-7
break
except np.linalg.LinAlgError:
ctr += 1
exit(0)
grd = None if nograd else quad_surro_tensor.apply(thetatensor, ground_truth_theta, inver, diaga, alpha, C, d, delta, gamma)
val = getval(ground_truth_theta.cpu().numpy(), x, alpha0, A0, b0, C0, d0)
return val, grd # grd is the objective value with real theta, while val is the objective value with predicted theta.
def getval(theta, x, alpha0, A0, b0, C0, d0):
return theta.T.dot(x) - alpha0.T.dot(np.maximum(np.matmul(C0, x.reshape(-1, 1)) - d0, 0))
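if __name__ == "__main__":
    # Toy sanity check with made-up numbers (assumes this module's heavy imports
    # resolve): getval() is the true objective theta^T x minus the hinge penalty
    # alpha0^T max(C0 x - d0, 0). Here the single soft constraint is violated by 1,
    # so the value is (2 + 3) - 1 = 4 and this prints [[4.]].
    theta = np.array([2.0, 3.0])
    x = np.array([1.0, 1.0])
    alpha0 = np.array([[1.0]])
    C0 = np.array([[1.0, 1.0]])
    d0 = np.array([[1.0]])
    print(getval(theta, x, alpha0, np.zeros((1, 2)), np.zeros((1, 1)), C0, d0))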
| 52.368794
| 210
| 0.615926
|
44f2c4b8222181748eb033d82628f72e3fa1906d
| 708
|
py
|
Python
|
2017/170304/bowling/main.py
|
emmenezes/CodingDojo
|
50468e29ea9c724a4dc1196c8374125c5b1cfc20
|
[
"Beerware"
] | 11
|
2017-06-30T00:33:14.000Z
|
2019-04-18T01:16:13.000Z
|
2017/170304/bowling/main.py
|
emmenezes/CodingDojo
|
50468e29ea9c724a4dc1196c8374125c5b1cfc20
|
[
"Beerware"
] | 7
|
2017-10-29T20:05:01.000Z
|
2018-07-06T20:18:51.000Z
|
2017/170304/bowling/main.py
|
emmenezes/CodingDojo
|
50468e29ea9c724a4dc1196c8374125c5b1cfc20
|
[
"Beerware"
] | 5
|
2019-10-23T22:49:55.000Z
|
2021-01-11T01:39:58.000Z
|
class Frames:
def jogada(self, play):
return play
def verifica_par(self, play):
qtd = len(play)
resto_divisao = qtd % 2
        return resto_divisao == 0
def aglutinarJogadas(self, lista1):
resultado = []
parcial = []
for percorre in lista1:
if percorre == 10:
resultado.append([percorre])
else:
parcial.append(percorre)
            if len(parcial) == 2:
resultado.append(parcial)
parcial = []
if len(parcial) > 0:
resultado.append(parcial)
return resultado
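if __name__ == "__main__":
    # Illustrative check (made-up rolls): a strike (10) occupies a frame on its own,
    # other rolls are grouped in pairs, and a trailing roll stays in its own frame.
    frames = Frames()
    print(frames.aglutinarJogadas([10, 3, 4, 10, 2]))  # [[10], [3, 4], [10], [2]]
    print(frames.verifica_par([3, 4, 10]))             # False: odd number of rolls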
| 26.222222
| 45
| 0.490113
|
a4c685bfaef6f28982d1d32fab08e1ddf2987ffa
| 1,042
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
gabrielmdc/receipe-app-api
|
c07d5cb776e5498212e8b3a3ce8dd96e4eeaf260
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
gabrielmdc/receipe-app-api
|
c07d5cb776e5498212e8b3a3ce8dd96e4eeaf260
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
gabrielmdc/receipe-app-api
|
c07d5cb776e5498212e8b3a3ce8dd96e4eeaf260
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-02-14 13:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.931034
| 118
| 0.603647
|
5d7b07467702346aacc6ba59ef3c734490312039
| 4,780
|
py
|
Python
|
noise_robust_cobras/cobras_logger.py
|
jonassoenen/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | 2
|
2020-07-30T15:09:53.000Z
|
2020-07-31T06:33:36.000Z
|
noise_robust_cobras/cobras_logger.py
|
magicalJohn/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | null | null | null |
noise_robust_cobras/cobras_logger.py
|
magicalJohn/noise_robust_cobras
|
0e5823dbba0263c3ccb3c2afb4267f2f542fc568
|
[
"Apache-2.0"
] | 1
|
2021-12-12T11:11:25.000Z
|
2021-12-12T11:11:25.000Z
|
import copy
import time
from typing import List
import numpy as np
from noise_robust_cobras.noise_robust.datastructures.constraint import Constraint
class NopLogger(object):
def nop(*args, **kw):
pass
def __getattr__(self, _):
return self.nop
class ClusteringLogger:
def __init__(self):
# start time
self.start_time = None
# basic logging of intermediate results
self.intermediate_results = []
# all constraints obtained from the user
self.all_user_constraints = []
# algorithm phases
self.current_phase = None
self.algorithm_phases = []
# (detected) noisy constraints
self.corrected_constraint_sets = []
self.noisy_constraint_data = []
self.detected_noisy_constraint_data = []
# clustering to store
self.clustering_to_store = None
# execution time
self.execution_time = None
#########################
# information retrieval #
#########################
def get_all_clusterings(self):
return [cluster for cluster, _, _ in self.intermediate_results]
def get_runtimes(self):
return [runtime for _, runtime, _ in self.intermediate_results]
def get_ml_cl_constraint_lists(self):
ml = []
cl = []
for constraint in self.all_user_constraints:
if constraint.is_ML():
ml.append(constraint.get_instance_tuple())
else:
cl.append(constraint.get_instance_tuple())
return ml, cl
def add_mistake_information(self, ground_truth_querier):
for i, (constraint_number, constraint_copy) in enumerate(
self.corrected_constraint_sets
):
mistakes = []
for con in constraint_copy:
if (
ground_truth_querier.query(*con.get_instance_tuple()).is_ML()
!= con.is_ML()
):
mistakes.append(con)
self.corrected_constraint_sets[i] = (
constraint_number,
constraint_copy,
mistakes,
)
###################
# log constraints #
###################
def log_new_user_query(self, constraint):
# add the constraint to all_user_constraints
self.all_user_constraints.append(constraint)
# keep algorithm phases up to date
self.algorithm_phases.append(self.current_phase)
# intermediate clustering results
self.intermediate_results.append(
(
self.clustering_to_store,
time.time() - self.start_time,
len(self.all_user_constraints),
)
)
##################
# execution time #
##################
def log_start_clustering(self):
self.start_time = time.time()
def log_end_clustering(self):
self.execution_time = time.time() - self.start_time
##############
# phase data #
##############
def log_entering_phase(self, phase):
self.current_phase = phase
###############
# clusterings #
###############
def update_clustering_to_store(self, clustering):
if isinstance(clustering, np.ndarray):
self.clustering_to_store = clustering.tolist()
elif isinstance(clustering, list):
self.clustering_to_store = list(clustering)
else:
self.clustering_to_store = clustering.construct_cluster_labeling()
def update_last_intermediate_result(self, clustering):
if len(self.intermediate_results) == 0:
return
if not isinstance(clustering, np.ndarray):
self.intermediate_results[-1] = (
clustering.construct_cluster_labeling(),
time.time() - self.start_time,
len(self.all_user_constraints),
)
else:
self.intermediate_results[-1] = (
clustering.tolist(),
time.time() - self.start_time,
len(self.all_user_constraints),
)
#####################
# noisy constraints #
#####################
def log_corrected_constraint_set(self, constraints):
constraint_copy: List[Constraint] = [copy.copy(con) for con in constraints]
current_constraint_number = len(self.all_user_constraints)
self.corrected_constraint_sets.append(
(current_constraint_number, constraint_copy)
)
def log_detected_noisy_constraints(self, constraints):
con_length = len(self.all_user_constraints)
for con in constraints:
self.detected_noisy_constraint_data.append((con_length, copy.copy(con)))
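if __name__ == "__main__":
    # Minimal sketch with made-up data: the logger records timing, the current phase
    # and intermediate clusterings, while NopLogger silently absorbs the same calls.
    clog = ClusteringLogger()
    clog.log_start_clustering()
    clog.log_entering_phase("splitting")
    clog.update_clustering_to_store(np.array([0, 0, 1, 1]))
    clog.log_end_clustering()
    print(clog.execution_time, clog.clustering_to_store)
    NopLogger().log_new_user_query("ignored")  # every attribute is a no-op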
| 29.875
| 84
| 0.580753
|
06779124819b644bb0af761a2c522aca9a15466b
| 9,064
|
py
|
Python
|
Master_Mind.py
|
cmoradev/MesterMind_cenfotec
|
233fd17a5cb3bf1b5246286581caed95819ad43d
|
[
"MIT"
] | null | null | null |
Master_Mind.py
|
cmoradev/MesterMind_cenfotec
|
233fd17a5cb3bf1b5246286581caed95819ad43d
|
[
"MIT"
] | null | null | null |
Master_Mind.py
|
cmoradev/MesterMind_cenfotec
|
233fd17a5cb3bf1b5246286581caed95819ad43d
|
[
"MIT"
] | null | null | null |
from turtle import *
from random import *
import pickle
import os.path
#Declaration of the lists to be used
#Random colour generation
colors = ["r","y","g","b","o","p"]
lista = []
codebreaker = []
#If guess was declared as an empty list, comparing raised an index-out-of-range error
guess = ["-","-","-","-"]
#Global score variable persisted with pickle
if os.path.isfile("puntuaciones_mm") == False:
puntuaciones = []
archivo = open("puntuaciones_mm", "wb")
pickle.dump(puntuaciones, archivo)
archivo.close()
else:
archivo = open("puntuaciones_mm", "rb")
puntuaciones = pickle.load(archivo)
archivo.close()
#Receive the user's guess
def ingresar_intento(color1,color2,color3,color4,intento_guess):
nuevo_intento = [color1,color2,color3,color4]
intento_guess.append(nuevo_intento)
return intento_guess
#Compare colours at the same positions
def comparar(lista,codebreaker):
for n in range(len(codebreaker)):
if lista[n] == codebreaker[n]:
guess[n] = "X"
    #Compare the random list with each codebreaker entry
for x in range(len(codebreaker)):
for y in range(len(codebreaker)):
if guess[x] != "X":
if codebreaker[x] == lista[y]:
guess[x] = "O"
else:
break
print("Su intento es: ", codebreaker)
print("╔═ .*. ══════╗")
print(" Pistas ")
print("╚══════ .*. ═╝")
print(guess)
print()
return guess
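#Illustrative example (not executed): with a secret lista = ["r", "g", "b", "y"] and a
#guess codebreaker = ["r", "b", "p", "y"], comparar() marks positions 0 and 3 with "X"
#(right colour, right place), position 1 with "O" (colour present elsewhere) and leaves
#position 2 as "-" (colour not in the code).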
def evaluacion_guess(guess):
    #Attempt loop, evaluating the hints produced by comparar()
    suma_puntaje = 0
    evaluation = True
while evaluation:
        #Count towards the score
suma_puntaje += 1
if guess == ["X","X","X","X"]:
print("╔═ .*. ═══════════════╗")
print(" Codigo acertado ")
print(" Puntos logrados: ", suma_puntaje)
print("╚═══════════════ .*. ═╝")
print()
evaluation = False
            #store the score
puntaje = [player,suma_puntaje]
            #save my list of scores
puntuaciones.append(puntaje)
            #save the file with pickle
archivo = open("puntuaciones_mm", "wb")
pickle.dump(puntuaciones, archivo)
archivo.close()
graphic(lista)
print("")
else:
print("╔═ .*. ═══════════╗")
print(" Intentos agotados ")
print (" (x_x)")
print("╚═══════════ .*. ═╝")
print("")
            #also record the score when the attempts run out
            #store the score
puntaje = [player,suma_puntaje]
            #save my list of scores
puntuaciones.append(puntaje)
            #save the file with pickle
archivo = open("puntuaciones_mm", "wb")
pickle.dump(puntuaciones, archivo)
archivo.close()
            evaluation = False
    #Reset the hint list (in place) for the next round
    for i in range(len(guess)):
        guess[i] = "-"
#Clear every element of the variable that receives the guesses, because already-set elements cannot be overwritten
def limpiar_codebreaker(codebreaker):
codebreaker.clear()
#Sort the list of scores
def sort(puntuaciones2):
    l = len(puntuaciones2)
    for i in range(0, l):
        for j in range(0, l-i-1):
            if (puntuaciones2[j][1] > puntuaciones2[j + 1][1]):
                tempo = puntuaciones2[j]
                puntuaciones2[j] = puntuaciones2[j + 1]
                puntuaciones2[j + 1] = tempo
    for i in range(0, l):
        print(i+1, "Jugador: ", puntuaciones2[i][0], "Puntos:", puntuaciones2[i][1])
    print()
    return puntuaciones2
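#Illustrative example (not executed): sort([["Ana", 5], ["Luis", 2], ["Eva", 9]]) prints
#the players ordered by ascending score (Luis, Ana, Eva) and returns the sorted list.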
#Turtle graphic, shown only when the user guesses the code
def graphic(lista):
#Setup Turtle Python
setup(550,400)
tl = Turtle()
tl.up()
tl.speed(0)
    #Display the colours on the Turtle screen
position=-150
i = 0
while i < (len(lista)):
tl.goto(position,0)
if lista[i] == "r" or lista[i] == "R":
tl.color("red")
elif lista[i] == "y" or lista[i] == "Y":
tl.color("yellow")
elif lista[i] == "g" or lista[i] == "G":
tl.color("green")
elif lista[i] == "b" or lista[i] == "B":
tl.color("blue")
elif lista[i] == "o" or lista[i] == "O":
tl.color("orange")
elif lista[i] == "p" or lista[i] == "P":
tl.color("purple")
else:
continue
tl.begin_fill()
tl.circle(50)
tl.end_fill()
position += 110
i += 1
tl.up()
## Program menu:
running = True
while running:
print("Menu:")
print("1. Iniciar juego")
print("2. Ver instrucciones del juego")
print("3. Ver las mejores 10 puntuaciones")
print("4. Borrar puntuaciones")
print("5. Creditos")
print("6. Salir")
select_option = input("Seleccione una opcion: ")
print()
if select_option == '1':
count = 0
player = input("Introduzca el nombre: ")
print("╔═ .*. ══════════════════════╗")
print(" Colores aleatorios Generados")
print("╚══════════════════════ .*. ═╝")
print()
lista = choices(colors,k=4)
        #TODO: hide the generated colours ****************************************
print("Aleatorio",lista)
print("contador",count)
print()
if count < 2:
print( "Escriba la letra del color que desea elegir: ")
print("Colores: (Red = r) (Yellow = y) (Green = g) ")
print(" (Blue = b) (Orange = o) (Purple = p)")
print()
color1 = input("Ingrese color #1 ")
color2 = input("Ingrese color #2 ")
color3 = input("Ingrese color #3 ")
color4 = input("Ingrese color #4 ")
print()
            #function call: the list was being stored as a nested list [[]], so the original variable
            #is replaced by its index-zero element; several ways of filling the empty list were tried
nuevo_ingreso = ingresar_intento(color1,color2,color3,color4,codebreaker)
codebreaker = codebreaker[0]
            #call the comparison function
comparar(lista,codebreaker)
count += 1
limpiar_codebreaker(codebreaker)
evaluacion_guess(guess)
elif select_option == '2':
print("╔═ .*. ═══════════════╗")
print(" Instrucciones del Juego")
print("╚═══════════════ .*. ═╝")
print("Mastermind consiste en un juego de mesa de dos jugadores en el cual un jugador")
print("crea un codigo de 4 colores (codemaker) y el otro jugador intenta adivinar")
print("este codigo (codebreaker) basado en pistas que el codemaker debe darle al codebreaker")
print("Usted tendra una pista en cada intento")
print(" ∗ Una “x” indica que el color de la adivinanza es correcto y esta en la posicion correcta.")
print(" ∗ Una “o” indica que en la adivinanza se utilizo un color correcto pero esta en la posicion incorrecta.")
print(" ∗ Un “-” indica que hay un elemento de la adivinanza que no esta en el codigo secreto.")
print()
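        # Illustrative example of the hints described above (assuming the comparison routine
        # emits one symbol per guessed position):
        #   secret:  r y g b
        #   guess:   r g p b
        #   hints:   x o - x   (r and b match position, g is misplaced, p is not in the code)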
elif select_option == '3':
print("╔═ .*. ═══════════════╗")
print(" P U N T U A C I O N E S ")
print("╚═══════════════ .*. ═╝")
archivo2 = open("puntuaciones_mm", "rb")
puntuaciones2 = pickle.load(archivo2)
if puntuaciones2 == []:
print ("No hay puntuaciones guardadas")
print()
else:
            #function that sorts and prints the scores
sort(puntuaciones2)
elif select_option == '4':
print ("¿Desea borrar las puntuaciones?")
print ("Si, digite el numero 1")
print ("No, digite el numero 2")
        while True:
            select = input("Seleccione una opcion: ")
            if select == '1':
                puntuaciones = []
                archivo = open("puntuaciones_mm", "wb")
                pickle.dump(puntuaciones, archivo)
                archivo.close()
                print ("Historial de puntuaciones eliminado")
                print ()
                break
            if select == '2':
                break
print()
elif select_option == '5':
print("♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪")
print(" Desarrollado por: Cindy Mora y Diana Alvarez")
print(" Gracias Especiales a: Andrés Morales(Profesor)")
print(" Universidad CENFOTEC 2021 ")
print("♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪*•.¸¸.•*¨¨*•.♪ღ♪")
print()
elif select_option == '6':
running = False
print("Game Over!")
| 32.487455
| 149
| 0.519417
|
5ff31c66b30af3cdb9d0c3627d8f257b88075828
| 32,894
|
py
|
Python
|
exchangelib/protocol.py
|
lmverity/exchangelib
|
15002565dfea30705c74b256b3a601f95e2afb00
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/protocol.py
|
lmverity/exchangelib
|
15002565dfea30705c74b256b3a601f95e2afb00
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/protocol.py
|
lmverity/exchangelib
|
15002565dfea30705c74b256b3a601f95e2afb00
|
[
"BSD-2-Clause"
] | null | null | null |
"""
A protocol is an endpoint for EWS service connections. It contains all necessary information to make HTTPS connections.
Protocols should be accessed through an Account, and are either created from a default Configuration or autodiscovered
when creating an Account.
"""
import datetime
import logging
import os
from threading import Lock
from queue import LifoQueue, Empty, Full
import requests.adapters
import requests.sessions
import requests.utils
from oauthlib.oauth2 import BackendApplicationClient, WebApplicationClient
from requests_oauthlib import OAuth2Session
from .credentials import OAuth2AuthorizationCodeCredentials, OAuth2Credentials
from .errors import TransportError, SessionPoolMinSizeReached, SessionPoolMaxSizeReached
from .properties import FreeBusyViewOptions, MailboxData, TimeWindow, TimeZone
from .services import GetServerTimeZones, GetRoomLists, GetRooms, ResolveNames, GetUserAvailability, \
GetSearchableMailboxes, ExpandDL, ConvertId
from .transport import get_auth_instance, get_service_authtype, NTLM, OAUTH2, CREDENTIALS_REQUIRED, DEFAULT_HEADERS
from .version import Version, API_VERSIONS
log = logging.getLogger(__name__)
def close_connections():
CachingProtocol.clear_cache()
class BaseProtocol:
"""Base class for Protocol which implements the bare essentials"""
# The maximum number of sessions (== TCP connections, see below) we will open to this service endpoint. Keep this
# low unless you have an agreement with the Exchange admin on the receiving end to hammer the server and
# rate-limiting policies have been disabled for the connecting user. Changing this setting only makes sense if
# you are using a thread pool to run multiple concurrent workers in this process.
SESSION_POOLSIZE = 1
# We want only 1 TCP connection per Session object. We may have lots of different credentials hitting the server and
# each credential needs its own session (NTLM auth will only send credentials once and then secure the connection,
# so a connection can only handle requests for one credential). Having multiple connections per Session could
# quickly exhaust the maximum number of concurrent connections the Exchange server allows from one client.
CONNECTIONS_PER_SESSION = 1
# The number of times a session may be reused before creating a new session object. 'None' means "infinite".
# Discarding sessions after a certain number of usages may limit memory leaks in the Session object.
MAX_SESSION_USAGE_COUNT = None
# Timeout for HTTP requests
TIMEOUT = 120
# The adapter class to use for HTTP requests. Override this if you need e.g. proxy support or specific TLS versions
HTTP_ADAPTER_CLS = requests.adapters.HTTPAdapter
# The User-Agent header to use for HTTP requests. Override this to set an app-specific one
USERAGENT = None
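    # Hedged example (assumption): instead of editing the class attribute above, the pool
    # size can be set per Configuration, since __init__ below falls back to SESSION_POOLSIZE
    # only when config.max_connections is unset:
    #
    #   config = Configuration(server='mail.example.com', credentials=credentials,
    #                          max_connections=4)  # hypothetical values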
def __init__(self, config):
from .configuration import Configuration
if not isinstance(config, Configuration):
raise ValueError("'config' %r must be a Configuration instance" % config)
if not config.service_endpoint:
raise AttributeError("'config.service_endpoint' must be set")
self.config = config
self._session_pool_size = 0
self._session_pool_maxsize = config.max_connections or self.SESSION_POOLSIZE
# Autodetect authentication type if necessary
if self.config.auth_type is None:
self.config.auth_type = self.get_auth_type()
# Try to behave nicely with the remote server. We want to keep the connection open between requests.
# We also want to re-use sessions, to avoid the NTLM auth handshake on every request. We must know the
# authentication method to create sessions.
self._session_pool = LifoQueue()
self._session_pool_lock = Lock()
@property
def service_endpoint(self):
return self.config.service_endpoint
@property
def auth_type(self):
return self.config.auth_type
@property
def credentials(self):
return self.config.credentials
@credentials.setter
def credentials(self, value):
# We are updating credentials, but that doesn't automatically propagate to the session objects. The simplest
# solution is to just kill the sessions in the pool.
with self._session_pool_lock:
self.config._credentials = value
self.close()
@property
def retry_policy(self):
return self.config.retry_policy
@property
def server(self):
return self.config.server
def __getstate__(self):
# The session pool and lock cannot be pickled
state = self.__dict__.copy()
del state['_session_pool']
del state['_session_pool_lock']
return state
def __setstate__(self, state):
# Restore the session pool and lock
self.__dict__.update(state)
self._session_pool = LifoQueue()
self._session_pool_lock = Lock()
def __del__(self):
# pylint: disable=bare-except
try:
self.close()
except Exception: # nosec
# __del__ should never fail
pass
def close(self):
log.debug('Server %s: Closing sessions', self.server)
while True:
try:
session = self._session_pool.get(block=False)
self.close_session(session)
self._session_pool_size -= 1
except Empty:
break
@classmethod
def get_adapter(cls):
# We want just one connection per session. No retries, since we wrap all requests in our own retry handler
return cls.HTTP_ADAPTER_CLS(
pool_block=True,
pool_connections=cls.CONNECTIONS_PER_SESSION,
pool_maxsize=cls.CONNECTIONS_PER_SESSION,
max_retries=0,
)
def get_auth_type(self):
# Autodetect and return authentication type
raise NotImplementedError()
@property
def session_pool_size(self):
return self._session_pool_size
def increase_poolsize(self):
"""Increases the session pool size. We increase by one session per call.
"""
# Create a single session and insert it into the pool. We need to protect this with a lock while we are changing
# the pool size variable, to avoid race conditions. We must not exceed the pool size limit.
if self._session_pool_size == self._session_pool_maxsize:
raise SessionPoolMaxSizeReached('Session pool size cannot be increased further')
with self._session_pool_lock:
if self._session_pool_size >= self._session_pool_maxsize:
log.debug('Session pool size was increased in another thread')
return
log.debug('Server %s: Increasing session pool size from %s to %s', self.server, self._session_pool_size,
self._session_pool_size + 1)
self._session_pool.put(self.create_session(), block=False)
self._session_pool_size += 1
def decrease_poolsize(self):
"""Decreases the session pool size in response to error messages from the server requesting to rate-limit
requests. We decrease by one session per call.
"""
# Take a single session from the pool and discard it. We need to protect this with a lock while we are changing
# the pool size variable, to avoid race conditions. We must keep at least one session in the pool.
if self._session_pool_size <= 1:
raise SessionPoolMinSizeReached('Session pool size cannot be decreased further')
with self._session_pool_lock:
if self._session_pool_size <= 1:
log.debug('Session pool size was decreased in another thread')
return
log.warning('Server %s: Decreasing session pool size from %s to %s', self.server, self._session_pool_size,
self._session_pool_size - 1)
session = self.get_session()
self.close_session(session)
self._session_pool_size -= 1
def get_session(self):
# Try to get a session from the queue. If the queue is empty, try to add one more session to the queue. If the
# queue is already at its max, wait until a session becomes available.
_timeout = 60 # Rate-limit messages about session starvation
try:
session = self._session_pool.get(block=False)
log.debug('Server %s: Got session immediately', self.server)
except Empty:
try:
self.increase_poolsize()
except SessionPoolMaxSizeReached:
pass
while True:
try:
log.debug('Server %s: Waiting for session', self.server)
session = self._session_pool.get(timeout=_timeout)
break
except Empty:
# This is normal when we have many worker threads starving for available sessions
log.debug('Server %s: No sessions available for %s seconds', self.server, _timeout)
log.debug('Server %s: Got session %s', self.server, session.session_id)
session.usage_count += 1
return session
def release_session(self, session):
# This should never fail, as we don't have more sessions than the queue contains
log.debug('Server %s: Releasing session %s', self.server, session.session_id)
if self.MAX_SESSION_USAGE_COUNT and session.usage_count > self.MAX_SESSION_USAGE_COUNT:
log.debug('Server %s: session %s usage exceeded limit. Discarding', self.server, session.session_id)
session = self.renew_session(session)
try:
self._session_pool.put(session, block=False)
except Full:
log.debug('Server %s: Session pool was already full %s', self.server, session.session_id)
def close_session(self, session):
session.close()
del session
def retire_session(self, session):
# The session is useless. Close it completely and place a fresh session in the pool
log.debug('Server %s: Retiring session %s', self.server, session.session_id)
self.close_session(session)
self.release_session(self.create_session())
def renew_session(self, session):
        # The old session is useless. Close it completely and return a fresh session to the caller
log.debug('Server %s: Renewing session %s', self.server, session.session_id)
self.close_session(session)
return self.create_session()
def refresh_credentials(self, session):
# Credentials need to be refreshed, probably due to an OAuth
# access token expiring. If we've gotten here, it's because the
# application didn't provide an OAuth client secret, so we can't
# handle token refreshing for it.
with self.credentials.lock:
if self.credentials.sig() == session.credentials_sig:
# Credentials have not been refreshed by another thread:
# they're the same as the session was created with. If
# this isn't the case, we can just go ahead with a new
# session using the already-updated credentials.
self.credentials.refresh(session=session)
return self.renew_session(session)
def create_session(self):
if self.auth_type is None:
raise ValueError('Cannot create session without knowing the auth type')
if self.credentials is None:
if self.auth_type in CREDENTIALS_REQUIRED:
raise ValueError('Auth type %r requires credentials' % self.auth_type)
session = self.raw_session(self.service_endpoint)
session.auth = get_auth_instance(auth_type=self.auth_type)
else:
with self.credentials.lock:
if isinstance(self.credentials, OAuth2Credentials):
session = self.create_oauth2_session()
# Keep track of the credentials used to create this session. If
# and when we need to renew credentials (for example, refreshing
# an OAuth access token), this lets us easily determine whether
# the credentials have already been refreshed in another thread
# by the time this session tries.
session.credentials_sig = self.credentials.sig()
else:
if self.auth_type == NTLM and self.credentials.type == self.credentials.EMAIL:
username = '\\' + self.credentials.username
else:
username = self.credentials.username
session = self.raw_session(self.service_endpoint)
session.auth = get_auth_instance(auth_type=self.auth_type, username=username,
password=self.credentials.password)
# Add some extra info
session.session_id = sum(map(ord, str(os.urandom(100)))) # Used for debugging messages in services
session.usage_count = 0
session.protocol = self
log.debug('Server %s: Created session %s', self.server, session.session_id)
return session
def create_oauth2_session(self):
if self.auth_type != OAUTH2:
raise ValueError('Auth type must be %r for credentials type OAuth2Credentials' % OAUTH2)
has_token = False
scope = ['https://outlook.office365.com/.default']
session_params = {}
token_params = {}
if isinstance(self.credentials, OAuth2AuthorizationCodeCredentials):
# Ask for a refresh token
scope.append('offline_access')
# We don't know (or need) the Microsoft tenant ID. Use
# common/ to let Microsoft select the appropriate tenant
# for the provided authorization code or refresh token.
#
# Suppress looks-like-password warning from Bandit.
token_url = 'https://login.microsoftonline.com/common/oauth2/v2.0/token' # nosec
client_params = {}
has_token = self.credentials.access_token is not None
if has_token:
session_params['token'] = self.credentials.access_token
elif self.credentials.authorization_code is not None:
token_params['code'] = self.credentials.authorization_code
self.credentials.authorization_code = None
if self.credentials.client_id is not None and self.credentials.client_secret is not None:
# If we're given a client ID and secret, we have enough
# to refresh access tokens ourselves. In other cases the
# session will raise TokenExpiredError and we'll need to
# ask the calling application to refresh the token (that
# covers cases where the caller doesn't have access to
# the client secret but is working with a service that
# can provide it refreshed tokens on a limited basis).
session_params.update({
'auto_refresh_kwargs': {
'client_id': self.credentials.client_id,
'client_secret': self.credentials.client_secret,
},
'auto_refresh_url': token_url,
'token_updater': self.credentials.on_token_auto_refreshed,
})
client = WebApplicationClient(self.credentials.client_id, **client_params)
else:
token_url = 'https://login.microsoftonline.com/%s/oauth2/v2.0/token' % self.credentials.tenant_id
client = BackendApplicationClient(client_id=self.credentials.client_id)
session = self.raw_session(self.service_endpoint, oauth2_client=client, oauth2_session_params=session_params)
if not has_token:
# Fetch the token explicitly -- it doesn't occur implicitly
token = session.fetch_token(token_url=token_url, client_id=self.credentials.client_id,
client_secret=self.credentials.client_secret, scope=scope,
**token_params)
# Allow the credentials object to update its copy of the new
# token, and give the application an opportunity to cache it
self.credentials.on_token_auto_refreshed(token)
session.auth = get_auth_instance(auth_type=OAUTH2, client=client)
return session
@classmethod
def raw_session(cls, prefix, oauth2_client=None, oauth2_session_params=None):
if oauth2_client:
session = OAuth2Session(client=oauth2_client, **(oauth2_session_params or {}))
else:
session = requests.sessions.Session()
session.headers.update(DEFAULT_HEADERS)
session.headers['User-Agent'] = cls.USERAGENT
session.mount(prefix, adapter=cls.get_adapter())
return session
def __repr__(self):
return self.__class__.__name__ + repr((self.service_endpoint, self.credentials, self.auth_type))
class CachingProtocol(type):
_protocol_cache = {}
_protocol_cache_lock = Lock()
def __call__(cls, *args, **kwargs):
# Cache Protocol instances that point to the same endpoint and use the same credentials. This ensures that we
# re-use thread and connection pools etc. instead of flooding the remote server. This is a modified Singleton
# pattern.
#
# We ignore auth_type from kwargs in the cache key. We trust caller to supply the correct auth_type - otherwise
# __init__ will guess the correct auth type.
# We may be using multiple different credentials and changing our minds on TLS verification. This key
# combination should be safe.
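        # Rough illustration of the caching behaviour (hypothetical endpoint and credentials):
        #
        #   config = Configuration(service_endpoint='https://mail.example.com/EWS/Exchange.asmx',
        #                          credentials=credentials)
        #   assert Protocol(config=config) is Protocol(config=config)  # same cached instance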
_protocol_cache_key = kwargs['config'].service_endpoint, kwargs['config'].credentials
protocol = cls._protocol_cache.get(_protocol_cache_key)
if isinstance(protocol, Exception):
# The input data leads to a TransportError. Re-throw
raise protocol
if protocol is not None:
return protocol
# Acquire lock to guard against multiple threads competing to cache information. Having a per-server lock is
# probably overkill although it would reduce lock contention.
log.debug('Waiting for _protocol_cache_lock')
with cls._protocol_cache_lock:
protocol = cls._protocol_cache.get(_protocol_cache_key)
if isinstance(protocol, Exception):
# Someone got ahead of us while holding the lock, but the input data leads to a TransportError. Re-throw
raise protocol
if protocol is not None:
# Someone got ahead of us while holding the lock
return protocol
log.debug("Protocol __call__ cache miss. Adding key '%s'", str(_protocol_cache_key))
try:
protocol = super().__call__(*args, **kwargs)
except TransportError as e:
# This can happen if, for example, autodiscover supplies us with a bogus EWS endpoint
log.warning('Failed to create cached protocol with key %s: %s', _protocol_cache_key, e)
cls._protocol_cache[_protocol_cache_key] = e
raise e
cls._protocol_cache[_protocol_cache_key] = protocol
return protocol
@classmethod
def clear_cache(mcs):
with mcs._protocol_cache_lock:
for key, protocol in mcs._protocol_cache.items():
if isinstance(protocol, Exception):
continue
service_endpoint = key[0]
log.debug("Service endpoint '%s': Closing sessions", service_endpoint)
with protocol._session_pool_lock:
protocol.close()
mcs._protocol_cache.clear()
class Protocol(BaseProtocol, metaclass=CachingProtocol):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._api_version_hint = None
self._version_lock = Lock()
def get_auth_type(self):
# Autodetect authentication type. We also set version hint here.
name = str(self.credentials) if self.credentials and str(self.credentials) else 'DUMMY'
auth_type, api_version_hint = get_service_authtype(
service_endpoint=self.service_endpoint, retry_policy=self.retry_policy, api_versions=API_VERSIONS, name=name
)
self._api_version_hint = api_version_hint
return auth_type
@property
def version(self):
# Make sure only one thread does the guessing.
if not self.config.version or not self.config.version.build:
with self._version_lock:
if not self.config.version or not self.config.version.build:
# Version.guess() needs auth objects and a working session pool
self.config.version = Version.guess(self, api_version_hint=self._api_version_hint)
return self.config.version
def get_timezones(self, timezones=None, return_full_timezone_data=False):
"""Get timezone definitions from the server
Args:
            timezones: A list of timezone instances. If None, fetches all timezones from server (Default value = None)
return_full_timezone_data: If true, also returns periods and transitions (Default value = False)
Returns:
A list of (tz_id, name, periods, transitions) tuples
"""
return GetServerTimeZones(protocol=self).call(
timezones=timezones, return_full_timezone_data=return_full_timezone_data
)
def get_free_busy_info(self, accounts, start, end, merged_free_busy_interval=30, requested_view='DetailedMerged'):
"""Returns free/busy information for a list of accounts
Args:
accounts: A list of (account, attendee_type, exclude_conflicts) tuples, where account is either an Account
object or a string, attendee_type is a MailboxData.attendee_type choice, and exclude_conflicts is a boolean.
start: The start datetime of the request
end: The end datetime of the request
merged_free_busy_interval: The interval, in minutes, of merged free/busy information (Default value = 30)
requested_view: The type of information returned. Possible values are defined in the
FreeBusyViewOptions.requested_view choices. (Default value = 'DetailedMerged')
Returns:
A generator of FreeBusyView objects
"""
from .account import Account
for account, attendee_type, exclude_conflicts in accounts:
if not isinstance(account, (Account, str)):
raise ValueError("'accounts' item %r must be an 'Account' or 'str' instance" % account)
if attendee_type not in MailboxData.ATTENDEE_TYPES:
raise ValueError("'accounts' item %r must be one of %s" % (attendee_type, MailboxData.ATTENDEE_TYPES))
if not isinstance(exclude_conflicts, bool):
raise ValueError("'accounts' item %r must be a 'bool' instance" % exclude_conflicts)
if start >= end:
raise ValueError("'start' must be less than 'end' (%s -> %s)" % (start, end))
if not isinstance(merged_free_busy_interval, int):
raise ValueError("'merged_free_busy_interval' value %r must be an 'int'" % merged_free_busy_interval)
if requested_view not in FreeBusyViewOptions.REQUESTED_VIEWS:
raise ValueError(
"'requested_view' value %r must be one of %s" % (requested_view, FreeBusyViewOptions.REQUESTED_VIEWS))
_, _, periods, transitions, transitions_groups = list(self.get_timezones(
timezones=[start.tzinfo],
return_full_timezone_data=True
))[0]
return GetUserAvailability(self).call(
timezone=TimeZone.from_server_timezone(
periods=periods,
transitions=transitions,
transitionsgroups=transitions_groups,
for_year=start.year
),
mailbox_data=[MailboxData(
email=account.primary_smtp_address if isinstance(account, Account) else account,
attendee_type=attendee_type,
exclude_conflicts=exclude_conflicts
) for account, attendee_type, exclude_conflicts in accounts],
free_busy_view_options=FreeBusyViewOptions(
time_window=TimeWindow(start=start, end=end),
merged_free_busy_interval=merged_free_busy_interval,
requested_view=requested_view,
),
)
def get_roomlists(self):
return GetRoomLists(protocol=self).call()
def get_rooms(self, roomlist):
from .properties import RoomList
return GetRooms(protocol=self).call(roomlist=RoomList(email_address=roomlist))
def resolve_names(self, names, return_full_contact_data=False, search_scope=None, shape=None):
"""Resolve accounts on the server using partial account data, e.g. an email address or initials
Args:
names: A list of identifiers to query
return_full_contact_data: If True, returns full contact data (Default value = False)
search_scope: The scope to perform the search. Must be one of SEARCH_SCOPE_CHOICES (Default value = None)
            shape: The shape of the returned contact data (Default value = None)
Returns:
A list of Mailbox items or, if return_full_contact_data is True, tuples of (Mailbox, Contact) items
"""
return list(ResolveNames(protocol=self).call(
unresolved_entries=names, return_full_contact_data=return_full_contact_data, search_scope=search_scope,
contact_data_shape=shape,
))
def expand_dl(self, distribution_list):
"""Expand distribution list into it's members
Args:
distribution_list: SMTP address of the distribution list to expand, or a DLMailbox representing the list
Returns:
List of Mailbox items that are members of the distribution list
"""
from .properties import DLMailbox
if isinstance(distribution_list, str):
distribution_list = DLMailbox(email_address=distribution_list, mailbox_type='PublicDL')
return list(ExpandDL(protocol=self).call(distribution_list=distribution_list))
def get_searchable_mailboxes(self, search_filter=None, expand_group_membership=False):
"""This method is only available to users who have been assigned the Discovery Management RBAC role. See
https://docs.microsoft.com/en-us/exchange/permissions-exo/permissions-exo
Args:
            search_filter: If set, must be a single email alias (Default value = None)
expand_group_membership: If True, returned distribution lists are expanded (Default value = False)
Returns:
a list of SearchableMailbox, FailedMailbox or Exception instances
"""
return list(GetSearchableMailboxes(protocol=self).call(
search_filter=search_filter,
expand_group_membership=expand_group_membership,
))
def convert_ids(self, ids, destination_format):
"""Converts item and folder IDs between multiple formats
Args:
ids: a list of AlternateId, AlternatePublicFolderId or AlternatePublicFolderItemId instances
destination_format: A string
Returns:
a generator of AlternateId, AlternatePublicFolderId or AlternatePublicFolderItemId instances
"""
from .properties import AlternateId, AlternatePublicFolderId, AlternatePublicFolderItemId
cls_map = {cls.response_tag(): cls for cls in (
AlternateId, AlternatePublicFolderId, AlternatePublicFolderItemId
)}
for i in ConvertId(protocol=self).call(items=ids, destination_format=destination_format):
if isinstance(i, Exception):
yield i
else:
id_cls = cls_map[i.tag]
yield id_cls.from_xml(i, account=None)
def __getstate__(self):
# The lock and thread pool cannot be pickled
state = super().__getstate__()
del state['_version_lock']
try:
del state['thread_pool']
except KeyError:
# thread_pool is a cached property and may not exist
pass
return state
def __setstate__(self, state):
# Restore the lock. The thread pool is a cached property and will be recreated automatically.
self.__dict__.update(state)
self._version_lock = Lock()
def __str__(self):
# Don't trigger version guessing here just for the sake of printing
if self.config.version:
fullname, api_version, build = self.version.fullname, self.version.api_version, self.version.build
else:
fullname, api_version, build = '[unknown]', '[unknown]', '[unknown]'
return '''\
EWS url: %s
Product name: %s
EWS API version: %s
Build number: %s
EWS auth: %s''' % (self.service_endpoint, fullname, api_version, build, self.auth_type)
class NoVerifyHTTPAdapter(requests.adapters.HTTPAdapter):
"""An HTTP adapter that ignores TLS validation errors. Use at own risk."""
def cert_verify(self, conn, url, verify, cert):
# pylint: disable=unused-argument
# We're overriding a method so we have to keep the signature
super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
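# Hedged example: the adapter above is plugged in by overriding the class attribute that
# BaseProtocol documents for this purpose, before any Protocol/Account is created:
#
#   BaseProtocol.HTTP_ADAPTER_CLS = NoVerifyHTTPAdapter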
class TLSClientAuth(requests.adapters.HTTPAdapter):
"""An HTTP adapter that implements Certificate Based Authentication (CBA)"""
cert_file = None
def init_poolmanager(self, *args, **kwargs):
kwargs['cert_file'] = self.cert_file
return super().init_poolmanager(*args, **kwargs)
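# Hedged example (assumption: the certificate path is hypothetical and the CBA auth type
# itself is configured elsewhere):
#
#   TLSClientAuth.cert_file = '/path/to/client_cert.pem'
#   BaseProtocol.HTTP_ADAPTER_CLS = TLSClientAuth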
class RetryPolicy:
"""Stores retry logic used when faced with errors from the server"""
@property
def fail_fast(self):
        # Used to choose the error handling policy. When True, a fail-fast policy is used; when False, a
        # fault-tolerant policy is used.
raise NotImplementedError()
@property
def back_off_until(self):
raise NotImplementedError()
@back_off_until.setter
def back_off_until(self, value):
raise NotImplementedError()
def back_off(self, seconds):
raise NotImplementedError()
class FailFast(RetryPolicy):
"""Fail immediately on server errors"""
@property
def fail_fast(self):
return True
@property
def back_off_until(self):
return None
class FaultTolerance(RetryPolicy):
"""Enables fault-tolerant error handling. Tells internal methods to do an exponential back off when requests start
failing, and wait up to max_wait seconds before failing.
"""
# Back off 60 seconds if we didn't get an explicit suggested value
DEFAULT_BACKOFF = 60
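    # Hedged example (assumption): a retry policy is normally handed to the Configuration,
    # which this module reads back through Protocol.retry_policy:
    #
    #   config = Configuration(server='mail.example.com', credentials=credentials,
    #                          retry_policy=FaultTolerance(max_wait=600))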
def __init__(self, max_wait=3600):
self.max_wait = max_wait
self._back_off_until = None
self._back_off_lock = Lock()
def __getstate__(self):
# Locks cannot be pickled
state = self.__dict__.copy()
del state['_back_off_lock']
return state
def __setstate__(self, state):
# Restore the lock
self.__dict__.update(state)
self._back_off_lock = Lock()
@property
def fail_fast(self):
return False
@property
def back_off_until(self):
"""Returns the back off value as a datetime. Resets the current back off value if it has expired."""
if self._back_off_until is None:
return None
with self._back_off_lock:
if self._back_off_until is None:
return None
if self._back_off_until < datetime.datetime.now():
self._back_off_until = None # The back off value has expired. Reset
return None
return self._back_off_until
@back_off_until.setter
def back_off_until(self, value):
with self._back_off_lock:
self._back_off_until = value
def back_off(self, seconds):
if seconds is None:
seconds = self.DEFAULT_BACKOFF
value = datetime.datetime.now() + datetime.timedelta(seconds=seconds)
with self._back_off_lock:
self._back_off_until = value
| 44.937158
| 120
| 0.6593
|
b386573f9a94749510ac020913318a3df0f0abb6
| 88
|
py
|
Python
|
Unet_Mobile/test.py
|
Ice833/Semantic-Segmentation
|
23d23f6da3b34884c044a2253d65a1e4097adb2d
|
[
"MIT"
] | 1
|
2020-12-16T08:29:13.000Z
|
2020-12-16T08:29:13.000Z
|
Unet_Mobile/test.py
|
Ice833/Semantic-Segmentation
|
23d23f6da3b34884c044a2253d65a1e4097adb2d
|
[
"MIT"
] | null | null | null |
Unet_Mobile/test.py
|
Ice833/Semantic-Segmentation
|
23d23f6da3b34884c044a2253d65a1e4097adb2d
|
[
"MIT"
] | null | null | null |
from nets.unet import mobilenet_unet
model = mobilenet_unet(2,416,416)
model.summary()
| 29.333333
| 37
| 0.795455
|
527f4c396bf697f9dbcd22d80234ac10ff6c2d68
| 8,618
|
py
|
Python
|
skmob/core/tests/test_trajectorydataframe.py
|
FilippoSimini/scikit-mobility
|
d34270a478db5822d400c925d41aed7bfffce75b
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 489
|
2019-05-05T07:47:35.000Z
|
2022-03-31T10:36:00.000Z
|
skmob/core/tests/test_trajectorydataframe.py
|
FilippoSimini/scikit-mobility
|
d34270a478db5822d400c925d41aed7bfffce75b
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 204
|
2019-05-02T22:51:05.000Z
|
2022-03-19T17:14:29.000Z
|
skmob/core/tests/test_trajectorydataframe.py
|
FilippoSimini/scikit-mobility
|
d34270a478db5822d400c925d41aed7bfffce75b
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 119
|
2019-05-28T19:51:29.000Z
|
2022-03-23T03:43:00.000Z
|
import numpy as np
import pandas as pd
import geopandas as gpd
from datetime import datetime
from operator import itemgetter
from ...utils import constants
from ...utils.constants import UID, DATETIME, LATITUDE, LONGITUDE, GEOLIFE_SAMPLE
from ...core.trajectorydataframe import TrajDataFrame
from ...core.flowdataframe import FlowDataFrame
from ...preprocessing import detection, clustering
import shapely
import folium
import matplotlib
import pytest
EXPECTED_NUM_OF_COLUMNS_IN_TDF = 4
class TestTrajectoryDataFrame:
def setup_method(self):
self.default_data_list = [[1, 39.984094, 116.319236, '2008-10-23 13:53:05'],
[1, 39.984198, 116.319322, '2008-10-23 13:53:06'],
[1, 39.984224, 116.319402, '2008-10-23 13:53:11'],
[1, 39.984211, 116.319389, '2008-10-23 13:53:16']]
self.default_data_df = pd.DataFrame(self.default_data_list, columns=['user', 'latitude', 'lng', 'hour'])
self.default_data_dict = self.default_data_df.to_dict(orient='list')
# instantiate a TrajDataFrame
lats_lngs = np.array([[39.978253, 116.327275],
[40.013819, 116.306532],
[39.878987, 116.126686],
[40.013819, 116.306532],
[39.979580, 116.313649],
[39.978696, 116.326220],
[39.981537, 116.310790],
[39.978161, 116.327242],
[39.900000, 116.000000]])
traj = pd.DataFrame(lats_lngs, columns=[constants.LATITUDE, constants.LONGITUDE])
traj[constants.DATETIME] = pd.to_datetime([
'20130101 8:34:04', '20130101 10:34:08', '20130105 10:34:08',
'20130110 12:34:15', '20130101 1:34:28', '20130101 3:34:54',
'20130101 4:34:55', '20130105 5:29:12', '20130115 00:29:12'])
traj[constants.UID] = [1 for _ in range(5)] + [2 for _ in range(3)] + [3]
self.tdf0 = TrajDataFrame(traj)
self.stdf = detection.stops(self.tdf0)
self.cstdf = clustering.cluster(self.stdf)
# tessellation
tess_features = {'type': 'FeatureCollection',
'features': [{'id': '0',
'type': 'Feature',
'properties': {'tile_ID': '0'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.14407581909998, 39.8846396072],
[116.14407581909998, 39.98795822127371],
[116.27882311171793, 39.98795822127371],
[116.27882311171793, 39.8846396072],
[116.14407581909998, 39.8846396072]]]}},
{'id': '1',
'type': 'Feature',
'properties': {'tile_ID': '1'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.14407581909998, 39.98795822127371],
[116.14407581909998, 40.091120806035285],
[116.27882311171793, 40.091120806035285],
[116.27882311171793, 39.98795822127371],
[116.14407581909998, 39.98795822127371]]]}},
{'id': '2',
'type': 'Feature',
'properties': {'tile_ID': '2'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.27882311171793, 39.8846396072],
[116.27882311171793, 39.98795822127371],
[116.41357040433583, 39.98795822127371],
[116.41357040433583, 39.8846396072],
[116.27882311171793, 39.8846396072]]]}},
{'id': '3',
'type': 'Feature',
'properties': {'tile_ID': '3'},
'geometry': {'type': 'Polygon',
'coordinates': [[[116.27882311171793, 39.98795822127371],
[116.27882311171793, 40.091120806035285],
[116.41357040433583, 40.091120806035285],
[116.41357040433583, 39.98795822127371],
[116.27882311171793, 39.98795822127371]]]}}]}
self.tessellation = gpd.GeoDataFrame.from_features(tess_features, crs={"init": "epsg:4326"})
def perform_default_asserts(self, tdf):
assert tdf._is_trajdataframe()
assert tdf.shape == (4, EXPECTED_NUM_OF_COLUMNS_IN_TDF)
assert tdf[UID][0] == 1
assert tdf[DATETIME][0] == datetime(2008, 10, 23, 13, 53, 5)
assert tdf[LATITUDE][0] == 39.984094
assert tdf[LONGITUDE][3] == 116.319389
def test_tdf_from_list(self):
tdf = TrajDataFrame(self.default_data_list, latitude=1, longitude=2, datetime=3, user_id=0)
self.perform_default_asserts(tdf)
print(tdf.head()) # raised TypeError: 'BlockManager' object is not iterable
def test_tdf_from_df(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
self.perform_default_asserts(tdf)
def test_tdf_from_dict(self):
tdf = TrajDataFrame(self.default_data_dict, latitude='latitude', datetime='hour', user_id='user')
self.perform_default_asserts(tdf)
def test_tdf_from_csv_file(self):
tdf = TrajDataFrame.from_file(GEOLIFE_SAMPLE, sep=',')
assert tdf._is_trajdataframe()
assert tdf.shape == (217653, EXPECTED_NUM_OF_COLUMNS_IN_TDF)
assert list(tdf[UID].unique()) == [1, 5]
def test_timezone_conversion(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
tdf.timezone_conversion(from_timezone='Europe/London', to_timezone='Europe/Berlin')
assert tdf[DATETIME][0] == pd.Timestamp('2008-10-23 14:53:05')
def test_slicing_a_tdf_returns_a_tdf(self):
tdf = TrajDataFrame(self.default_data_df, latitude='latitude', datetime='hour', user_id='user')
assert isinstance(tdf[tdf[UID] == 1][:1], TrajDataFrame)
def test_sort_by_uid_and_datetime(self):
# shuffle the TrajDataFrame rows
tdf1 = self.tdf0.sample(frac=1)
tdf = tdf1.sort_by_uid_and_datetime()
assert isinstance(tdf, TrajDataFrame)
assert np.all(tdf[[UID, DATETIME]].values == sorted(tdf1[[UID, DATETIME]].values, key=itemgetter(0, 1)))
def test_plot_trajectory(self):
map_f = self.tdf0.plot_trajectory()
assert isinstance(map_f, folium.folium.Map)
def test_plot_stops(self):
map_f = self.stdf.plot_stops()
assert isinstance(map_f, folium.folium.Map)
def test_plot_diary(self):
ax = self.cstdf.plot_diary(self.tdf0[UID].iloc[0])
assert isinstance(ax, matplotlib.axes._subplots.Subplot)
@pytest.mark.parametrize('self_loops', [True, False])
def test_to_flowdataframe(self, self_loops):
expected_flows = {'origin': {0: '2', 1: '2'},
'destination': {0: '2', 1: '3'},
'flow': {0: 3, 1: 1}}
expected_fdf = FlowDataFrame(expected_flows, tessellation=self.tessellation)
if not self_loops:
expected_fdf.drop(0, inplace=True)
fdf = self.tdf0.to_flowdataframe(self.tessellation, self_loops=self_loops)
assert isinstance(fdf, FlowDataFrame)
pd.testing.assert_frame_equal(expected_fdf, fdf)
def test_to_geodataframe(self):
assert isinstance(self.tdf0.to_geodataframe(), gpd.GeoDataFrame)
@pytest.mark.parametrize('remove_na', [True, False])
def test_mapping(self, remove_na):
mtdf = self.tdf0.mapping(self.tessellation, remove_na=remove_na)
def _point_in_poly(x, tess):
point = shapely.geometry.Point([x[constants.LONGITUDE], x[constants.LATITUDE]])
try:
poly = tess[tess[constants.TILE_ID] == x[constants.TILE_ID]][['geometry']].values[0, 0]
return poly.contains(point)
except IndexError:
poly = shapely.ops.unary_union(self.tessellation.geometry.values)
return not poly.contains(point)
assert np.all(mtdf.apply(lambda x: _point_in_poly(x, self.tessellation), axis=1).values)
| 48.689266
| 112
| 0.569274
|