Dataset schema (29 columns; one row per source file):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha 54fffcde0435df6ac71fd7c894c28a55193912b1 | size 1,168 | ext py | lang Python | path trace_simexp/task/clean.py | repo damar-wicaksono/trace-simexp @ e2640ff44de461c70ffbd9c8faa68965821b37f2 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
# -*- coding: utf-8 -*-
"""
trace_simexp.task.clean
***********************
Module to clean up files and directory contents
"""
__author__ = "Damar Wicaksono"
def rm_files(files: list):
"""Remove the listed files
:param csv_files: the list files or directories, fullname
"""
import subprocess
import os
for file in files:
if os.path.isfile(file):
subprocess.call(["rm", file])
elif os.path.islink(file):
subprocess.call(["rm", file])
elif os.path.isdir(file):
subprocess.call(["rm", "-rf", file])
def rm_except(directories: list, files: list):
"""Remove the all the directory contents except a single file
:param directories: the list of directories
:param files: files within the directories not to be deleted
"""
import subprocess
for directory, file in zip(directories, files):
tmp_script = "find '{}' ! -name {} -type f -delete" .format(directory,
file)
# Use the whole string with shell interpreter
subprocess.call(tmp_script, shell=True)
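# --- Hypothetical usage sketch (editor addition, not part of the original module) ---
# Shows the directory/file pairing rm_except expects, exercised on a throwaway temp
# directory. Unix-only, since the helpers shell out to `find` and `rm`.
if __name__ == "__main__":
    import pathlib
    import tempfile
    workdir = pathlib.Path(tempfile.mkdtemp())
    (workdir / "keep.inp").write_text("inputs")
    (workdir / "junk.out").write_text("outputs")
    rm_except([str(workdir)], ["keep.inp"])  # deletes junk.out, keeps keep.inp
    rm_files([str(workdir)])                 # then removes the directory itself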
avg_line_length 27.809524 | max_line_length 78 | alphanum_fraction 0.583904

hexsha cbe470bae95f7848b51ff3570c5a554f35be0d11 | size 5,259 | ext py | lang Python | path src/cool/Parsing/Lexer.py | repo matcom-compilers-2019/cool-compiler-greidy-ariel-quevedo @ d6d3ecedad5821817cc62040691ded5dcb84e252 | licenses ["MIT"] | max_stars_count 1 (2019-11-21T22:06:45.000Z) | max_issues_count null | max_forks_count null
import ply.lex as lex
from ply.lex import TOKEN
class Lexer(object):
def __init__(self):
self.reserved = self.keyword
self.tokens = self.tokens_list + tuple(self.keyword.values())
self.states = (
('string', 'exclusive'),
('comment', 'exclusive')
)
self.lexer = lex.lex(module=self)
def test(self, text):
self.lexer.input(text)
while True:
token = self.lexer.token()
if not token:
break
print(token)
t_ignore_COMMENT_LINE = r'\-\-[^\n]*'
t_ignore = ' \t\r\f'
t_comment_ignore = ''
t_string_ignore = ''
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COLON = r'\:'
t_COMMA = r'\,'
t_DOT = r'\.'
t_SEMICOLON = r'\;'
t_AT = r'\@'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_LT = r'\<'
t_LTEQ = r'\<\='
t_EQUALS = r'\='
t_NOT_INT = r'\~'
t_ASSIGN = r'\<\-'
t_ARROW = r'\=\>'
t_TYPE = r'[A-Z][a-zA-Z_0-9]*'
@TOKEN(r"true|false")
def t_BOOL(self, lextoken):
lextoken.value = True if lextoken.value == "true" else False
return lextoken
@TOKEN(r"[a-z][a-zA-Z_0-9]*")
def t_ID(self, lextoken):
# if lextoken.value is not a reserved word then get "ID" type
if lextoken.value in self.keyword.keys():
lextoken.type = self.keyword[lextoken.value]
else:
lextoken.type = "ID"
# lextoken.type = self.keyword.get(lextoken.value, "ID")
return lextoken
@TOKEN(r"\d+")
def t_INT(self, lextoken):
lextoken.value = int(lextoken.value)
return lextoken
@TOKEN(r"\n+")
def t_newline(self, lextoken):
lextoken.lexer.lineno += len(lextoken.value)
# lextoken.lexer.lexpos = 0
@TOKEN(r"\"")
def t_begin_string(self, lextoken):
lextoken.lexer.push_state('string')
lextoken.lexer.string = ""
lextoken.lexer.is_backslash = False
@TOKEN(r"\n")
def t_string_newline(self, lextoken):
if lextoken.lexer.is_backslash:
lextoken.lexer.is_backslash = False
else:
print("-----String Error in word %s -----" % (lextoken.value))
@TOKEN(r"\"")
def t_string_end(self, lextoken):
if not lextoken.lexer.is_backslash:
lextoken.type = "STRING"
lextoken.value = lextoken.lexer.string
lextoken.lexer.pop_state()
return lextoken
else:
lextoken.lexer.string += '"'
lextoken.lexer.is_backslash = False
@TOKEN(r"[^\n]")
def t_string_save_character(self, lextoken):
if lextoken.lexer.is_backslash:
if lextoken.value == 'b':
lextoken.lexer.string += "\b"
elif lextoken.value == 't':
lextoken.lexer.string += "\t"
elif lextoken.value == 'f':
lextoken.lexer.string += "\f"
elif lextoken.value == 'n':
lextoken.lexer.string += "\n"
elif lextoken.value == '\\':
lextoken.lexer.string += "\\"
else:
lextoken.lexer.string += lextoken.value
lextoken.lexer.is_backslash = False
else:
if lextoken.value != "\\":
lextoken.lexer.string += lextoken.value
else:
lextoken.lexer.is_backslash = True
@TOKEN(r"\(\*")
def t_begin_comment(self, lextoken):
lextoken.lexer.push_state('comment')
lextoken.lexer.level = 1
@TOKEN(r"\(\*")
def t_comment_open(self, lextoken):
lextoken.lexer.level += 1
@TOKEN(r"\*\)")
def t_comment_end(self, lextoken):
lextoken.lexer.level -=1
if lextoken.lexer.level == 0:
lextoken.lexer.pop_state()
def t_error(self, lextoken):
print("LexicographicError: Bad token %s -----" % (lextoken.value))
def t_string_error(self, lextoken):
print("LexicographicError: String Error in word %s -----" % (lextoken.value))
def t_comment_error(self, lextoken):
lextoken.lexer.skip(1)
@property
def tokens_list(self):
return (
# Symbols
"LPAREN", "RPAREN", "LBRACE", "RBRACE",
"COLON", "COMMA", "DOT", "SEMICOLON", "AT",
# Binary arithmetic operators
"PLUS", "MINUS", "TIMES", "DIVIDE",
# Comparison operators
"LT", "LTEQ", "EQUALS",
# Unary operators
"NOT_INT",
# Arrows ( <-, => )
"ASSIGN", "ARROW",
# Identifiers
"TYPE", "ID",
# Types
"INT", "STRING", "BOOL"
)
@property
def keyword(self):
return {
# FALSE, TRUE
'class':"CLASS", "inherits":"INHERITS",
"if":"IF", "then":"THEN", "else":"ELSE", "fi":"FI",
"let":"LET", "in":"IN",
"while":"WHILE", "loop":"LOOP", "pool":"POOL",
"case":"CASE", "of":"OF", "esac":"ESAC",
"new":"NEW",
"isvoid":"ISVOID",
"not":"NOT"
}
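# --- Hypothetical usage sketch (editor addition, not part of the original module) ---
# Tokenize a tiny COOL snippet; Lexer.test() prints every LexToken it produces.
if __name__ == "__main__":
    lexer = Lexer()
    lexer.test('class Main inherits IO { main(): Object { out_string("hello") }; };')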
avg_line_length 29.379888 | max_line_length 85 | alphanum_fraction 0.512075

hexsha 0930fddd7ce4fc72d52fdd6a6ce54304cc82fe2d | size 1,335 | ext py | lang Python | path src/control/convertStrategy/DOCAlgorithm.py | repo alfredo-milani/ParseScript @ 58847537b53bfb7b88710761963dc94b06041195 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
from subprocess import Popen, PIPE
from constants import NEW_USER
from control.convertStrategy.ConversionAlgorithm import ConversionAlgorithm
from control.convertStrategy.BaseAlgorithm import BaseAlgorithm
from utils import Common
class DOCAlgorithm(BaseAlgorithm, ConversionAlgorithm):
"""
    Class that defines the algorithm for parsing documents in *.doc format
"""
CMD_DOC = "antiword"
def __init__(self):
super(DOCAlgorithm, self).__init__()
def do_convert(self, file_to_convert):
cmd = [DOCAlgorithm.CMD_DOC, file_to_convert]
p = Popen(cmd, stdout=PIPE)
stdout, stderr = p.communicate()
content = stdout.decode(ConversionAlgorithm.DECODE_FORMAT, 'ignore')
        # materialize the filtered lines so they can be iterated more than once
        data_list = list(filter(None, content.split("\n")))
raw_data_num_users = Common.count_occurences(data_list, NEW_USER)
list_of_users = self._parse_users_list(data_list)
if raw_data_num_users != len(list_of_users):
from control.convertStrategy import Logging
self.logs.append_logs(
Logging.W,
"WARNING:\tUser raw data: %d\tUser parsed: %d.\tCheck if some user missing\n" % (
raw_data_num_users,
len(list_of_users)
)
)
return list_of_users
avg_line_length 32.560976 | max_line_length 97 | alphanum_fraction 0.665169

hexsha 9e8b4fb8f4563e0745db4ebe70cff51cccd58444 | size 177 | ext py | lang Python | path django/contrib/admin/__init__.py | repo rawwell/django @ 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | licenses ["BSD-3-Clause"] | max_stars_count 1 (2016-05-08T12:24:22.000Z) | max_issues_count null | max_forks_count 1 (2015-11-19T14:45:16.000Z)
from django.contrib.admin.options import ModelAdmin
from django.contrib.admin.options import StackedInline, TabularInline
from django.contrib.admin.sites import AdminSite, site
avg_line_length 44.25 | max_line_length 69 | alphanum_fraction 0.858757

hexsha b838e8801e36a8cc92a66943656bdc16a346a2d7 | size 666 | ext py | lang Python | path backoffice/manage.py | repo AlejandroUPC/pythonmicroservices @ 9d42bd6dfd9847ad4a8e6029e808de927292c251 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backoffice.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length 28.956522 | max_line_length 74 | alphanum_fraction 0.68018

hexsha 928a17415c9c6c69a860878da03a62449a0581ea | size 1,805 | ext py | lang Python | path fax/competitive/cg.py | repo niklasschmitz/fax @ ab2edcc375a45dedcf715edd407d5037e0f70bf2 | licenses ["MIT"] | max_stars_count 4 (2020-07-20T18:56:13.000Z to 2020-07-31T02:51:12.000Z) | max_issues_count null | max_forks_count null
from functools import partial
from jax import lax
from jax import tree_util
from fax import converge
from fax import loop
from fax import math
def cg_step(a_lin_op, i, current_state):
del i
x, r, r_sqr, p = current_state
amat_p = a_lin_op(p)
alpha = r_sqr / math.pytree_dot(p, amat_p)
x_new = tree_util.tree_multimap(lambda x, p: x + alpha * p, x, p)
r_new = tree_util.tree_multimap(lambda r, amat_p: r - alpha * amat_p,
r, amat_p)
r_new_sqr = math.pytree_dot(r_new, r_new)
beta = r_new_sqr/r_sqr
p_new = tree_util.tree_multimap(lambda r_new, p: r_new + beta * p, r_new, p)
return x_new, r_new, r_new_sqr, p_new
def conjugate_gradient_solve(linear_op, bvec, init_x, max_iter=1000,
atol=1e-10):
dtype = converge.tree_smallest_float_dtype(bvec)
_, atol = converge.adjust_tol_for_dtype(0., atol=atol, dtype=dtype)
init_r = tree_util.tree_multimap(lax.sub, bvec, linear_op(init_x))
init_p = init_r
init_r_sqr = math.pytree_dot(init_r, init_r)
def convergence_test(state_new, state_old):
del state_old
return state_new[2] < atol
solution = loop.fixed_point_iteration(
(init_x, init_r, init_r_sqr, init_p),
func=partial(cg_step, linear_op),
convergence_test=convergence_test,
max_iter=max_iter
)
return solution._replace(
value=solution.value[0],
previous_value=solution.value[0],
)
def fixed_point_solve(linear_op, bvec, init_x, max_iter=1000,
atol=1e-10):
return conjugate_gradient_solve(
linear_op=lambda x: tree_util.tree_multimap(lax.sub, x, linear_op(x)),
bvec=bvec,
init_x=init_x,
max_iter=max_iter,
atol=atol,
)
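# --- Hypothetical usage sketch (editor addition, not part of the original module) ---
# Assumes the fax package and jax are installed. Solves A x = b for a small symmetric
# positive-definite A; the linear operator is simply v -> A @ v.
if __name__ == "__main__":
    import jax.numpy as jnp
    A = jnp.array([[4.0, 1.0], [1.0, 3.0]])
    b = jnp.array([1.0, 2.0])
    solution = conjugate_gradient_solve(lambda v: A @ v, b, init_x=jnp.zeros_like(b))
    print(solution.value)  # expected to be close to jnp.linalg.solve(A, b) ~ [0.0909, 0.6364]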
avg_line_length 30.083333 | max_line_length 80 | alphanum_fraction 0.655956

hexsha 9c3fa909f08bbba853b962da19fb992763810cfb | size 13,771 | ext py | lang Python | path builder/main.py | repo vladkozlov69/platform-nordicnrf52 @ 0ea23a266125377c2ff410c5e8dc5388254de6b1 | licenses ["Apache-2.0"] | max_stars_count null | max_issues_count null | max_forks_count null
# Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from platform import system
from os import makedirs
from os.path import isdir, join, basename
from SCons.Script import (ARGUMENTS, COMMAND_LINE_TARGETS, AlwaysBuild,
Builder, Default, DefaultEnvironment)
from platformio.util import get_serialports
def BeforeUpload(target, source, env): # pylint: disable=W0613,W0621
env.AutodetectUploadPort()
upload_options = {}
if "BOARD" in env:
upload_options = env.BoardConfig().get("upload", {})
if not bool(upload_options.get("disable_flushing", False)):
env.FlushSerialBuffer("$UPLOAD_PORT")
before_ports = get_serialports()
if bool(upload_options.get("use_1200bps_touch", False)):
env.TouchSerialPort("$UPLOAD_PORT", 1200)
if bool(upload_options.get("wait_for_upload_port", False)):
env.Replace(UPLOAD_PORT=env.WaitForNewSerialPort(before_ports))
# use only port name for BOSSA
if ("/" in env.subst("$UPLOAD_PORT") and
env.subst("$UPLOAD_PROTOCOL") == "sam-ba"):
env.Replace(UPLOAD_PORT=basename(env.subst("$UPLOAD_PORT")))
env = DefaultEnvironment()
platform = env.PioPlatform()
board = env.BoardConfig()
variant = board.get("build.variant", "")
use_adafruit = board.get(
"build.bsp.name", "nrf5") == "adafruit" and "arduino" in env.get("PIOFRAMEWORK", [])
if use_adafruit:
FRAMEWORK_DIR = platform.get_package_dir("framework-arduinoadafruitnrf52")
os_platform = sys.platform
if os_platform == "win32":
nrfutil_path = join(FRAMEWORK_DIR, "tools", "adafruit-nrfutil", os_platform, "adafruit-nrfutil.exe")
elif os_platform == "darwin":
nrfutil_path = join(FRAMEWORK_DIR, "tools", "adafruit-nrfutil", "macos", "adafruit-nrfutil")
else:
nrfutil_path = "adafruit-nrfutil"
else:
# set it to empty since we won't need it
nrfutil_path = ""
env.Replace(
AR="arm-none-eabi-ar",
AS="arm-none-eabi-as",
CC="arm-none-eabi-gcc",
CXX="arm-none-eabi-g++",
GDB="arm-none-eabi-gdb",
OBJCOPY="arm-none-eabi-objcopy",
RANLIB="arm-none-eabi-ranlib",
SIZETOOL="arm-none-eabi-size",
ARFLAGS=["rc"],
SIZEPROGREGEXP=r"^(?:\.text|\.data|\.rodata|\.text.align|\.ARM.exidx)\s+(\d+).*",
SIZEDATAREGEXP=r"^(?:\.data|\.bss|\.noinit)\s+(\d+).*",
SIZECHECKCMD="$SIZETOOL -A -d $SOURCES",
SIZEPRINTCMD='$SIZETOOL -B -d $SOURCES',
ERASEFLAGS=["--eraseall", "-f", "nrf52"],
ERASECMD="nrfjprog $ERASEFLAGS",
PROGSUFFIX=".elf"
)
# Allow user to override via pre:script
if env.get("PROGNAME", "program") == "program":
env.Replace(PROGNAME="firmware")
env.Append(
BUILDERS=dict(
ElfToBin=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"binary",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".bin"
),
ElfToHex=Builder(
action=env.VerboseAction(" ".join([
"$OBJCOPY",
"-O",
"ihex",
"-R",
".eeprom",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".hex"
),
MergeHex=Builder(
action=env.VerboseAction(" ".join([
join(platform.get_package_dir("tool-sreccat") or "",
"srec_cat"),
"$SOFTDEVICEHEX",
"-intel",
"$SOURCES",
"-intel",
"-o",
"$TARGET",
"-intel",
"--line-length=44"
]), "Building $TARGET"),
suffix=".hex"
)
)
)
# TODO disable if softdevice
if (use_adafruit and not env.get("SOFTDEVICEHEX")):
env.Append(
BUILDERS=dict(
PackageDfu=Builder(
action=env.VerboseAction(" ".join([
'"%s"' % nrfutil_path,
"dfu",
"genpkg",
"--dev-type",
"0x0052",
"--sd-req",
board.get("build.softdevice.sd_fwid"),
"--application",
"$SOURCES",
"$TARGET"
]), "Building $TARGET"),
suffix=".zip"
),
SignBin=Builder(
action=env.VerboseAction(" ".join([
"$PYTHONEXE",
join(FRAMEWORK_DIR or "",
"tools", "pynrfbintool", "pynrfbintool.py"),
"--signature",
"$TARGET",
"$SOURCES"
]), "Signing $SOURCES"),
suffix="_signature.bin"
)
)
)
if not env.get("PIOFRAMEWORK"):
env.SConscript("frameworks/_bare.py")
#
# Target: Build executable and linkable firmware
#
if "zephyr" in env.get("PIOFRAMEWORK", []):
env.SConscript(
join(platform.get_package_dir(
"framework-zephyr"), "scripts", "platformio", "platformio-build-pre.py"),
exports={"env": env}
)
upload_protocol = env.subst("$UPLOAD_PROTOCOL")
target_elf = None
if "nobuild" in COMMAND_LINE_TARGETS:
target_elf = join("$BUILD_DIR", "${PROGNAME}.elf")
target_firm = join("$BUILD_DIR", "${PROGNAME}.hex")
else:
target_elf = env.BuildProgram()
if "SOFTDEVICEHEX" in env:
target_firm = env.MergeHex(
join("$BUILD_DIR", "${PROGNAME}"),
env.ElfToHex(join("$BUILD_DIR", "userfirmware"), target_elf))
print('SoftDevice ' + env.get("SOFTDEVICEHEX") +' will be used!')
elif "nrfutil" == upload_protocol and use_adafruit:
target_firm = env.PackageDfu(
join("$BUILD_DIR", "${PROGNAME}"),
env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_elf))
elif "nrfjprog" == upload_protocol:
target_firm = env.ElfToHex(
join("$BUILD_DIR", "${PROGNAME}"), target_elf)
elif "sam-ba" == upload_protocol:
target_firm = env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf)
else:
if "DFUBOOTHEX" in env:
target_firm = env.SignBin(
join("$BUILD_DIR", "${PROGNAME}"),
env.ElfToBin(join("$BUILD_DIR", "${PROGNAME}"), target_elf))
else:
target_firm = env.ElfToHex(
join("$BUILD_DIR", "${PROGNAME}"), target_elf)
AlwaysBuild(env.Alias("nobuild", target_firm))
target_buildprog = env.Alias("buildprog", target_firm, target_firm)
if "DFUBOOTHEX" in env:
env.Append(
# Check the linker script for the correct location
BOOT_SETTING_ADDR=board.get("build.bootloader.settings_addr", "0x7F000")
)
AlwaysBuild(env.Alias("dfu", env.PackageDfu(
join("$BUILD_DIR", "${PROGNAME}"),
env.ElfToHex(join("$BUILD_DIR", "${PROGNAME}"), target_elf))))
AlwaysBuild(env.Alias("bootloader", None, [
env.VerboseAction("nrfjprog --program $DFUBOOTHEX -f nrf52 --chiperase", "Uploading $DFUBOOTHEX"),
env.VerboseAction("nrfjprog --erasepage $BOOT_SETTING_ADDR -f nrf52", "Erasing bootloader config"),
env.VerboseAction("nrfjprog --memwr $BOOT_SETTING_ADDR --val 0x00000001 -f nrf52", "Disable CRC check"),
env.VerboseAction("nrfjprog --reset -f nrf52", "Reset nRF52")
]))
if "bootloader" in COMMAND_LINE_TARGETS and "DFUBOOTHEX" not in env:
sys.stderr.write("Error. The board is missing the bootloader binary.\n")
env.Exit(1)
#
# Target: Print binary size
#
target_size = env.Alias(
"size", target_elf,
env.VerboseAction("$SIZEPRINTCMD", "Calculating size $SOURCE"))
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
debug_tools = env.BoardConfig().get("debug.tools", {})
upload_actions = []
if upload_protocol == "mbed":
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for upload disk..."),
env.VerboseAction(env.UploadToDisk, "Uploading $SOURCE")
]
elif upload_protocol.startswith("blackmagic"):
env.Replace(
UPLOADER="$GDB",
UPLOADERFLAGS=[
"-nx",
"--batch",
"-ex", "target extended-remote $UPLOAD_PORT",
"-ex", "monitor %s_scan" %
("jtag" if upload_protocol == "blackmagic-jtag" else "swdp"),
"-ex", "attach 1",
"-ex", "load",
"-ex", "compare-sections",
"-ex", "kill"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $BUILD_DIR/${PROGNAME}.elf"
)
upload_actions = [
env.VerboseAction(env.AutodetectUploadPort, "Looking for BlackMagic port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "nrfjprog":
env.Replace(
UPLOADER="nrfjprog",
UPLOADERFLAGS=[
"--sectorerase" if "DFUBOOTHEX" in env else "--chiperase",
"--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS --program $SOURCE"
)
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
elif upload_protocol == "nrfutil":
env.Replace(
UPLOADER=nrfutil_path,
UPLOADERFLAGS=[
"dfu",
"serial",
"-p",
"$UPLOAD_PORT",
"-b",
"$UPLOAD_SPEED",
"--singlebank",
],
UPLOADCMD='"$UPLOADER" $UPLOADERFLAGS -pkg $SOURCE'
)
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol == "sam-ba":
env.Replace(
UPLOADER="bossac",
UPLOADERFLAGS=[
"--port", '"$UPLOAD_PORT"', "--write", "--erase", "-U", "--reset"
],
UPLOADCMD="$UPLOADER $UPLOADERFLAGS $SOURCES"
)
if int(ARGUMENTS.get("PIOVERBOSE", 0)):
env.Prepend(UPLOADERFLAGS=["--info", "--debug"])
upload_actions = [
env.VerboseAction(BeforeUpload, "Looking for upload port..."),
env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")
]
elif upload_protocol.startswith("jlink"):
def _jlink_cmd_script(env, source):
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
script_path = join(build_dir, "upload.jlink")
commands = [ "h" ]
if "DFUBOOTHEX" in env:
commands.append("loadbin %s,%s" % (str(source).replace("_signature", ""),
env.BoardConfig().get("upload.offset_address", "0x26000")))
commands.append("loadbin %s,%s" % (source, env.get("BOOT_SETTING_ADDR")))
else:
commands.append("loadbin %s,%s" % (source, env.BoardConfig().get(
"upload.offset_address", "0x0")))
commands.append("r")
commands.append("q")
with open(script_path, "w") as fp:
fp.write("\n".join(commands))
return script_path
env.Replace(
__jlink_cmd_script=_jlink_cmd_script,
UPLOADER="JLink.exe" if system() == "Windows" else "JLinkExe",
UPLOADERFLAGS=[
"-device", env.BoardConfig().get("debug", {}).get("jlink_device"),
"-speed", "4000",
"-if", ("jtag" if upload_protocol == "jlink-jtag" else "swd"),
"-autoconnect", "1"
],
UPLOADCMD='$UPLOADER $UPLOADERFLAGS -CommanderScript "${__jlink_cmd_script(__env__, SOURCE)}"'
)
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
elif upload_protocol in debug_tools:
openocd_args = [
"-d%d" % (2 if int(ARGUMENTS.get("PIOVERBOSE", 0)) else 1)
]
openocd_args.extend(
debug_tools.get(upload_protocol).get("server").get("arguments", []))
openocd_args.extend([
"-c", "program {$SOURCE} %s verify reset; shutdown;" %
board.get("upload.offset_address", "")
])
openocd_args = [
f.replace("$PACKAGE_DIR",
platform.get_package_dir("tool-openocd") or "")
for f in openocd_args
]
env.Replace(
UPLOADER="openocd",
UPLOADERFLAGS=openocd_args,
UPLOADCMD="$UPLOADER $UPLOADERFLAGS")
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
# custom upload tool
elif upload_protocol == "custom":
upload_actions = [env.VerboseAction("$UPLOADCMD", "Uploading $SOURCE")]
else:
sys.stderr.write("Warning! Unknown upload protocol %s\n" % upload_protocol)
AlwaysBuild(env.Alias("upload", target_firm, upload_actions))
#
# Target: Erase Flash
#
AlwaysBuild(
env.Alias("erase", None, env.VerboseAction("$ERASECMD",
"Erasing...")))
#
# Information about obsolete method of specifying linker scripts
#
if any("-Wl,-T" in f for f in env.get("LINKFLAGS", [])):
print("Warning! '-Wl,-T' option for specifying linker scripts is deprecated. "
"Please use 'board_build.ldscript' option in your 'platformio.ini' file.")
#
# Default targets
#
Default([target_buildprog, target_size])
avg_line_length 32.788095 | max_line_length 112 | alphanum_fraction 0.582819

hexsha 68bcedbea2eef7ec83cae89ce6881cd71973e85c | size 51 | ext py | lang Python | path hacker/challenges/crypto/duke_box.py | repo Tenebrar/codebase @ 59c9a35289fb29afedad0e3edd0519b67372ef9f | licenses ["Unlicense"] | max_stars_count 1 (2020-04-21T11:39:25.000Z) | max_issues_count 7 (2020-02-12T01:08:01.000Z to 2022-02-10T11:56:56.000Z) | max_forks_count null
# See text file for explanation
print('gnomeking')
avg_line_length 17 | max_line_length 31 | alphanum_fraction 0.764706

hexsha cc01759e4371fa0cdfb1acb1192a75b13e394daf | size 2,777 | ext py | lang Python | path gallery_django/accounts/serializers.py | repo Reincarnationist/Memory_gallery @ 5a916dcc6a7ae734413083bf9c39ee507d3856e9 | licenses ["MIT", "Unlicense"] | max_stars_count 1 (2022-02-05T15:31:06.000Z) | max_issues_count null | max_forks_count null
from rest_framework import serializers
from django.contrib.auth import get_user_model, authenticate, password_validation
from django.core import exceptions
from django.contrib.auth.models import User
#User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'username')
#Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id', 'username', 'password')
extra_kwargs = {
'password': {'write_only': True}
}
def validate(self, data):
        # Cannot use the create_user method here because it would save the user
        # right away even if the validation fails
user = User(**data)
password = data['password']
errors = {}
try:
# validate the password and catch the exception
password_validation.validate_password(password=password, user=user)
# the exception raised here is different than serializers.ValidationError
except exceptions.ValidationError as e:
errors['password'] = e.messages[0]
if errors:
raise serializers.ValidationError(errors)
return super(RegisterSerializer, self).validate(data)
def create(self, validated_data):
user = get_user_model().objects.create_user(
username=validated_data['username'],
password=validated_data['password'],
)
return user
#Login Serializer
class LoginSerializer(serializers.Serializer):
username = serializers.CharField(max_length=15)
password = serializers.CharField(max_length=15)
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError('Incorrect Credentials')
#Change Password Serializer
class ChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField()
new_password1 = serializers.CharField(max_length=15)
new_password2 = serializers.CharField(max_length=15)
def validate(self, data):
user = self.context['request'].user
password = data['new_password1']
errors = {}
try:
# validate the password and catch the exception
password_validation.validate_password(password=password, user=user)
# the exception raised here is different than serializers.ValidationError
except exceptions.ValidationError as e:
errors['password'] = e.messages[0]
if errors:
raise serializers.ValidationError(errors)
return super(ChangePasswordSerializer, self).validate(data)
avg_line_length 33.059524 | max_line_length 89 | alphanum_fraction 0.673389

hexsha bed7a2c99e9fdc4049a8c0136c5fba5f21cdf428 | size 698 | ext py | lang Python | path src/hebi/settings.py | repo Xifax/suzu-web @ ebe6b87093f73bf8a100d7b78b1d4a83cf203315 | licenses ["BSD-2-Clause"] | max_stars_count null | max_issues_count null | max_forks_count null
# Scrapy settings for hebi project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
BOT_NAME = 'hebi'
SPIDER_MODULES = ['hebi.spiders']
NEWSPIDER_MODULE = 'hebi.spiders'
# Filter duplicates and save to mongo
ITEM_PIPELINES = [
'hebi.pipelines.DuplicatesPipeline',
'hebi.pipelines.MongoPipeline',
]
EXTENSIONS = {
'scrapy.contrib.closespider.CloseSpider' : 500,
}
# In seconds (5 minutes)
#CLOSESPIDER_TIMEOUT = 300
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'hebi (+http://www.yourdomain.com)'
avg_line_length 24.068966 | max_line_length 80 | alphanum_fraction 0.737822

hexsha 7ba763228abe7d74c5fb7cbfd26db301b15c5abc | size 1,628 | ext py | lang Python | path other/sensor_magnet/calib/plot_magnet_smooth_interpol.py | repo Gabs48/tigrillo2 @ 66ad26c0aff39da74ca76f712b6f01b40d383f34 | licenses ["MIT"] | max_stars_count 1 (2018-11-05T14:21:43.000Z) | max_issues_count null | max_forks_count null
import numpy as np
from scipy.interpolate import interp1d
import pickle
import matplotlib
matplotlib.use("Agg")
from matplotlib.mlab import *
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('axes', facecolor='white')
plt.rc('savefig', facecolor='white')
plt.rc('figure', autolayout=True)
def get_style_colors():
    if 'axes.prop_cycle' in plt.rcParams:
        # newer matplotlib exposes the colors via the prop cycler, not 'axes.color_cycle'
        cols = plt.rcParams['axes.prop_cycle'].by_key()['color']
    else:
        cols = ['b', 'r', 'y', 'g', 'k']
return cols
angles = [40, 45, 50, 60, 70, 80, 90, 100, 110, 120, 180]
fl = [0, 870, 1540, 2435, 2807, 2923, 3002, 3040, 3065, 3077, 3200]
x = np.array(fl)
y = np.array(angles)
plt.plot(x, y, "*", linewidth=2, color=get_style_colors()[0], label="Samples")
f = interp1d(x, y)
f2 = interp1d(x, y, kind='cubic')
f3 = interp1d(x, y, kind='quadratic')
f_dict = {}
for key in f2.__dict__.keys():
if key != '_function' and key!= 'norm':
f_dict[key] = f2.__getattribute__(key)
print(f_dict)
print(pickle.dumps(f_dict))
x2 = np.linspace(0, 3200)
#plt.plot(x2, f(x2), "--", linewidth=1, color=get_style_colors()[1], label="Linear Interpolation")
plt.plot(x2, f2(x2), "--", linewidth=1, color=get_style_colors()[2], label="Cubic Interpolation")
#plt.plot(x2, f3(x2), "--", linewidth=1, color=get_style_colors()[3], label="Quadratic Interpolation")
plt.title("Interpolated Transfer Function")
plt.xlabel("Sensor value")
plt.ylabel('Knee angle')
plt.ylim([30, 140])
plt.legend(loc="upper left", fontsize="x-small")
plt.savefig("magnet_interpol.png", format='png', dpi=300)
plt.close()
avg_line_length 28.068966 | max_line_length 102 | alphanum_fraction 0.678133

hexsha 48b4ba7a2aab16fe16f78f1d9b0dca86c188a0b7 | size 150 | ext py | lang Python | path Waveforms/results/I_ij.py | repo keefemitman/PostNewtonian @ 853d6577cb0002da5eebe1cb55f0c28fbc114324 | licenses ["MIT"] | max_stars_count 18 (2015-03-26T01:04:36.000Z to 2022-02-01T19:26:21.000Z) | max_issues_count 4 (2015-01-08T23:46:29.000Z to 2017-09-20T19:13:51.000Z) | max_forks_count 3 (2016-05-13T02:36:14.000Z to 2021-11-23T21:36:32.000Z)
( ( ( A*mu*r(t)**2 )*nHat*nHat )
+( ( B*mu*r(t)**2*v(t)**2/c**2 )*lambdaHat*lambdaHat )
+( ( 48*C*G**2*m**2*mu*nu*v(t)/(7*c**5) )*lambdaHat*nHat ) )
avg_line_length 50 | max_line_length 61 | alphanum_fraction 0.48

hexsha cb2a082e3b4dbeab938640739b2dac8d08de6bd7 | size 3,944 | ext py | lang Python | path bayespy/demos/pattern_search.py | repo dungvtdev/upsbayescpm @ f6ee877c689046d3c57a2ac06742cfe4a0b6550e | licenses ["MIT"] | max_stars_count 622 (2015-01-15T19:46:06.000Z to 2022-03-22T13:40:22.000Z) | max_issues_count 118 (2015-01-04T06:38:23.000Z to 2021-11-05T17:28:02.000Z) | max_forks_count 160 (2015-02-16T15:30:43.000Z to 2022-03-14T00:52:36.000Z)
################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstration of the pattern search method for PCA.
The pattern searches are compared to standard VB-EM algorithm in CPU
time. For more info on the pattern search method, see
:cite:`Honkela:2002`.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy import nodes
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(M=40, N=100, D_y=6, D=8, seed=42, rotate=False, maxiter=1000, debug=False, plot=True):
"""
Run pattern search demo for PCA.
"""
if seed is not None:
np.random.seed(seed)
# Generate data
w = np.random.normal(0, 1, size=(M,1,D_y))
x = np.random.normal(0, 1, size=(1,N,D_y))
f = misc.sum_product(w, x, axes_to_sum=[-1])
y = f + np.random.normal(0, 0.2, size=(M,N))
# Construct model
Q = pca.model(M, N, D)
# Data with missing values
mask = random.mask(M, N, p=0.5) # randomly missing
y[~mask] = np.nan
Q['Y'].observe(y, mask=mask)
# Initialize some nodes randomly
Q['X'].initialize_from_random()
Q['W'].initialize_from_random()
# Use a few VB-EM updates at the beginning
Q.update(repeat=10)
Q.save()
# Standard VB-EM as a baseline
Q.update(repeat=maxiter)
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')
# Restore initial state
Q.load()
# Pattern search method for comparison
for n in range(maxiter):
Q.pattern_search('W', 'tau', maxiter=3, collapsed=['X', 'alpha'])
Q.update(repeat=20)
if Q.has_converged():
break
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')
bpplt.pyplot.xlabel('CPU time (in seconds)')
bpplt.pyplot.ylabel('VB lower bound')
bpplt.pyplot.legend(['VB-EM', 'Pattern search'], loc='lower right')
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["m=",
"n=",
"d=",
"k=",
"seed=",
"maxiter=",
"debug"])
except getopt.GetoptError:
print('python demo_pca.py <options>')
print('--m=<INT> Dimensionality of data vectors')
print('--n=<INT> Number of data vectors')
print('--d=<INT> Dimensionality of the latent vectors in the model')
print('--k=<INT> Dimensionality of the true latent vectors')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
print('--debug Check that the rotations are implemented correctly')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--rotate":
kwargs["rotate"] = True
elif opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--debug":
kwargs["debug"] = True
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--m",):
kwargs["M"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--d",):
kwargs["D"] = int(arg)
elif opt in ("--k",):
kwargs["D_y"] = int(arg)
run(**kwargs)
plt.show()
avg_line_length 29.214815 | max_line_length 94 | alphanum_fraction 0.521805

hexsha 60a92d54c183b789113eb7bc3dae03e478be3d0f | size 1,988 | ext py | lang Python | path sahara_plugin_cdh/plugins/cdh/v5_11_0/resources/cdh_config.py | repo openstack/sahara-plugin-cdh @ 063c3ee8de7e56831ef6b3bc8807706bd5fd7cea | licenses ["Apache-2.0"] | max_stars_count 2 (2019-01-28T22:10:54.000Z to 2019-02-20T08:35:58.000Z) | max_issues_count null | max_forks_count null
# Copyright (c) 2017 Massachusetts Open Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cm_api.api_client import ApiResource
cm_host = "localhost"
api = ApiResource(cm_host, username="admin", password="admin") # nosec
c = api.get_all_clusters()[0]
services = c.get_all_services()
def process_service(service):
service_name = service.name
if service_name == "spark_on_yarn":
service_name = "spark"
for role_cfgs in service.get_all_role_config_groups():
role_cm_cfg = role_cfgs.get_config(view='full')
role_cfg = parse_config(role_cm_cfg)
role_name = role_cfgs.roleType.lower()
write_cfg(role_cfg, '%s-%s.json' % (service_name, role_name))
service_cm_cfg = service.get_config(view='full')[0]
service_cfg = parse_config(service_cm_cfg)
write_cfg(service_cfg, '%s-service.json' % service_name)
def parse_config(config):
cfg = []
for name, value in config.items():
p = {
'name': value.name,
'value': value.default,
'display_name': value.displayName,
'desc': value.description
}
cfg.append(p)
return cfg
def write_cfg(cfg, file_name):
to_write = __import__('json').dumps(cfg, sort_keys=True, indent=4,
separators=(',', ': '))
with open(file_name, 'w') as f:
f.write(to_write)
if __name__ == '__main__':
for service in services:
process_service(service)
avg_line_length 30.584615 | max_line_length 71 | alphanum_fraction 0.670523

hexsha 31aaaf41cb942aeba5a0b07fcc5af2bf0a65a80a | size 217 | ext py | lang Python | path exemplos/ex008.py | repo figueiredo-alef/estud-python @ f22351ecb966ec84433bb6078d92d4f31d5a0a7e | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
print('=' * 5, 'EX_008', '=' * 5)
# measurement converter (meters to centimeters and millimeters)
n1 = float(input('Digite a distância (em metros): '))
cm = n1 * 100
mm = n1 * 1000
print('{}m em centímetros é {}cm e em milímetros é {}mm'.format(n1, cm, mm))
avg_line_length 31 | max_line_length 76 | alphanum_fraction 0.608295

hexsha 5434b744bcaf8fd83b2750a3af875fd7a382f1f5 | size 1,925 | ext py | lang Python | path src/server/main.py | repo lee14257/delphi-epidata @ b007d778321e68be5526ca9ce1113b13d24d6fe8 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
import pathlib
import logging
from typing import Dict, Callable
from flask import request, send_file, Response, send_from_directory, jsonify
from ._config import URL_PREFIX, VERSION
from ._common import app, set_compatibility_mode
from ._exceptions import MissingOrWrongSourceException
from .endpoints import endpoints
__all__ = ["app"]
endpoint_map: Dict[str, Callable[[], Response]] = {}
for endpoint in endpoints:
endpoint_map[endpoint.bp.name] = endpoint.handle
app.register_blueprint(endpoint.bp, url_prefix=f"{URL_PREFIX}/{endpoint.bp.name}")
alias = getattr(endpoint, "alias", None)
if alias:
endpoint_map[alias] = endpoint.handle
@app.route(f"{URL_PREFIX}/api.php", methods=["GET", "POST"])
def handle_generic():
# mark as compatibility mode
set_compatibility_mode()
endpoint = request.values.get("endpoint", request.values.get("source"))
if not endpoint or endpoint not in endpoint_map:
raise MissingOrWrongSourceException(endpoint_map.keys())
return endpoint_map[endpoint]()
@app.route(f"{URL_PREFIX}")
@app.route(f"{URL_PREFIX}/")
@app.route(f"{URL_PREFIX}/index.html")
def send_index_file():
return send_file(pathlib.Path(__file__).parent / "index.html")
@app.route(f"{URL_PREFIX}/version")
def send_version():
return jsonify(dict(version=VERSION))
@app.route(f"{URL_PREFIX}/lib/<path:path>")
def send_lib_file(path: str):
return send_from_directory(pathlib.Path(__file__).parent / "lib", path)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
else:
# propagate gunicorn logging settings
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
sqlalchemy_logger = logging.getLogger("sqlalchemy")
sqlalchemy_logger.handlers = gunicorn_logger.handlers
sqlalchemy_logger.setLevel(gunicorn_logger.level)
avg_line_length 31.048387 | max_line_length 86 | alphanum_fraction 0.743896

hexsha c3adfa90a2564f83a94546c1994c657a2718aedf | size 7,494 | ext py | lang Python | path p2_continuous-control/agent_reacher.py | repo parksoy/Soyoung_Udacity_ND_DeepReinforcementLearning @ 28c4cd3ce76094a0e682736fcdceeec3f21ba754 | licenses ["MIT"] | max_stars_count null | max_issues_count null | max_forks_count null
import numpy as np
import random
import copy
from collections import namedtuple, deque
from nnmodels_reacher import Actor, Critic
from replay_buffer import ReplayBuffer
import torch
import torch.nn.functional as F
import torch.optim as optim
import os
from config_settings import Args
# %load_ext autoreload
# %autoreload 2
args=Args()
BUFFER_SIZE = args.buffer_size # replay buffer size
BATCH_SIZE = args.batch_size # minibatch size
GAMMA = args.gamma # discount factor
TAU = args.tau # for soft update of target parameters
LR_ACTOR = args.actor_learn_rate #1e-4 # learning rate of the actor
LR_CRITIC = args.critic_learn_rate #1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
UPDATE_EVERY = args.update_every # # timesteps between updates
num_updates = args.num_updates # update num of update
noise_factor = args.noise_factor # noise decay process
noise_factor_decay = args.noise_factor_decay # noise decay
noise_sigma=args.noise_sigma
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, random_seed):
"""Initialize an Agent object.
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed """
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.noise_factor = noise_factor
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device) #33,4
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device) #33,4
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
self.noise = OUNoise(action_size, random_seed) # Noise process
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed) # Replay memory
self.hard_copy(self.actor_target, self.actor_local)
self.hard_copy(self.critic_target, self.critic_local)
def step(self, states, actions, rewards, next_states, dones, t): #, done
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
self.memory.add(state, action, reward, next_state, done)
# Learn, if enough samples are available in memory
        if len(self.memory) > BATCH_SIZE and t % UPDATE_EVERY == 0:
            for _ in range(num_updates):
                experiences = self.memory.sample()
                self.learn(experiences, GAMMA)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
actions += self.noise_factor * self.noise.sample() #decay noise
#actions += self.noise.sample()
return np.clip(actions, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
actor_target(state) -> action
critic_target(state, action) -> Q-value
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor """
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
next_states=next_states.to('cuda')
actions_next = self.actor_target(next_states) # Get predicted next-state actions and Q values from target models
actions_next=actions_next.to('cuda')
Q_targets_next = self.critic_target(next_states, actions_next)
rewards=rewards.to('cuda')
dones=dones.to('cuda')
states=states.to('cuda')
actions=actions.to('cuda')
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones)) # Compute Q targets for current states (y_i)
Q_expected = self.critic_local(states, actions) # Compute critic loss
critic_loss = F.mse_loss(Q_expected, Q_targets)
self.critic_optimizer.zero_grad() # Minimize the loss
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1) #grad clipping
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean() # Compute actor loss
self.actor_optimizer.zero_grad() # Minimize the loss
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
# ---------------------------- decrease noise ---------- ------------- #
self.noise_factor -= noise_factor_decay
self.noise.reset()
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
def hard_copy(self, target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=noise_sigma):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state
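# --- Hypothetical usage sketch (editor addition, not part of the original file) ---
# Assumes the module's own dependencies (torch, config_settings, etc.) import cleanly.
# Draws a few Ornstein-Uhlenbeck samples to show the temporally correlated,
# mean-reverting exploration noise that gets added to the actor's actions.
if __name__ == "__main__":
    ou = OUNoise(size=4, seed=0)
    for _ in range(5):
        print(ou.sample())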
avg_line_length 44.607143 | max_line_length 120 | alphanum_fraction 0.640112

hexsha e8c283acc3b8fe8a32fe130368afde6fccb40d4e | size 10,582 | ext py | lang Python | path python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_split.py | repo L-Net-1992/Paddle @ 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | licenses ["Apache-2.0"] | max_stars_count null | max_issues_count null | max_forks_count 1 (2021-12-09T08:59:17.000Z)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertSplitTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
inputs = program_config.inputs
weights = program_config.weights
outputs = program_config.outputs
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
# the dimensions of input and axis match
if len(inputs['split_input'].shape) <= attrs[0]['axis']:
return False
#Sections and num cannot both be equal to 0.
if len(attrs[0]['sections']) == 0:
if attrs[0]['num'] == 0:
return False
#When sections and num are not both equal to 0, sections has higher priority.
#The sum of sections should be equal to the input size.
if len(attrs[0]['sections']) != 0:
if attrs[0]['num'] != 0:
return False
if len(outputs) != len(attrs[0]['sections']):
return False
sum = 0
for num in attrs[0]['sections']:
sum += num
if sum != inputs['split_input'].shape[attrs[0]['axis']]:
return False
#The size of num should be equal to the input dimension.
if attrs[0]['num'] != 0:
if len(outputs) != attrs[0]['num']:
return False
#Test AxisTensor and SectionsTensorList
if self.num_input == 0:
if self.dims == 2 and attrs[0]['sections'] == [
10, 14
] and len(outputs) == 2:
return True
else:
return False
return True
def sample_program_configs(self):
def generate_input1(attrs: List[Dict[str, Any]], batch):
if self.dims == 4:
return np.random.random([batch, 3, 3, 24]).astype(np.float32)
elif self.dims == 3:
return np.random.random([batch, 3, 24]).astype(np.float32)
elif self.dims == 2:
return np.random.random([batch, 24]).astype(np.float32)
elif self.dims == 1:
return np.random.random([24]).astype(np.float32)
def generate_AxisTensor(attrs: List[Dict[str, Any]]):
return np.ones([1]).astype(np.int32)
def generate_SectionsTensorList1(attrs: List[Dict[str, Any]]):
return np.array([10]).astype(np.int32)
def generate_SectionsTensorList2(attrs: List[Dict[str, Any]]):
return np.array([14]).astype(np.int32)
for num_input in [0, 1]:
for dims in [1, 2, 3, 4]:
for batch in [3, 6, 9]:
for Out in [["output_var0", "output_var1"],
["output_var0", "output_var1", "output_var2"]]:
for sections in [[], [1, 2], [2, 1], [10, 14],
[1, 1, 1], [2, 2, 2], [3, 3, 3],
[3, 7, 14]]:
for num in [0, 3]:
for axis in [0, 1, 2, 3]:
self.batch = batch
self.num_input = num_input
self.dims = dims
dics = [{
"sections": sections,
"num": num,
"axis": axis
}, {}]
dics_intput = [{
"X": ["split_input"],
"AxisTensor": ["AxisTensor"],
"SectionsTensorList": [
"SectionsTensorList1",
"SectionsTensorList2"
]
}, {
"X": ["split_input"]
}]
dics_intputs = [{
"AxisTensor":
TensorConfig(data_gen=partial(
generate_AxisTensor, dics)),
"SectionsTensorList1":
TensorConfig(data_gen=partial(
generate_SectionsTensorList1,
dics)),
"SectionsTensorList2":
TensorConfig(data_gen=partial(
generate_SectionsTensorList2, dics))
}, {}]
ops_config = [{
"op_type":
"split",
"op_inputs":
dics_intput[num_input],
"op_outputs": {
"Out": Out
},
"op_attrs":
dics[0]
}]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights=dics_intputs[num_input],
inputs={
"split_input":
TensorConfig(data_gen=partial(
generate_input1, dics, batch))
},
outputs=Out)
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
if self.dims == 4:
self.dynamic_shape.min_input_shape = {
"split_input": [1, 3 - 1, 3 - 1, 24 - 1]
}
self.dynamic_shape.max_input_shape = {
"split_input": [9, 3 + 1, 3 + 1, 24 + 1]
}
self.dynamic_shape.opt_input_shape = {
"split_input": [1, 3, 3, 24]
}
elif self.dims == 3:
self.dynamic_shape.min_input_shape = {
"split_input": [1, 3 - 1, 24 - 1]
}
self.dynamic_shape.max_input_shape = {
"split_input": [9, 3 + 1, 24 + 1]
}
self.dynamic_shape.opt_input_shape = {"split_input": [1, 3, 24]}
elif self.dims == 2:
self.dynamic_shape.min_input_shape = {
"split_input": [1, 24 - 1]
}
self.dynamic_shape.max_input_shape = {
"split_input": [9, 24 + 1]
}
self.dynamic_shape.opt_input_shape = {"split_input": [1, 24]}
elif self.dims == 1:
self.dynamic_shape.min_input_shape = {"split_input": [24 - 1]}
self.dynamic_shape.max_input_shape = {"split_input": [24 + 1]}
self.dynamic_shape.opt_input_shape = {"split_input": [24]}
def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape):
if len(program_config.outputs) == 2:
if attrs[0]['axis'] != 0:
return 1, 3
else:
return 0, 4
else:
if attrs[0]['axis'] != 0:
return 1, 4
else:
return 0, 5
attrs = [
program_config.ops[i].attrs for i in range(len(program_config.ops))
]
self.trt_param.max_batch_size = 9
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False), 1e-5
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True), 1e-5
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
if len(program_config.weights) == 3:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_SUPPORT,
"INPUT AxisTensor AND SectionsTensorList NOT SUPPORT.")
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
avg_line_length 42.159363 | max_line_length 85 | alphanum_fraction 0.460404

hexsha e4bd32f256917211ba20f6cf259c819461f5fe50 | size 2,788 | ext py | lang Python | path conftest.py | repo sab0tag/python_training_mantis @ 161c1994e8c27ff8ede510da0054abdce978b9cd | licenses ["Apache-2.0"] | max_stars_count null | max_issues_count null | max_forks_count null
import pytest
import json
import os.path
from fixture.application import Application
import ftputil
fixture = None
target = None
def loadconfig(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
# fixture init
@pytest.fixture
def app(request):
global fixture # define global variable inside of the method
browser = request.config.getoption("--browser")
webconfig = loadconfig(request.config.getoption("--target"))['web']
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, base_url=webconfig['baseUrl']) # constructor application
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.destroy()
# destroy fixture
request.addfinalizer(fin)
return fixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
# add additional parameters inside of function; called once at the beginning of the test run
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json")
    parser.addoption("--check_ui", action="store_true") # store_true makes the option True when the flag is present
@pytest.fixture(scope="session")
def config(request):
return loadconfig(request.config.getoption("--target"))
@pytest.fixture(scope="session", autouse=True)
def configure_server(request, config):
install_server_configuration(config['ftp']['host'],
config['ftp']['username'],
config['ftp']['password'])
def fin():
restore_server_configuration(config['ftp']['host'],
config['ftp']['username'],
config['ftp']['password'])
request.addfinalizer(fin)
def install_server_configuration(host, username, password):
with ftputil.FTPHost(host, username, password) as remote:
if remote.path.isfile("config_inc.php.bak"):
remote.remove("config_inc.php.bak")
if remote.path.isfile("config_inc.php"):
remote.rename("config_inc.php", "config_inc.php.bak")
        remote.upload(os.path.join(os.path.dirname(__file__), "resources/config_inc.php"), "config_inc.php")
def restore_server_configuration(host, username, password):
with ftputil.FTPHost(host, username, password) as remote:
if remote.path.isfile("config_inc.php.bak"):
            remote.remove("config_inc.php")
remote.rename("config_inc.php.bak", "config_inc.php")
| 32.418605
| 118
| 0.668938
|
9e362d810a17a7ba7ad101e7816ee5145c392aac
| 709
|
py
|
Python
|
app/controllers/overlay_controller.py
|
namuan/task-rider
|
4474bb1acde98a489f52834ad3b9705f3f6e7034
|
[
"MIT"
] | null | null | null |
app/controllers/overlay_controller.py
|
namuan/task-rider
|
4474bb1acde98a489f52834ad3b9705f3f6e7034
|
[
"MIT"
] | null | null | null |
app/controllers/overlay_controller.py
|
namuan/task-rider
|
4474bb1acde98a489f52834ad3b9705f3f6e7034
|
[
"MIT"
] | null | null | null |
from app.widgets.overlay_widget import Overlay
class OverlayController:
def __init__(self, parent, app):
self.parent = parent
self.app = app
self.overlay = Overlay(self.parent.lst_tasks)
self.overlay.hide()
# app events
self.app.data.app_events.timer_started.connect(self.display_overlay)
self.app.data.app_events.timer_paused.connect(self.hide_overlay)
def resize(self, event_size):
self.overlay.resize(event_size)
def display_overlay(self):
top_task = self.app.data.get_top_task()
self.overlay.setTitle(top_task.task_title)
self.overlay.show()
def hide_overlay(self):
self.overlay.hide()
| 27.269231
| 76
| 0.675599
|
49e8b29bc57067aaf84fe05308cdf2921994fa52
| 683
|
py
|
Python
|
mymoney/accounts/models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 6
|
2015-12-11T13:36:27.000Z
|
2018-10-17T03:08:15.000Z
|
mymoney/accounts/models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 2
|
2016-06-12T12:42:47.000Z
|
2017-12-12T14:05:14.000Z
|
mymoney/accounts/models.py
|
ychab/mymoney-server
|
40dc9fdd08b3561287a9153342b25c58de8ad8ce
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T21:20:51.000Z
|
2022-02-21T21:20:51.000Z
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mymoney.core.utils.currencies import get_currencies
class Account(models.Model):
"""
For the moment, a bank account is a singleton.
"""
label = models.CharField(max_length=255, verbose_name=_('Label'))
balance = models.DecimalField(
max_digits=10,
decimal_places=2,
default=0,
verbose_name=_('Balance'),
)
currency = models.CharField(
max_length=3,
choices=get_currencies(),
verbose_name=_('Currency'),
)
class Meta:
db_table = 'accounts'
def __str__(self):
return self.label
| 23.551724
| 69
| 0.647145
|
19e4efc505283a5275f9c45527dc2163a25e0817
| 35,508
|
py
|
Python
|
src/q-learning-pong-ai-v3.5.py
|
KumarUniverse/pong-ai
|
719d85fc62a206ecc4e4453e7222912274299605
|
[
"MIT"
] | null | null | null |
src/q-learning-pong-ai-v3.5.py
|
KumarUniverse/pong-ai
|
719d85fc62a206ecc4e4453e7222912274299605
|
[
"MIT"
] | null | null | null |
src/q-learning-pong-ai-v3.5.py
|
KumarUniverse/pong-ai
|
719d85fc62a206ecc4e4453e7222912274299605
|
[
"MIT"
] | null | null | null |
# A Q learning Pong AI agent built using Python.
import pyglet
from pyglet import shapes
from pyglet import text
import math
import random
import time
#import multiprocessing
import matplotlib.pyplot as plt
import os
import pickle
# Global variables
DIRECTION = {"IDLE": 0, "UP": 1, "DOWN": 2, "LEFT": 3, "RIGHT": 4}
# winning_score = 11
class Ball():
"""The ball object (The cube that bounces back and forth)."""
def __init__(self, pong_game):
self.width = 18 #1000 // 10 # for debugging, need to change back to 18.
self.height = 18 #1000 // 10 # for debugging, need to change back to 18.
self.x = pong_game.canvas_width // 2
self.y = pong_game.canvas_height // 2
self.move_x = DIRECTION["IDLE"]
self.move_y = DIRECTION["IDLE"]
self.speed = 9
class Paddle():
"""The paddle object (The 2 lines that move up and down)."""
def __init__(self, pong_game, side):
self.width = 18
self.height = 70
self.x = 150 if side == 'left' else pong_game.canvas_width - 150
self.y = (pong_game.canvas_height // 2) - (self.height // 2) # pong_game.canvas_height
self.score = 0
self.move = DIRECTION["IDLE"]
self.speed = 6 # (ball.speed / 1.5)
class PongGame(pyglet.window.Window):
"""The Pong game."""
def __init__(self):
self.canvas_width = 1400 # 700
self.canvas_height = 1000 # 500
self.ai1 = Paddle(self, 'left') # Q agent
#self.ai1.height = self.canvas_height // 3 # for debugging, need to remove.
#self.ai1.speed = 18 # comment out after debugging.
self.ai2 = Paddle(self, 'right')
self.ball = Ball(self)
self.winning_score = 11
self.qagent = Qagent(self, self.ai1, self.ai2, self.ball)
# self.ai2.speed = 8 # make ai2's paddle speed slower than ai1.
self.turn = self.ai2 # it's the ai2's turn first.
self.qlearn_mode = False
self.sim_sample_nums = [] # x values of visited percents graph.
self.visited_percents = [] # y values of visited percents graph.
self.qagent_action = 0
self.ball_init_direction = 1 # 1 means UP, 2 means DOWN
########################vvv PYGLET CODE vvv################################################
super().__init__(width=self.canvas_width//2, height=self.canvas_height//2, caption='Q-learning Pong',
resizable=True)
# create the paddles and the ball.
self.paddle_colors = (255,255,255) # paddle color is white
# self.ai1_rect = shapes.Rectangle(self.ai1.x, self.ai1.y, self.ai1.width, self.ai1.height,
# color=self.paddle_colors)
# #self.ai1_rect.opacity = 255
# self.ai2_rect = shapes.Rectangle(self.ai2.x, self.ai2.y, self.ai2.width, self.ai2.height,
# color=self.paddle_colors)
# self.ball_rect = shapes.Rectangle(self.ball.x, self.ball.y, self.ball.width, self.ball.height,
# color=self.paddle_colors)
self.ai1_rect = shapes.Rectangle(self.ai1.x, self.canvas_height-self.ai1.height-self.ai1.y,
self.ai1.width, self.ai1.height, color=self.paddle_colors)
self.ai2_rect = shapes.Rectangle(self.ai2.x, self.canvas_height-self.ai2.height-self.ai2.y,
self.ai2.width, self.ai2.height, color=self.paddle_colors)
self.ball_rect = shapes.Rectangle(self.ball.x, self.canvas_height-self.ball.height-self.ball.y,
self.ball.width, self.ball.height, color=self.paddle_colors)
self.line = shapes.Line(self.canvas_width//2, 0, self.canvas_width//2, self.canvas_height)
# AI1's score:
self.font = 'Courier New'
self.fsize = 50
self.font_ratio = self.fsize / self.canvas_height
self.ai1_scoreboard_x = (self.canvas_width // 2) - 500
self.ai2_scoreboard_x = (self.canvas_width // 2) + 200
self.ai_scoreboard_y = 800
self.ai1_scoreboard_x_ratio = self.ai1_scoreboard_x / self.canvas_width
self.ai2_scoreboard_x_ratio = self.ai2_scoreboard_x / self.canvas_width
self.ai_scoreboard_y_ratio = self.ai_scoreboard_y / self.canvas_height
self.ai1_scoreboard = text.Label("AI1: " + str(self.ai1.score), font_name=self.font, font_size=self.fsize,
x=self.ai1_scoreboard_x, y=self.ai_scoreboard_y)
# AI2's score:
self.ai2_scoreboard = text.Label("AI2: " + str(self.ai2.score), font_name=self.font, font_size=self.fsize,
x=self.ai2_scoreboard_x, y=self.ai_scoreboard_y)
# AI1's current action:
qaction = "IDLE 0"
if self.qagent_action < 0:
qaction = "UP 1"
elif self.qagent_action > 0:
qaction = "DOWN 2"
self.ai1_action_x = self.canvas_width // 2 - 300
self.ai1_action_y = 200
self.ai1_action_x_ratio = self.ai1_action_x / self.canvas_width
self.ai1_action_y_ratio = self.ai1_action_y / self.canvas_height
self.ai1_action = text.Label("Action: " + qaction, font_name=self.font, font_size=self.fsize,
x=self.ai1_action_x, y=self.ai1_action_y)
def on_draw(self):
self.clear()
self.ai1_rect.draw()
self.ai2_rect.draw()
self.ball_rect.draw()
self.line.draw()
self.ai1_scoreboard.draw()
self.ai2_scoreboard.draw()
self.ai1_action.draw()
def update(self):
curr_game_width, curr_game_height = self.get_size()
# self.ai1_rect.x = self.ai1.x
# #self.ai1_rect.y = self.ai1.y
# self.ai1_rect.y = self.canvas_height-self.ai1.height-self.ai1.y
# self.ai2_rect.x = self.ai2.x
# #self.ai2_rect.y = self.ai2.y
# self.ai2_rect.y = self.canvas_height-self.ai2.height-self.ai2.y
# self.ball_rect.x = self.ball.x
# #self.ball_rect.y = self.ball.y
# self.ball_rect.y = self.canvas_height-self.ball.height-self.ball.y
# self.ai1_scoreboard.text = "AI1: " + str(self.ai1.score)
# self.ai2_scoreboard.text = "AI2: " + str(self.ai2.score)
# qaction = "IDLE 0"
# if self.qagent_action < 0:
# qaction = "UP 1"
# elif self.qagent_action > 0:
# qaction = "DOWN 2"
# self.ai1_action.text = "Action: " + qaction
# Make the Pong game GUI dynamically scale based on the size of the pyglet window.
game_width_ratio = curr_game_width / self.canvas_width
game_height_ratio = curr_game_height / self.canvas_height
self.ai1_rect.width = self.ai1.width * game_width_ratio
self.ai1_rect.height = self.ai1.height * game_height_ratio
self.ai1_rect.x = self.ai1.x * game_width_ratio
self.ai1_rect.y = curr_game_height - self.ai1_rect.height \
- (self.ai1.y * game_height_ratio)
self.ai2_rect.width = self.ai2.width * game_width_ratio
self.ai2_rect.height = self.ai2.height * game_height_ratio
self.ai2_rect.x = self.ai2.x * game_width_ratio
self.ai2_rect.y = curr_game_height - self.ai2_rect.height \
- (self.ai2.y * game_height_ratio)
self.ball_rect.width = self.ball.width * game_width_ratio
self.ball_rect.height = self.ball.height * game_height_ratio
self.ball_rect.x = self.ball.x * game_width_ratio
self.ball_rect.y = curr_game_height - self.ball_rect.height \
- (self.ball.y * game_height_ratio)
self.line.x = curr_game_width / 2 #int(self.line.x * game_width_ratio)
self.line.x2 = self.line.x
#self.line.y = self.line.y * game_height_ratio
self.line.y2 = curr_game_height #int(self.line.y2 * game_height_ratio)
self.ai1_scoreboard.text = "AI1: " + str(self.ai1.score)
self.ai1_scoreboard.font_size = curr_game_height * self.font_ratio #prev_font_size * game_height_ratio
self.ai1_scoreboard.x = curr_game_width * self.ai1_scoreboard_x_ratio #self.ai1_scoreboard.x * game_width_ratio
self.ai1_scoreboard.y = curr_game_height * self.ai_scoreboard_y_ratio #self.ai1_scoreboard.y * game_height_ratio
self.ai2_scoreboard.text = "AI2: " + str(self.ai2.score)
self.ai2_scoreboard.font_size = self.ai1_scoreboard.font_size
self.ai2_scoreboard.x = curr_game_width * self.ai2_scoreboard_x_ratio #self.ai2_scoreboard.x * game_width_ratio
self.ai2_scoreboard.y = curr_game_height * self.ai_scoreboard_y_ratio #self.ai2_scoreboard.y * game_height_ratio
qaction = "IDLE 0"
if self.qagent_action < 0:
qaction = "UP 1"
elif self.qagent_action > 0:
qaction = "DOWN 2"
self.ai1_action.text = "Action: " + qaction
self.ai1_action.font_size = self.ai1_scoreboard.font_size
self.ai1_action.x = curr_game_width * self.ai1_action_x_ratio #self.ai1_action.x * game_width_ratio
self.ai1_action.y = curr_game_height * self.ai1_action_y_ratio #self.ai1_action.y * game_height_ratio
##################################^^^ PYGLET CODE ^^^##########################################
def plot_visited_states_percents(self):
fig = plt.figure()
plt.plot(self.sim_sample_nums, self.visited_percents)
plt.title("Percent of states visited vs. Number of Trials")
plt.xlabel('Number of Trials')
plt.ylabel('Percent of states visited')
#plt.show() # use this to display the graph.
plt.savefig("pong-ai-visited-graph.png")
plt.close(fig)
def reset_turn(self, victor, loser):
"""Reset the turn to the loser once the ball goes past the loser's paddle."""
self.ball = Ball(self)
self.turn = loser
victor.score += 1
self.qagent.ball = self.ball
def play(self, winning_score=11, qlearn_mode=False):
"""
Play the Pong game and keep playing until one of the players reaches the winning score.
"""
self.winning_score = winning_score
self.qlearn_mode = qlearn_mode
if self.qlearn_mode:
super().set_visible(False) # hide the Pyglet window during learning.
else:
super().set_visible(True)
opponent_prob = 0.85 # probability the opponent paddle hits the ball.
# middle_state = self.qagent.num_paddle_states // 2
self.qagent_action = 0
# Keep track of the percent of visited states as the number of sims increases
# and later plot these percents in a graph.
num_samples = 100 # max number of samples we want to sample.
sample_mod_num = 0
prev_sim_num = -1
if winning_score > num_samples:
sample_mod_num = winning_score / num_samples
else:
sample_mod_num = 1
if self.qlearn_mode:
self.sim_sample_nums = []
self.visited_percents = []
# Stop the q-learning if a trial exceeds the given time limit (in minutes).
TRIAL_TIME_LIMIT = 5
TRIAL_TIME_LIMIT_SECS = TRIAL_TIME_LIMIT * 60
trial_start_time = time.time()
# NOT NEEDED. But may be used in future to make Pyglet GUI more responsive.
# pyglet.clock.schedule_interval(self.update, 1/120.0)
# pyglet.app.run()
while self.ai1.score < winning_score and self.ai2.score < winning_score:
if self.qlearn_mode:
trial_curr_time = time.time()
trial_elapsed_time = trial_curr_time - trial_start_time
# Stop the q-learning if a trial exceeds the given time limit (in minutes).
if trial_elapsed_time >= TRIAL_TIME_LIMIT_SECS:
print(f"Time limit of {TRIAL_TIME_LIMIT} minutes reached!")
break
self.qagent_action = self.qagent.qlearn(self.ball.y, self.ai1.y)
if self.ai2.score > prev_sim_num and self.ai2.score % sample_mod_num == 0:
self.sim_sample_nums.append(self.ai2.score)
self.visited_percents.append(self.qagent.get_percent_of_states_explored())
prev_sim_num = self.ai2.score
else:
self.qagent_action = self.qagent.play_game(self.ball.y, self.ai1.y)
# PYGLET CODE: (Uncomment to display Pong GUI)
if not self.qlearn_mode:
pyglet.clock.tick()
for window in pyglet.app.windows:
window.switch_to()
window.dispatch_events()
window.dispatch_event('on_draw')
window.update()
window.flip()
# On new serve (start of each turn), reset the paddles to their
# center position and move the ball to the correct side.
if self.turn:
#print("your turn")
if self.qlearn_mode:
# Initialize AI1's paddle to a random possible position:
#self.ai1.y = random.randint(0, self.canvas_height - self.ai1.height)
self.ai1.y = random.choice(self.qagent.possible_pad_states)
else:
self.ai1.y = (self.canvas_height // 2) - (self.ai1.height // 2)
self.ai2.y = (self.canvas_height // 2) - (self.ai2.height // 2)
self.ball.move_x = DIRECTION["LEFT"] if self.turn == self.ai1 else DIRECTION["RIGHT"]
# Switch the initial direction of the ball
if self.ball_init_direction == 1:
self.ball.move_y = DIRECTION["UP"] # throw the ball from the bottom up
self.ball.y = self.canvas_height - 150 # ball starts from the bottom
self.ball_init_direction = 2
else:
self.ball.move_y = DIRECTION["DOWN"] # throw the ball from the top down
self.ball.y = 150 #154 # ball starts from the top
self.ball_init_direction = 1
self.qagent_action = 0
self.turn = None
# If the ball makes it past either of the paddles,
# add a point to the winner and reset the turn to the loser.
if self.ball.x <= 0: # ai1 lost, ai2 won the round.
trial_start_time = time.time()
self.reset_turn(self.ai2, self.ai1)
# Punish the AI every time it misses the ball.
# if qlearn_mode and self.qagent.prev_state is not None:
# self.qagent.update_reward(-1)
elif self.ball.x >= self.canvas_width - self.ball.width: # ai1 won, ai2 lost.
#print("AI1 scored a goal.")
trial_start_time = time.time()
self.reset_turn(self.ai1, self.ai2)
# # If the ball collides with the top and bottom bound limits, bounce it.
# if self.ball.y <= 0:
# self.ball.y = 0
# self.ball.move_y = DIRECTION["DOWN"]
# elif self.ball.y >= self.canvas_height - self.ball.height:
# self.ball.y = self.canvas_height - self.ball.height
# self.ball.move_y = DIRECTION["UP"]
# # Handle ai1 wall collision.
# if self.ai1.y <= 0:
# self.ai1.y = 0
# elif self.ai1.y >= self.canvas_height - self.ai1.height:
# self.ai1.y = self.canvas_height - self.ai1.height
# # Handle ai2 wall collision.
# if self.ai2.y <= 0:
# self.ai2.y = 0
# elif self.ai2.y >= self.canvas_height - self.ai2.height:
# self.ai2.y = self.canvas_height - self.ai2.height
# Handle ball movement.
# Move ball in intended direction based on move_y and move_x values.
# The ball travels faster in the x direction than in the y direction.
if self.ball.move_y == DIRECTION["UP"]:
self.ball.y -= int(self.ball.speed / 1.5)
elif self.ball.move_y == DIRECTION["DOWN"]:
self.ball.y += int(self.ball.speed / 1.5)
if self.ball.move_x == DIRECTION["LEFT"]:
self.ball.x -= self.ball.speed
elif self.ball.move_x == DIRECTION["RIGHT"]:
self.ball.x += self.ball.speed
# If the ball collides with the top and bottom bound limits, bounce it.
if self.ball.y <= 0:
self.ball.y = 0
self.ball.move_y = DIRECTION["DOWN"]
elif self.ball.y >= self.canvas_height - self.ball.height:
self.ball.y = self.canvas_height - self.ball.height
self.ball.move_y = DIRECTION["UP"]
# Handle ai1 UP and DOWN movement.
self.ai1.y += self.qagent_action
# Handle ai2 UP and DOWN movement.
# The ai2 paddle's y always follows the y position of the ball.
if self.ai2.y + (self.ai2.height // 2) > self.ball.y:
self.ai2.y -= self.ai2.speed
elif self.ai2.y + (self.ai2.height // 2) < self.ball.y:
self.ai2.y += self.ai2.speed
# Handle ai1 wall collision.
if self.ai1.y <= 0:
self.ai1.y = 0
elif self.ai1.y >= self.canvas_height - self.ai1.height:
self.ai1.y = self.canvas_height - self.ai1.height
# Handle ai2 wall collision.
if self.ai2.y <= 0:
self.ai2.y = 0
elif self.ai2.y >= self.canvas_height - self.ai2.height:
self.ai2.y = self.canvas_height - self.ai2.height
# Handle ai1 (q agent) ball collision.
if self.ball.x <= self.ai1.x + self.ai1.width and \
self.ball.x + self.ball.width >= self.ai1.x:
if self.ball.y <= self.ai1.y + self.ai1.height and \
self.ball.y + self.ball.height >= self.ai1.y:
self.ball.x = self.ai1.x + self.ball.width
self.ball.move_x = DIRECTION["RIGHT"]
# Reward the Q agent every time it hits the ball.
# if qlearn_mode and self.qagent.prev_state is not None:
# self.qagent.update_reward(1)
# Move the Q agent's paddle back to the center.
# self.qagent_action = middle_state
# Handle ai2 ball collision.
if self.ball.x <= self.ai2.x + self.ai2.width and \
self.ball.x + self.ball.width >= self.ai2.x:
if self.ball.y <= self.ai2.y + self.ai2.height and \
self.ball.y + self.ball.height >= self.ai2.y:
# Q agent learns or plays the game.
if self.qlearn_mode:
self.ball.x = self.ai2.x - self.ball.width
self.ball.move_x = DIRECTION["LEFT"]
#print("Q learning method called.")
#self.qagent_action = self.qagent.qlearn()
else: # In actual gameplay, the opponent hits the ball with probability p.
rand_num = round(random.random(), 2)
if rand_num <= opponent_prob:
self.ball.x = self.ai2.x - self.ball.width
self.ball.move_x = DIRECTION["LEFT"]
#self.qagent_action = self.qagent.play_game()
else: # misses ball
self.ball.x += self.ball.width * 2 + 1
if qlearn_mode:
#self.qagent.write_q_table('pong-qtable.dat') # Save the Q table in a file.
print("Q learning finished!")
else:
print("Pong game finished!")
if self.ai1.score == winning_score:
print("AI1 is the winner!")
elif self.ai2.score == winning_score:
print("AI2 is the winner!")
print("AI1 score (Q agent): " + str(self.ai1.score))
print(f"AI2 score ({int(opponent_prob*100)}% perfect agent): " + str(self.ai2.score))
self.ai1.score = 0
self.ai2.score = 0
self.turn = self.ai2
print()
#self.qagent.glimpse_qtable()
class Qagent():
"""The Q agent playing the Pong game."""
def __init__(self, pong_game, paddle, opponent, ball):
self.pong_game = pong_game
self.paddle = paddle
self.opponent = opponent
self.ball = ball
self.alpha = 0.1 # learning rate.
self.gamma = 0.8 # discount factor. # Before: 0.8
self.epsilon = 1 # randomness factor. e=0 makes the agent greedy.
self.num_y_directions = 2
self.num_paddle_states = math.ceil(pong_game.canvas_height / self.paddle.height) # 15
self.num_ball_states = math.ceil(pong_game.canvas_height / self.ball.height) # 56
self.rewards = [
[0 for _ in range(self.pong_game.canvas_height) # y position of paddle.
] for _ in range(self.pong_game.canvas_height) # y position of ball.
]
self.ball_actions = {0: 0, 1: -self.ball.speed, 2: self.ball.speed}
self.pad_actions = {0: 0, 1: -self.paddle.speed, 2: self.paddle.speed}
self.min_visits = 3 # The minimum number of times every state in the env should be visited.
# Initialize positive rewards in the reward table.
# Reward the agent whenever the ball's center is in the range of its paddle.
# for ball_y in range(len(self.rewards)):
# for pad_y in range(len(self.rewards[0])):
# ball_center = (ball_y + self.ball.height) // 2
# pad_right = pad_y + self.paddle.height
# if ball_center <= pad_right and ball_center >= pad_y:
# self.rewards[ball_y][pad_y] = 1
# Reward the agent more when the ball's center is aligned with the paddle's center.
        # (This reward system seems to make Q agent 20% faster at learning for large # of trials.)
# Best reward system.
for ball_y in range(len(self.rewards)):
for pad_y in range(len(self.rewards[0])):
ball_right = ball_y + self.ball.height
ball_center = (ball_y + ball_right) // 2
pad_right = pad_y + self.paddle.height
pad_center = (pad_y + pad_right) // 2
abs_y_diff = abs(ball_center - pad_center)
reward = 1 / (abs_y_diff + 1)
# if ball_center <= pad_right and ball_center >= pad_y:
# reward = 1 / (abs_y_diff + 1)
# reward *= 100
# else:
# reward = -abs_y_diff / 10
self.rewards[ball_y][pad_y] = reward
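        # Worked example of the shaping above (illustrative only): with
        # ball.height = 18 and paddle.height = 70, a ball at y=26 has its
        # center at 35, which lines up with a paddle at y=0 (center 35), so
        # abs_y_diff = 0 and reward = 1/(0+1) = 1; a ball at y=0 (center 9)
        # against the same paddle gives abs_y_diff = 26 and reward = 1/27.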
self.qtable = None
if os.path.isfile('pong-qtable.dat'):
self.read_q_table('pong-qtable.dat')
else:
self.qtable = [
[[0 for _ in range(3) # since 3 possible actions: up, down, idle.
] for _ in range(self.pong_game.canvas_height) # y position of paddle.
] for _ in range(self.pong_game.canvas_height) # y position of ball.
]
self.visited_states = [ # used to keep track of previously visited states.
[0 for _ in range(self.pong_game.canvas_height) # y position of paddle.
] for _ in range(self.pong_game.canvas_height) # y position of ball.
]
self.possible_pad_states = []
for y in range(0, self.pong_game.canvas_height-self.paddle.height+1, self.paddle.speed):
self.possible_pad_states.append(y)
for y in range(self.pong_game.canvas_height-self.paddle.height, -1, -self.paddle.speed):
self.possible_pad_states.append(y)
starting_pos = (self.pong_game.canvas_height // 2) - (self.paddle.height // 2)
for y in range(starting_pos, self.pong_game.canvas_height-self.paddle.height+1, self.paddle.speed):
self.possible_pad_states.append(y)
for y in range(starting_pos, -1, -self.paddle.speed):
self.possible_pad_states.append(y)
self.possible_pad_states.sort()
#print(self.possible_pad_states)
def r(self, s, a):
"""
A reward function R(s,a) that gives the agent a reward for taking
action a in state s.
"""
ball_y, pad_y = s
new_ball_y = ball_y + self.ball_actions[self.ball.move_y]
new_pad_y = pad_y + self.pad_actions[a]
# Make sure the new states are within the bounds of the Pong env.
if new_ball_y < 0:
new_ball_y = 0
elif new_ball_y > self.pong_game.canvas_height - self.ball.height:
new_ball_y = self.pong_game.canvas_height - self.ball.height
if new_pad_y < 0:
new_pad_y = 0
elif new_pad_y > self.pong_game.canvas_height - self.paddle.height:
new_pad_y = self.pong_game.canvas_height - self.paddle.height
return self.rewards[new_ball_y][new_pad_y]
# Reward the Q agent whenever the y distance between the paddle and ball is reduced
# and punish it otherwise.
# ball_center = (ball_y + self.ball.height) // 2
# pad_center = (pad_y + self.paddle.height) // 2
# ball_pad_diff = abs(ball_center - pad_center)
# new_ball_center = (new_ball_y + self.ball.height) // 2
# new_pad_center = (new_pad_y + self.paddle.height) // 2
# new_ball_pad_diff = abs(new_ball_center - new_pad_center)
# range_reward = 0
# new_ball_right_y = new_ball_y + self.ball.height
# new_pad_right_y = new_pad_y + self.paddle.height
# if (new_ball_y >= new_pad_y and new_ball_y <= new_pad_right_y) or \
# (new_ball_right_y <= new_pad_right_y and new_ball_right_y >= new_pad_y):
# range_reward = 1000
# diff_reward = 0
# if new_ball_pad_diff < ball_pad_diff:
# diff_reward = 1
# elif new_ball_pad_diff > ball_pad_diff:
# diff_reward = -1 #-1
# return diff_reward + range_reward
def exploration_fn(self, ball_y, pad_y):
"""
Returns the best action to take in order to explore unseen states in the environment
while reducing the probability of revisiting bad states.
"""
new_ball_y = ball_y + self.ball_actions[self.ball.move_y]
if new_ball_y < 0:
new_ball_y = 0
elif new_ball_y > self.pong_game.canvas_height - self.ball.height:
new_ball_y = self.pong_game.canvas_height - self.ball.height
possible_next_states = []
for pad_action, pad_y_change in self.pad_actions.items():
new_pad_y = pad_y + pad_y_change
if new_pad_y < 0:
new_pad_y = 0
elif new_pad_y > self.pong_game.canvas_height - self.paddle.height:
new_pad_y = self.pong_game.canvas_height - self.paddle.height
possible_next_states.append((new_ball_y, new_pad_y, pad_action))
# Stochastic exploration:
moves = [0,1,2] # idle, up, down
move_weights = [0,0,0]
max_q = max(self.qtable[new_ball_y][new_pad_y])
for (new_ball_y, new_pad_y, pad_action) in possible_next_states:
qval = self.qtable[new_ball_y][new_pad_y][pad_action]
move_weight = 0
if self.visited_states[new_ball_y][new_pad_y] < self.min_visits:
move_weight = 10
elif qval == max_q:
move_weight = 1
move_weights[pad_action] = move_weight
move = random.choices(moves, weights=move_weights)[0]
# Deterministic exploration: (for quicker training? - no, exploits prematurely)
# Always prefers unexplored states to optimal states.
# Doesn't seem to work. Agent is not exploring the environment.
# moves = [0,1,2]
# move_weights = [0,0,0]
# move = -1
# max_q = max(self.qtable[new_ball_y][new_pad_y])
# for (new_ball_y, new_pad_y, pad_action) in possible_next_states:
# move_weight = 0
# if self.visited_states[new_ball_y][new_pad_y] < self.min_visits:
# move = pad_action
# # if move != 0:
# # print(new_ball_y, new_pad_y, move)
# # print(f"Num visits: {self.visited_states[new_ball_y][new_pad_y]}")
# break
# qval = self.qtable[new_ball_y][new_pad_y][pad_action]
# if qval == max_q:
# move_weight = 1
# move_weights[pad_action] = move_weight
# if move != -1:
# move = random.choices(moves, weights=move_weights)[0]
return move
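    # Note on the stochastic exploration above (illustrative): candidate next
    # states visited fewer than min_visits times get weight 10, candidates
    # whose Q-value equals max_q get weight 1, and all others get weight 0,
    # so the paddle is biased toward unexplored (ball_y, pad_y) pairs. With
    # weights [0, 10, 1], for example, random.choices picks UP with
    # probability 10/11 (about 91%) and DOWN with probability 1/11.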
def get_percent_of_states_explored(self):
total_num_states = self.pong_game.canvas_height*self.pong_game.canvas_height
num_visited_states = 0
for i in range(self.pong_game.canvas_height):
for j in range(self.pong_game.canvas_height):
if self.visited_states[i][j] != 0:
num_visited_states += 1
percent_visited = num_visited_states / total_num_states * 100
return percent_visited
def q(self, s, a):
"""The Q function Q(s,a) gives the quality of taking action a in state s."""
ball_y = s[0]
pad_y = s[1]
try:
new_ball_y = ball_y + self.ball_actions[self.ball.move_y]
if new_ball_y < 0:
new_ball_y = 0
elif new_ball_y > self.pong_game.canvas_height - self.ball.height:
new_ball_y = self.pong_game.canvas_height - self.ball.height
pad_y_change = self.pad_actions[a]
new_pad_y = pad_y + pad_y_change
if new_pad_y < 0:
new_pad_y = 0
elif new_pad_y > self.pong_game.canvas_height - self.paddle.height:
new_pad_y = self.pong_game.canvas_height - self.paddle.height
next_state_q_values = self.qtable[new_ball_y][new_pad_y]
except IndexError:
raise Exception("One of the 2 indices for the Q table is out of bounds.")
next_state_max_q = max(next_state_q_values)
# Q-value equation for deterministic environment:
self.qtable[ball_y][pad_y][a] = self.r(s, a) + self.gamma * next_state_max_q
# print(f"ball_y: {ball_y}, pad_y: {pad_y}") # for debugging.
# print(self.r(s,a), self.qtable[ball_y][pad_y]) # for debugging.
return self.qtable[ball_y][pad_y][a]
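    # The update above is the deterministic Q-learning rule,
    # Q(s, a) <- R(s, a) + gamma * max_a' Q(s', a'), which is the usual update
    # Q <- (1 - alpha) * Q + alpha * (R + gamma * max Q') with a learning rate
    # of 1; note that self.alpha is not referenced in this method.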
def qlearn(self, ball_y, pad_y):
"""Make the Q agent learn about its environment."""
self.visited_states[ball_y][pad_y] += 1
# Remove the following line to exclude epsilon decay:
self.epsilon = max(0.1, round(1 - (self.opponent.score+1)/self.pong_game.winning_score, 2)) # makes the game finish much faster.
#self.epsilon = max(0.01, round(1 - (self.opponent.score+1)/self.pong_game.winning_score, 2)) # makes the game finish much faster.
#self.epsilon = round(1 - self.opponent.score/self.pong_game.winning_score, 10)
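        # Illustrative decay: with winning_score = 11, for example, the active
        # line above gives epsilon = max(0.1, 1 - (score + 1) / 11), so
        # exploration starts near 0.91 when the opponent's score is 0 and
        # bottoms out at the 0.1 floor as the opponent's score climbs toward
        # the winning score.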
rand_num = round(random.random(), 2) #0
move = 0
# move is the next state the paddle will go to.
if rand_num < self.epsilon: # exploration. Always true if rand_num = 0 and self.epsilon = 1
#move = random.randint(0,2) # 3 possible moves.
move = self.exploration_fn(ball_y, pad_y)
state = (ball_y, pad_y)
self.q(state, move) # Update Q value in Q-table.
else: # exploitation
return self.play_game(ball_y, pad_y)
#move = self.exploration_fn(ball_y, pad_y)
return self.pad_actions[move]
def play_game(self, ball_y, pad_y):
"""
Make the Q agent play the Pong game after having
learned all the Q values.
"""
best_next_action = 0
actions_q_values = self.qtable[ball_y][pad_y]
# try:
# new_ball_y = ball_y + self.ball_actions[self.ball.move_y]
# if new_ball_y < 0:
# new_ball_y = 0
# elif new_ball_y > self.pong_game.canvas_height - self.ball.height:
# new_ball_y = self.pong_game.canvas_height - self.ball.height
# # pad_y_change = self.pad_actions[a]
# # new_pad_y = pad_y + pad_y_change
# # if new_pad_y < 0:
# # new_pad_y = 0
# # elif new_pad_y > self.pong_game.canvas_height - self.paddle.height:
# # new_pad_y = self.pong_game.canvas_height - self.paddle.height
# next_state_q_values = self.qtable[new_ball_y][pad_y]
# except IndexError:
# raise Exception("One of the 2 indices for the Q table is out of bounds.")
curr_q = actions_q_values[best_next_action]
for action in range(len(actions_q_values)):
action_q = actions_q_values[action]
if action_q > curr_q:
curr_q = action_q
best_next_action = action
return self.pad_actions[best_next_action]
def reset_q_table(self):
"""Reset all Q values in the Q table to 0."""
        # The Q table is indexed as [ball_y][pad_y][action]; iterating over the
        # coarser *_states counters would not match its dimensions.
        for i in range(self.pong_game.canvas_height):
            for j in range(self.pong_game.canvas_height):
                for k in range(3):  # 3 possible actions: up, down, idle.
                    self.qtable[i][j][k] = 0
        self.epsilon = 1 # reset epsilon as well.
def read_q_table(self, filename):
"""Read the Q table from a file, if such a file exists."""
with open(filename, 'rb') as fp:
try:
self.qtable = pickle.load(fp)
except EOFError:
print("No objects in the data file.")
def write_q_table(self, filename):
"""Save the contents of the Q table to a file."""
with open(filename, 'wb') as fp:
pickle.dump(self.qtable, fp)
def glimpse_qtable(self):
"""Print out a small part of the Q table."""
print("Q table:")
ball_y_states = [84, 324, 780]
for ball_y_state in ball_y_states:
for pad_y_state in range(0, len(self.qtable[1])+1, self.paddle.speed*10):
# num_ball_states, num_paddle_states, num_actions
print(f"State ({ball_y_state},{pad_y_state}): \
{self.qtable[ball_y_state][pad_y_state]}") # take a glimpse at the q table.
#print(self.qtable)
if __name__ == '__main__':
pong_game = PongGame()
# If the Q table already exists, then load the table and make the Q agent play the game.
# Else train the Q agent by playing n games.
num_simulations = 11 #20000 # 100000000
if os.path.isfile('pong-qtable.dat'):
print("Game started.")
pong_game.play()
print("Game finished.")
else:
print("Q-learning started.")
start_time = time.time()
pong_game.play(winning_score=num_simulations, qlearn_mode=True)
end_time = time.time()
total_time_elapsed = end_time - start_time
print(f"Total time elapsed for {num_simulations} simulations: %.2fs" % (total_time_elapsed))
avg_time_per_simulation = round(total_time_elapsed / num_simulations, 7)
print(f"Avg. time per simulation: {avg_time_per_simulation}s")
# print("Game started.")
# pong_game.play()
# print("Game finished.")
pong_game.plot_visited_states_percents()
| 46.234375
| 138
| 0.594289
|
e7a900a88030cd76a15ce20676fa0bcb0b34607a
| 223
|
py
|
Python
|
__init__.py
|
aroraumang/email-queue-1
|
1770bda5110b12a24cedeef03e083269e9d9ad46
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
aroraumang/email-queue-1
|
1770bda5110b12a24cedeef03e083269e9d9ad46
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
aroraumang/email-queue-1
|
1770bda5110b12a24cedeef03e083269e9d9ad46
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
__init__.py
"""
from trytond.pool import Pool
from .email_queue import EmailQueue
def register():
Pool.register(
EmailQueue,
module='email_queue', type_='model'
)
| 13.9375
| 43
| 0.609865
|
4b90bca90ff89406b009615cfd569a3cd95e66b0
| 8,573
|
py
|
Python
|
djangoproject/settings/docker.py
|
Vibhu-Agarwal/djangoproject.com
|
1382ed115536dcd789eae9825ba746ba72271c4b
|
[
"BSD-3-Clause"
] | 2
|
2019-09-26T05:47:56.000Z
|
2019-09-26T08:19:03.000Z
|
djangoproject/settings/docker.py
|
Vibhu-Agarwal/djangoproject.com
|
1382ed115536dcd789eae9825ba746ba72271c4b
|
[
"BSD-3-Clause"
] | 11
|
2020-06-06T00:57:10.000Z
|
2022-03-12T00:07:58.000Z
|
djangoproject/settings/docker.py
|
Vibhu-Agarwal/djangoproject.com
|
1382ed115536dcd789eae9825ba746ba72271c4b
|
[
"BSD-3-Clause"
] | 3
|
2021-02-09T12:02:56.000Z
|
2021-08-13T03:35:05.000Z
|
# Settings for www.djangoproject.com
import os
from pathlib import Path
# Utilities
PROJECT_PACKAGE = Path(__file__).resolve().parent.parent
# The full path to the repository root.
BASE_DIR = PROJECT_PACKAGE.parent
data_dir_key = 'DJANGOPROJECT_DATA_DIR'
DATA_DIR = Path(os.environ[data_dir_key]) if data_dir_key in os.environ else BASE_DIR.parent
# Django settings
CACHE_MIDDLEWARE_SECONDS = 60 * 5 # 5 minutes
CACHE_MIDDLEWARE_KEY_PREFIX = 'django'
DATABASES = {
'default': {
'ENGINE': os.environ.get('SQL_ENGINE'),
'NAME': os.environ.get('SQL_DATABASE'),
'USER': os.environ.get('SQL_USER'),
'PASSWORD': os.environ.get('SQL_PASSWORD'),
'HOST': os.environ.get('SQL_HOST'),
'PORT': os.environ.get('SQL_PORT'),
}
}
DATABASE_ROUTERS = ['tracdb.db_router.TracRouter']
DEFAULT_FROM_EMAIL = "noreply@djangoproject.com"
FUNDRAISING_DEFAULT_FROM_EMAIL = "fundraising@djangoproject.com"
FIXTURE_DIRS = [str(PROJECT_PACKAGE.joinpath('fixtures'))]
INSTALLED_APPS = [
'accounts',
'aggregator',
'blog',
'contact',
'dashboard',
'docs.apps.DocsConfig',
'foundation',
'legacy',
'members',
'releases',
'svntogit',
'tracdb',
'fundraising',
'captcha',
'registration',
'django_hosts',
'sorl.thumbnail',
'djmoney',
'django.contrib.sites',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.flatpages',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.postgres',
'django.contrib.redirects',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django_push.subscriber',
]
LANGUAGE_CODE = 'en-us'
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"simple": {"format": "[%(name)s] %(levelname)s: %(message)s"},
"full": {"format": "%(asctime)s [%(name)s] %(levelname)s: %(message)s"},
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '[%(server_time)s] %(message)s',
},
},
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse",
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
},
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
},
"loggers": {
"django.request": {
"handlers": [],
"level": "ERROR",
"propagate": False,
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
},
}
}
LOGIN_REDIRECT_URL = 'edit_profile'
MEDIA_URL = '/m/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_hosts.middleware.HostsRequestMiddleware',
# Put LocaleMiddleware before SessionMiddleware to prevent the former from accessing the
# session and adding 'Vary: Cookie' to all responses.
'djangoproject.middleware.ExcludeHostsLocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
'django_hosts.middleware.HostsResponseMiddleware',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'accounts.hashers.PBKDF2WrappedSHA1PasswordHasher',
]
ROOT_URLCONF = 'djangoproject.urls.www'
SECRET_KEY = os.environ.get('SECRET_KEY')
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
SERVER_EMAIL = "root@djangoproject.com"
SESSION_COOKIE_HTTPONLY = True
SILENCED_SYSTEM_CHECKS = [
'fields.W342', # tracdb has ForeignKey(unique=True) in lieu of multi-col PKs
'security.W008', # SSL redirect is handled by nginx
'security.W009', # SECRET_KEY is setup through Ansible secrets
'captcha.recaptcha_test_key_error',
]
SITE_ID = 1
STATICFILES_DIRS = [str(PROJECT_PACKAGE.joinpath('static'))]
STATIC_URL = '/s/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(PROJECT_PACKAGE.joinpath('templates'))],
'APP_DIRS': True,
'OPTIONS': {
'builtins': [
'django_hosts.templatetags.hosts_override',
],
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.static',
'django.template.context_processors.media',
'django.contrib.messages.context_processors.messages',
'docs.context_processors.docs_version',
'releases.context_processors.django_version',
'aggregator.context_processors.community_stats',
'django.template.context_processors.request',
],
},
},
]
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = False
USE_TZ = False
# django-contact-form / Akismet settings
AKISMET_API_KEY = "c892e4962244"
# django-hosts settings
DEFAULT_HOST = 'www'
HOST_SCHEME = 'http'
HOST_SITE_TIMEOUT = 3600
ROOT_HOSTCONF = 'djangoproject.hosts'
# django-registration settings
ACCOUNT_ACTIVATION_DAYS = 3
REGISTRATION_EMAIL_HTML = False
# aggregator / PubSubHubbub settings
FEED_APPROVERS_GROUP_NAME = "feed-approver"
# django-push settings
PUSH_HUB = 'https://push.superfeedr.com/'
PUSH_CREDENTIALS = 'aggregator.utils.push_credentials'
# SUPERFEEDR_CREDS is a 2 element list in the form of [email,secretkey]
SUPERFEEDR_CREDS = os.environ.get('superfeedr_creds', ["any@email.com", "some_string"])
# Fastly credentials
FASTLY_API_KEY = os.environ.get('fastly_api_key', 'xyz')
FASTLY_SERVICE_URL = os.environ.get('fastly_service_url', 'xyz')
# Stripe settings
# only testing keys as fallback values here please!
STRIPE_SECRET_KEY = os.environ.get('stripe_secret_key', 'sk_test_x6zP4wd7Z5jcvDOJbbHZlHHt')
STRIPE_PUBLISHABLE_KEY = os.environ.get('stripe_publishable_key', 'pk_test_TyB5jcROwK8mlCNrn3dCwW7l')
# sorl-thumbnail settings
THUMBNAIL_PRESERVE_FORMAT = True
THUMBNAIL_ALTERNATIVE_RESOLUTIONS = [2]
# dashboard settings
TRAC_RPC_URL = "https://code.djangoproject.com/rpc"
TRAC_URL = "https://code.djangoproject.com/"
ALLOWED_HOSTS = ['.localhost', '127.0.0.1', 'www.127.0.0.1']
LOCALE_MIDDLEWARE_EXCLUDED_HOSTS = ['docs.djangoproject.localhost']
DEBUG = True
THUMBNAIL_DEBUG = DEBUG
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'trololololol',
},
'docs-pages': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'docs-pages',
},
}
CSRF_COOKIE_SECURE = False
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MEDIA_ROOT = str(DATA_DIR.joinpath('media_root'))
SESSION_COOKIE_SECURE = False
STATIC_ROOT = str(DATA_DIR.joinpath('static_root'))
# Docs settings
DOCS_BUILD_ROOT = DATA_DIR.joinpath('djangodocs')
# django-hosts settings
PARENT_HOST = 'localhost:8000'
# django-push settings
PUSH_SSL_CALLBACK = False
# Enable optional components
if DEBUG:
try:
import debug_toolbar # NOQA
except ImportError:
pass
else:
INSTALLED_APPS.append('debug_toolbar')
INTERNAL_IPS = ['127.0.0.1']
MIDDLEWARE.insert(
MIDDLEWARE.index('django.middleware.common.CommonMiddleware') + 1,
'debug_toolbar.middleware.DebugToolbarMiddleware'
)
MIDDLEWARE.insert(
MIDDLEWARE.index('debug_toolbar.middleware.DebugToolbarMiddleware') + 1,
'djangoproject.middleware.CORSMiddleware'
)
| 27.044164
| 101
| 0.672343
|
b88392820e6e6b8bfbd47b29aabe04e0b335c0ae
| 3,438
|
py
|
Python
|
util/clean.py
|
mananshah99/diseasenetworks
|
14f910af07d8d62cf5643b828598dd8c958db750
|
[
"MIT"
] | null | null | null |
util/clean.py
|
mananshah99/diseasenetworks
|
14f910af07d8d62cf5643b828598dd8c958db750
|
[
"MIT"
] | null | null | null |
util/clean.py
|
mananshah99/diseasenetworks
|
14f910af07d8d62cf5643b828598dd8c958db750
|
[
"MIT"
] | null | null | null |
'''
clean.py
Description: Performs extensive tweet cleaning procedures, including removing
html character codes and links, separating sentences, and fixing ascii.
Copyright (c) 2016, Manan Shah. All rights reserved. Redistribution and use in
source and binary forms, with or without modification, are not permitted without
retention of this notice.
'''
import re
import sys
sys.path.append('../tagger/') # to use NLPlib
import NLPlib as nlp
from helper import * # helper functions
# some popular html character codes
d = {'&':'&',
'"':'"',
''':"'",
'<':'<',
'>':'>',
'¢':'cent',
'£':'pound',
'¥':'yen',
'€':'euro',
'§':'section',
'©':'copyright',
'®':'registered trademark',
'™':'trademark'
}
def remove_html(tweet):
return re.sub(r'<.*?>', '', tweet)
def convert_to_ascii(tweet):
while len(re.findall(r'&\w+;', tweet)) > 0: # while there exists the pattern "&...;"
for key in d:
if re.search(key, tweet): # convert html code to ascii
tweet = re.sub(key, d[key], tweet)
return tweet
def remove_links(tweet):
return re.sub(r'((http|https|ssh|ftp|www)|\w+\.\w+).*?( |$)', '', tweet, flags=re.IGNORECASE) #http, Http, HTTP, ssh, ftp, www, etc.
def remove_twitter_tags(tweet):
regex = '(@|#)(?P<tag_name>\w+)(?P<end>.*?( |$))'
while len(re.findall(regex, tweet)) > 0:
match = re.search(regex, tweet) #finds the first occurence of the regex in tweet
replace = match.group('tag_name') + match.group('end')
tweet = re.sub(regex, replace, tweet, 1)
return tweet
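# Illustrative example: remove_twitter_tags("@user hi #python fans") strips
# the leading '@'/'#' characters but keeps the tag text, giving
# "user hi python fans".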
def separate_sentences(tweet):
symbols = ['.', '!', '?']
processed = tweet.rstrip()
for sym in symbols:
processed = edit_line_r(processed, sym, '\n')
return processed
def space(tweet):
regex = '(?P<prefix>\w+?)(?P<end>!+|\?+|\.+)'
# ..., !!, ?? will be kept together, but spaced from whatever is before it
while len(re.findall(regex, tweet)) > 0:
match = re.search(regex, tweet)
replace = match.group('prefix') + ' ' + match.group('end')
tweet = re.sub(regex, replace, tweet, 1)
return tweet
def tokenize(tweet):
tweet = re.sub("'(?!t)", " '", tweet)
return re.sub("n't", " n't", tweet)
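# Illustrative example: tokenize("can't I'm") splits clitics into separate
# tokens, giving "ca n't I 'm".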
tagger = nlp.NLPlib()
def tag(tweet):
sentences = tweet.rstrip().split('\n')
processed = ''
for i in range(len(sentences)): #go through each sentence in a tweet
sent = sentences[i].strip().split(' ')
tags = tagger.tag(sent)
tagged = []
for i in range(len(tags)):
tagged.append(sent[i] + '/' + tags[i]) #tag each token in the sentence
processed += ' '.join(tagged) + '\n' #join into a processed tweet
return '|\n' + processed.rstrip() + '\n'
def twtt(raw_file, processed_file):
raw = open(raw_file, 'r')
processed = open(processed_file, 'w+')
for line in raw:
#line = remove_html(line) #html removed
#line = convert_to_ascii(line) #html character codes changed to ascii
line = remove_links(line) #urls removed
line = remove_twitter_tags(line) #hash tags and @-tags removed
line = separate_sentences(line)
line = space(line)
line = tokenize(line)
line = tag(line)
processed.write(line)
processed.write('|')
raw.close()
processed.close()
'''
if __name__ == '__main__':
raw_file = sys.argv[1]
processed_file = sys.argv[2]
twtt(raw_file, processed_file)
print "[clean.py] finished processing and tagging tweets"
'''
| 30.696429
| 134
| 0.633799
|
4dee67961f0cfb8e42892a8b6f1b01bbec0b9055
| 1,737
|
py
|
Python
|
zerver/webhooks/codeship/view.py
|
noabenefraim/zulip
|
708b5e12353513911c2cb8cad35db699a48ea860
|
[
"Apache-2.0"
] | 4
|
2020-09-26T17:46:27.000Z
|
2021-06-24T16:56:17.000Z
|
zerver/webhooks/codeship/view.py
|
noabenefraim/zulip
|
708b5e12353513911c2cb8cad35db699a48ea860
|
[
"Apache-2.0"
] | 1
|
2016-07-16T16:54:33.000Z
|
2016-07-16T16:54:33.000Z
|
zerver/webhooks/codeship/view.py
|
noabenefraim/zulip
|
708b5e12353513911c2cb8cad35db699a48ea860
|
[
"Apache-2.0"
] | 1
|
2020-11-26T14:09:56.000Z
|
2020-11-26T14:09:56.000Z
|
# Webhooks for external integrations.
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
CODESHIP_TOPIC_TEMPLATE = '{project_name}'
CODESHIP_MESSAGE_TEMPLATE = '[Build]({build_url}) triggered by {committer} on {branch} branch {status}.'
CODESHIP_DEFAULT_STATUS = 'has {status} status'
CODESHIP_STATUS_MAPPER = {
'testing': 'started',
'error': 'failed',
'success': 'succeeded',
}
@webhook_view('Codeship')
@has_request_variables
def api_codeship_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
payload = payload['build']
subject = get_subject_for_http_request(payload)
body = get_body_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
return CODESHIP_TOPIC_TEMPLATE.format(project_name=payload['project_name'])
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
return CODESHIP_MESSAGE_TEMPLATE.format(
build_url=payload['build_url'],
committer=payload['committer'],
branch=payload['branch'],
status=get_status_message(payload),
)
def get_status_message(payload: Dict[str, Any]) -> str:
build_status = payload['status']
return CODESHIP_STATUS_MAPPER.get(build_status, CODESHIP_DEFAULT_STATUS.format(status=build_status))
| 34.058824
| 104
| 0.755901
|
9ac1e938cbfcc013ad82582f3f83ee8a80660614
| 3,114
|
py
|
Python
|
tests/test_dist.py
|
loricsstudios/python-semantic-release
|
93f28b15c972e0b2a43c5cbe7ab98cbc747bb9b7
|
[
"MIT"
] | null | null | null |
tests/test_dist.py
|
loricsstudios/python-semantic-release
|
93f28b15c972e0b2a43c5cbe7ab98cbc747bb9b7
|
[
"MIT"
] | null | null | null |
tests/test_dist.py
|
loricsstudios/python-semantic-release
|
93f28b15c972e0b2a43c5cbe7ab98cbc747bb9b7
|
[
"MIT"
] | null | null | null |
from semantic_release.dist import build_dists, should_build, should_remove_dist, remove_dists
from . import pytest
@pytest.mark.parametrize(
"commands",
["sdist bdist_wheel", "sdist", "bdist_wheel", "sdist bdist_wheel custom_cmd"],
)
def test_build_command(mocker, commands):
mocker.patch("semantic_release.dist.config.get", lambda *a: commands)
mock_run = mocker.patch("semantic_release.dist.run")
build_dists()
mock_run.assert_called_once_with(commands)
@pytest.mark.parametrize(
"config,expected",
[
(
{
"upload_to_pypi": True,
"upload_to_release": True,
"build_command": "python setup.py build",
},
True,
),
(
{"upload_to_pypi": True, "upload_to_release": True, "build_command": False},
False,
),
(
{"upload_to_pypi": True, "upload_to_release": True, "build_command": None},
False,
),
(
{"upload_to_pypi": True, "upload_to_release": True, "build_command": ""},
False,
),
(
{
"upload_to_pypi": False,
"upload_to_release": True,
"build_command": "python setup.py build",
},
True,
),
(
{
"upload_to_pypi": True,
"upload_to_release": False,
"build_command": "python setup.py build",
},
True,
),
(
{
"upload_to_pypi": False,
"upload_to_release": False,
"build_command": "python setup.py build",
},
False,
),
],
)
def test_should_build(config, expected, mocker):
mocker.patch("semantic_release.cli.config.get", lambda key: config.get(key))
assert should_build() == expected
@pytest.mark.parametrize(
"config,expected",
[
(
{
"upload_to_pypi": True,
"upload_to_release": True,
"build_command": "python setup.py build",
"remove_dist": True,
},
True,
),
(
{
"upload_to_pypi": True,
"upload_to_release": True,
"build_command": "python setup.py build",
"remove_dist": False,
},
False,
),
(
{
"upload_to_pypi": False,
"upload_to_release": False,
"build_command": False,
"remove_dist": True,
},
False,
),
],
)
def test_should_remove_dist(config, expected, mocker):
mocker.patch("semantic_release.cli.config.get", lambda key: config.get(key))
assert should_remove_dist() == expected
def test_remove_dists(mocker):
mock_rmtree = mocker.patch("semantic_release.dist.shutil.rmtree")
remove_dists('somepath')
mock_rmtree.assert_called_once_with('somepath', ignore_errors=True)
| 28.309091
| 93
| 0.51413
|
e625500a33c42da6be01e41bc75f50a774228ecc
| 900
|
py
|
Python
|
libraries/botbuilder-azure/botbuilder/azure/__init__.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 388
|
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botbuilder-azure/botbuilder/azure/__init__.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 1,286
|
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botbuilder-azure/botbuilder/azure/__init__.py
|
Fl4v/botbuilder-python
|
4003d713beb8fb986a01cfd11632eabc65858618
|
[
"MIT"
] | 168
|
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .about import __version__
from .azure_queue_storage import AzureQueueStorage
from .cosmosdb_storage import CosmosDbStorage, CosmosDbConfig, CosmosDbKeyEscape
from .cosmosdb_partitioned_storage import (
CosmosDbPartitionedStorage,
CosmosDbPartitionedConfig,
)
from .blob_storage import BlobStorage, BlobStorageSettings
__all__ = [
"AzureQueueStorage",
"BlobStorage",
"BlobStorageSettings",
"CosmosDbStorage",
"CosmosDbConfig",
"CosmosDbKeyEscape",
"CosmosDbPartitionedStorage",
"CosmosDbPartitionedConfig",
"__version__",
]
| 32.142857
| 80
| 0.644444
|
0914ebbb45a3088d755a3b310dc03568b57a7d8e
| 23,589
|
py
|
Python
|
datasets/common_voice/common_voice.py
|
yazdanbakhsh/datasets
|
c81e353cffa5a631847bd9cdc106806ec88fe6fe
|
[
"Apache-2.0"
] | null | null | null |
datasets/common_voice/common_voice.py
|
yazdanbakhsh/datasets
|
c81e353cffa5a631847bd9cdc106806ec88fe6fe
|
[
"Apache-2.0"
] | null | null | null |
datasets/common_voice/common_voice.py
|
yazdanbakhsh/datasets
|
c81e353cffa5a631847bd9cdc106806ec88fe6fe
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common Voice Dataset"""
import datasets
from datasets.tasks import AutomaticSpeechRecognition
_DATA_URL = "https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/cv-corpus-6.1-2020-12-11/{}.tar.gz"
_CITATION = """\
@inproceedings{commonvoice:2020,
author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
title = {Common Voice: A Massively-Multilingual Speech Corpus},
booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
pages = {4211--4215},
year = 2020
}
"""
_DESCRIPTION = """\
Common Voice is Mozilla's initiative to help teach machines how real people speak.
The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
"""
_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
_LICENSE = "https://github.com/common-voice/common-voice/blob/main/LICENSE"
_LANGUAGES = {
"ab": {
"Language": "Abkhaz",
"Date": "2020-12-11",
"Size": "39 MB",
"Version": "ab_1h_2020-12-11",
"Validated_Hr_Total": 0.05,
"Overall_Hr_Total": 1,
"Number_Of_Voice": 14,
},
"ar": {
"Language": "Arabic",
"Date": "2020-12-11",
"Size": "2 GB",
"Version": "ar_77h_2020-12-11",
"Validated_Hr_Total": 49,
"Overall_Hr_Total": 77,
"Number_Of_Voice": 672,
},
"as": {
"Language": "Assamese",
"Date": "2020-12-11",
"Size": "21 MB",
"Version": "as_0.78h_2020-12-11",
"Validated_Hr_Total": 0.74,
"Overall_Hr_Total": 0.78,
"Number_Of_Voice": 17,
},
"br": {
"Language": "Breton",
"Date": "2020-12-11",
"Size": "444 MB",
"Version": "br_16h_2020-12-11",
"Validated_Hr_Total": 7,
"Overall_Hr_Total": 16,
"Number_Of_Voice": 157,
},
"ca": {
"Language": "Catalan",
"Date": "2020-12-11",
"Size": "19 GB",
"Version": "ca_748h_2020-12-11",
"Validated_Hr_Total": 623,
"Overall_Hr_Total": 748,
"Number_Of_Voice": 5376,
},
"cnh": {
"Language": "Hakha Chin",
"Date": "2020-12-11",
"Size": "39 MB",
"Version": "ab_1h_2020-12-11",
"Validated_Hr_Total": 0.05,
"Overall_Hr_Total": 1,
"Number_Of_Voice": 14,
},
"cs": {
"Language": "Czech",
"Date": "2020-12-11",
"Size": "39 MB",
"Version": "ab_1h_2020-12-11",
"Validated_Hr_Total": 0.05,
"Overall_Hr_Total": 1,
"Number_Of_Voice": 14,
},
"cv": {
"Language": "Chuvash",
"Date": "2020-12-11",
"Size": "419 MB",
"Version": "cv_16h_2020-12-11",
"Validated_Hr_Total": 4,
"Overall_Hr_Total": 16,
"Number_Of_Voice": 92,
},
"cy": {
"Language": "Welsh",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "cy_124h_2020-12-11",
"Validated_Hr_Total": 95,
"Overall_Hr_Total": 124,
"Number_Of_Voice": 1382,
},
"de": {
"Language": "German",
"Date": "2020-12-11",
"Size": "22 GB",
"Version": "de_836h_2020-12-11",
"Validated_Hr_Total": 777,
"Overall_Hr_Total": 836,
"Number_Of_Voice": 12659,
},
"dv": {
"Language": "Dhivehi",
"Date": "2020-12-11",
"Size": "515 MB",
"Version": "dv_19h_2020-12-11",
"Validated_Hr_Total": 18,
"Overall_Hr_Total": 19,
"Number_Of_Voice": 167,
},
"el": {
"Language": "Greek",
"Date": "2020-12-11",
"Size": "364 MB",
"Version": "el_13h_2020-12-11",
"Validated_Hr_Total": 6,
"Overall_Hr_Total": 13,
"Number_Of_Voice": 118,
},
"en": {
"Language": "English",
"Date": "2020-12-11",
"Size": "56 GB",
"Version": "en_2181h_2020-12-11",
"Validated_Hr_Total": 1686,
"Overall_Hr_Total": 2181,
"Number_Of_Voice": 66173,
},
"eo": {
"Language": "Esperanto",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "eo_102h_2020-12-11",
"Validated_Hr_Total": 90,
"Overall_Hr_Total": 102,
"Number_Of_Voice": 574,
},
"es": {
"Language": "Spanish",
"Date": "2020-12-11",
"Size": "15 GB",
"Version": "es_579h_2020-12-11",
"Validated_Hr_Total": 324,
"Overall_Hr_Total": 579,
"Number_Of_Voice": 19484,
},
"et": {
"Language": "Estonian",
"Date": "2020-12-11",
"Size": "732 MB",
"Version": "et_27h_2020-12-11",
"Validated_Hr_Total": 19,
"Overall_Hr_Total": 27,
"Number_Of_Voice": 543,
},
"eu": {
"Language": "Basque",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "eu_131h_2020-12-11",
"Validated_Hr_Total": 89,
"Overall_Hr_Total": 131,
"Number_Of_Voice": 1028,
},
"fa": {
"Language": "Persian",
"Date": "2020-12-11",
"Size": "8 GB",
"Version": "fa_321h_2020-12-11",
"Validated_Hr_Total": 282,
"Overall_Hr_Total": 321,
"Number_Of_Voice": 3655,
},
"fi": {
"Language": "Finnish",
"Date": "2020-12-11",
"Size": "48 MB",
"Version": "fi_1h_2020-12-11",
"Validated_Hr_Total": 1,
"Overall_Hr_Total": 1,
"Number_Of_Voice": 27,
},
"fr": {
"Language": "French",
"Date": "2020-12-11",
"Size": "18 GB",
"Version": "fr_682h_2020-12-11",
"Validated_Hr_Total": 623,
"Overall_Hr_Total": 682,
"Number_Of_Voice": 12953,
},
"fy-NL": {
"Language": "Frisian",
"Date": "2020-12-11",
"Size": "1 GB",
"Version": "fy-NL_46h_2020-12-11",
"Validated_Hr_Total": 14,
"Overall_Hr_Total": 46,
"Number_Of_Voice": 467,
},
"ga-IE": {
"Language": "Irish",
"Date": "2020-12-11",
"Size": "149 MB",
"Version": "ga-IE_5h_2020-12-11",
"Validated_Hr_Total": 3,
"Overall_Hr_Total": 5,
"Number_Of_Voice": 101,
},
"hi": {
"Language": "Hindi",
"Date": "2020-12-11",
"Size": "20 MB",
"Version": "hi_0.8h_2020-12-11",
"Validated_Hr_Total": 0.54,
"Overall_Hr_Total": 0.8,
"Number_Of_Voice": 31,
},
"hsb": {
"Language": "Sorbian, Upper",
"Date": "2020-12-11",
"Size": "76 MB",
"Version": "hsb_2h_2020-12-11",
"Validated_Hr_Total": 2,
"Overall_Hr_Total": 2,
"Number_Of_Voice": 19,
},
"hu": {
"Language": "Hungarian",
"Date": "2020-12-11",
"Size": "232 MB",
"Version": "hu_8h_2020-12-11",
"Validated_Hr_Total": 8,
"Overall_Hr_Total": 8,
"Number_Of_Voice": 47,
},
"ia": {
"Language": "InterLinguia",
"Date": "2020-12-11",
"Size": "216 MB",
"Version": "ia_8h_2020-12-11",
"Validated_Hr_Total": 6,
"Overall_Hr_Total": 8,
"Number_Of_Voice": 36,
},
"id": {
"Language": "Indonesian",
"Date": "2020-12-11",
"Size": "454 MB",
"Version": "id_17h_2020-12-11",
"Validated_Hr_Total": 9,
"Overall_Hr_Total": 17,
"Number_Of_Voice": 219,
},
"it": {
"Language": "Italian",
"Date": "2020-12-11",
"Size": "5 GB",
"Version": "it_199h_2020-12-11",
"Validated_Hr_Total": 158,
"Overall_Hr_Total": 199,
"Number_Of_Voice": 5729,
},
"ja": {
"Language": "Japanese",
"Date": "2020-12-11",
"Size": "146 MB",
"Version": "ja_5h_2020-12-11",
"Validated_Hr_Total": 3,
"Overall_Hr_Total": 5,
"Number_Of_Voice": 235,
},
"ka": {
"Language": "Georgian",
"Date": "2020-12-11",
"Size": "99 MB",
"Version": "ka_3h_2020-12-11",
"Validated_Hr_Total": 3,
"Overall_Hr_Total": 3,
"Number_Of_Voice": 44,
},
"kab": {
"Language": "Kabyle",
"Date": "2020-12-11",
"Size": "16 GB",
"Version": "kab_622h_2020-12-11",
"Validated_Hr_Total": 525,
"Overall_Hr_Total": 622,
"Number_Of_Voice": 1309,
},
"ky": {
"Language": "Kyrgyz",
"Date": "2020-12-11",
"Size": "553 MB",
"Version": "ky_22h_2020-12-11",
"Validated_Hr_Total": 11,
"Overall_Hr_Total": 22,
"Number_Of_Voice": 134,
},
"lg": {
"Language": "Luganda",
"Date": "2020-12-11",
"Size": "199 MB",
"Version": "lg_8h_2020-12-11",
"Validated_Hr_Total": 3,
"Overall_Hr_Total": 8,
"Number_Of_Voice": 76,
},
"lt": {
"Language": "Lithuanian",
"Date": "2020-12-11",
"Size": "129 MB",
"Version": "lt_4h_2020-12-11",
"Validated_Hr_Total": 2,
"Overall_Hr_Total": 4,
"Number_Of_Voice": 30,
},
"lv": {
"Language": "Latvian",
"Date": "2020-12-11",
"Size": "199 MB",
"Version": "lv_7h_2020-12-11",
"Validated_Hr_Total": 6,
"Overall_Hr_Total": 7,
"Number_Of_Voice": 99,
},
"mn": {
"Language": "Mongolian",
"Date": "2020-12-11",
"Size": "464 MB",
"Version": "mn_17h_2020-12-11",
"Validated_Hr_Total": 11,
"Overall_Hr_Total": 17,
"Number_Of_Voice": 376,
},
"mt": {
"Language": "Maltese",
"Date": "2020-12-11",
"Size": "405 MB",
"Version": "mt_15h_2020-12-11",
"Validated_Hr_Total": 7,
"Overall_Hr_Total": 15,
"Number_Of_Voice": 171,
},
"nl": {
"Language": "Dutch",
"Date": "2020-12-11",
"Size": "2 GB",
"Version": "nl_63h_2020-12-11",
"Validated_Hr_Total": 59,
"Overall_Hr_Total": 63,
"Number_Of_Voice": 1012,
},
"or": {
"Language": "Odia",
"Date": "2020-12-11",
"Size": "190 MB",
"Version": "or_7h_2020-12-11",
"Validated_Hr_Total": 0.87,
"Overall_Hr_Total": 7,
"Number_Of_Voice": 34,
},
"pa-IN": {
"Language": "Punjabi",
"Date": "2020-12-11",
"Size": "67 MB",
"Version": "pa-IN_2h_2020-12-11",
"Validated_Hr_Total": 0.5,
"Overall_Hr_Total": 2,
"Number_Of_Voice": 26,
},
"pl": {
"Language": "Polish",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "pl_129h_2020-12-11",
"Validated_Hr_Total": 108,
"Overall_Hr_Total": 129,
"Number_Of_Voice": 2647,
},
"pt": {
"Language": "Portuguese",
"Date": "2020-12-11",
"Size": "2 GB",
"Version": "pt_63h_2020-12-11",
"Validated_Hr_Total": 50,
"Overall_Hr_Total": 63,
"Number_Of_Voice": 1120,
},
"rm-sursilv": {
"Language": "Romansh Sursilvan",
"Date": "2020-12-11",
"Size": "263 MB",
"Version": "rm-sursilv_9h_2020-12-11",
"Validated_Hr_Total": 5,
"Overall_Hr_Total": 9,
"Number_Of_Voice": 78,
},
"rm-vallader": {
"Language": "Romansh Vallader",
"Date": "2020-12-11",
"Size": "103 MB",
"Version": "rm-vallader_3h_2020-12-11",
"Validated_Hr_Total": 2,
"Overall_Hr_Total": 3,
"Number_Of_Voice": 39,
},
"ro": {
"Language": "Romanian",
"Date": "2020-12-11",
"Size": "250 MB",
"Version": "ro_9h_2020-12-11",
"Validated_Hr_Total": 6,
"Overall_Hr_Total": 9,
"Number_Of_Voice": 130,
},
"ru": {
"Language": "Russian",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "ru_130h_2020-12-11",
"Validated_Hr_Total": 111,
"Overall_Hr_Total": 130,
"Number_Of_Voice": 1412,
},
"rw": {
"Language": "Kinyarwanda",
"Date": "2020-12-11",
"Size": "40 GB",
"Version": "rw_1510h_2020-12-11",
"Validated_Hr_Total": 1183,
"Overall_Hr_Total": 1510,
"Number_Of_Voice": 410,
},
"sah": {
"Language": "Sakha",
"Date": "2020-12-11",
"Size": "173 MB",
"Version": "sah_6h_2020-12-11",
"Validated_Hr_Total": 4,
"Overall_Hr_Total": 6,
"Number_Of_Voice": 42,
},
"sl": {
"Language": "Slovenian",
"Date": "2020-12-11",
"Size": "212 MB",
"Version": "sl_7h_2020-12-11",
"Validated_Hr_Total": 5,
"Overall_Hr_Total": 7,
"Number_Of_Voice": 82,
},
"sv-SE": {
"Language": "Swedish",
"Date": "2020-12-11",
"Size": "402 MB",
"Version": "sv-SE_15h_2020-12-11",
"Validated_Hr_Total": 12,
"Overall_Hr_Total": 15,
"Number_Of_Voice": 222,
},
"ta": {
"Language": "Tamil",
"Date": "2020-12-11",
"Size": "648 MB",
"Version": "ta_24h_2020-12-11",
"Validated_Hr_Total": 14,
"Overall_Hr_Total": 24,
"Number_Of_Voice": 266,
},
"th": {
"Language": "Thai",
"Date": "2020-12-11",
"Size": "325 MB",
"Version": "th_12h_2020-12-11",
"Validated_Hr_Total": 8,
"Overall_Hr_Total": 12,
"Number_Of_Voice": 182,
},
"tr": {
"Language": "Turkish",
"Date": "2020-12-11",
"Size": "592 MB",
"Version": "tr_22h_2020-12-11",
"Validated_Hr_Total": 20,
"Overall_Hr_Total": 22,
"Number_Of_Voice": 678,
},
"tt": {
"Language": "Tatar",
"Date": "2020-12-11",
"Size": "741 MB",
"Version": "tt_28h_2020-12-11",
"Validated_Hr_Total": 26,
"Overall_Hr_Total": 28,
"Number_Of_Voice": 185,
},
"uk": {
"Language": "Ukrainian",
"Date": "2020-12-11",
"Size": "1 GB",
"Version": "uk_43h_2020-12-11",
"Validated_Hr_Total": 30,
"Overall_Hr_Total": 43,
"Number_Of_Voice": 459,
},
"vi": {
"Language": "Vietnamese",
"Date": "2020-12-11",
"Size": "50 MB",
"Version": "vi_1h_2020-12-11",
"Validated_Hr_Total": 0.74,
"Overall_Hr_Total": 1,
"Number_Of_Voice": 62,
},
"vot": {
"Language": "Votic",
"Date": "2020-12-11",
"Size": "7 MB",
"Version": "vot_0.28h_2020-12-11",
"Validated_Hr_Total": 0,
"Overall_Hr_Total": 0.28,
"Number_Of_Voice": 3,
},
"zh-CN": {
"Language": "Chinese (China)",
"Date": "2020-12-11",
"Size": "2 GB",
"Version": "zh-CN_78h_2020-12-11",
"Validated_Hr_Total": 56,
"Overall_Hr_Total": 78,
"Number_Of_Voice": 3501,
},
"zh-HK": {
"Language": "Chinese (Hong Kong)",
"Date": "2020-12-11",
"Size": "3 GB",
"Version": "zh-HK_100h_2020-12-11",
"Validated_Hr_Total": 50,
"Overall_Hr_Total": 100,
"Number_Of_Voice": 2536,
},
"zh-TW": {
"Language": "Chinese (Taiwan)",
"Date": "2020-12-11",
"Size": "2 GB",
"Version": "zh-TW_78h_2020-12-11",
"Validated_Hr_Total": 55,
"Overall_Hr_Total": 78,
"Number_Of_Voice": 1444,
},
}
class CommonVoiceConfig(datasets.BuilderConfig):
"""BuilderConfig for CommonVoice."""
def __init__(self, name, sub_version, **kwargs):
"""
Args:
name: `string`, the language id used as the config name
sub_version: `string`, the dataset version string for this language
**kwargs: keyword arguments forwarded to super.
"""
self.sub_version = sub_version
self.language = kwargs.pop("language", None)
self.date_of_snapshot = kwargs.pop("date", None)
self.size = kwargs.pop("size", None)
self.validated_hr_total = kwargs.pop("val_hrs", None)
self.total_hr_total = kwargs.pop("total_hrs", None)
self.num_of_voice = kwargs.pop("num_of_voice", None)
description = f"Common Voice speech to text dataset in {self.language} version {self.sub_version} of {self.date_of_snapshot}. The dataset comprises {self.validated_hr_total} of validated transcribed speech data from {self.num_of_voice} speakers. The dataset has a size of {self.size}"
super(CommonVoiceConfig, self).__init__(
name=name, version=datasets.Version("6.1.0", ""), description=description, **kwargs
)
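# Illustrative usage sketch (not part of the original script): once this builder is
# available under the dataset name "common_voice", a single language subset can be
# loaded by passing its config name to the `datasets` library, e.g.:
#
#     from datasets import load_dataset
#     cv_tr = load_dataset("common_voice", "tr", split="train")
#     print(cv_tr[0]["sentence"])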
class CommonVoice(datasets.GeneratorBasedBuilder):
DEFAULT_WRITER_BATCH_SIZE = 1000
BUILDER_CONFIGS = [
CommonVoiceConfig(
name=lang_id,
language=_LANGUAGES[lang_id]["Language"],
sub_version=_LANGUAGES[lang_id]["Version"],
date=_LANGUAGES[lang_id]["Date"],
size=_LANGUAGES[lang_id]["Size"],
val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
)
for lang_id in _LANGUAGES.keys()
]
def _info(self):
features = datasets.Features(
{
"client_id": datasets.Value("string"),
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=48_000),
"sentence": datasets.Value("string"),
"up_votes": datasets.Value("int64"),
"down_votes": datasets.Value("int64"),
"age": datasets.Value("string"),
"gender": datasets.Value("string"),
"accent": datasets.Value("string"),
"locale": datasets.Value("string"),
"segment": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
task_templates=[
AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="sentence")
],
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
archive = dl_manager.download(_DATA_URL.format(self.config.name))
path_to_data = "/".join(["cv-corpus-6.1-2020-12-11", self.config.name])
path_to_clips = "/".join([path_to_data, "clips"])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "train.tsv"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "test.tsv"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "dev.tsv"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name="other",
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "other.tsv"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name="validated",
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "validated.tsv"]),
"path_to_clips": path_to_clips,
},
),
datasets.SplitGenerator(
name="invalidated",
gen_kwargs={
"files": dl_manager.iter_archive(archive),
"filepath": "/".join([path_to_data, "invalidated.tsv"]),
"path_to_clips": path_to_clips,
},
),
]
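# Note added for clarity: the generator below makes a single pass over the tar
# archive and relies on the split's TSV metadata file appearing before the audio
# clips it describes; the assert inside _generate_examples guards that assumption.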
def _generate_examples(self, files, filepath, path_to_clips):
"""Yields examples."""
data_fields = list(self._info().features.keys())
# audio is not a header of the csv files
data_fields.remove("audio")
path_idx = data_fields.index("path")
all_field_values = {}
metadata_found = False
for path, f in files:
if path == filepath:
metadata_found = True
lines = f.readlines()
headline = lines[0].decode("utf-8")
column_names = headline.strip().split("\t")
assert (
column_names == data_fields
), f"The file should have {data_fields} as column names, but has {column_names}"
for line in lines[1:]:
field_values = line.decode("utf-8").strip().split("\t")
# set full path for mp3 audio file
audio_path = "/".join([path_to_clips, field_values[path_idx]])
all_field_values[audio_path] = field_values
elif path.startswith(path_to_clips):
assert metadata_found, "Found audio clips before the metadata TSV file."
if not all_field_values:
break
if path in all_field_values:
field_values = all_field_values[path]
# if data is incomplete, fill with empty values
if len(field_values) < len(data_fields):
field_values += (len(data_fields) - len(field_values)) * ["''"]
result = {key: value for key, value in zip(data_fields, field_values)}
# set audio feature
result["audio"] = {"path": path, "bytes": f.read()}
yield path, result
| 31.161162
| 292
| 0.514604
|
517047f73c4cf8c74006afd0adac0bc9ff5c86e4
| 2,930
|
py
|
Python
|
synthesis/evaluation/visual.py
|
myrthewouters/synthetic_data_generation
|
b64189fcb8cc4fbba9f307dcfd7d8301e7a09da1
|
[
"MIT"
] | null | null | null |
synthesis/evaluation/visual.py
|
myrthewouters/synthetic_data_generation
|
b64189fcb8cc4fbba9f307dcfd7d8301e7a09da1
|
[
"MIT"
] | null | null | null |
synthesis/evaluation/visual.py
|
myrthewouters/synthetic_data_generation
|
b64189fcb8cc4fbba9f307dcfd7d8301e7a09da1
|
[
"MIT"
] | null | null | null |
"""Module with functions for visualizing the difference between datasets"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from lifelines import KaplanMeierFitter
from synthesis.evaluation import metrics
def plot_feature_distances(x1, x2, labels=None):
x1 = x1.copy().astype(str).replace(r'\.0', '', regex=True)
x2 = x2.copy().astype(str).replace(r'\.0', '', regex=True)
if labels is None:
labels = ['x1', 'x2']
features, feature_distances = metrics.feature_distances(x1, x2)
y_pos = np.arange(len(features))
plt.barh(y_pos, feature_distances)
plt.yticks(y_pos, features)
plt.xlim(0, 1)
plt.xlabel('Feature distance')
plt.title('Distances per feature')
plt.tight_layout()
plt.show()
def compare_value_counts(x1, x2):
x1 = x1.copy().astype(str)
x2 = x2.copy().astype(str)
for c in x1.columns:
counts_X, counts_y = x1[c].value_counts(dropna=False).align(x2[c].value_counts(dropna=False), join='outer',
axis=0, fill_value=0)
df_compare = pd.concat([counts_X, counts_y], axis=1).astype(int)
df_compare.columns = ['x1', 'x2']
print('='*100)
print(c)
print(df_compare)
def plot_kmf_comparison(datasets, dataset_names, T_varname, E_varname, G_varname):
"""
Plot side-by-side Kaplan-Meier curves of the input datasets
Parameters
----------
datasets: list of input data
dataset_names: names of input data - note: len equal to datasets
T_varname: time variable name
E_varname: event variable name
G_varname: grouping variable name
Returns
-------
Kaplan-Meier plot of the input datasets
"""
if not isinstance(datasets, list):
datasets = [datasets]
if not isinstance(dataset_names, list):
dataset_names = [dataset_names]
assert len(datasets) == len(dataset_names), "input datasets and dataset_names are of different lengths"
figsize = (8 * len(datasets), 7)
fig, ax = plt.subplots(1, len(datasets), figsize=figsize, sharey=True)
sns.set(font_scale=1.5)
# sns.set_context('paper', rc={"lines.linewidth": 1.2})
sns.despine()
palette = ['#0d3d56', '#006887', '#0098b5', '#00cbde', '#00ffff']
for X, X_name, ax_cur in zip(datasets, dataset_names, ax):
T = X[T_varname].astype(float)
E = X[E_varname].astype(float)
kmf = KaplanMeierFitter()
unique_values = np.sort(X[G_varname].unique())
for g, color in zip(unique_values, palette):
mask = (X[G_varname] == g)
kmf.fit(T[mask], event_observed=E[mask], label=g)
ax_cur = kmf.plot(ax=ax_cur, color=color)
ax_cur.legend(title=G_varname)
ax_cur.set_title('Survival Analysis C50 - {} Data'.format(X_name))
ax_cur.set_ylim(0, 1)
plt.tight_layout()
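# Minimal usage sketch (the dataframes and column names below are illustrative, not
# defined in this module):
#
#     plot_kmf_comparison([df_real, df_synth], ['Real', 'Synthetic'],
#                         T_varname='time', E_varname='event', G_varname='group')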
| 33.295455
| 115
| 0.63413
|
2780bdefaa1b43d4f36384587543524302924bd3
| 5,957
|
py
|
Python
|
src/global_capitalization_system.py
|
phseiff/gender-render
|
51db02193b46b38284290c4f5bde500208bf1605
|
[
"MIT"
] | 24
|
2021-01-08T00:32:50.000Z
|
2022-03-06T11:20:26.000Z
|
src/global_capitalization_system.py
|
phseiff/gender-render
|
51db02193b46b38284290c4f5bde500208bf1605
|
[
"MIT"
] | 6
|
2021-01-08T00:19:25.000Z
|
2021-03-22T16:27:40.000Z
|
src/global_capitalization_system.py
|
phseiff/gender-render
|
51db02193b46b38284290c4f5bde500208bf1605
|
[
"MIT"
] | null | null | null |
"""
Implementation of the *global capitalization system* (a concept explained in-depth in the specification), which
analyses and understands capitalization of a tag's context value, stores it into a special section of the tag, and
re-applies said capitalization to the value the tag is rendered to.
The types of capitalization are as follows:
name | example
----------------|--------
lower-case | foobar
capitalized | Foobar
all-caps | FOOBAR
studly-caps | FoObAR
alt-studly-caps | fOoBaR
"""
from collections import namedtuple, OrderedDict
from typing import Dict, NamedTuple, Callable, Union, List
from . import errors
# Helper functions
# - these functions are tolerant versions of str.isupper() and str.islower(), in that they don't return False if there
# are no cased characters, and evaluate to True for empty strings.
def isupper(s: str) -> bool:
return all((not c.islower()) for c in s)
def islower(s: str) -> bool:
return all((not c.isupper()) for c in s)
# Define types for capitalization methods:
CapitalizationMethod = namedtuple("CapitalizationMethod", "is_applied apply")
CapitalizationMethodTable = Dict[str, NamedTuple("CapitalizationMethod", [
("apply", Callable[[str], str]),
("is_applied", Callable[[str], bool])
])]
# Define Capitalization methods:
# note that dicts are ordered in their insertion order starting with Python 3.6!
# the functions in this dict assume that `is_applied` is never used on "", since context values are never empty.
# `apply` does take this possibility into account, though.
CAPITALIZATION_TABLE: CapitalizationMethodTable = OrderedDict([
("lower-case", CapitalizationMethod(
apply=lambda s: s.lower(),
is_applied=lambda s: islower(s)
)),
("capitalized", CapitalizationMethod(
apply=lambda s: (s[0].upper() + s[1:].lower()) if len(s) > 0 else "",
is_applied=lambda s: isupper(s[0]) and islower(s[1:])
)),
("all-caps", CapitalizationMethod(
apply=lambda s: s.upper(),
is_applied=lambda s: isupper(s)
)),
("studly-caps", CapitalizationMethod(
apply=lambda s: "".join([(s[i].lower() if i % 2 else s[i].upper()) for i in range(len(s))]),
is_applied=lambda s: all((islower(s[i]) if i % 2 else isupper(s[i])) for i in range(len(s)))
)),
("alt-studly-caps", CapitalizationMethod(
apply=lambda s: "".join([(s[i].upper() if i % 2 else s[i].lower()) for i in range(len(s))]),
is_applied=lambda s: all((isupper(s[i]) if i % 2 else islower(s[i])) for i in range(len(s)))
)),
])
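# Illustrative sanity check (not part of the original module): every method applied
# to a sample string should satisfy its own is_applied predicate.
#
#     for _name, _method in CAPITALIZATION_TABLE.items():
#         assert _method.is_applied(_method.apply("foObar")), _name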
# Functions:
def get_capitalization_from_context_value(context_value: str) -> str:
"""Returns the capitalization type of a context value, and raises an error if it matches none."""
for capitalization_type_name, capitalization_method in CAPITALIZATION_TABLE.items():
if capitalization_method.is_applied(context_value):
return capitalization_type_name
raise errors.InvalidCapitalizationError("A tag has the context value '" + context_value + "'.\n"
+ "This does not fit any allowed capitalization type.\n"
+ "Refer to the specification to learn how to use capitalization in tags.")
def assign_and_check_capitalization_value_of_tag(tag: Dict[str, Union[str, List[str]]])\
-> Dict[str, Union[str, List[str]]]:
"""Assigns a tag (with one context value) (given in the same format as the format they have in
`parse_templates.ParsedTemplateRefined`) a capitalization value if it does not have one yet, and raises an
`errors.InvalidCapitalizationError` should it find issues with the tag's capitalization value or its context value's
capitalization, and makes the tag's context value lower-case."""
# raise an error if capitalization value is specified, yet invalid:
if "capitalization" in tag and tag["capitalization"] not in CAPITALIZATION_TABLE:
raise errors.InvalidCapitalizationError("A tag has an explicitly specified capitalization value of '"
+ tag["capitalization"] + "'.\nThis is not a valid value.")
# raise an error if context value is capitalized, yet in an invalid way:
capitalization_of_context_value = get_capitalization_from_context_value(tag["context"])
# raise an error if the capitalization value is specified, plus implied using semantic sugar:
if capitalization_of_context_value != "lower-case" and "capitalization" in tag:
raise errors.InvalidCapitalizationError("A tag explicitly specified its capitalization value as '"
+ tag["capitalization"] + "', but has already has capitalization in"
+ " its context value '" + tag["context"] + "'.")
# assign the tag a capitalization value derived from semantic sugar if it doesn't have one already:
if "capitalization" not in tag:
tag["capitalization"] = capitalization_of_context_value
# make context value lower-case:
tag["context"] = tag["context"].lower()
return tag # <-- technically not necessary since this function is supposed to run in-place, but it eases testing.
def apply_capitalization_to_tag(tag: Dict[str, Union[str, List[str]]]) -> str:
"""Applies capitalization to tag's context value (which should store the rendered version of the tag) in accordance
to the tag's capitalization value, and returns the correctly capitalized finished context value.
This is supposed to be called during the rendering process when the tag has its rendered value, minus proper
capitalization, already stored in its context value (a design decision that isn't made by the spec, but by this
implementation since it comes in handy)."""
return CAPITALIZATION_TABLE[tag["capitalization"]].apply(tag["context"])
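# Hedged end-to-end sketch (the tag dict layout follows the docstrings above):
#
#     tag = {"context": "ADDRESSEE"}
#     tag = assign_and_check_capitalization_value_of_tag(tag)  # records "all-caps"
#     tag["context"] = "mx. doe"                               # stand-in rendered value
#     apply_capitalization_to_tag(tag)                         # -> "MX. DOE"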
| 47.277778
| 120
| 0.684573
|
00eee4b2d78240cbe6ebd532b7e145ecada1a6e4
| 1,147
|
py
|
Python
|
sites/engadget.py
|
callback-demons/callback_news_collectors
|
0361c0101c782c233cc1c4dd615fbe0697bee79b
|
[
"MIT"
] | 1
|
2020-06-13T23:07:46.000Z
|
2020-06-13T23:07:46.000Z
|
sites/engadget.py
|
callback-demons/callback_news_collectors
|
0361c0101c782c233cc1c4dd615fbe0697bee79b
|
[
"MIT"
] | 1
|
2021-03-31T19:49:58.000Z
|
2021-03-31T19:49:58.000Z
|
sites/engadget.py
|
callback-demons/callback-news-collectors
|
0361c0101c782c233cc1c4dd615fbe0697bee79b
|
[
"MIT"
] | null | null | null |
from html2text import html2text
import requests
from bs4 import BeautifulSoup
def extract(url):
print('Engadget extract {}'.format(url))
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html5lib')
article = soup.find('article', {'class': 'c-gray-1'})
content_blocks = article.findAll('div', {'class': 'article-text'})
contents = list()
for element in content_blocks:
p_tag = element.findAll('p')
for item in p_tag:
contents.append(item)
contents_blocks = ''.join(element.decode() for element in contents)
content = html2text(contents_blocks) + '\n\n*[Extracted from engadget](' + url + ' "source")*'
tag = article.find('div')
if tag.has_attr('id') and tag.attrs['id'] == 'page_body':
description = contents[0].get_text()
image = soup.find('img', {'class': 'stretch-img'})['src']
else:
description = article.find('div', {'class': 'mt-15'}).get_text()
image = article.find('img', {'class': 'stretch-img'})['src']
return {
'image': image,
'content': content,
'description': description
}
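# Illustrative call (the URL is a placeholder, not a real article):
#
#     article = extract("https://www.engadget.com/example-article.html")
#     print(article["description"])
#     print(article["content"][:200])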
| 33.735294
| 98
| 0.614647
|
36a7420d739f96dd63bd0fd3bb16547a5c968fdc
| 6,656
|
py
|
Python
|
examples/script_eBGP.py
|
netcloudag/AciL3outModular
|
fb8e887bbed37e8458b4cad31eb4f8aebd256c2f
|
[
"MIT"
] | 1
|
2020-06-04T14:02:36.000Z
|
2020-06-04T14:02:36.000Z
|
examples/script_eBGP.py
|
netcloud/AciL3outModular
|
fb8e887bbed37e8458b4cad31eb4f8aebd256c2f
|
[
"MIT"
] | null | null | null |
examples/script_eBGP.py
|
netcloud/AciL3outModular
|
fb8e887bbed37e8458b4cad31eb4f8aebd256c2f
|
[
"MIT"
] | null | null | null |
# ==============================================================================
from L3Out import ModularL3Out
from configparser import ConfigParser
import os
import sys
import requests
import json
import openpyxl
import warnings
import urllib3
warnings.filterwarnings("ignore", category=DeprecationWarning)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# ==============================================================================
# ==============================================================================
# changeDir
# ==============================================================================
if os.path.dirname(sys.argv[0]) != "":
os.chdir(os.path.dirname(sys.argv[0]))
# ==============================================================================
# init ACI & login
# ==============================================================================
config = ConfigParser()
config.read("settings.conf")
apicIp = config.get("APIC", "ip")
apicUrl = 'https://' + apicIp + '/api/'
apicUser = config.get("APIC", "user")
apicPw = config.get("APIC", "password")
# create reqeuests session
session = requests.Session()
# create credentials structure
userPass = json.dumps({'aaaUser': {'attributes': {'name': apicUser, 'pwd': apicPw}}})
# login to API
response = session.post(apicUrl + 'aaaLogin.json', data=userPass, verify=False, timeout=10)
token = None
# Error Handling
if response.status_code == 401:
raise Exception('Unauthorized')
# Raise an exception for all other 4xx and 5xx status codes
elif response.raise_for_status():
raise Exception('Error occurred: ' + str(response.status_code))
else:
token = response.json()['imdata'][0]['aaaLogin']['attributes']['token']
# ==============================================================================
# postJson
# ==============================================================================
def postJson(jsonData, url='mo.json'):
response = session.post(apicUrl + url, verify=False, data=json.dumps(jsonData, sort_keys=True))
if response.status_code == 200:
return response.status_code
elif response.status_code == 400:
return '400: ' + response.json()['imdata'][0]['error']['attributes']['text']
else:
return response.status_code
# ==============================================================================
# check excel-content
# ==============================================================================
def check_set(content):
if content != "None" and content != "" and content is not None:
return True
else:
return False
def get_excel_data():
xlsxFo = "L3OUT_eBGP.xlsx"
wbObj = openpyxl.load_workbook(xlsxFo, data_only=True)
sheet = wbObj.get_sheet_by_name("L3OUT")
L3Out = []
Keys = []
for row in range(2, sheet.max_row + 1):
if row == 2:
for col in range(1,sheet.max_column+1):
value = sheet.cell(row=row, column=col).value
if value != None: Keys.append(value)
# print(Keys)
else:
if check_set(str(sheet["A" + str(row)].value)):
line = {}
for x in range(len(Keys)):
line[Keys[x]]=str(sheet.cell(row=row, column=x+1).value)
L3Out.append(line)
return L3Out
def get_remote_bgp_ip(ip):
BGP_IP = ip.partition("/")[0].split(".")
BGP_IP[3] = int(BGP_IP[3]) - 1
return '.'.join(str(e) for e in BGP_IP)
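# Worked example (illustrative address): the remote peer IP is derived by stripping
# the prefix length and decrementing the last octet by one.
#
#     assert get_remote_bgp_ip("10.10.30.2/30") == "10.10.30.1"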
if __name__ == '__main__':
Tenant = "ModularL3Out"
Contract = "CT-PERMIT-ALL"
Path1 = "topology/pod-1/node-111"
Paths1 = "topology/pod-1/paths-111/pathep-[eth1/50]"
Path2 = "topology/pod-1/node-112"
Paths2 = "topology/pod-1/paths-112/pathep-[eth1/50]"
Path3 = "topology/pod-1/node-211"
Paths3 = "topology/pod-1/paths-211/pathep-[eth1/50]"
Path4 = "topology/pod-1/node-212"
Paths4 = "topology/pod-1/paths-212/pathep-[eth1/50]"
Lo1 = "10.10.20.128"
Lo2 = "10.10.20.129"
Lo3 = "10.10.20.142"
Lo4 = "10.10.20.143"
BGP_PWD = "SecureTest"
AST1 = "65164"
LocalAS = "65162"
L3Out_Data = get_excel_data()
for item in L3Out_Data:
L3Out2Post = ModularL3Out.L3Out(item["NAME"], Tenant)
L3Out2Post.setl3domain(item["Dom"])
L3Out2Post.setVrf(item["VRF"])
L3Out2Post.setExternalEpg(item["Ext-EPG"].split(":")[0])
L3Out2Post.externalEpg().setConsumeContract(Contract)
L3Out2Post.externalEpg().setProvideContract(Contract)
L3Out2Post.externalEpg().setL3ExtSubnet()
L3Out2Post.setEnableBgp()
L3Out2Post.setNodeProfile(item["NP"])
L3Out2Post.nodeProfile().setNode(Path1, Lo1)
L3Out2Post.nodeProfile().setNode(Path2, Lo2)
L3Out2Post.nodeProfile().setNode(Path3, Lo3)
L3Out2Post.nodeProfile().setNode(Path4, Lo4)
L3Out2Post.nodeProfile().setInt(item["INT"])
L3Out2Post.nodeProfile().Int().setIntNode(item["TYP"], "vlan-" + item["VLAN1"], item["P-IP1"] + "/30", Paths1)
RemoteIP = get_remote_bgp_ip(item["P-IP1"])
L3Out2Post.nodeProfile().Int().intNode().setBgpPeer(RemoteIP, BGP_PWD)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpAS(AST1)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpLocalAS(LocalAS)
L3Out2Post.nodeProfile().Int().setIntNode(item["TYP"], "vlan-" + item["VLAN2"], item["P-IP2"] + "/30", Paths2)
RemoteIP = get_remote_bgp_ip(item["P-IP2"])
L3Out2Post.nodeProfile().Int().intNode().setBgpPeer(RemoteIP, BGP_PWD)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpAS(AST1)
L3Out2Post.nodeProfile().Int().setIntNode(item["TYP"], "vlan-" + item["VLAN3"], item["P-IP3"] + "/30", Paths3)
RemoteIP = get_remote_bgp_ip(item["P-IP3"])
L3Out2Post.nodeProfile().Int().intNode().setBgpPeer(RemoteIP, BGP_PWD)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpAS(AST1)
L3Out2Post.nodeProfile().Int().setIntNode(item["TYP"], "vlan-" + item["VLAN4"], item["P-IP4"] + "/30", Paths4)
RemoteIP = get_remote_bgp_ip(item["P-IP4"])
L3Out2Post.nodeProfile().Int().intNode().setBgpPeer(RemoteIP, BGP_PWD)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpAS(AST1)
L3Out2Post.nodeProfile().Int().intNode().bgpPeer().setBgpLocalAS(LocalAS)
print('Connecting To: ' + apicUrl + ' and posting: \n' + str(L3Out2Post.tostring()))
response = postJson(L3Out2Post.tostring())
print('Response-Code is: ' + str(response))
| 35.593583
| 118
| 0.565805
|
7fe798aa4817f8076bce09d62d9dac41e9a91ef3
| 1,665
|
py
|
Python
|
podcast_convert/config.py
|
MikeDacre/m4a_to_mp3_podcast
|
87071a2787d887e23e6f1602c21340128f35e33c
|
[
"MIT"
] | null | null | null |
podcast_convert/config.py
|
MikeDacre/m4a_to_mp3_podcast
|
87071a2787d887e23e6f1602c21340128f35e33c
|
[
"MIT"
] | null | null | null |
podcast_convert/config.py
|
MikeDacre/m4a_to_mp3_podcast
|
87071a2787d887e23e6f1602c21340128f35e33c
|
[
"MIT"
] | null | null | null |
"""
Get podcast config info.
Created: 2016-07-04 13:02
Last modified: 2016-02-04 13:45
"""
import os
try:
import configparser as cp
except ImportError:
import ConfigParser as cp
REQUIRED_CONFIG = {'url': 'The URL of the initial feed',
'folder': 'The folder to write to'}
# Python2/3 user interaction
try:
input = raw_input  # Python 2
except NameError:
pass  # Python 3: built-in input() already returns a string
def get_config(file):
"""Return a config parser class with config.
If file is empty, request defaults from user and write to file.
:file: The config file to use.
:returns: Dictionary of config params
"""
file = os.path.abspath(os.path.expanduser(file))
conf = cp.ConfigParser()
if os.path.isfile(file):
conf.read(file)
for i in REQUIRED_CONFIG:
if i not in conf['DEFAULT']:
conf['DEFAULT'][i] = input('{} is a required config parameter.\n'.format(i) +
'{}: '.format(REQUIRED_CONFIG[i]))
else:
for arg, help in REQUIRED_CONFIG.items():
conf['DEFAULT'][arg] = input(help + ': ')
conf['DEFAULT']['folder'] = os.path.abspath(
os.path.expanduser(conf['DEFAULT']['folder']))
if not os.path.exists(conf['DEFAULT']['folder']):
os.mkdir(conf['DEFAULT']['folder'])
if not os.path.isdir(conf['DEFAULT']['folder']):
raise ConfigError('{} is not a folder.'.format(
conf['DEFAULT']['folder']))
with open(file, 'w') as fout:
conf.write(fout)
config = {}
for key, value in conf['DEFAULT'].items():
config[key] = value
return config
class ConfigError(Exception):
"""Failure in the config."""
pass
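# Hypothetical usage (the file name is illustrative): prompts for 'url' and 'folder'
# on the first run, then re-reads them from the written config file afterwards.
#
#     settings = get_config('~/.podcast_convert.conf')
#     print(settings['url'], settings['folder'])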
| 28.220339
| 76
| 0.599399
|
f1d960654fbad3aef9c0fe266b23c8479ccc6b12
| 1,111
|
py
|
Python
|
test/test_variable_set_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_variable_set_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
test/test_variable_set_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_swagger_client.models.variable_set_resource import VariableSetResource # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestVariableSetResource(unittest.TestCase):
"""VariableSetResource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVariableSetResource(self):
"""Test VariableSetResource"""
# FIXME: construct object with mandatory attributes with example values
# model = octopus_deploy_swagger_client.models.variable_set_resource.VariableSetResource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.097561
| 119
| 0.747975
|
962ff499f7459d5d003128c8a99b1bd7b17acc8e
| 14,318
|
py
|
Python
|
src/application/modules/cb_index.py
|
maskayman/cbprometheus_python
|
ff1163512eac0449055113a130a2a216630666be
|
[
"Apache-2.0"
] | 9
|
2019-10-15T22:46:34.000Z
|
2021-07-09T16:07:34.000Z
|
src/application/modules/cb_index.py
|
maskayman/cbprometheus_python
|
ff1163512eac0449055113a130a2a216630666be
|
[
"Apache-2.0"
] | 76
|
2019-10-08T17:14:24.000Z
|
2021-03-04T21:03:22.000Z
|
src/application/modules/cb_index.py
|
maskayman/cbprometheus_python
|
ff1163512eac0449055113a130a2a216630666be
|
[
"Apache-2.0"
] | 8
|
2019-09-27T04:05:49.000Z
|
2021-11-30T14:07:31.000Z
|
import sys
from application import application
if sys.version_info[0] == 3:
from .cb_utilities import *
from . import cb_cluster, cb_bucket
else:
from cb_utilities import *
import cb_cluster, cb_bucket
class view():
def __init__(self):
self.methods = ["GET"]
self.name = "indexes"
self.filters = [{"variable":"nodes","type":"default","name":"nodes_list","value":[]},
{"variable":"buckets","type":"default","name":"bucket_list","value":[]},
{"variable":"indexes","type":"default","name":"indexes_list","value":[]},
{"variable":"result_set","type":"int","name":"num_samples","value":60}]
self.comment = '''This is the method used to access Index service metrics'''
self.service_identifier = "index"
self.inputs = [{"value":"user"},
{"value":"passwrd"},
{"value":"cluster_values['serviceNodes']['{}']".format(self.service_identifier)},
{"value": "index_buckets"},
{"value":"cluster_values['clusterName']"},
{"value":"result_set"}]
self.exclude = False
def run(url="", user="", passwrd="", index=[], buckets=[], nodes=[], num_samples = 60, result_set=60):
'''Entry point for getting the metrics for the index nodes'''
url = check_cluster(url, user, passwrd)
metrics = []
cluster_values = cb_cluster._get_cluster(url, user, passwrd, [])
if num_samples != 60:
result_set = num_samples
if len(buckets) == 0:
buckets = cb_bucket._get_index_buckets(url, user, passwrd)
if len(nodes) == 0:
if len(cluster_values['serviceNodes']['index']) > 0 and len(buckets) > 0:
# get the index replica stats
index_replicas = _get_index_replica_counts(cluster_values['serviceNodes']['index'][0], user, passwrd, cluster_values['clusterName'])
metrics = metrics + index_replicas['metrics']
index_metrics = _get_metrics(
user,
passwrd,
cluster_values['serviceNodes']['index'],
buckets,
cluster_values['clusterName'],
result_set)
metrics = metrics + index_metrics['metrics']
else:
if len(buckets) > 0:
# get the index replica stats
index_replicas = _get_index_replica_counts(nodes[0], user, passwrd, cluster_values['clusterName'])
metrics = metrics + index_replicas['metrics']
index_metrics = _get_metrics(
user,
passwrd,
nodes,
buckets,
cluster_values['clusterName'],
result_set)
metrics = metrics + index_metrics['metrics']
return metrics
def _get_metrics(user, passwrd, nodes, buckets, cluster_name="", result_set=60):
'''Gets the metrics for the indexes nodes, then gets the metrics for each index'''
index_info = {}
index_info['metrics'] = []
auth = basic_authorization(user, passwrd)
sample_list = get_sample_list(result_set)
# get cluster index info
for node in nodes:
node_hostname = node.split(":")[0]
_index_url = "http://{}:8091/pools/default/buckets/@index/nodes/{}:8091/stats".format(
node_hostname, node_hostname)
try:
i_json = rest_request(auth, _index_url)
for record in i_json['op']['samples']:
samples_count = len(i_json['op']['samples'][record])
if record != "timestamp":
# if the sample list value is greater than the samples count, just use the last sample
if samples_count < sample_list[0]:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"type=\"index-service\"}} {} {}".format(
record,
cluster_name,
node_hostname,
i_json['op']['samples'][record][samples_count - 1],
i_json['op']['samples']['timestamp'][samples_count - 1]
)
)
else:
for idx, datapoint in enumerate(i_json['op']['samples'][record]):
if idx in sample_list:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"type=\"index-service\"}} {} {}".format(
record,
cluster_name,
node_hostname,
datapoint,
i_json['op']['samples']['timestamp'][idx]))
except Exception as e:
print("index base: " + str(e))
for node in nodes:
node_hostname = node.split(":")[0]
for bucket in buckets:
try:
index_info_url = "http://{}:8091/pools/default/buckets/@index-{}/" \
"nodes/{}:8091/stats".format(node_hostname,
bucket,
node_hostname)
ii_json = rest_request(auth, index_info_url)
for record in ii_json['op']['samples']:
name = ""
index_type = ""
try:
split_record = record.split("/")
samples_count = len(ii_json['op']['samples'][record])
if len(split_record) == 3:
name = (split_record[1]).replace("+", "_")
index_type = (split_record[2]).replace("+", "_")
if isinstance(ii_json['op']['samples'][record], type([])):
# if the sample list value is greater than the samples count, just use the last sample
if samples_count < sample_list[0]:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\","
"index=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {} {}".format(
index_type,
cluster_name,
node_hostname,
name,
bucket,
ii_json['op']['samples'][record][samples_count - 1],
ii_json['op']['samples']['timestamp'][samples_count - 1]
)
)
else:
for idx, datapoint in enumerate(ii_json['op']['samples'][record]):
if idx in sample_list:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\","
"index=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {} {}".format(
index_type,
cluster_name,
node_hostname,
name,
bucket,
datapoint,
ii_json['op']['samples']['timestamp'][idx]
)
)
else:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"index=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {}".format(
index_type,
cluster_name,
node_hostname,
name,
bucket,
ii_json['op']['samples'][record]
)
)
elif len(split_record) == 2:
index_type = split_record[1]
if isinstance(ii_json['op']['samples'][record], type([])):
# if the sample list value is greater than the samples count, just use the last sample
if samples_count < sample_list[0]:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {} {}".format(
index_type,
cluster_name,
node_hostname,
bucket,
ii_json['op']['samples'][record][samples_count - 1],
ii_json['op']['samples']['timestamp'][samples_count - 1]
)
)
else:
for idx, datapoint in enumerate(ii_json['op']['samples'][record]):
if idx in sample_list:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {} {}".format(
index_type,
cluster_name,
node_hostname,
bucket,
datapoint,
ii_json['op']['samples']['timestamp'][idx]
)
)
else:
index_info['metrics'].append(
"{} {{cluster=\"{}\", node=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {}".format(
index_type,
cluster_name,
node_hostname,
bucket,
ii_json['op']['samples'][record]
)
)
else:
next
except Exception as e:
print("index specific: " + str(e))
except Exception as e:
print("index: " + str(e))
return index_info
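# Shape of the emitted strings (metric name, labels and values are illustrative):
# each entry is a Prometheus exposition-format sample followed by a millisecond
# timestamp, e.g.
#
#     index_memory_used {cluster="demo", node="cb01.local", type="index-service"} 524288 1602892800000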
def _get_index_replica_counts(url, user, passwrd, cluster_name=""):
'''Get a list of all the indexes and their replica counts'''
replica_info = {}
replica_info['metrics'] = []
auth = basic_authorization(user, passwrd)
try:
_url = "http://{}:8091/indexStatus".format(url.split(":")[0])
result = rest_request(auth, _url)
for _index in result['indexes']:
try:
num_replica = 0
# in CB6.5+ there is a stat called numReplica, use it
try:
num_replica = _index['numReplica']
except:
pass
# if it is an earlier version of CB we need to get the number of replicas from the index definition
try:
num_replica = _index['definition'].split('\"num_replica\":')[1].split(' ')[0]
if not num_replica.isdigit():
num_replica = 0
except:
pass
# only output the index_num_replica stat for non-replica indexes, if we're in cluster mode, or if we're in local mode
# and the local nodes Couchbase hostname is in the indexes hosts list
if ('(replica' not in _index['index']) and (application.config['CB_EXPORTER_MODE'] == "local" and url in _index['hosts']) or application.config['CB_EXPORTER_MODE'] == "cluster":
replica_info['metrics'].append(
"index_num_replica {{cluster=\"{}\", node=\"{}\","
"index=\"{}\", "
"bucket=\"{}\", "
"type=\"index\"}} {}".format(
cluster_name,
_index['hosts'][0].split(":")[0],
_index['index'],
_index['bucket'],
num_replica))
except Exception as e:
print("error: {}, {}".format(_index, str(e)))
except Exception as e:
print("indexReplicas: {}".format(str(e)))
return replica_info
| 50.77305
| 193
| 0.377846
|
f20941658fabc42dbc3c196dfd98f0aa3438d08a
| 19,511
|
py
|
Python
|
Imbalanced-Class/Santander/train.py
|
Twinparadox/DeepLearning
|
2746c22e987bb509eaa8257744f0d5248a1f1264
|
[
"MIT"
] | null | null | null |
Imbalanced-Class/Santander/train.py
|
Twinparadox/DeepLearning
|
2746c22e987bb509eaa8257744f0d5248a1f1264
|
[
"MIT"
] | null | null | null |
Imbalanced-Class/Santander/train.py
|
Twinparadox/DeepLearning
|
2746c22e987bb509eaa8257744f0d5248a1f1264
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 19:14:22 2020
@author: nww73
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
import gc
import os
warnings.filterwarnings('ignore')
from datetime import datetime
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.utils import class_weight, resample
from keras.models import Sequential
from keras.layers import Dense, Dropout, GaussianNoise, Conv1D
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D, TimeDistributed
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers import Flatten, Reshape
from keras.layers import Embedding, Input
from keras.models import Sequential
from keras.models import load_model
from keras import optimizers
from keras.regularizers import L1L2
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical, np_utils
from keras.callbacks import Callback, EarlyStopping
from keras.initializers import RandomUniform
import keras.backend as K
import tensorflow as tf
from xgboost import XGBClassifier
scaler = StandardScaler()
def plot_new_feature_distribution(df1, df2, label1, label2, features):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(2,4,figsize=(18,8))
for feature in features:
i += 1
plt.subplot(2,4,i)
sns.kdeplot(df1[feature], bw=0.5,label=label1)
sns.kdeplot(df2[feature], bw=0.5,label=label2)
plt.xlabel(feature, fontsize=11)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# define roc_callback, inspired by https://github.com/keras-team/keras/issues/6050#issuecomment-329996505
def auc_roc(y_true, y_pred):
# any tensorflow metric
value, update_op = tf.contrib.metrics.streaming_auc(y_pred, y_true)
# find all variables created for this metric
metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]
# Add metric variables to GLOBAL_VARIABLES collection.
# They will be initialized for new session.
for v in metric_vars:
tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
# force to update metric values
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
def resampling(df_train, ratio):
train_freq = df_train['target'].value_counts()
print(train_freq)
train_freq_mean = train_freq[1]
# Under & Over Sampling store_nbr
df_list = []
target_max = 2
multiple = ratio
for i in range(0, target_max):
df_list.append(df_train[df_train['target']==i])
for i in range(0, target_max):
if i==0:
df_list[i] = df_list[i].sample(n=int(train_freq_mean*multiple), random_state=123, replace=True)
else:
df_list[i] = df_list[i].sample(n=train_freq_mean, random_state=123, replace=True)
df_sampling_train = pd.concat(df_list)
train_freq = df_sampling_train['target'].value_counts()
return pd.DataFrame(df_sampling_train)
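# Intended effect (illustrative): with ratio=2 the majority class (target==0) is
# resampled to twice the minority-class count and the minority class to exactly its
# own count, so the returned frame has a fixed 2:1 class balance.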
def DNN(train, test):
X_train = train.drop(['ID_code', 'target'], axis=1)
gc.collect()
X_columns = X_train.columns
Y_columns = ['target']
Y_train = train['target']
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
test_size=0.10, stratify=Y_train,
random_state=123, shuffle=True)
X_train = pd.DataFrame(X_train, columns=X_columns)
Y_train = pd.DataFrame(Y_train, columns=Y_columns)
print(X_train.describe())
X_train = pd.concat([X_train, Y_train], axis=1)
print(X_train.describe())
#X_train = resampling(X_train, 2)
Y_train = X_train['target']
X_train = X_train.drop('target', axis=1)
print(Y_train.value_counts())
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
d_class_weights = {0:1.0, 1:1.0}
optimizer=optimizers.SGD(lr=0.01)
print(Y_train)
model=Sequential()
model.add(Dense(32, input_dim=X_train.shape[1], activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', auc_roc])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=100,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_valid, Y_valid, batch_size=1000)
print(score)
Y_pred = model.predict(X_valid)
Y_pred = np.where(Y_pred > 0.5, 1, 0)
print(confusion_matrix(Y_valid, Y_pred))
print(classification_report(Y_valid, Y_pred, labels=[0, 1]))
def CNN1D(train, test):
X_train = train.drop(['ID_code', 'target'], axis=1)
gc.collect()
X_columns = X_train.columns
Y_columns = ['target']
Y_train = train['target']
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
test_size=0.10, stratify=Y_train,
random_state=123, shuffle=True)
X_train = pd.DataFrame(X_train, columns=X_columns)
Y_train = pd.DataFrame(Y_train, columns=Y_columns)
print(X_train.describe())
X_train = pd.concat([X_train, Y_train], axis=1)
print(X_train.describe())
X_train = resampling(X_train, 1)
Y_train = X_train['target']
X_train = X_train.drop('target', axis=1)
print(Y_train.value_counts())
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
d_class_weights = {0:1.0, 1:1.0}
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_valid = X_valid.reshape(X_valid.shape[0], X_valid.shape[1], 1)
#hyperparameters
input_dimension = 226
learning_rate = 0.0025
momentum = 0.85
hidden_initializer = RandomUniform(seed=123)
dropout_rate = 0.3
optimizer=optimizers.Adam()
# create model
model = Sequential()
model.add(Conv1D(nb_filter=32, filter_length=3, input_shape=X_train.shape[1:3], activation='relu'))
model.add(Conv1D(nb_filter=16, filter_length=1, activation='relu'))
model.add(Flatten())
model.add(Dropout(dropout_rate))
model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1, kernel_initializer=hidden_initializer, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', auc_roc])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=10,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_valid, Y_valid, batch_size=100)
print(score)
Y_pred = model.predict(X_valid)
Y_pred = np.where(Y_pred > 0.5, 1, 0)
#Y_pred = np.argmax(Y_pred, axis=1).reshape(-1,1)
#Y_test = np.argmax(Y_test, axis=1).reshape(-1,1)
print(confusion_matrix(Y_valid, Y_pred))
print(classification_report(Y_valid, Y_pred, labels=[0, 1]))
ID_test = test['ID_code'].values
test = test.drop('ID_code', axis=1)
test = scaler.transform(test)
test = test.reshape(test.shape[0], test.shape[1], 1)
pred = model.predict(test)
result = pd.DataFrame({"ID_code": ID_test})
result["target"] = pred
result.to_csv("submission.csv", index=False)
def CNN2D(train, test):
X_train = train.drop(['ID_code', 'target'], axis=1)
gc.collect()
X_columns = X_train.columns
Y_columns = ['target']
Y_train = train['target']
X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train,
test_size=0.10, stratify=Y_train,
random_state=123, shuffle=True)
X_train = pd.DataFrame(X_train, columns=X_columns)
Y_train = pd.DataFrame(Y_train, columns=Y_columns)
print(X_train.describe())
X_train = pd.concat([X_train, Y_train], axis=1)
print(X_train.describe())
X_train = resampling(X_train, 1)
Y_train = X_train['target']
X_train = X_train.drop('target', axis=1)
print(Y_train.value_counts())
X_train = scaler.fit_transform(X_train)
X_valid = scaler.fit_transform(X_valid)
y_integers = Y_train
#print(y_integers)
class_weights = class_weight.compute_class_weight(None, np.unique(y_integers), y_integers)
print(class_weights)
d_class_weights = dict(enumerate(class_weights))
d_class_weights = {0:1.0, 1:1.0}
X_train = X_train.reshape(X_train.shape[0], 104, 6, 1)
X_valid = X_valid.reshape(X_valid.shape[0], 104, 6, 1)
#hyperparameters
input_dimension = 226
learning_rate = 0.0025
momentum = 0.85
hidden_initializer = RandomUniform(seed=123)
dropout_rate = 0.3
kernel_size = (3, 3)
strides = (1, 1)
optimizer=optimizers.Adam()
# create model
model = Sequential()
model.add(Conv2D(nb_filter=32, kernel_size = kernel_size, strides = strides,
input_shape=X_train.shape[1:4], activation='relu'))
model.add(Conv2D(nb_filter=16, kernel_size = kernel_size, strides = strides,
activation='relu'))
model.add(Flatten())
model.add(Dropout(dropout_rate))
model.add(Dense(128, input_dim=input_dimension, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dense(64, kernel_initializer=hidden_initializer, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(1, kernel_initializer=hidden_initializer, activation='sigmoid'))
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy', auc_roc])
model.summary()
history = model.fit(X_train, Y_train, batch_size=1000, epochs=10,
verbose=1, class_weight = d_class_weights,
validation_data=(X_valid, Y_valid), shuffle=True)
train_loss = history.history['loss']
val_loss = history.history['val_loss']
x_epochs = range(1, len(train_loss) + 1)
plt.plot(x_epochs, train_loss, 'b', label='Training loss')
plt.plot(x_epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.show()
score = model.evaluate(X_valid, Y_valid, batch_size=100)
print(score)
Y_pred = model.predict(X_valid)
Y_pred = np.where(Y_pred > 0.5, 1, 0)
#Y_pred = np.argmax(Y_pred, axis=1).reshape(-1,1)
#Y_test = np.argmax(Y_test, axis=1).reshape(-1,1)
print(confusion_matrix(Y_valid, Y_pred))
print(classification_report(Y_valid, Y_pred, labels=[0, 1]))
ID_test = test['ID_code'].values
test = test.drop('ID_code', axis=1)
test = scaler.transform(test)
test = test.reshape(test.shape[0], 104, 6, 1)
pred = model.predict(test)
result = pd.DataFrame({"ID_code": ID_test})
result["target"] = pred
result.to_csv("submission.csv", index=False)
def XGBRandom(train, test):
kfold = 3
param_comb = 20
skf = StratifiedKFold(n_splits=kfold, random_state=42, shuffle=True)
params = {
'min_child_weight': [1, 5, 10],
'gamma': [0.5, 1, 1.5, 2, 5],
'subsample': [0.4, 0.6, 0.8, 1.0],
'colsample_bytree': [0.6, 0.8, 1.0],
'max_depth': [3, 5, 7],
'n_estimator':[100, 300, 500],
'learning_rate':[0.01, 0.02, 0.03]
}
xgb = XGBClassifier(learning_rate = 0.01, n_estimator=300,
objective='binary:logistic', silent=True, nthread=1,
tree_method='gpu_hist', predictor='gpu_predictor',
num_boost_round=500, early_stopping_rounds=70)
X = train.drop(['ID_code', 'target'], axis=1)
y = train.target.values
test_df = test
test = test.drop('ID_code', axis=1)
random_search = RandomizedSearchCV(xgb, param_distributions=params,
n_iter=param_comb, scoring='roc_auc',
n_jobs=1, cv=skf.split(X,y),
verbose=1, random_state=1001)
random_search.fit(X, y)
print('\n All results:')
print(random_search.cv_results_)
print('\n Best estimator:')
print(random_search.best_estimator_)
print('\n Best normalized roc_auc score for %d-fold search with %d parameter combinations:' % (kfold, param_comb))
print(random_search.best_score_)
print('\n Best hyperparameters:')
print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv('xgb-random-grid-search-results-01.csv', index=False)
y_test = random_search.predict_proba(test)
results_df = pd.DataFrame(data={'ID_code':test_df['ID_code'], 'target':y_test[:,1]})
results_df.to_csv('submission.csv', index=False)
pred = random_search.best_estimator_.predict(X)
def XGBBest(train, test):
X = train.drop(['ID_code', 'target'], axis=1)
y = train.target.values
test_df = test
test = test.drop('ID_code', axis=1)
xgb = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.6,
early_stopping_rounds=70, gamma=2, learning_rate=0.03,
max_delta_step=0, max_depth=7, min_child_weight=10, missing=None,
n_estimator=500, n_estimators=100, n_jobs=1, nthread=1,
num_boost_round=500, objective='binary:logistic',
predictor='gpu_predictor', random_state=0, reg_alpha=0,
reg_lambda=1, scale_pos_weight=1, seed=None, silent=True,
subsample=0.8, tree_method='gpu_hist', verbosity=1)
xgb.fit(X, y)
y_test = xgb.predict_proba(test)
results_df = pd.DataFrame(data={'ID_code':test_df['ID_code'], 'target':y_test[:,1]})
results_df.to_csv('submission.csv', index=False)
if __name__ == '__main__':
df_train = pd.read_csv("data/train.csv", engine='c')
df_test = pd.read_csv("data/test.csv", engine='c')
print("train shape: ", df_train.shape)
print("test shape: ", df_test.shape)
print("df_train is null: ", df_train.isnull().sum().sum())
print("df_test is null: ", df_test.isnull().sum().sum())
# Feature Engineering
# Correlation
features = df_train.columns.values[2:202]
correlations = df_train[features].corr().abs().unstack().sort_values(kind="quicksort").reset_index()
correlations = correlations[correlations['level_0'] != correlations['level_1']]
print(correlations.head(10))
print(correlations.tail(10))
# Duplicate check
features = df_train.columns.values[2:202]
unique_max_train = []
unique_max_test = []
for feature in features:
values = df_train[feature].value_counts()
unique_max_train.append([feature, values.max(), values.idxmax()])
values = df_test[feature].value_counts()
unique_max_test.append([feature, values.max(), values.idxmax()])
dup_train = np.transpose((pd.DataFrame(unique_max_train, columns=['Feature', 'Max duplicates', 'Value'])).
sort_values(by = 'Max duplicates', ascending=False).head(15))
dup_test = np.transpose((pd.DataFrame(unique_max_test, columns=['Feature', 'Max duplicates', 'Value'])).
sort_values(by = 'Max duplicates', ascending=False).head(15))
idx = features = df_train.columns.values[2:202]
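    # Row-wise aggregate statistics over the 200 raw feature columns, added to both train and test.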
for df in [df_test, df_train]:
df['sum'] = df[idx].sum(axis=1)
df['min'] = df[idx].min(axis=1)
df['max'] = df[idx].max(axis=1)
df['mean'] = df[idx].mean(axis=1)
df['std'] = df[idx].std(axis=1)
df['skew'] = df[idx].skew(axis=1)
df['kurt'] = df[idx].kurtosis(axis=1)
df['med'] = df[idx].median(axis=1)
t0 = df_train.loc[df_train['target'] == 0]
t1 = df_train.loc[df_train['target'] == 1]
features = df_train.columns.values[202:]
plot_new_feature_distribution(t0, t1, 'target: 0', 'target: 1', features)
features = df_train.columns.values[202:]
plot_new_feature_distribution(df_train, df_test, 'train', 'test', features)
features = [c for c in df_train.columns if c not in ['ID_code', 'target']]
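    # Add rounded copies (to 2 and 1 decimal places) of every raw feature to both train and test.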
for feature in features:
df_train['r2_'+feature] = np.round(df_train[feature], 2)
df_test['r2_'+feature] = np.round(df_test[feature], 2)
df_train['r1_'+feature] = np.round(df_train[feature], 1)
df_test['r1_'+feature] = np.round(df_test[feature], 1)
#DNN(df_train, df_test)
#CNN1D(df_train, df_test)
CNN2D(df_train, df_test)
#XGBRandom(df_train, df_test)
#XGBBest(df_train, df_test)
| 37.449136
| 118
| 0.649582
|
148f187374be8779d5ef3799014bc600678117c5
| 1,240
|
py
|
Python
|
professionnel/migrations/0007_auto_20200915_0205.py
|
john591/m243
|
8603be4a3a931fc8689d2f2ba11748841d7a60ce
|
[
"MIT"
] | null | null | null |
professionnel/migrations/0007_auto_20200915_0205.py
|
john591/m243
|
8603be4a3a931fc8689d2f2ba11748841d7a60ce
|
[
"MIT"
] | null | null | null |
professionnel/migrations/0007_auto_20200915_0205.py
|
john591/m243
|
8603be4a3a931fc8689d2f2ba11748841d7a60ce
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-09-15 00:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('professionnel', '0006_auto_20200915_0141'),
]
operations = [
migrations.AlterField(
model_name='userprofil',
name='commune',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='userprofil',
name='profession',
field=models.CharField(choices=[('Webmaster', 'Webmaster'), ('Technicien help desk', 'Technicien help desk'), ('Graphiste', 'Graphiste'), ('Developpeur web', 'Developpeur web'), ('Medecin', 'Medecin'), ('Avocat', 'Avocat'), ('Journaliste', 'Journaliste'), ('Chantre', 'Chantre'), ('cuisinier', 'cuisinier'), ('Architecte', 'Architecte'), ('gynecologue', 'gynecologue'), ('infirmier', 'infirmier'), ('Artiste musicien', 'Artiste musicien'), ('Consultant en audit informatique', 'Consultant en audit informatique')], max_length=200),
),
migrations.AlterField(
model_name='userprofil',
name='quartier',
field=models.CharField(max_length=100),
),
]
| 42.758621
| 544
| 0.604839
|
7f150d0ab1181896060906d8b63dae060fd9847a
| 389
|
py
|
Python
|
openstack_dashboard/enabled/_1330_project_snapshots_panel.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 930
|
2015-01-04T08:06:03.000Z
|
2022-03-13T18:47:13.000Z
|
openstack_dashboard/enabled/_1330_project_snapshots_panel.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 106
|
2019-01-18T03:06:55.000Z
|
2019-11-29T05:06:18.000Z
|
openstack_dashboard/enabled/_1330_project_snapshots_panel.py
|
hemantsonawane95/horizon-apelby
|
01a5e72219aeca8c1451701ee85e232ed0618751
|
[
"Apache-2.0"
] | 1,040
|
2015-01-01T18:48:28.000Z
|
2022-03-19T08:35:18.000Z
|
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'snapshots'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'volumes'
# Python panel class of the PANEL to be added.
ADD_PANEL = 'openstack_dashboard.dashboards.project.snapshots.panel.Snapshots'
| 38.9
| 78
| 0.781491
|
633050517350d6a858ee5124aa7e5eec8b2f1a8a
| 193
|
py
|
Python
|
small-problems/fibonacci-sequence/fib2.py
|
Prateek2506/classic-cs-problems
|
fa0e3c86fb7cd478888bb90006f7379cc6c7a38b
|
[
"MIT"
] | null | null | null |
small-problems/fibonacci-sequence/fib2.py
|
Prateek2506/classic-cs-problems
|
fa0e3c86fb7cd478888bb90006f7379cc6c7a38b
|
[
"MIT"
] | null | null | null |
small-problems/fibonacci-sequence/fib2.py
|
Prateek2506/classic-cs-problems
|
fa0e3c86fb7cd478888bb90006f7379cc6c7a38b
|
[
"MIT"
] | null | null | null |
count = 0
def fib2(n: int) -> int:
global count
count = count + 1
if n < 2:
return n
return fib2(n-1) + fib2(n-2)
print(fib2(20))
print('number of calls = ', count)
| 13.785714
| 34
| 0.544041
|
171d4e53f0b0d384a2e40f178e47e2e0fbf96e2f
| 109
|
py
|
Python
|
tech_gallery_bot/commands/__init__.py
|
ciandt/tech-gallery-chat-bot
|
f4c83ab626d829b8c8ce6dd549156224a034aa6d
|
[
"MIT"
] | 1
|
2020-02-19T14:03:25.000Z
|
2020-02-19T14:03:25.000Z
|
tech_gallery_bot/commands/__init__.py
|
ciandt/tech-gallery-chat-bot
|
f4c83ab626d829b8c8ce6dd549156224a034aa6d
|
[
"MIT"
] | null | null | null |
tech_gallery_bot/commands/__init__.py
|
ciandt/tech-gallery-chat-bot
|
f4c83ab626d829b8c8ce6dd549156224a034aa6d
|
[
"MIT"
] | 1
|
2020-03-31T15:11:35.000Z
|
2020-03-31T15:11:35.000Z
|
"""Commands"""
from .about import About
from .me import Me
from .skills import Skills
from .user import User
| 18.166667
| 26
| 0.752294
|
e933514cb9d5359a4a25da02dbb74ac839480921
| 5,397
|
py
|
Python
|
tests/webkit_time.py
|
sydp/dfdatetime
|
fbb4ed335861a99e0c87802c51e9d1d58c276b98
|
[
"Apache-2.0"
] | 17
|
2016-04-12T16:26:14.000Z
|
2022-02-18T22:27:36.000Z
|
tests/webkit_time.py
|
sydp/dfdatetime
|
fbb4ed335861a99e0c87802c51e9d1d58c276b98
|
[
"Apache-2.0"
] | 149
|
2016-03-10T22:20:13.000Z
|
2022-02-19T08:47:56.000Z
|
tests/webkit_time.py
|
sydp/dfdatetime
|
fbb4ed335861a99e0c87802c51e9d1d58c276b98
|
[
"Apache-2.0"
] | 15
|
2016-03-10T06:44:27.000Z
|
2022-02-07T12:53:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the WebKit time implementation."""
import decimal
import unittest
from dfdatetime import webkit_time
class WebKitTimeEpochTest(unittest.TestCase):
"""Tests for the WebKit time epoch."""
def testInitialize(self):
"""Tests the __init__ function."""
webkit_epoch = webkit_time.WebKitTimeEpoch()
self.assertIsNotNone(webkit_epoch)
class WebKitTimeTest(unittest.TestCase):
"""Tests for the WebKit timestamp."""
# pylint: disable=protected-access
def testProperties(self):
"""Tests the properties."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
self.assertEqual(webkit_time_object.timestamp, 12926120791546875)
webkit_time_object = webkit_time.WebKitTime()
self.assertIsNone(webkit_time_object.timestamp)
def testGetNormalizedTimestamp(self):
"""Tests the _GetNormalizedTimestamp function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
normalized_timestamp = webkit_time_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281647191.546875'))
webkit_time_object = webkit_time.WebKitTime(
time_zone_offset=60, timestamp=12926120791546875)
normalized_timestamp = webkit_time_object._GetNormalizedTimestamp()
self.assertEqual(normalized_timestamp, decimal.Decimal('1281643591.546875'))
webkit_time_object = webkit_time.WebKitTime(timestamp=0x1ffffffffffffffff)
normalized_timestamp = webkit_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
webkit_time_object = webkit_time.WebKitTime()
normalized_timestamp = webkit_time_object._GetNormalizedTimestamp()
self.assertIsNone(normalized_timestamp)
def testCopyFromDateTimeString(self):
"""Tests the CopyFromDateTimeString function."""
webkit_time_object = webkit_time.WebKitTime()
webkit_time_object.CopyFromDateTimeString('2010-08-12')
self.assertEqual(webkit_time_object._timestamp, 12926044800000000)
self.assertEqual(webkit_time_object._time_zone_offset, 0)
webkit_time_object.CopyFromDateTimeString('2010-08-12 21:06:31')
self.assertEqual(webkit_time_object._timestamp, 12926120791000000)
self.assertEqual(webkit_time_object._time_zone_offset, 0)
webkit_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875')
self.assertEqual(webkit_time_object._timestamp, 12926120791546875)
self.assertEqual(webkit_time_object._time_zone_offset, 0)
webkit_time_object.CopyFromDateTimeString(
'2010-08-12 21:06:31.546875-01:00')
self.assertEqual(webkit_time_object._timestamp, 12926120791546875)
self.assertEqual(webkit_time_object._time_zone_offset, -60)
webkit_time_object.CopyFromDateTimeString(
'2010-08-12 21:06:31.546875+01:00')
self.assertEqual(webkit_time_object._timestamp, 12926120791546875)
self.assertEqual(webkit_time_object._time_zone_offset, 60)
webkit_time_object.CopyFromDateTimeString('1601-01-02 00:00:00')
self.assertEqual(webkit_time_object._timestamp, 86400 * 1000000)
self.assertEqual(webkit_time_object._time_zone_offset, 0)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
date_time_string = webkit_time_object.CopyToDateTimeString()
self.assertEqual(date_time_string, '2010-08-12 21:06:31.546875')
webkit_time_object = webkit_time.WebKitTime()
date_time_string = webkit_time_object.CopyToDateTimeString()
self.assertIsNone(date_time_string)
def testCopyToDateTimeStringISO8601(self):
"""Tests the CopyToDateTimeStringISO8601 function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
date_time_string = webkit_time_object.CopyToDateTimeStringISO8601()
self.assertEqual(date_time_string, '2010-08-12T21:06:31.546875+00:00')
def testGetDate(self):
"""Tests the GetDate function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
date_tuple = webkit_time_object.GetDate()
self.assertEqual(date_tuple, (2010, 8, 12))
webkit_time_object = webkit_time.WebKitTime()
date_tuple = webkit_time_object.GetDate()
self.assertEqual(date_tuple, (None, None, None))
def testGetDateWithTimeOfDay(self):
"""Tests the GetDateWithTimeOfDay function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
date_with_time_of_day_tuple = webkit_time_object.GetDateWithTimeOfDay()
self.assertEqual(date_with_time_of_day_tuple, (2010, 8, 12, 21, 6, 31))
webkit_time_object = webkit_time.WebKitTime()
date_with_time_of_day_tuple = webkit_time_object.GetDateWithTimeOfDay()
self.assertEqual(
date_with_time_of_day_tuple, (None, None, None, None, None, None))
def testGetTimeOfDay(self):
"""Tests the GetTimeOfDay function."""
webkit_time_object = webkit_time.WebKitTime(timestamp=12926120791546875)
time_of_day_tuple = webkit_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (21, 6, 31))
webkit_time_object = webkit_time.WebKitTime()
time_of_day_tuple = webkit_time_object.GetTimeOfDay()
self.assertEqual(time_of_day_tuple, (None, None, None))
if __name__ == '__main__':
unittest.main()
| 37.22069
| 80
| 0.780248
|
2d2a57955d8637f7ef74b9cca52cab6c461dbe9a
| 6,129
|
py
|
Python
|
memrise/core/modules/actions/db_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
memrise/core/modules/actions/db_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
memrise/core/modules/actions/db_actions.py
|
kolesnikov-bn/django-memrise-scraper
|
00dddb53f04ce2f794fe3611ea97190ef8265079
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List, ClassVar, TYPE_CHECKING
from memrise.core.helpers import generate_custom_id
from memrise.core.modules.actions.base import Actions
from memrise.models import Course, Level, Word
if TYPE_CHECKING:
from memrise.core.domains.entities import CourseEntity, LevelEntity, WordEntity
class DBCourseActions(Actions):
def create(self, entities: List[CourseEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых курсов{self.postfix}"
)
courses = []
for item in entities:
courses.append(
Course(
id=item.id,
name=item.name,
url=item.url,
difficult=item.difficult,
num_things=item.num_words,
num_levels=item.num_levels,
difficult_url=item.difficult_url,
)
)
Course.objects.bulk_create(courses)
def update(self, entities: List[CourseEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление курсов{self.postfix}")
courses = []
for item in entities:
courses.append(
Course(
id=item.id,
name=item.name,
url=item.url,
difficult=item.difficult,
num_things=item.num_words,
num_levels=item.num_levels,
difficult_url=item.difficult_url,
is_disable=False,
)
)
Course.objects.bulk_update(
courses,
[
"name",
"url",
"difficult",
"num_things",
"num_levels",
"difficult_url",
"is_disable",
],
)
def equal(self, entities: List[CourseEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Курсы без изменений{self.postfix}", is_mute=True
)
def delete(self, entities: List[CourseEntity]) -> None:
        # Courses are not deleted, only disabled, so that it is possible to come back to them later.
self.reporter.report(entities, f"{self.prefix}Отключение курсов{self.postfix}")
courses = []
for item in entities:
courses.append(Course(id=item.id, is_disable=True))
Course.objects.bulk_update(
courses, ["is_disable"],
)
        # <editor-fold desc="Deleting the course from the DB">
# self.reporter.report(entities, f"{self.prefix}Удаление курсов{self.postfix}")
#
# courses = []
# for item in entities:
# courses.append(item.id)
#
# Course.objects.filter(id__in=courses).delete()
# </editor-fold>
class DBLevelActions(Actions):
prefix: ClassVar[str] = "Курс $course_id --> "
def create(self, entities: List[LevelEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых уровней{self.postfix}"
)
levels = []
for item in entities:
levels.append(
Level(
id=item.id,
name=item.name,
number=item.number,
course_id=item.course_id,
)
)
Level.objects.bulk_create(levels)
def update(self, entities: List[LevelEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление уровней{self.postfix}")
levels = []
for item in entities:
levels.append(Level(id=item.id, course_id=item.course_id, name=item.name))
Level.objects.bulk_update(levels, ["name"])
def equal(self, entities: List[LevelEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Уровни без изменений{self.postfix}", is_mute=True
)
def delete(self, entities: List[LevelEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Удаление уровней{self.postfix}")
levels = []
for item in entities:
levels.append(item.id)
Level.objects.filter(id__in=levels).delete()
class DBWordActions(Actions):
prefix: ClassVar[str] = "Уровень $level_id --> "
def create(self, entities: List[WordEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Добавление новых слов{self.postfix}"
)
words = []
for item in entities:
try:
                # If the id already exists, this is a duplicate, so assign it a generated id.
Word.objects.get(id=item.id)
item_id = generate_custom_id()
except Word.DoesNotExist:
item_id = item.id
words.append(
Word(
id=item_id,
level_id=item.level_id,
word_a=item.word_a,
word_b=item.word_b,
is_learned=item.is_learned
)
)
Word.objects.bulk_create(words)
def update(self, entities: List[WordEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Обновление слов{self.postfix}")
words = []
for item in entities:
words.append(Word(id=item.id, word_a=item.word_a, word_b=item.word_b, is_learned=item.is_learned))
        # TODO: add tests; an error in the data update path went uncaught here!!!
Word.objects.bulk_update(words, ["word_a", "word_b"])
def equal(self, entities: List[WordEntity]) -> None:
self.reporter.report(
entities, f"{self.prefix}Слова без изменений{self.postfix}", is_mute=True
)
def delete(self, entities: List[WordEntity]) -> None:
self.reporter.report(entities, f"{self.prefix}Удаление слов{self.postfix}")
words = []
for item in entities:
words.append(item.id)
Word.objects.filter(id__in=words).delete()
| 32.089005
| 110
| 0.559635
|
091baa76ea791f3f441bd6d1b45bbaacde8e8567
| 696
|
py
|
Python
|
pytests/test_edit_deployment.py
|
gozynta/jmespath_demo
|
82e190c6bb426ea23d6975f0d14b93ec0881487d
|
[
"Apache-2.0"
] | null | null | null |
pytests/test_edit_deployment.py
|
gozynta/jmespath_demo
|
82e190c6bb426ea23d6975f0d14b93ec0881487d
|
[
"Apache-2.0"
] | null | null | null |
pytests/test_edit_deployment.py
|
gozynta/jmespath_demo
|
82e190c6bb426ea23d6975f0d14b93ec0881487d
|
[
"Apache-2.0"
] | null | null | null |
import pathlib
import edit_deployment
import yaml
SCRIPT_DIR = pathlib.Path(__file__).parent.absolute()
def test_without_jmespath():
# Load fixture
with open(SCRIPT_DIR.joinpath("edited_deployment.yaml")) as yml:
test_deployment = yaml.safe_load(yml)
# Run function and compare results
deployment = edit_deployment.without_jmespath()
assert deployment == test_deployment
def test_with_jmespath():
# Load fixture
with open(SCRIPT_DIR.joinpath("edited_deployment.yaml")) as yml:
test_deployment = yaml.safe_load(yml)
# Run function and compare results
deployment = edit_deployment.with_jmespath()
assert deployment == test_deployment
| 25.777778
| 68
| 0.742816
|
6ef89f8d7501338bf3b33d0411be52169724b733
| 6,444
|
py
|
Python
|
Tirex/__init__.py
|
hjanetzek/TirexStache
|
f2f6e4a856a7c0d37db00470c6531ac897d1ff81
|
[
"BSD-3-Clause"
] | 2
|
2016-10-30T19:00:58.000Z
|
2017-08-14T16:03:31.000Z
|
Tirex/__init__.py
|
hjanetzek/TirexStache
|
f2f6e4a856a7c0d37db00470c6531ac897d1ff81
|
[
"BSD-3-Clause"
] | null | null | null |
Tirex/__init__.py
|
hjanetzek/TirexStache
|
f2f6e4a856a7c0d37db00470c6531ac897d1ff81
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Tirex/TileStache backend
based on https://github.com/mdaines/cover/blob/master/vendor/tirex_backend.rb
@author: Hannes Janetzek
'''
import os
import socket
import pipes
import signal
import sys
import logging
import re
import time
import tempfile
# This must match 'metatile_columns/rows' in tirex.conf
# and METATILE(x) in render_config.h of mod_tile (8 is hardcoded)
METATILE_SIZE = 2
# layer 'name' defined in layers.cfg must match tirex map 'name'
TILESTACHE_CFG = "/etc/tirex/renderer/stache_layers.cfg"
TILEDIR = "/var/lib/tirex/tiles"
# As defined by Tirex::MAX_PACKET_SIZE
MAX_PACKET_SIZE = 512
MATCH_PAIRS = re.compile(r"(.*)=(.*)\n")
class Tirex:
def __init__(self, backend, testing = False):
self.metatile_size = METATILE_SIZE
self.backend = backend
self.config = {}
self.config["name"] = os.environ.get("TIREX_BACKEND_NAME")
self.config["port"] = os.environ.get("TIREX_BACKEND_PORT")
self.config["syslog_facility"] = os.environ.get("TIREX_BACKEND_SYSLOG_FACILITY")
self.config["map_configs"] = os.environ.get("TIREX_BACKEND_MAP_CONFIGS")
self.config["alive_timeout"] = os.environ.get("TIREX_BACKEND_ALIVE_TIMEOUT")
self.config["pipe_fileno"] = os.environ.get("TIREX_BACKEND_PIPE_FILENO")
self.config["socket_fileno"] = os.environ.get("TIREX_BACKEND_SOCKET_FILENO")
self.config["debug"] = os.environ.get("TIREX_BACKEND_DEBUG")
sys.stderr.write(str(self.config))
self.debug = testing or self.config["debug"] == "1"
if testing:
self.tiledir = "%s/tiles" % os.getcwd()
return
self.tiledir = TILEDIR
self.parent_fd = int(self.config["pipe_fileno"])
self.running = True
self.timeout = int(self.config["alive_timeout"])
def run(self):
def stop(signum, frame):
self.running = False
print "stopping"
sys.exit(0)
if (self.config["socket_fileno"] != None):
sys.stderr.write("receive on fd %s" % self.config["socket_fileno"]);
fd = int(self.config["socket_fileno"])
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_DGRAM)
else:
sys.stderr.write("receive on port %s" % self.config["port"]);
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(self.config["port"])
sock.bind(("127.0.0.1", port))
signal.signal(signal.SIGHUP, stop)
signal.signal(signal.SIGINT, stop)
sock.settimeout(self.timeout)
while self.running:
# send alive message to backend manager
os.write(self.parent_fd, "alive")
try:
data, addr = sock.recvfrom(MAX_PACKET_SIZE)
except socket.timeout:
continue
except socket.error, e:
sys.stderr.write("recv: %s" %e);
continue
response = self.process_message(data)
sock.sendto(response, addr)
def process_message(self, message):
if self.debug:
sys.stderr.write(">\n%s\n" %message)
request = deserialize_msg(message)
try:
if request["type"] == "metatile_render_request":
response = self.process_render_request(request)
else:
raise Exception("Unknown request type: %s" % request["type"])
except Exception, e:
response = { "id" : request["id"],
"result": "fail",
"errmsg" : e }
response = serialize_msg(response)
sys.stderr.write("<\n%s\n" %str(response))
return response
def process_render_request(self, request):
layer = request["map"]
x = int(request["x"])
y = int(request["y"])
z = int(request["z"])
tiledir = "%s/%s" %(self.tiledir, layer)
filename = "%s/%s" % (tiledir, xyz_to_path(x, y, z) + ".meta")
start = time.time()
try:
# in-memory temp file for 512k
tmp = tempfile.SpooledTemporaryFile(1024 * 512)
self.backend.write(layer, x, y, z, self.metatile_size, tmp)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError, e:
sys.stderr.write("could not create %s, %s\n" %(dirname, e))
pass
tmp.seek(0)
with open(filename, 'w') as f:
f.write(tmp.read())
finally:
tmp.close()
os.chmod(filename, 0644)
elapsed = time.time() - start
if self.debug:
sys.stderr.write("time: %f, %d bytes - %s -\n" %(elapsed, os.path.getsize(filename), filename))
return { "type": "metatile_render_request",
"result": "ok",
"id": request["id"],
"render_time": str(int(elapsed)) }
def serialize_msg(msg):
return '\n'.join(["%s=%s" % (k, v) for (k, v) in msg.iteritems()])
def deserialize_msg(string):
return dict(MATCH_PAIRS.findall(string))
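# A small illustrative decode of the Tirex wire format (newline-terminated
# key=value pairs), e.g.
#   deserialize_msg("id=42\ntype=metatile_render_request\n")
# returns {'id': '42', 'type': 'metatile_render_request'}; a pair without a
# trailing newline is not picked up by MATCH_PAIRS.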
def xyz_to_path(x, y, z):
hashes = []
for _ in xrange(0, 5):
hashes.append(((x & 0x0f) << 4) | (y & 0x0f))
x >>= 4
y >>= 4
return "%u/%s" % (z, "%u/%u/%u/%u/%u" % tuple(reversed(hashes)))
if __name__ == '__main__':
import Backend
if os.environ.get("TIREX_BACKEND_NAME") == None:
# not started from tirex: just testing layers
b = Backend.TileStacheBackend("%s/cfg/renderer/stache_layers.cfg" % os.getcwd())
t = Tirex(b, True)
request = {'map': 'proxy', 'prio': '1',
'y': '0', 'x': '0', 'z': '2',
'type': 'metatile_render_request',
'id': '1375054837_19944984'}
t.process_render_request(request)
exit(0)
else:
# run the backend process
b = Backend.TileStacheBackend(TILESTACHE_CFG)
t = Tirex(b)
t.run()
| 31.281553
| 107
| 0.53802
|
ef2f269b6538a65d43e2b33ece4ebb7db49d9d47
| 1,835
|
py
|
Python
|
test/mixed_test.py
|
lechat/multiconf
|
c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3
|
[
"BSD-3-Clause"
] | null | null | null |
test/mixed_test.py
|
lechat/multiconf
|
c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3
|
[
"BSD-3-Clause"
] | null | null | null |
test/mixed_test.py
|
lechat/multiconf
|
c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot, ConfigItem, ConfigBuilder
from ..decorators import nested_repeatables, named_as, repeat, required
from ..envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_configbuilders_alternating_with_items_repeatable_multilevel_required():
@repeat()
@required('some_attribute')
@named_as('inners')
class InnerItem(ConfigItem):
def __init__(self, name):
super(InnerItem, self).__init__(name=name)
class InnerBuilder(ConfigBuilder):
def __init__(self):
super(InnerBuilder, self).__init__()
def build(self):
InnerItem('innermost')
@repeat()
@nested_repeatables('inners')
@required('another_attribute')
class MiddleItem(ConfigItem):
def __init__(self, name):
super(MiddleItem, self).__init__(id=name)
@required('builder_attribute')
class MiddleBuilder(ConfigBuilder):
def __init__(self, name):
super(MiddleBuilder, self).__init__(name=name)
def build(self):
with MiddleItem(name=self.name) as mi:
mi.setattr('another_attribute', default=9)
class OuterBuilder(ConfigBuilder):
def __init__(self):
super(OuterBuilder, self).__init__()
def build(self):
with MiddleBuilder('base') as mb:
mb.builder_attribute = 1
with InnerBuilder() as ib:
ib.some_attribute = 1
@nested_repeatables('MiddleItems')
class OuterItem(ConfigItem):
pass
with ConfigRoot(prod, ef, name='myp') as cr:
with OuterItem():
OuterBuilder()
cr.json(builders=True)
# TODO
| 28.671875
| 80
| 0.639782
|
4c1ed1c960a58718923a28c8bc489d0e4f374927
| 153
|
py
|
Python
|
c__65.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
c__65.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
c__65.py
|
fhansmann/coding-challenges
|
eebb37565c72e05b77383c24e8273a1e4019b58e
|
[
"MIT"
] | null | null | null |
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-1)+fib(n-2)
x = int(input())
print(fib(x))
| 12.75
| 32
| 0.431373
|
f9d8d4419fb1a4e65d930afdb22c6fe5a3e686c2
| 8,050
|
py
|
Python
|
util.py
|
wilkice/PyTorch-1.1.0-YOLO-V3
|
e3bb341cc533f5f754685d442e8937289e133ad0
|
[
"MIT"
] | null | null | null |
util.py
|
wilkice/PyTorch-1.1.0-YOLO-V3
|
e3bb341cc533f5f754685d442e8937289e133ad0
|
[
"MIT"
] | null | null | null |
util.py
|
wilkice/PyTorch-1.1.0-YOLO-V3
|
e3bb341cc533f5f754685d442e8937289e133ad0
|
[
"MIT"
] | null | null | null |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA=True):
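    """Decode a raw YOLO detection feature map into box predictions.
    The (batch, anchors*(5+classes), grid, grid) output is reshaped to
    (batch, grid*grid*anchors, 5+classes); sigmoid is applied to the centre
    offsets, objectness and class scores, grid-cell offsets are added, the
    width/height are scaled by the anchors, and the box coordinates are
    multiplied by the stride so they are in input-image coordinates.
    """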
batch_size = prediction.size(0)
stride = inp_dim // prediction.size(2)
grid_size = inp_dim // stride
# TODO: grid_size ==prediction.size(2)?
bbox_attrs = 5 + num_classes
num_anchors = len(anchors)
prediction = prediction.view(
batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
prediction = prediction.transpose(1, 2).contiguous()
prediction = prediction.view(
batch_size, grid_size*grid_size*num_anchors, bbox_attrs)
anchors = [(a[0]/stride, a[1]/stride) for a in anchors]
prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])
grid = np.arange(grid_size)
a, b = np.meshgrid(grid, grid)
x_offset = torch.FloatTensor(a).view(-1, 1)
y_offset = torch.FloatTensor(b).view(-1, 1)
if CUDA:
x_offset = x_offset.cuda()
y_offset = y_offset.cuda()
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(
1, num_anchors).view(-1, 2).unsqueeze(0)
prediction[:, :, :2] += x_y_offset
anchors = torch.FloatTensor(anchors)
if CUDA:
anchors = anchors.cuda()
anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4]) * anchors
    # TODO: need to include the last one
prediction[:, :, 5:] = torch.sigmoid(prediction[:, :, 5:])
prediction[:, :, :4] *= stride
return prediction
def unique(tensor):
tensor_np = tensor.cpu().numpy()
unique_np = np.unique(tensor_np)
unique_tensor = torch.from_numpy(unique_np)
tensor_res = tensor.new(unique_tensor.shape)
tensor_res.copy_(unique_tensor)
return tensor_res
def bbox_iou(box1, box2):
"""
return: IoU of two boxes
"""
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# TODO: WHY +1
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1+1,
min=0) * torch.clamp(inter_rect_y2-inter_rect_y1+1, min=0)
    # TODO: WHY +1
b1_area = (b1_x2-b1_x1+1) * (b1_y2-b1_y1+1)
b2_area = (b2_x2-b2_x1+1) * (b2_y2-b2_y1+1)
iou = inter_area / (b1_area+b2_area-inter_area)
return iou
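# A quick sanity check of the IoU convention above (areas use the +1 pixel
# convention): boxes (0, 0, 9, 9) and (0, 0, 4, 9) have areas 100 and 50 with
# an overlap of 50, so
#   bbox_iou(torch.tensor([[0., 0., 9., 9.]]), torch.tensor([[0., 0., 4., 9.]]))
# returns tensor([0.5000]).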
def write_results(prediction, confidence, num_classes, nms_conf=0.4):
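    """Apply the objectness threshold and per-class NMS to raw predictions.
    Returns a (num_detections, 8) tensor with columns
    [batch_index, x1, y1, x2, y2, objectness, class_score, class_index],
    or 0 when no detection survives the thresholds.
    """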
# whether there is an object
conf_mask = (prediction[:, :, 4] > confidence).float().unsqueeze(2)
prediction = prediction * conf_mask
# Non-maximun Suppression
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
batch_size = prediction.size(0)
write = False
for index in range(batch_size):
image_pred = prediction[index]
max_conf, max_conf_score = torch.max(image_pred[:, 5:], 1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (image_pred[:, :5], max_conf, max_conf_score)
# print(image_pred[:,:5].size(), max_conf.size(), max_conf_score.size())
image_pred = torch.cat(seq, 1)
non_zero_index = torch.nonzero(image_pred[:, 4])
try:
image_pred_ = image_pred[non_zero_index.squeeze(), :].view(-1, 7)
except:
continue
if image_pred_.shape[0] == 0:
continue
img_classes = unique(image_pred_[:, -1])
for cls in img_classes:
cla_mask = image_pred_ * \
(image_pred_[:, -1] == cls).float().unsqueeze(1)
class_mask_index = torch.nonzero(cla_mask[:, -2]).squeeze()
image_pred_class = image_pred_[class_mask_index].view(-1, 7)
conf_sort_index = torch.sort(
image_pred_class[:, 4], descending=True)[1]
image_pred_class = image_pred_class[conf_sort_index]
idx = image_pred_class.size(0)
for i in range(idx):
try:
ious = bbox_iou(image_pred_class[i].unsqueeze(
0), image_pred_class[i+1:])
except ValueError:
break
except IndexError:
break
iou_mask = (ious < nms_conf).float().unsqueeze(1)
image_pred_class[i+1:] *= iou_mask
non_zero_index = torch.nonzero(
image_pred_class[:, 4]).squeeze()
image_pred_class = image_pred_class[non_zero_index].view(-1, 7)
batch_ind = image_pred_class.new(
image_pred_class.size(0), 1).fill_(index)
seq = batch_ind, image_pred_class
if not write:
output = torch.cat(seq, 1)
write = True
else:
out = torch.cat(seq, 1)
output = torch.cat((output, out))
try:
return output
except:
return 0
def letterbox_image(img, inp_dim):
'''resize image with unchanged aspect ratio using padding'''
img_w, img_h = img.shape[1], img.shape[0]
w, h = inp_dim
new_w = int(img_w * min(w/img_w, h/img_h))
new_h = int(img_h * min(w/img_w, h/img_h))
resized_image = cv2.resize(
img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
canvas = np.full((inp_dim[1], inp_dim[0], 3), 128)
canvas[(h-new_h)//2:(h-new_h)//2 + new_h, (w-new_w) //
2:(w-new_w)//2 + new_w, :] = resized_image
return canvas
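# For example, a 1280x720 frame letterboxed to (608, 608) is resized to
# 608x342 and pasted into a grey (value 128) canvas with 133 rows of padding
# above and below it.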
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
    Returns a (1, 3, inp_dim, inp_dim) float tensor scaled to [0, 1].
"""
img = (letterbox_image(img, (inp_dim, inp_dim)))
img = img[:, :, ::-1].transpose((2, 0, 1)).copy()
img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
return img
def load_classes(namesfile):
fp = open(namesfile, "r")
names = fp.read().split("\n")[:-1]
return names
def test_read_imgs(folder):
"""read imgs from folder: imgs
return:
img_list: list contains img name
img_data: list contains img data(3 * 608 * 608), rgb
img_size: list contains img width and height
"""
img_list = os.listdir(folder)
img_size = []
img_data = []
for i in range(len(img_list)):
img = cv2.imread(os.path.join(folder, img_list[i]) )
img_size.append(img.shape[:2])
img_data.append(img)
return img_list, img_data, img_size
def test_save_img(name, img_data, img_size, prediction, objs):
"""draw box on img and save to disk
Arguments:
name: img name
img_data: img metadata, the output of cv2.imread
img_size: tuple (height, width)
prediction: prediction output (n*8), n means the num of objects in one image
objs: total objects name in this image, list
"""
num = 0
height, width = img_size
for one_box in prediction:
box = one_box[1:5]
        # cv2 drawing functions expect plain integer pixel coordinates
        x1 = int(box[0] * width / 608)
        y1 = int(box[1] * height / 608)
        x2 = int(box[2] * width / 608)
        y2 = int(box[3] * height / 608)
obj = objs[num]
num += 1
cv2.rectangle(img_data, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(img_data, obj, (x1, y1-10),
cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 1, cv2.LINE_AA)
cv2.imwrite(name, img_data)
| 33.40249
| 87
| 0.593416
|
570767e7d972c186d373ba53acbded69d32398e0
| 17,148
|
py
|
Python
|
pgAdmin/browser/server_groups/servers/databases/external_tables/tests/test_external_tables_view.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/browser/server_groups/servers/databases/external_tables/tests/test_external_tables_view.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
pgAdmin/browser/server_groups/servers/databases/external_tables/tests/test_external_tables_view.py
|
WeilerWebServices/PostgreSQL
|
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
|
[
"PostgreSQL"
] | null | null | null |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
import sys
from pgadmin.browser.server_groups.servers.databases.external_tables import \
ExternalTablesView
from pgadmin.utils.route import BaseTestGenerator
if sys.version_info < (3, 3):
from mock import MagicMock, patch
else:
from unittest.mock import MagicMock, patch
class TestExternalTablesView(BaseTestGenerator):
scenarios = [
('#check_precondition When executing any http call, '
'it saves stores the connection and the manager in the class object',
dict(
test_type='check-precondition',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
),
manager=MagicMock(),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(rows=[])),
expected_manager_connection_to_be_called_with=dict(
did=2
),
)),
('#nodes When retrieving the children of external tables, '
'it return no child '
'and status 200',
dict(
test_type='children',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(rows=[])),
expected_make_json_response_called_with=dict(data=[]),
)),
('#nodes When retrieving the nodes '
'and the database does not have external tables, '
'it return no child nodes '
'and status 200',
dict(
test_type='nodes',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(rows=[])),
expect_render_template_called_with=os.path.join(
'sql/#gpdb#80323#', 'list.sql'),
expected_make_json_response_called_with=dict(
data=[],
status=200
),
)),
('#nodes When retrieving the nodes '
'and an error happens while executing the query, '
'it return an internal server error '
'and status 500',
dict(
test_type='nodes',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(False, 'Some error message'),
expect_render_template_called_with=os.path.join(
'sql/#gpdb#80323#', 'list.sql'),
expected_internal_server_error_called_with=dict(
errormsg='Some error message'
),
)),
('#nodes When retrieving the nodes '
'and the database has 2 external tables, '
'it return 2 child nodes '
'and status 200',
dict(
test_type='nodes',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(
rows=[
dict(
oid='oid1',
name='table_one'
),
dict(
oid='oid2',
name='table_two'
),
]
)),
expect_render_template_called_with=os.path.join(
'sql/#gpdb#80323#', 'list.sql'),
expected_make_json_response_called_with=dict(
data=[
{
'id': "external_table/oid1",
'label': 'table_one',
'icon': 'icon-external_table',
'inode': False,
'_type': 'external_table',
'_id': 'oid1',
'_pid': 2,
'module': 'pgadmin.node.external_table'
},
{
'id': "external_table/oid2",
'label': 'table_two',
'icon': 'icon-external_table',
'inode': False,
'_type': 'external_table',
'_id': 'oid2',
'_pid': 2,
'module': 'pgadmin.node.external_table'
}
],
status=200
),
)),
('#node When retrieving the information about 1 external table '
'and an error happens while executing the query, '
'it return an internal server error '
'and status 500',
dict(
test_type='node',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
external_table_id=11
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(False, 'Some error message'),
expect_render_template_called_with=dict(
template_name_or_list=os.path.join(
'sql/#gpdb#80323#', 'node.sql'),
external_table_id=11
),
expected_internal_server_error_called_with=dict(
errormsg='Some error message'
),
)),
('#node When retrieving the information about 1 external table '
'and table does not exist, '
'it return an error message '
'and status 404',
dict(
test_type='node',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
external_table_id=11
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(rows=[])),
expect_render_template_called_with=dict(
template_name_or_list=os.path.join(
'sql/#gpdb#80323#', 'node.sql'),
external_table_id=11
),
expected_make_json_response_called_with=dict(
data='Could not find the external table.',
status=404
),
)),
('#nodes When retrieving the information about 1 external table '
'and the table exists, '
'it return external node information '
'and status 200',
dict(
test_type='node',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
external_table_id=11
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(
rows=[
dict(
oid='oid1',
name='table_one'
),
dict(
oid='oid2',
name='table_two'
),
]
)),
expect_render_template_called_with=dict(
template_name_or_list=os.path.join(
'sql/#gpdb#80323#', 'node.sql'),
external_table_id=11
),
expected_make_json_response_called_with=dict(
data={
'id': "external_table/oid1",
'label': 'table_one',
'icon': 'icon-external_table',
'inode': False,
'_type': 'external_table',
'_id': 'oid1',
'_pid': 2,
'module': 'pgadmin.node.external_table'
},
status=200
),
)),
('#properties When retrieving the properties of a external table '
'and the table exists, '
'it return the properties '
'and status 200',
dict(
test_type='properties',
function_parameters=dict(
server_group_id=0,
server_id=1,
database_id=2,
external_table_id=11
),
manager=MagicMock(server_type='gpdb', sversion=80323),
connection=MagicMock(execute_2darray=MagicMock()),
execute_2darray_return_value=(True, dict(
rows=[dict(
urilocation='{http://someurl.com}',
execlocation=['ALL_SEGMENTS'],
fmttype='a',
fmtopts='delimiter \',\' null \'\' '
'escape \'"\' quote \'"\'',
command=None,
rejectlimit=None,
rejectlimittype=None,
errtblname=None,
errortofile=None,
pg_encoding_to_char='UTF8',
writable=False,
options=None,
distribution=None,
name='some_table',
namespace='public'
)]
)),
expect_render_template_called_with=dict(
template_name_or_list=os.path.join(
'sql/#gpdb#80323#', 'get_table_information.sql'),
table_oid=11
),
expected_make_response_called_with=dict(
response=dict(
name="some_table",
type='readable',
format_type='UTF8',
format_options='delimiter \',\' null \'\' '
'escape \'"\' quote \'"\'',
external_options=None,
command=None,
execute_on='all segments',
),
status=200
),
)),
]
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.get_driver')
def runTest(self, get_driver_mock):
self.__before_all(get_driver_mock)
if self.test_type == 'check-precondition':
self.__test_backend_support()
elif self.test_type == 'nodes':
self.__test_nodes()
elif self.test_type == 'node':
self.__test_node()
elif self.test_type == 'children':
self.__test_children()
elif self.test_type == 'properties':
self.__test_properties()
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.make_json_response')
def __test_children(self, make_json_response_mock):
self.manager.connection = MagicMock(return_value=self.connection)
external_tables_view = ExternalTablesView(cmd='')
external_tables_view.children(**self.function_parameters)
make_json_response_mock.assert_called_with(
**self.expected_make_json_response_called_with
)
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.render_template')
def __test_backend_support(self, _):
self.manager.connection = MagicMock(return_value=self.connection)
external_tables_view = ExternalTablesView(cmd='')
external_tables_view.nodes(**self.function_parameters)
self.manager.connection.assert_called_with(
**self.expected_manager_connection_to_be_called_with
)
self.assertEquals(self.manager, external_tables_view.manager)
self.assertEquals(self.connection, external_tables_view.connection)
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.render_template')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.make_json_response')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.internal_server_error')
def __test_nodes(self, internal_server_error_mock,
make_json_response_mock, render_template_mock):
external_tables_view = ExternalTablesView(cmd='')
external_tables_view.nodes(**self.function_parameters)
if hasattr(self, 'expected_internal_server_error_called_with'):
internal_server_error_mock.assert_called_with(
**self.expected_internal_server_error_called_with
)
else:
internal_server_error_mock.assert_not_called()
if hasattr(self, 'expected_make_json_response_called_with'):
make_json_response_mock.assert_called_with(
**self.expected_make_json_response_called_with
)
else:
make_json_response_mock.assert_not_called()
render_template_mock.assert_called_with(
self.expect_render_template_called_with
)
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.render_template')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.make_json_response')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.internal_server_error')
def __test_node(self, internal_server_error_mock,
make_json_response_mock, render_template_mock):
external_tables_view = ExternalTablesView(cmd='')
external_tables_view.node(**self.function_parameters)
if hasattr(self, 'expected_internal_server_error_called_with'):
internal_server_error_mock.assert_called_with(
**self.expected_internal_server_error_called_with
)
else:
internal_server_error_mock.assert_not_called()
if hasattr(self, 'expected_make_json_response_called_with'):
make_json_response_mock.assert_called_with(
**self.expected_make_json_response_called_with
)
else:
make_json_response_mock.assert_not_called()
render_template_mock.assert_called_with(
**self.expect_render_template_called_with
)
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.render_template')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.make_response')
@patch('pgadmin.browser.server_groups.servers.databases.external_tables'
'.internal_server_error')
def __test_properties(self, internal_server_error_mock,
make_response_mock, render_template_mock):
external_tables_view = ExternalTablesView(cmd='')
external_tables_view.properties(**self.function_parameters)
if hasattr(self, 'expected_internal_server_error_called_with'):
internal_server_error_mock.assert_called_with(
**self.expected_internal_server_error_called_with
)
else:
internal_server_error_mock.assert_not_called()
if hasattr(self, 'expected_make_response_called_with'):
make_response_mock.assert_called_with(
**self.expected_make_response_called_with
)
else:
make_response_mock.assert_not_called()
render_template_mock.assert_called_with(
**self.expect_render_template_called_with
)
def __before_all(self, get_driver_mock):
self.connection.execute_2darray.return_value = \
self.execute_2darray_return_value
self.manager.connection = MagicMock(return_value=self.connection)
get_driver_mock.return_value = MagicMock(
connection_manager=MagicMock(return_value=self.manager)
)
| 39.330275
| 78
| 0.542337
|
66cefe11712ab1e13c38f65ff5910bda5711e88b
| 17,546
|
py
|
Python
|
intersight/models/boot_precision_policy.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/boot_precision_policy.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/boot_precision_policy.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BootPrecisionPolicy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'description': 'str',
'name': 'str',
'boot_devices': 'list[BootDeviceBase]',
'configured_boot_mode': 'str',
'enforce_uefi_secure_boot': 'bool',
'organization': 'IamAccountRef',
'profiles': 'list[PolicyAbstractConfigProfileRef]'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'description': 'Description',
'name': 'Name',
'boot_devices': 'BootDevices',
'configured_boot_mode': 'ConfiguredBootMode',
'enforce_uefi_secure_boot': 'EnforceUefiSecureBoot',
'organization': 'Organization',
'profiles': 'Profiles'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, description=None, name=None, boot_devices=None, configured_boot_mode='Legacy', enforce_uefi_secure_boot=None, organization=None, profiles=None):
"""
BootPrecisionPolicy - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._description = None
self._name = None
self._boot_devices = None
self._configured_boot_mode = None
self._enforce_uefi_secure_boot = None
self._organization = None
self._profiles = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if description is not None:
self.description = description
if name is not None:
self.name = name
if boot_devices is not None:
self.boot_devices = boot_devices
if configured_boot_mode is not None:
self.configured_boot_mode = configured_boot_mode
if enforce_uefi_secure_boot is not None:
self.enforce_uefi_secure_boot = enforce_uefi_secure_boot
if organization is not None:
self.organization = organization
if profiles is not None:
self.profiles = profiles
@property
def account_moid(self):
"""
Gets the account_moid of this BootPrecisionPolicy.
The Account ID for this managed object.
:return: The account_moid of this BootPrecisionPolicy.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this BootPrecisionPolicy.
The Account ID for this managed object.
:param account_moid: The account_moid of this BootPrecisionPolicy.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this BootPrecisionPolicy.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this BootPrecisionPolicy.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this BootPrecisionPolicy.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this BootPrecisionPolicy.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this BootPrecisionPolicy.
The time when this managed object was created.
:return: The create_time of this BootPrecisionPolicy.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this BootPrecisionPolicy.
The time when this managed object was created.
:param create_time: The create_time of this BootPrecisionPolicy.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this BootPrecisionPolicy.
The time when this managed object was last modified.
:return: The mod_time of this BootPrecisionPolicy.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this BootPrecisionPolicy.
The time when this managed object was last modified.
:param mod_time: The mod_time of this BootPrecisionPolicy.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this BootPrecisionPolicy.
A unique identifier of this Managed Object instance.
:return: The moid of this BootPrecisionPolicy.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this BootPrecisionPolicy.
A unique identifier of this Managed Object instance.
:param moid: The moid of this BootPrecisionPolicy.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this BootPrecisionPolicy.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this BootPrecisionPolicy.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this BootPrecisionPolicy.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this BootPrecisionPolicy.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this BootPrecisionPolicy.
An array of owners which represent effective ownership of this object.
:return: The owners of this BootPrecisionPolicy.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this BootPrecisionPolicy.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this BootPrecisionPolicy.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this BootPrecisionPolicy.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this BootPrecisionPolicy.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this BootPrecisionPolicy.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this BootPrecisionPolicy.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this BootPrecisionPolicy.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this BootPrecisionPolicy.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this BootPrecisionPolicy.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this BootPrecisionPolicy.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this BootPrecisionPolicy.
The versioning info for this managed object
:return: The version_context of this BootPrecisionPolicy.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this BootPrecisionPolicy.
The versioning info for this managed object
:param version_context: The version_context of this BootPrecisionPolicy.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def description(self):
"""
Gets the description of this BootPrecisionPolicy.
Description of the policy.
:return: The description of this BootPrecisionPolicy.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BootPrecisionPolicy.
Description of the policy.
:param description: The description of this BootPrecisionPolicy.
:type: str
"""
self._description = description
@property
def name(self):
"""
Gets the name of this BootPrecisionPolicy.
Name of the policy.
:return: The name of this BootPrecisionPolicy.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BootPrecisionPolicy.
Name of the policy.
:param name: The name of this BootPrecisionPolicy.
:type: str
"""
self._name = name
@property
def boot_devices(self):
"""
Gets the boot_devices of this BootPrecisionPolicy.
Set of boot devices to be configured
:return: The boot_devices of this BootPrecisionPolicy.
:rtype: list[BootDeviceBase]
"""
return self._boot_devices
@boot_devices.setter
def boot_devices(self, boot_devices):
"""
Sets the boot_devices of this BootPrecisionPolicy.
Set of boot devices to be configured
:param boot_devices: The boot_devices of this BootPrecisionPolicy.
:type: list[BootDeviceBase]
"""
self._boot_devices = boot_devices
@property
def configured_boot_mode(self):
"""
Gets the configured_boot_mode of this BootPrecisionPolicy.
Sets the BIOS boot mode. UEFI uses the GUID Partition Table (GPT) whereas Legacy mode uses the Master Boot Record(MBR) partitioning scheme.
:return: The configured_boot_mode of this BootPrecisionPolicy.
:rtype: str
"""
return self._configured_boot_mode
@configured_boot_mode.setter
def configured_boot_mode(self, configured_boot_mode):
"""
Sets the configured_boot_mode of this BootPrecisionPolicy.
Sets the BIOS boot mode. UEFI uses the GUID Partition Table (GPT) whereas Legacy mode uses the Master Boot Record(MBR) partitioning scheme.
:param configured_boot_mode: The configured_boot_mode of this BootPrecisionPolicy.
:type: str
"""
allowed_values = ["Legacy", "Uefi"]
if configured_boot_mode not in allowed_values:
raise ValueError(
"Invalid value for `configured_boot_mode` ({0}), must be one of {1}"
.format(configured_boot_mode, allowed_values)
)
self._configured_boot_mode = configured_boot_mode
@property
def enforce_uefi_secure_boot(self):
"""
Gets the enforce_uefi_secure_boot of this BootPrecisionPolicy.
If UEFI secure boot is enabled, the boot mode is set to UEFI by default. Secure boot enforces that device boots using only software that is trusted by the Original Equipment Manufacturer (OEM)
:return: The enforce_uefi_secure_boot of this BootPrecisionPolicy.
:rtype: bool
"""
return self._enforce_uefi_secure_boot
@enforce_uefi_secure_boot.setter
def enforce_uefi_secure_boot(self, enforce_uefi_secure_boot):
"""
Sets the enforce_uefi_secure_boot of this BootPrecisionPolicy.
        If UEFI secure boot is enabled, the boot mode is set to UEFI by default. Secure boot enforces that the device boots using only software that is trusted by the Original Equipment Manufacturer (OEM).
:param enforce_uefi_secure_boot: The enforce_uefi_secure_boot of this BootPrecisionPolicy.
:type: bool
"""
self._enforce_uefi_secure_boot = enforce_uefi_secure_boot
@property
def organization(self):
"""
Gets the organization of this BootPrecisionPolicy.
Organization
:return: The organization of this BootPrecisionPolicy.
:rtype: IamAccountRef
"""
return self._organization
@organization.setter
def organization(self, organization):
"""
Sets the organization of this BootPrecisionPolicy.
Organization
:param organization: The organization of this BootPrecisionPolicy.
:type: IamAccountRef
"""
self._organization = organization
@property
def profiles(self):
"""
Gets the profiles of this BootPrecisionPolicy.
Reference to the profile objects that this policy is a part of
:return: The profiles of this BootPrecisionPolicy.
:rtype: list[PolicyAbstractConfigProfileRef]
"""
return self._profiles
@profiles.setter
def profiles(self, profiles):
"""
Sets the profiles of this BootPrecisionPolicy.
Reference to the profile objects that this policy is a part of
:param profiles: The profiles of this BootPrecisionPolicy.
:type: list[PolicyAbstractConfigProfileRef]
"""
self._profiles = profiles
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BootPrecisionPolicy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
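# Illustrative usage sketch (not part of the generated SDK module). It assumes
# BootPrecisionPolicy() can be constructed with no arguments, as is typical for
# swagger-generated models, and only exercises the setters defined above.
if __name__ == "__main__":
    policy = BootPrecisionPolicy()
    policy.name = "uefi-secure"
    policy.configured_boot_mode = "Uefi"      # accepted: value is in ["Legacy", "Uefi"]
    try:
        policy.configured_boot_mode = "BIOS"  # rejected by the validating setter above
    except ValueError as exc:
        print(exc)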
| 30.251724
| 323
| 0.617292
|
2f41f14740e69380898faf433f71501a12fed254
| 11,571
|
py
|
Python
|
mux_python/api/exports_api.py
|
moaazsidat/mux-python
|
3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c
|
[
"MIT"
] | null | null | null |
mux_python/api/exports_api.py
|
moaazsidat/mux-python
|
3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c
|
[
"MIT"
] | 5
|
2021-09-15T05:46:50.000Z
|
2021-09-21T01:13:41.000Z
|
mux_python/api/exports_api.py
|
moaazsidat/mux-python
|
3f03b9dd0761fa1a0cd5bdbeac85ccf4f326508c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Contact: devex@mux.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mux_python.api_client import ApiClient
from mux_python.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ExportsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def list_exports(self, **kwargs): # noqa: E501
"""List property video view export links # noqa: E501
Deprecated: The API has been replaced by the list-exports-views API call. Lists the available video view exports along with URLs to retrieve them. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_exports(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ListExportsResponse
"""
kwargs['_return_http_data_only'] = True
return self.list_exports_with_http_info(**kwargs) # noqa: E501
def list_exports_with_http_info(self, **kwargs): # noqa: E501
"""List property video view export links # noqa: E501
Deprecated: The API has been replaced by the list-exports-views API call. Lists the available video view exports along with URLs to retrieve them. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_exports_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return only the response data, without
                                       the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ListExportsResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_exports" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['accessToken'] # noqa: E501
response_types_map = {
200: "ListExportsResponse",
}
return self.api_client.call_api(
'/data/v1/exports', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def list_exports_views(self, **kwargs): # noqa: E501
"""List available property view exports # noqa: E501
Lists the available video view exports along with URLs to retrieve them. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_exports_views(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ListVideoViewExportsResponse
"""
kwargs['_return_http_data_only'] = True
return self.list_exports_views_with_http_info(**kwargs) # noqa: E501
def list_exports_views_with_http_info(self, **kwargs): # noqa: E501
"""List available property view exports # noqa: E501
Lists the available video view exports along with URLs to retrieve them. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_exports_views_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
        :param _return_http_data_only: return only the response data, without
                                       the status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ListVideoViewExportsResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_exports_views" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['accessToken'] # noqa: E501
response_types_map = {
200: "ListVideoViewExportsResponse",
}
return self.api_client.call_api(
'/data/v1/exports/views', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
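# Illustrative usage sketch (not part of the generated client). It only uses
# ExportsApi as defined above; configuring the 'accessToken' credentials on the
# ApiClient is assumed to happen elsewhere and is omitted here.
if __name__ == "__main__":
    api = ExportsApi()  # falls back to a default ApiClient()
    # Synchronous call: returns a ListVideoViewExportsResponse.
    exports = api.list_exports_views()
    # Asynchronous call: returns a thread whose .get() yields the same response type.
    thread = api.list_exports_views(async_req=True)
    exports_async = thread.get()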
| 40.317073
| 204
| 0.598997
|
c499582726cd6d9d3ec77fa891123677f1eeadb6
| 21
|
py
|
Python
|
pyopt/__init__.py
|
vndee/python-options
|
f518ec4e13c529a12a997a9bddbb54f86e21b55d
|
[
"MIT"
] | 2
|
2020-11-19T03:17:27.000Z
|
2020-11-19T10:19:37.000Z
|
option/__init__.py
|
zlasd/novelS
|
44905ec0477806f8aee377e098d4311d65aabd18
|
[
"MIT"
] | null | null | null |
option/__init__.py
|
zlasd/novelS
|
44905ec0477806f8aee377e098d4311d65aabd18
|
[
"MIT"
] | 1
|
2020-11-19T10:11:43.000Z
|
2020-11-19T10:11:43.000Z
|
from .option import *
| 21
| 21
| 0.761905
|
0521a69c5f80ee2674694001f4e12f435eadafc8
| 3,602
|
py
|
Python
|
python/tvm/ndarray.py
|
mli/tvm
|
814b46dccb3c340be7611d113a303a546f7b2b2a
|
[
"Apache-2.0"
] | 1
|
2018-01-30T01:38:38.000Z
|
2018-01-30T01:38:38.000Z
|
python/tvm/ndarray.py
|
mli/tvm
|
814b46dccb3c340be7611d113a303a546f7b2b2a
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/ndarray.py
|
mli/tvm
|
814b46dccb3c340be7611d113a303a546f7b2b2a
|
[
"Apache-2.0"
] | 1
|
2020-06-11T08:04:06.000Z
|
2020-06-11T08:04:06.000Z
|
"""TVM Runtime NDArray API.
tvm.ndarray provides a minimum runtime array API to test
the correctness of the program.
"""
# pylint: disable=invalid-name,unused-import
from __future__ import absolute_import as _abs
import numpy as _np
from ._ffi.ndarray import TVMContext, TVMType, NDArrayBase
from ._ffi.ndarray import context, empty
from ._ffi.ndarray import _set_class_ndarray
from ._ffi.ndarray import register_extension, free_extension_handle
class NDArray(NDArrayBase):
"""Lightweight NDArray class of TVM runtime.
    Strictly speaking, this is only an array container (a buffer object).
    No arithmetic operations are defined.
    All operations are performed by TVM functions.
    The goal is not to re-build yet another array library.
    Instead, this is a minimal data structure to demonstrate
    how we can use TVM in existing projects that might have their own array containers.
"""
pass
def cpu(dev_id=0):
"""Construct a CPU device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(1, dev_id)
def gpu(dev_id=0):
"""Construct a CPU device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(2, dev_id)
def rocm(dev_id=0):
"""Construct a ROCM device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(10, dev_id)
def opencl(dev_id=0):
"""Construct a OpenCL device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(4, dev_id)
def metal(dev_id=0):
"""Construct a metal device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(8, dev_id)
def vpi(dev_id=0):
"""Construct a VPI simulated device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(9, dev_id)
def opengl(dev_id=0):
"""Construct a OpenGL device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
"""
return TVMContext(11, dev_id)
def ext_dev(dev_id=0):
"""Construct a extension device
Parameters
----------
dev_id : int, optional
The integer device id
Returns
-------
ctx : TVMContext
The created context
Note
----
    This API is reserved for quick testing of a new
    device by plugging in a device API as ext_dev.
"""
return TVMContext(12, dev_id)
cl = opencl
mtl = metal
def array(arr, ctx=cpu(0)):
"""Create an array from source arr.
Parameters
----------
arr : numpy.ndarray
The array to be copied from
ctx : TVMContext, optional
The device context to create the array
Returns
-------
ret : NDArray
The created array
"""
if not isinstance(arr, (_np.ndarray, NDArray)):
arr = _np.array(arr)
return empty(arr.shape, arr.dtype, ctx).copyfrom(arr)
_set_class_ndarray(NDArray)
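# Illustrative usage sketch (not part of the original module). It only uses the
# helpers defined above (cpu, array) plus numpy, and assumes the runtime NDArray
# exposes asnumpy() for copying data back to the host, as TVM's NDArrayBase did
# at the time.
if __name__ == "__main__":
    import numpy as np
    ctx = cpu(0)                                                  # TVMContext(1, 0)
    a = array(np.arange(6, dtype="float32").reshape(2, 3), ctx)   # host -> device copy
    host = a.asnumpy()                                            # device -> numpy.ndarray
    print(host)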
| 19.576087
| 87
| 0.616602
|
ba2c7bb6668a23f47291b1a1279e5c31da9bcdac
| 1,008
|
py
|
Python
|
.history/my_classes/FirstClassFunctions/MapFilterZipList_20210706150854.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/MapFilterZipList_20210706150854.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/FirstClassFunctions/MapFilterZipList_20210706150854.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
"""Map Filter Zip List Comprehensions
Higher order functions
A function that takes a function as a parameter and/or returns a function as its return value
Example: sorted
map    --+
         |-- modern alternative -> list comprehensions and generator expressions
filter --+
The map function
map(func, *iterables)
*iterables -> a variable number of iterable objects
func -> some function that takes as many arguments as there are iterable objects passed to iterables
map(func, *iterables) will then return an iterator that calculates the function applied to each element of the iterables
The iterator stops as soon as one of the iterables has been exhausted, so unequal-length iterables can be used
Examples
"""
l = [2, 3, 4]
def sq(x):
return x**2
list(map(sq, l))  # [4, 9, 16]
l1 = [1, 2, 3]
l2 = [10, 20, 30]
def add(x, y):
return x + y
list(map(add, l1, l2))  # [11, 22, 33]
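# A few extra sketches covering the points made in the docstring above:
# map stops at the shortest iterable, and filter/zip are the companion built-ins.
list(map(add, [1, 2, 3], [10, 20, 30, 40]))       # [11, 22, 33] - the extra 40 is ignored
list(filter(lambda x: x % 2 == 0, [1, 2, 3, 4]))  # [2, 4]
list(zip([1, 2, 3], ['a', 'b', 'c']))             # [(1, 'a'), (2, 'b'), (3, 'c')]
# The "modern alternative" mentioned above, as a list comprehension:
[x + y for x, y in zip(l1, l2)]                   # [11, 22, 33]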
| 22.4
| 120
| 0.625992
|
786d708057ae7d806f497088a2b8cc8df49aa7aa
| 9,769
|
py
|
Python
|
tests/test_oauth2client_file.py
|
manikanta-kondeti/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | 1
|
2015-05-14T00:06:51.000Z
|
2015-05-14T00:06:51.000Z
|
tests/test_oauth2client_file.py
|
moughamir/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | 1
|
2016-06-23T16:33:52.000Z
|
2016-06-23T16:33:52.000Z
|
tests/test_oauth2client_file.py
|
moughamir/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2client.file tests
Unit tests for oauth2client.file
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import datetime
import httplib2
import os
import pickle
import stat
import tempfile
import unittest
from apiclient.http import HttpMockSequence
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import file
from oauth2client import locked_file
from oauth2client import multistore_file
from oauth2client import util
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenCredentials
from oauth2client.client import AssertionCredentials
from oauth2client.client import OAuth2Credentials
FILENAME = tempfile.mktemp('oauth2client_test.data')
class OAuth2ClientFileTests(unittest.TestCase):
def tearDown(self):
try:
os.unlink(FILENAME)
except OSError:
pass
def setUp(self):
try:
os.unlink(FILENAME)
except OSError:
pass
def create_test_credentials(self, client_id='some_client_id'):
access_token = 'foo'
client_secret = 'cOuDdkfjxxnv+'
refresh_token = '1/0/a.df219fjls0'
token_expiry = datetime.datetime.utcnow()
token_uri = 'https://www.google.com/accounts/o8/oauth2/token'
user_agent = 'refresh_checker/1.0'
credentials = OAuth2Credentials(
access_token, client_id, client_secret,
refresh_token, token_expiry, token_uri,
user_agent)
return credentials
def test_non_existent_file_storage(self):
s = file.Storage(FILENAME)
credentials = s.get()
self.assertEquals(None, credentials)
def test_no_sym_link_credentials(self):
if hasattr(os, 'symlink'):
SYMFILENAME = FILENAME + '.sym'
os.symlink(FILENAME, SYMFILENAME)
s = file.Storage(SYMFILENAME)
try:
s.get()
self.fail('Should have raised an exception.')
except file.CredentialsFileSymbolicLinkError:
pass
finally:
os.unlink(SYMFILENAME)
def test_pickle_and_json_interop(self):
# Write a file with a pickled OAuth2Credentials.
credentials = self.create_test_credentials()
f = open(FILENAME, 'w')
pickle.dump(credentials, f)
f.close()
    # Storage should not be able to read that object, as the capability to
# read and write credentials as pickled objects has been removed.
s = file.Storage(FILENAME)
read_credentials = s.get()
self.assertEquals(None, read_credentials)
# Now write it back out and confirm it has been rewritten as JSON
s.put(credentials)
f = open(FILENAME)
data = simplejson.load(f)
f.close()
self.assertEquals(data['access_token'], 'foo')
self.assertEquals(data['_class'], 'OAuth2Credentials')
self.assertEquals(data['_module'], OAuth2Credentials.__module__)
def test_token_refresh(self):
credentials = self.create_test_credentials()
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
new_cred = copy.copy(credentials)
new_cred.access_token = 'bar'
s.put(new_cred)
credentials._refresh(lambda x: x)
self.assertEquals(credentials.access_token, 'bar')
def test_credentials_delete(self):
credentials = self.create_test_credentials()
s = file.Storage(FILENAME)
s.put(credentials)
credentials = s.get()
self.assertNotEquals(None, credentials)
s.delete()
credentials = s.get()
self.assertEquals(None, credentials)
def test_access_token_credentials(self):
access_token = 'foo'
user_agent = 'refresh_checker/1.0'
credentials = AccessTokenCredentials(access_token, user_agent)
s = file.Storage(FILENAME)
credentials = s.put(credentials)
credentials = s.get()
self.assertNotEquals(None, credentials)
self.assertEquals('foo', credentials.access_token)
mode = os.stat(FILENAME).st_mode
if os.name == 'posix':
self.assertEquals('0600', oct(stat.S_IMODE(os.stat(FILENAME).st_mode)))
def test_read_only_file_fail_lock(self):
credentials = self.create_test_credentials()
open(FILENAME, 'a+b').close()
os.chmod(FILENAME, 0400)
store = multistore_file.get_credential_storage(
FILENAME,
credentials.client_id,
credentials.user_agent,
['some-scope', 'some-other-scope'])
store.put(credentials)
if os.name == 'posix':
self.assertTrue(store._multistore._read_only)
os.chmod(FILENAME, 0600)
def test_multistore_no_symbolic_link_files(self):
if hasattr(os, 'symlink'):
SYMFILENAME = FILENAME + 'sym'
os.symlink(FILENAME, SYMFILENAME)
store = multistore_file.get_credential_storage(
SYMFILENAME,
'some_client_id',
'user-agent/1.0',
['some-scope', 'some-other-scope'])
try:
store.get()
self.fail('Should have raised an exception.')
except locked_file.CredentialsFileSymbolicLinkError:
pass
finally:
os.unlink(SYMFILENAME)
def test_multistore_non_existent_file(self):
store = multistore_file.get_credential_storage(
FILENAME,
'some_client_id',
'user-agent/1.0',
['some-scope', 'some-other-scope'])
credentials = store.get()
self.assertEquals(None, credentials)
def test_multistore_file(self):
credentials = self.create_test_credentials()
store = multistore_file.get_credential_storage(
FILENAME,
credentials.client_id,
credentials.user_agent,
['some-scope', 'some-other-scope'])
store.put(credentials)
credentials = store.get()
self.assertNotEquals(None, credentials)
self.assertEquals('foo', credentials.access_token)
store.delete()
credentials = store.get()
self.assertEquals(None, credentials)
if os.name == 'posix':
self.assertEquals('0600', oct(stat.S_IMODE(os.stat(FILENAME).st_mode)))
def test_multistore_file_custom_key(self):
credentials = self.create_test_credentials()
custom_key = {'myapp': 'testing', 'clientid': 'some client'}
store = multistore_file.get_credential_storage_custom_key(
FILENAME, custom_key)
store.put(credentials)
stored_credentials = store.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
store.delete()
stored_credentials = store.get()
self.assertEquals(None, stored_credentials)
def test_multistore_file_custom_string_key(self):
credentials = self.create_test_credentials()
# store with string key
store = multistore_file.get_credential_storage_custom_string_key(
FILENAME, 'mykey')
store.put(credentials)
stored_credentials = store.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
# try retrieving with a dictionary
store_dict = multistore_file.get_credential_storage_custom_string_key(
FILENAME, {'key': 'mykey'})
stored_credentials = store.get()
self.assertNotEquals(None, stored_credentials)
self.assertEqual(credentials.access_token, stored_credentials.access_token)
store.delete()
stored_credentials = store.get()
self.assertEquals(None, stored_credentials)
def test_multistore_file_backwards_compatibility(self):
credentials = self.create_test_credentials()
scopes = ['scope1', 'scope2']
# store the credentials using the legacy key method
store = multistore_file.get_credential_storage(
FILENAME, 'client_id', 'user_agent', scopes)
store.put(credentials)
# retrieve the credentials using a custom key that matches the legacy key
key = {'clientId': 'client_id', 'userAgent': 'user_agent',
'scope': util.scopes_to_string(scopes)}
store = multistore_file.get_credential_storage_custom_key(FILENAME, key)
stored_credentials = store.get()
self.assertEqual(credentials.access_token, stored_credentials.access_token)
def test_multistore_file_get_all_keys(self):
# start with no keys
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([], keys)
# store credentials
credentials = self.create_test_credentials(client_id='client1')
custom_key = {'myapp': 'testing', 'clientid': 'client1'}
store1 = multistore_file.get_credential_storage_custom_key(
FILENAME, custom_key)
store1.put(credentials)
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([custom_key], keys)
# store more credentials
credentials = self.create_test_credentials(client_id='client2')
string_key = 'string_key'
store2 = multistore_file.get_credential_storage_custom_string_key(
FILENAME, string_key)
store2.put(credentials)
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals(2, len(keys))
self.assertTrue(custom_key in keys)
self.assertTrue({'key': string_key} in keys)
# back to no keys
store1.delete()
store2.delete()
keys = multistore_file.get_all_credential_keys(FILENAME)
self.assertEquals([], keys)
if __name__ == '__main__':
unittest.main()
| 30.151235
| 79
| 0.717883
|
c0d7000a540d5edad85c548cdaa4e334db504c72
| 19,112
|
py
|
Python
|
tests/framework/hooks/test_context_hooks.py
|
avichalbadaya/kedro
|
a303243947f4da9684ebb852192bdd73acb6ad24
|
[
"Apache-2.0"
] | 1
|
2020-08-11T00:33:45.000Z
|
2020-08-11T00:33:45.000Z
|
tests/framework/hooks/test_context_hooks.py
|
markoke/kedro
|
1cbd3e14aaf383d7d44946a20abb57bab7e4ce9c
|
[
"Apache-2.0"
] | 1
|
2021-05-11T19:22:42.000Z
|
2021-05-11T19:22:42.000Z
|
tests/framework/hooks/test_context_hooks.py
|
avichalbadaya/kedro
|
a303243947f4da9684ebb852192bdd73acb6ad24
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
from pathlib import Path
from typing import Any, Dict, List, Union
import pandas as pd
import pytest
import yaml
from kedro import __version__
from kedro.framework.context import KedroContext
from kedro.framework.context.context import _convert_paths_to_absolute_posix
from kedro.framework.hooks import hook_impl
from kedro.framework.hooks.manager import _create_hook_manager
from kedro.io import DataCatalog
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node, node
from kedro.runner import ParallelRunner
@pytest.fixture
def local_logging_config():
return {
"version": 1,
"formatters": {
"simple": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {"kedro": {"level": "INFO", "handlers": ["console"]}},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "simple",
"stream": "ext://sys.stdout",
}
},
}
def _write_yaml(filepath: Path, config: Dict):
filepath.parent.mkdir(parents=True, exist_ok=True)
yaml_str = yaml.dump(config)
filepath.write_text(yaml_str)
@pytest.fixture
def local_config(tmp_path):
cars_filepath = str(tmp_path / "cars.csv")
boats_filepath = str(tmp_path / "boats.csv")
return {
"cars": {
"type": "pandas.CSVDataSet",
"filepath": cars_filepath,
"save_args": {"index": False},
"versioned": True,
},
"boats": {
"type": "pandas.CSVDataSet",
"filepath": boats_filepath,
"versioned": True,
},
}
@pytest.fixture(autouse=True)
def config_dir(tmp_path, local_config, local_logging_config):
catalog = tmp_path / "conf" / "base" / "catalog.yml"
credentials = tmp_path / "conf" / "local" / "credentials.yml"
logging = tmp_path / "conf" / "local" / "logging.yml"
_write_yaml(catalog, local_config)
_write_yaml(credentials, {"dev_s3": "foo"})
_write_yaml(logging, local_logging_config)
@pytest.fixture(autouse=True)
def hook_manager(monkeypatch):
# re-create the global hook manager after every test
hook_manager = _create_hook_manager()
monkeypatch.setattr("kedro.framework.hooks.get_hook_manager", lambda: hook_manager)
monkeypatch.setattr(
"kedro.framework.context.context.get_hook_manager", lambda: hook_manager
)
monkeypatch.setattr("kedro.runner.runner.get_hook_manager", lambda: hook_manager)
return hook_manager
def identity(x: str):
return x
def broken_node():
raise ValueError("broken")
def assert_exceptions_equal(e1: Exception, e2: Exception):
assert isinstance(e1, type(e2)) and str(e1) == str(e2)
@pytest.fixture
def dummy_dataframe():
return pd.DataFrame({"test": [1, 2]})
class LoggingHooks:
"""A set of test hooks that only log information when invoked.
Use a log queue to properly test log messages written by hooks invoked by ParallelRunner.
"""
handler_name = "hooks_handler"
def __init__(self, logs_queue):
self.logger = logging.getLogger("hooks_handler")
self.logger.handlers = []
self.logger.addHandler(QueueHandler(logs_queue))
@hook_impl
def after_catalog_created(
self,
catalog: DataCatalog,
conf_catalog: Dict[str, Any],
conf_creds: Dict[str, Any],
feed_dict: Dict[str, Any],
save_version: str,
load_versions: Dict[str, str],
run_id: str,
):
self.logger.info(
"Catalog created",
extra={
"catalog": catalog,
"conf_catalog": conf_catalog,
"conf_creds": conf_creds,
"feed_dict": feed_dict,
"save_version": save_version,
"load_versions": load_versions,
"run_id": run_id,
},
)
@hook_impl
def before_node_run(
self,
node: Node,
catalog: DataCatalog,
inputs: Dict[str, Any],
is_async: str,
run_id: str,
) -> None:
self.logger.info(
"About to run node",
extra={
"node": node,
"catalog": catalog,
"inputs": inputs,
"is_async": is_async,
"run_id": run_id,
},
)
@hook_impl
def after_node_run(
self,
node: Node,
catalog: DataCatalog,
inputs: Dict[str, Any],
outputs: Dict[str, Any],
is_async: str,
run_id: str,
) -> None:
self.logger.info(
"Ran node",
extra={
"node": node,
"catalog": catalog,
"inputs": inputs,
"outputs": outputs,
"is_async": is_async,
"run_id": run_id,
},
)
@hook_impl
def on_node_error(
self,
error: Exception,
node: Node,
catalog: DataCatalog,
inputs: Dict[str, Any],
is_async: bool,
run_id: str,
):
self.logger.info(
"Node error",
extra={
"error": error,
"node": node,
"catalog": catalog,
"inputs": inputs,
"is_async": is_async,
"run_id": run_id,
},
)
@hook_impl
def before_pipeline_run(
self, run_params: Dict[str, Any], pipeline: Pipeline, catalog: DataCatalog
) -> None:
self.logger.info(
"About to run pipeline",
extra={"pipeline": pipeline, "run_params": run_params, "catalog": catalog},
)
@hook_impl
def after_pipeline_run(
self, run_params: Dict[str, Any], pipeline: Pipeline, catalog: DataCatalog
) -> None:
self.logger.info(
"Ran pipeline",
extra={"pipeline": pipeline, "run_params": run_params, "catalog": catalog},
)
@hook_impl
def on_pipeline_error(
self,
error: Exception,
run_params: Dict[str, Any],
pipeline: Pipeline,
catalog: DataCatalog,
) -> None:
self.logger.info(
"Pipeline error",
extra={
"error": error,
"run_params": run_params,
"pipeline": pipeline,
"catalog": catalog,
},
)
@pytest.fixture
def logs_queue():
return Queue()
@pytest.fixture
def logging_hooks(logs_queue):
return LoggingHooks(logs_queue)
def _create_context_with_hooks(tmp_path, mocker, logging_hooks):
"""Create a context with some Hooks registered.
    We do this in a function to support both calling it directly and using it as part of a fixture.
"""
class DummyContextWithHooks(KedroContext):
project_name = "test hooks"
package_name = "test_hooks"
project_version = __version__
hooks = (logging_hooks,)
def _get_run_id(self, *args, **kwargs) -> Union[None, str]:
return "mocked context with hooks run id"
def _get_pipelines(self) -> Dict[str, Pipeline]:
pipeline = Pipeline(
[
node(identity, "cars", "planes", name="node1"),
node(identity, "boats", "ships", name="node2"),
],
tags="pipeline",
)
return {"__default__": pipeline}
mocker.patch("logging.config.dictConfig")
return DummyContextWithHooks(tmp_path, env="local")
@pytest.fixture
def context_with_hooks(tmp_path, mocker, logging_hooks):
return _create_context_with_hooks(tmp_path, mocker, logging_hooks)
@pytest.fixture
def broken_context_with_hooks(tmp_path, mocker, logging_hooks):
class BrokenContextWithHooks(KedroContext):
project_name = "broken-context"
package_name = "broken"
project_version = __version__
hooks = (logging_hooks,)
def _get_pipelines(self) -> Dict[str, Pipeline]:
pipeline = Pipeline(
[
node(broken_node, None, "A", name="node1"),
node(broken_node, None, "B", name="node2"),
],
tags="pipeline",
)
return {"__default__": pipeline}
mocker.patch("logging.config.dictConfig")
return BrokenContextWithHooks(tmp_path, env="local")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Due to bug in hooks")
class TestKedroContextHooks:
@staticmethod
def _assert_hook_call_record_has_expected_parameters(
call_record: logging.LogRecord, expected_parameters: List[str]
):
"""Assert the given call record has all expected parameters."""
for param in expected_parameters:
assert hasattr(call_record, param)
def test_calling_register_hooks_multiple_times_should_not_raise(
self, context_with_hooks
):
context_with_hooks._register_hooks()
context_with_hooks._register_hooks()
assert True # if we get to this statement, it means the previous repeated calls don't raise
def test_hooks_are_registered_when_context_is_created(
self, tmp_path, mocker, logging_hooks, hook_manager
):
assert not hook_manager.is_registered(logging_hooks)
# create the context
_create_context_with_hooks(tmp_path, mocker, logging_hooks)
# assert hooks are registered after context is created
assert hook_manager.is_registered(logging_hooks)
def test_after_catalog_created_hook_is_called(self, context_with_hooks, caplog):
catalog = context_with_hooks.catalog
config_loader = context_with_hooks.config_loader
relevant_records = [
r for r in caplog.records if r.name == LoggingHooks.handler_name
]
record = relevant_records[0]
assert record.getMessage() == "Catalog created"
assert record.catalog == catalog
assert record.conf_creds == config_loader.get("credentials*")
assert record.conf_catalog == _convert_paths_to_absolute_posix(
project_path=context_with_hooks.project_path,
conf_dictionary=config_loader.get("catalog*"),
)
assert record.save_version is None
assert record.load_versions is None
assert record.run_id == "mocked context with hooks run id"
def test_before_and_after_pipeline_run_hooks_are_called(
self, context_with_hooks, dummy_dataframe, caplog
):
context_with_hooks.catalog.save("cars", dummy_dataframe)
context_with_hooks.catalog.save("boats", dummy_dataframe)
context_with_hooks.run()
# test before pipeline run hook
before_pipeline_run_calls = [
record
for record in caplog.records
if record.funcName == "before_pipeline_run"
]
assert len(before_pipeline_run_calls) == 1
call_record = before_pipeline_run_calls[0]
assert call_record.pipeline.describe() == context_with_hooks.pipeline.describe()
self._assert_hook_call_record_has_expected_parameters(
call_record, ["pipeline", "catalog", "run_params"]
)
# test after pipeline run hook
after_pipeline_run_calls = [
record
for record in caplog.records
if record.funcName == "after_pipeline_run"
]
assert len(after_pipeline_run_calls) == 1
call_record = after_pipeline_run_calls[0]
self._assert_hook_call_record_has_expected_parameters(
call_record, ["pipeline", "catalog", "run_params"]
)
assert call_record.pipeline.describe() == context_with_hooks.pipeline.describe()
def test_on_pipeline_error_hook_is_called(self, broken_context_with_hooks, caplog):
with pytest.raises(ValueError, match="broken"):
broken_context_with_hooks.run()
on_pipeline_error_calls = [
record
for record in caplog.records
if record.funcName == "on_pipeline_error"
]
assert len(on_pipeline_error_calls) == 1
call_record = on_pipeline_error_calls[0]
self._assert_hook_call_record_has_expected_parameters(
call_record, ["error", "run_params", "pipeline", "catalog"]
)
expected_error = ValueError("broken")
assert_exceptions_equal(call_record.error, expected_error)
def test_on_node_error_hook_is_called_with_sequential_runner(
self, broken_context_with_hooks, caplog
):
with pytest.raises(ValueError, match="broken"):
broken_context_with_hooks.run(node_names=["node1"])
on_node_error_calls = [
record for record in caplog.records if record.funcName == "on_node_error"
]
assert len(on_node_error_calls) == 1
call_record = on_node_error_calls[0]
self._assert_hook_call_record_has_expected_parameters(
call_record, ["error", "node", "catalog", "inputs", "is_async", "run_id"]
)
expected_error = ValueError("broken")
assert_exceptions_equal(call_record.error, expected_error)
def test_before_and_after_node_run_hooks_are_called_with_sequential_runner(
self, context_with_hooks, dummy_dataframe, caplog
):
context_with_hooks.catalog.save("cars", dummy_dataframe)
context_with_hooks.run(node_names=["node1"])
# test before node run hook
before_node_run_calls = [
record for record in caplog.records if record.funcName == "before_node_run"
]
assert len(before_node_run_calls) == 1
call_record = before_node_run_calls[0]
self._assert_hook_call_record_has_expected_parameters(
call_record, ["node", "catalog", "inputs", "is_async", "run_id"]
)
# sanity check a couple of important parameters
assert call_record.inputs["cars"].to_dict() == dummy_dataframe.to_dict()
assert call_record.run_id == context_with_hooks.run_id
# test after node run hook
after_node_run_calls = [
record for record in caplog.records if record.funcName == "after_node_run"
]
assert len(after_node_run_calls) == 1
call_record = after_node_run_calls[0]
self._assert_hook_call_record_has_expected_parameters(
call_record, ["node", "catalog", "inputs", "outputs", "is_async", "run_id"]
)
# sanity check a couple of important parameters
assert call_record.outputs["planes"].to_dict() == dummy_dataframe.to_dict()
assert call_record.run_id == context_with_hooks.run_id
def test_on_node_error_hook_is_called_with_parallel_runner(
self, broken_context_with_hooks, logs_queue
):
log_records = []
class LogHandler(logging.Handler): # pylint: disable=abstract-method
def handle(self, record):
log_records.append(record)
logs_queue_listener = QueueListener(logs_queue, LogHandler())
logs_queue_listener.start()
with pytest.raises(ValueError, match="broken"):
broken_context_with_hooks.run(
runner=ParallelRunner(max_workers=2), node_names=["node1", "node2"]
)
logs_queue_listener.stop()
on_node_error_records = [
r for r in log_records if r.funcName == "on_node_error"
]
assert len(on_node_error_records) == 2
for call_record in on_node_error_records:
self._assert_hook_call_record_has_expected_parameters(
call_record,
["error", "node", "catalog", "inputs", "is_async", "run_id"],
)
expected_error = ValueError("broken")
assert_exceptions_equal(call_record.error, expected_error)
def test_before_and_after_node_run_hooks_are_called_with_parallel_runner(
self, context_with_hooks, dummy_dataframe, logs_queue
):
log_records = []
class LogHandler(logging.Handler): # pylint: disable=abstract-method
def handle(self, record):
log_records.append(record)
logs_queue_listener = QueueListener(logs_queue, LogHandler())
logs_queue_listener.start()
context_with_hooks.catalog.save("cars", dummy_dataframe)
context_with_hooks.catalog.save("boats", dummy_dataframe)
context_with_hooks.run(runner=ParallelRunner(), node_names=["node1", "node2"])
logs_queue_listener.stop()
before_node_run_log_records = [
r for r in log_records if r.funcName == "before_node_run"
]
assert len(before_node_run_log_records) == 2
for record in before_node_run_log_records:
assert record.getMessage() == "About to run node"
assert record.node.name in ["node1", "node2"]
assert set(record.inputs.keys()) <= {"cars", "boats"}
after_node_run_log_records = [
r for r in log_records if r.funcName == "after_node_run"
]
assert len(after_node_run_log_records) == 2
for record in after_node_run_log_records:
assert record.getMessage() == "Ran node"
assert record.node.name in ["node1", "node2"]
assert set(record.outputs.keys()) <= {"planes", "ships"}
| 35.06789
| 100
| 0.63536
|
990f87e0dcfede9ade52f7feb7e33cb925b53cfe
| 18,456
|
py
|
Python
|
fairseq/modules/transformer_layer.py
|
hamsik1223/fairseq
|
13164c38b0aab4269f8775a2506e2b60f5909114
|
[
"MIT"
] | null | null | null |
fairseq/modules/transformer_layer.py
|
hamsik1223/fairseq
|
13164c38b0aab4269f8775a2506e2b60f5909114
|
[
"MIT"
] | null | null | null |
fairseq/modules/transformer_layer.py
|
hamsik1223/fairseq
|
13164c38b0aab4269f8775a2506e2b60f5909114
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import LayerNorm, MultiheadAttention
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = self.build_fc1(
self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.fc2 = self.build_fc2(
args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.final_layer_norm = LayerNorm(self.embed_dim)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
relative_pos_type=("unmasked"
if getattr(args, "use_relative_pos_embeddings", False)
else None),
max_relative_pos=getattr(args, "max_relative_pos", 128),
heads_share_embeddings=getattr(args, "heads_share_embeddings", False),
add_pos_embeddings_to_values=getattr(args, "add_pos_embeddings_to_values", False)
)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(
self,
x,
encoder_padding_mask,
attn_mask: Optional[Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
sentence_position = None,
disable_add_1stblock = False,
only_weighted_sum = False):
#####
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where
T_tgt is the length of query, while T_src is the length of key,
                though here both query and key are x,
attn_mask[t_tgt, t_src] = 1 means when calculating embedding
for t_tgt, t_src is excluded (or masked out), =0 means it is
included in attention
need_attn (bool, optional): return attention weights.
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
if need_head_weights:
need_attn = True
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
# TODO: to formally solve this problem, we need to change fairseq's
# MultiheadAttention. We will do this later on.
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
sentence_position = sentence_position,
need_weights=True,
need_head_weights=need_head_weights,
)
###
x = F.dropout(x, p=self.dropout, training=self.training)
#####
if not disable_add_1stblock:
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if only_weighted_sum:
if need_attn:
return x, attn
else:
return x, None
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if need_attn:
return x, attn
return x, None
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout = args.dropout
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
# relative_pos_type=("masked"
# if getattr(args, "use_relative_pos_embeddings", False)
# else None),
relative_pos_type = None, ###
max_relative_pos=getattr(args, "max_relative_pos", 128),
heads_share_embeddings=getattr(args, "heads_share_embeddings", False),
add_pos_embeddings_to_values=getattr(args, "add_pos_embeddings_to_values", False)
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Scriptable reorder incremental state in transformer layers."""
self.self_attn.reorder_incremental_state(incremental_state, new_order)
if self.encoder_attn is not None:
self.encoder_attn.reorder_incremental_state(incremental_state, new_order)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
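# Illustrative sketch (not part of the original fairseq module): the Linear
# helper above simply wraps nn.Linear with Xavier-uniform weight init and a
# zero-initialized bias, e.g. for a decoder layer's feed-forward projections.
def _example_linear():
    fc1 = Linear(512, 2048)
    fc2 = Linear(2048, 512, bias=False)
    return fc1, fc2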
| 41.755656
| 101
| 0.624675
|
8f18368740403a944ed29ef774492d941296a7c7
| 18,093
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_cancel_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_os_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_get_latest_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class VirtualMachineScaleSetRollingUpgradesOperations(object):
"""VirtualMachineScaleSetRollingUpgradesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _cancel_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
request = build_cancel_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._cancel_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
@distributed_trace
def begin_cancel( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
def _start_os_upgrade_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
request = build_start_os_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_os_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
@distributed_trace
def begin_start_os_upgrade( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
@distributed_trace
def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_04_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-04-01") # type: str
request = build_get_latest_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_latest.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest"} # type: ignore
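# Illustrative usage sketch (not part of the generated SDK file): these
# operations are normally reached through a ComputeManagementClient rather
# than by instantiating the class above directly. The credential wiring and
# resource names below are placeholders/assumptions.
def _example_start_os_upgrade(subscription_id, resource_group, vmss_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
    # Kick off the rolling OS upgrade and wait for the long-running operation.
    poller = client.virtual_machine_scale_set_rolling_upgrades.begin_start_os_upgrade(
        resource_group_name=resource_group,
        vm_scale_set_name=vmss_name,
    )
    poller.wait()
    # Query the status of the most recent rolling upgrade.
    return client.virtual_machine_scale_set_rolling_upgrades.get_latest(
        resource_group, vmss_name,
    )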
| 43.702899
| 232
| 0.681092
|
15a4f6a789ac7fb86ea1e16c988246d371720460
| 13,735
|
py
|
Python
|
gneiss/util.py
|
biocore/gneiss
|
5d253d68ef14fa82e26b5f74118ff170f1990585
|
[
"BSD-3-Clause"
] | 48
|
2016-10-26T21:10:18.000Z
|
2022-03-12T03:25:04.000Z
|
gneiss/util.py
|
biocore/gneiss
|
5d253d68ef14fa82e26b5f74118ff170f1990585
|
[
"BSD-3-Clause"
] | 287
|
2016-06-30T00:33:43.000Z
|
2022-03-02T03:44:04.000Z
|
gneiss/util.py
|
biocore/gneiss
|
5d253d68ef14fa82e26b5f74118ff170f1990585
|
[
"BSD-3-Clause"
] | 27
|
2016-06-30T00:40:25.000Z
|
2021-11-09T14:13:36.000Z
|
"""
Utility functions (:mod:`gneiss.util`)
======================================
.. currentmodule:: gneiss.util
This module contains helper functions for aligning metadata tables,
contingency tables and trees.
Functions
---------
.. autosummary::
:toctree: generated/
match
match_tips
rename_internal_nodes
block_diagonal
band_diagonal
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import warnings
import numpy as np
from skbio.stats.composition import closure
import pandas as pd
from patsy import dmatrix
import biom
# Specifies which child is the numerator and which is the denominator
NUMERATOR = 1
DENOMINATOR = 0
def split_balance(balance, tree):
""" Splits a balance into its log ratio components.
Parameters
----------
balance : pd.Series
        A vector corresponding to a single balance. These values will be
        split into their numerator and denominator components.
Returns
-------
pd.DataFrame
Dataframe where the first column contains the numerator and the
second column contains the denominator of the balance.
Note
----
The balance must have a name associated with it.
"""
node = tree.find(balance.name)
if node.is_tip():
raise ValueError("%s is not a balance." % balance.name)
left = node.children[0]
right = node.children[1]
if left.is_tip():
L = 1
else:
L = len([n for n in left.tips()])
if right.is_tip():
R = 1
else:
R = len([n for n in right.tips()])
b = np.expand_dims(balance.values, axis=1)
# need to scale down by the number of children in subtrees
b = np.exp(b / (np.sqrt((L * R) / (L + R))))
o = np.ones((len(b), 1))
k = np.hstack((b, o))
p = closure(k)
return pd.DataFrame(p, columns=[left.name, right.name],
index=balance.index)
def match(table, metadata):
""" Matches samples between a contingency table and a metadata table.
Sorts samples in metadata and contingency table in the same order.
    If there are samples contained in the contingency table but not in the
    metadata table, or vice versa, only the intersection of samples in the
    contingency table and the metadata table will be returned.
Parameters
----------
table : pd.DataFrame or biom.Table
Contingency table where samples correspond to rows and
features correspond to columns.
metadata: pd.DataFrame
Metadata table where samples correspond to rows and
explanatory metadata variables correspond to columns.
Returns
-------
pd.DataFrame :
Filtered contingency table.
pd.DataFrame :
Filtered metadata table
Raises
------
ValueError:
Raised if duplicate sample ids are present in `table`.
ValueError:
Raised if duplicate sample ids are present in `metadata`.
ValueError:
Raised if `table` and `metadata` have incompatible sizes.
"""
if isinstance(table, pd.DataFrame):
return _dense_match(table, metadata)
elif isinstance(table, biom.Table):
return _sparse_match(table, metadata)
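# Illustrative sketch (not part of gneiss): matching a toy contingency table
# against a metadata table. Sample 's3' is dropped because it appears in only
# one of the two tables, and both outputs come back sorted by sample id.
def _example_match():
    table = pd.DataFrame([[1, 2], [3, 4]],
                         index=['s1', 's2'], columns=['o1', 'o2'])
    metadata = pd.DataFrame({'ph': [6.8, 7.1, 7.4]},
                            index=['s2', 's1', 's3'])
    subtable, submetadata = match(table, metadata)
    # subtable.index and submetadata.index are both ['s1', 's2']
    return subtable, submetadata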
def _dense_match(table, metadata):
""" Match on dense pandas tables"""
subtableids = set(table.index)
submetadataids = set(metadata.index)
if len(subtableids) != len(table.index):
raise ValueError("`table` has duplicate sample ids.")
if len(submetadataids) != len(metadata.index):
raise ValueError("`metadata` has duplicate sample ids.")
idx = list(subtableids & submetadataids)
# make sure that the list is always the same to remove
# unwanted random behavior
idx.sort()
if len(idx) == 0:
raise ValueError(("No more samples left. Check to make sure that "
"the sample names between `metadata` and `table` "
"are consistent"))
subtable = table.loc[idx]
submetadata = metadata.loc[idx]
return subtable, submetadata
def _sparse_match(table, metadata):
""" Match on sparse biom tables. """
subtableids = set(table.ids(axis='sample'))
submetadataids = set(metadata.index)
if len(submetadataids) != len(metadata.index):
raise ValueError("`metadata` has duplicate sample ids.")
idx = list(subtableids & submetadataids)
# make sure that the list is always the same to remove
# unwanted random behavior
idx.sort()
if len(idx) == 0:
raise ValueError(("No more samples left. Check to make sure that "
"the sample names between `metadata` and `table` "
"are consistent"))
out_metadata = metadata.loc[idx]
def metadata_filter(val, id_, md):
return id_ in out_metadata.index
out_table = table.filter(metadata_filter, axis='sample', inplace=False)
def sort_f(xs):
return [xs[out_metadata.index.get_loc(x)] for x in xs]
out_table = out_table.sort(sort_f=sort_f, axis='sample')
out_metadata = out_metadata.loc[out_table.ids()]
return out_table, out_metadata
def match_tips(table, tree):
""" Returns the contingency table and tree with matched tips.
Sorts the columns of the contingency table to match the tips in
the tree. The ordering of the tips is in post-traversal order.
If the tree is multi-furcating, then the tree is reduced to a
bifurcating tree by randomly inserting internal nodes.
    Only the intersection of features in the contingency table and the
    tips in the tree will be returned.
Parameters
----------
table : pd.DataFrame or biom.Table
Contingency table where samples correspond to rows and
features correspond to columns.
tree : skbio.TreeNode
Tree object where the leafs correspond to the features.
Returns
-------
pd.DataFrame :
Subset of the original contingency table with the common features.
skbio.TreeNode :
Sub-tree with the common features.
Raises
------
ValueError:
Raised if `table` and `tree` have incompatible sizes.
See Also
--------
skbio.TreeNode.bifurcate
skbio.TreeNode.tips
"""
if isinstance(table, pd.DataFrame):
return _dense_match_tips(table, tree)
elif isinstance(table, biom.Table):
return _sparse_match_tips(table, tree)
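# Illustrative sketch (not part of gneiss): matching a table against a small
# skbio tree. Feature 'd' is dropped because it is not a tip in the tree, and
# the remaining columns are reordered to follow the tips of the sheared tree.
def _example_match_tips():
    from io import StringIO
    from skbio import TreeNode
    tree = TreeNode.read(StringIO('((a,b)y1,c)y0;'))
    table = pd.DataFrame([[1, 2, 3, 4]],
                         index=['s1'], columns=['c', 'a', 'b', 'd'])
    _table, _tree = match_tips(table, tree)
    # _table.columns now follows the tip ordering of _tree (['a', 'b', 'c'])
    return _table, _tree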
def _sparse_match_tips(table, tree):
""" Match on sparse biom tables. """
tips = [x.name for x in tree.tips()]
common_tips = set(tips) & set(table.ids(axis='observation'))
_tree = tree.shear(names=list(common_tips))
def filter_uncommon(val, id_, md):
return id_ in common_tips
_table = table.filter(filter_uncommon, axis='observation', inplace=False)
_tree.bifurcate()
_tree.prune()
def sort_f(x):
return [n.name for n in _tree.tips()]
_table = _table.sort(sort_f=sort_f, axis='observation')
return _table, _tree
def _dense_match_tips(table, tree):
""" Match on dense pandas dataframes. """
tips = [x.name for x in tree.tips()]
common_tips = list(set(tips) & set(table.columns))
_table = table.loc[:, common_tips]
_tree = tree.shear(names=common_tips)
_tree.bifurcate()
_tree.prune()
sorted_features = [n.name for n in _tree.tips()]
_table = _table.reindex(sorted_features, axis=1)
return _table, _tree
def design_formula(train_metadata, test_metadata, formula):
""" Generate and align two design matrices.
Parameters
----------
train_metadata : pd.DataFrame
Training metadata
test_metadata : pd.DataFrame
Testing metadata
formula : str
Statistical formula specifying design matrix
Return
------
train_design : pd.DataFrame
Train design matrix
test_design : pd.DataFrame
Test design matrix
"""
train_design = dmatrix(formula, train_metadata,
return_type='dataframe')
test_design = dmatrix(formula, test_metadata,
return_type='dataframe')
# pad extra columns with zeros, so that we can still make predictions
extra_columns = list(set(train_design.columns) - set(test_design.columns))
df = pd.DataFrame({C: np.zeros(test_design.shape[0])
for C in extra_columns},
index=test_design.index)
test_design = pd.concat((test_design, df), axis=1)
test_design = test_design.reindex(columns=train_design.columns)
return train_design, test_design
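# Illustrative sketch (not part of gneiss): aligning train/test design matrices
# built from a patsy formula. The categorical level 'c' never occurs in the
# test metadata, so its column is padded with zeros in the test design matrix.
def _example_design_formula():
    train_metadata = pd.DataFrame({'group': ['a', 'b', 'c'],
                                   'depth': [1.0, 2.0, 3.0]},
                                  index=['s1', 's2', 's3'])
    test_metadata = pd.DataFrame({'group': ['a', 'b'],
                                  'depth': [1.5, 2.5]},
                                 index=['s4', 's5'])
    train_design, test_design = design_formula(train_metadata, test_metadata,
                                               'C(group) + depth')
    # train_design.columns and test_design.columns are now identical
    return train_design, test_design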
def check_internal_nodes(tree):
for n in tree.levelorder():
if n.name is None:
raise ValueError('TreeNode has no name.')
def rename_internal_nodes(tree, names=None, inplace=False):
""" Names the internal according to level ordering.
The tree will be traversed in level order (i.e. top-down, left to right).
If `names` is not specified, the node with the smallest label (y0)
will be located at the root of the tree, and the node with the largest
    label will be located at the bottom right corner of the tree.
Parameters
----------
tree : skbio.TreeNode
Tree object where the leafs correspond to the features.
names : list, optional
List of labels to rename the tip names. It is assumed that the
names are listed in level ordering, and the length of the list
is at least as long as the number of internal nodes.
inplace : bool, optional
Specifies if the operation should be done on the original tree or not.
Returns
-------
skbio.TreeNode
Tree with renamed internal nodes.
Raises
------
ValueError:
Raised if `tree` and `name` have incompatible sizes.
"""
if inplace:
_tree = tree
else:
_tree = tree.copy()
non_tips = [n for n in _tree.levelorder() if not n.is_tip()]
if names is not None and len(non_tips) != len(names):
raise ValueError("`_tree` and `names` have incompatible sizes, "
"`_tree` has %d tips, `names` has %d elements." %
(len(non_tips), len(names)))
i = 0
for n in _tree.levelorder():
if not n.is_tip():
if names is None:
label = 'y%i' % i
else:
label = names[i]
if n.name is not None and label == n.name:
warnings.warn("Warning. Internal node (%s) has been replaced "
"with (%s)" % (n.name, label), UserWarning)
n.name = label
i += 1
return _tree
def _type_cast_to_float(df):
""" Attempt to cast all of the values in dataframe to float.
This will try to type cast all of the series within the
dataframe into floats. If a column cannot be type casted,
it will be kept as is.
Parameters
----------
df : pd.DataFrame
Returns
-------
pd.DataFrame
"""
# TODO: Will need to improve this, as this is a very hacky solution.
for c in df.columns:
s = df[c]
try:
df[c] = s.astype(np.float64)
except Exception:
continue
return df
def block_diagonal(ncols, nrows, nblocks):
""" Generate block diagonal with uniformly distributed values within
blocks.
Parameters
----------
    ncols : int
Number of columns
nrows : int
Number of rows
nblocks : int
Number of blocks
Returns
-------
np.array
Table with a block diagonal where the rows represent samples
and the columns represent features. The values within the blocks
are uniformly distributed between 0 and 1.
Note
----
The number of blocks specified by `nblocks` needs to be greater than 1.
"""
if nblocks <= 1:
raise ValueError('`nblocks` needs to be greater than 1.')
mat = np.zeros((nrows, ncols))
block_cols = ncols // nblocks
block_rows = nrows // nblocks
for b in range(nblocks - 1):
B = np.random.uniform(size=(block_rows, block_cols))
lower_row = block_rows * b
upper_row = min(block_rows * (b + 1), nrows)
lower_col = block_cols * b
upper_col = min(block_cols * (b + 1), ncols)
mat[lower_row:upper_row, lower_col:upper_col] = B
# Make last block fill in the remainder
B = np.random.uniform(size=(nrows - upper_row, ncols - upper_col))
mat[upper_row:, upper_col:] = B
return mat
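# Illustrative sketch (not part of gneiss): a 6 x 6 block diagonal table with
# three 2 x 2 blocks of uniform(0, 1) values; entries outside the blocks are 0.
def _example_block_diagonal():
    np.random.seed(0)  # only for a reproducible illustration
    mat = block_diagonal(ncols=6, nrows=6, nblocks=3)
    # mat.shape == (6, 6) and, e.g., mat[0, 2:] is all zeros
    return mat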
def _shift(L, n):
""" Creates the band table by iteratively shifting a single vector.
Parameters
----------
L : array
Vector to be shifted
n : int
Max number of shifts
"""
sl = L
table = [L]
if n == 0:
return table
else:
for k in range(n):
sl = np.roll(sl, 1)
table.append(sl)
return table
def band_diagonal(n, b):
""" Creates band table with dense diagonal, sparse corners.
Parameters
----------
n : int
Number of features
b : int
Length of band
Returns
-------
np.array
Table with a dense band diagonal where the rows represent samples
and the columns represent features. The values within the
diagonal are marked with a constant `1/b`.
"""
p = n - b + 1 # samples
y = [1. / b] * b + [0] * (n - b)
table = _shift(y, p - 1)
table = np.column_stack(table)
return table
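# Illustrative sketch (not part of gneiss): with n = 5 features and a band of
# length b = 2, the table is built from p = n - b + 1 = 4 shifted copies of
# the vector [0.5, 0.5, 0, 0, 0], so every band entry equals 1/b.
def _example_band_diagonal():
    table = band_diagonal(5, 2)
    return table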
| 29.411135
| 79
| 0.62024
|
7ba60b921e095c2bbc587414fc697746618645fb
| 1,378
|
py
|
Python
|
chapter1/3n+1/3n+1.py
|
OrghoN/programmingChallengesUVa
|
0704a37609714ce8fbce11c255fa1c208115689d
|
[
"MIT"
] | null | null | null |
chapter1/3n+1/3n+1.py
|
OrghoN/programmingChallengesUVa
|
0704a37609714ce8fbce11c255fa1c208115689d
|
[
"MIT"
] | null | null | null |
chapter1/3n+1/3n+1.py
|
OrghoN/programmingChallengesUVa
|
0704a37609714ce8fbce11c255fa1c208115689d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
global cache
cache = [0] * 1000000
def cyclesFastCache(number):
cycles = 1
numbers = []
while number > 1:
if cache[number] != 0:
for index, num in enumerate(numbers):
cache[num] = cache[number] + len(numbers) - index + 1
return cache[number]
numbers.append(number)
cycles+=1
if number % 2 ==0:
number = number //2
else:
number = (number*3) + 1
numbers.append(1)
for index, num in enumerate(numbers):
cache[num] = cycles - index
return cycles
def cycles(number):
cycles = 1
while number >1:
cycles+=1
if number % 2 ==0:
number = number //2
else:
number = ((number*3) + 1)//2
cycles+=1
return cycles
def cyclesRange(num1, num2):
cycleLengths = []
if num1 > num2:
start = num2
stop = num1
elif num2>num1:
start = num1
stop = num2
else:
return cycles(num1)
    for i in range(start, stop + 1):  # the range is inclusive of both endpoints
cycleLengths.append(cycles(i))
# return max(cycleLengths)
return (cycleLengths)
# for i in sys.stdin:
# numbers = i.split()
# numbers = list(map(int, numbers))
# print(numbers[0],numbers[1], cyclesRange(numbers[0], numbers[1]))
print(cyclesRange(1, 999999))
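# Illustrative check (not part of the original submission): the classic Collatz
# chain 22 -> 11 -> 34 -> ... -> 1 has a cycle length of 16, and the maximum
# cycle length for the numbers 1 through 10 is 20.
def _example_cycle_lengths():
    assert cycles(22) == 16
    assert max(cyclesRange(1, 10)) == 20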
| 22.590164
| 71
| 0.546444
|
f95b0720021f0b313bf0128451f9d01d43ebe5f6
| 6,665
|
py
|
Python
|
notify_sdk/model/health_assessment/health_assessment_rule_version_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
notify_sdk/model/health_assessment/health_assessment_rule_version_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
notify_sdk/model/health_assessment/health_assessment_rule_version_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: health_assessment_rule_version.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from notify_sdk.model.health_assessment import health_assessment_event_score_config_item_pb2 as notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2
from notify_sdk.model.health_assessment import health_assessment_related_resource_score_config_item_pb2 as notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='health_assessment_rule_version.proto',
package='health_assessment',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessment'),
serialized_pb=_b('\n$health_assessment_rule_version.proto\x12\x11health_assessment\x1aRnotify_sdk/model/health_assessment/health_assessment_event_score_config_item.proto\x1a]notify_sdk/model/health_assessment/health_assessment_related_resource_score_config_item.proto\"\xc6\x02\n\x1bHealthAssessmentRuleVersion\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06ruleId\x18\x02 \x01(\t\x12\x10\n\x08objectId\x18\x03 \x01(\t\x12Q\n\x10\x65ventScoreConfig\x18\x04 \x03(\x0b\x32\x37.health_assessment.HealthAssessmentEventScoreConfigItem\x12\x65\n\x1arelatedResourceScoreConfig\x18\x05 \x03(\x0b\x32\x41.health_assessment.HealthAssessmentRelatedResourceScoreConfigItem\x12\x18\n\x10\x65ventScoreWeight\x18\x06 \x01(\x05\x12\x1d\n\x15relatedResourceWeight\x18\x07 \x01(\x05\x42MZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessmentb\x06proto3')
,
dependencies=[notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2.DESCRIPTOR,notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2.DESCRIPTOR,])
_HEALTHASSESSMENTRULEVERSION = _descriptor.Descriptor(
name='HealthAssessmentRuleVersion',
full_name='health_assessment.HealthAssessmentRuleVersion',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='health_assessment.HealthAssessmentRuleVersion.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ruleId', full_name='health_assessment.HealthAssessmentRuleVersion.ruleId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='health_assessment.HealthAssessmentRuleVersion.objectId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreConfig', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceScoreConfig', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceScoreConfig', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreWeight', full_name='health_assessment.HealthAssessmentRuleVersion.eventScoreWeight', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceWeight', full_name='health_assessment.HealthAssessmentRuleVersion.relatedResourceWeight', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=565,
)
_HEALTHASSESSMENTRULEVERSION.fields_by_name['eventScoreConfig'].message_type = notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2._HEALTHASSESSMENTEVENTSCORECONFIGITEM
_HEALTHASSESSMENTRULEVERSION.fields_by_name['relatedResourceScoreConfig'].message_type = notify__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__score__config__item__pb2._HEALTHASSESSMENTRELATEDRESOURCESCORECONFIGITEM
DESCRIPTOR.message_types_by_name['HealthAssessmentRuleVersion'] = _HEALTHASSESSMENTRULEVERSION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HealthAssessmentRuleVersion = _reflection.GeneratedProtocolMessageType('HealthAssessmentRuleVersion', (_message.Message,), {
'DESCRIPTOR' : _HEALTHASSESSMENTRULEVERSION,
'__module__' : 'health_assessment_rule_version_pb2'
# @@protoc_insertion_point(class_scope:health_assessment.HealthAssessmentRuleVersion)
})
_sym_db.RegisterMessage(HealthAssessmentRuleVersion)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
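# Illustrative usage sketch (not part of the generated module): building and
# round-tripping a HealthAssessmentRuleVersion message. Field values are
# placeholders.
def _example_build_rule_version():
    msg = HealthAssessmentRuleVersion(
        instanceId='instance-001',
        ruleId='rule-001',
        objectId='HOST',
        eventScoreWeight=60,
        relatedResourceWeight=40,
    )
    data = msg.SerializeToString()
    return HealthAssessmentRuleVersion.FromString(data)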
| 56.008403
| 865
| 0.812153
|
0377295e3bc51da268998e59e051fb496fd099ad
| 10,942
|
py
|
Python
|
storage/team17/Estructura_ArbolB.py
|
itsmjoe/tytus
|
3b0341cc854d67979b766c5c8b06ed172ce0c913
|
[
"MIT"
] | null | null | null |
storage/team17/Estructura_ArbolB.py
|
itsmjoe/tytus
|
3b0341cc854d67979b766c5c8b06ed172ce0c913
|
[
"MIT"
] | null | null | null |
storage/team17/Estructura_ArbolB.py
|
itsmjoe/tytus
|
3b0341cc854d67979b766c5c8b06ed172ce0c913
|
[
"MIT"
] | null | null | null |
# File: B Tree Structure
# License: Released under MIT License
# Notice: Copyright (c) 2020 TytusDB Team
import os
# ------------- NODE CREATION -------------#
class NodoB:
    def __init__(self, grado): # Initialize the node (grado is received but not stored)
self.llaves = []
self.padre = None
self.hijos = []
def insertar(self, valor):
if valor not in self.llaves:
self.llaves.append(valor)
self.ordenar_llaves()
return len(self.llaves)
def comparar(self, valor):
i = 0
tamano = len(self.llaves)
        if self.hijos == [] or self.buscar_llave(valor, self.llaves): # If the children list is empty or the value is already in the key list
return -1
while(i < tamano):
if str(valor).isdigit():
if str(self.llaves[i][0]).isdigit():
if int(valor) < int(self.llaves[i][0]):
return i
i += 1
else:
if int(valor) < self.toASCII(self.llaves[i][0]):
return i
i += 1
else:
if str(self.llaves[i][0]).isdigit():
if self.toASCII(valor) < int(self.llaves[i][0]):
return i
i += 1
else:
if valor < self.llaves[i][0]:
return i
i += 1
        return i # Return the position
def posicionNodo(self):
try:
return self.padre.hijos.index(self)
except:
pass
def buscar_llave(self, llave, llaves):
for i in llaves:
if i[0] == llave:
return True
def ordenar_llaves(self):
for i in range(len(self.llaves)-1):
for j in range(i+1,len(self.llaves)):
if str(self.llaves[i][0]).isdigit():
if str(self.llaves[j][0]).isdigit():
if int(self.llaves[i][0]) > int(self.llaves[j][0]):
tmp = self.llaves[i]
self.llaves[i] = self.llaves[j]
self.llaves[j] = tmp
else:
if int(self.llaves[i][0]) > self.toASCII(self.llaves[j][0]):
tmp = self.llaves[i]
self.llaves[i] = self.llaves[j]
self.llaves[j] = tmp
else:
if str(self.llaves[j][0]).isdigit():
if self.toASCII(self.llaves[i][0]) > int(self.llaves[j][0]):
tmp = self.llaves[i]
self.llaves[i] = self.llaves[j]
self.llaves[j] = tmp
else:
if self.llaves[i][0] > self.llaves[j][0]:
tmp = self.llaves[i]
self.llaves[i] = self.llaves[j]
self.llaves[j] = tmp
def toASCII(self, cadena):
result = 0
for char in cadena:
result += ord(char)
return result
# ---------------- BUILDING THE TREE -----------------#
class arbolB:
def __init__(self, grado):
self.root = NodoB(grado)
self.grado = grado
self.enmedio = int((self.grado-1)/2)
def buscar(self, valor):
return self._buscar(valor)
def _buscar(self, valor, tmp = None):
if not tmp:
tmp2 = self.root
else:
tmp2 = tmp
result = tmp2.comparar(valor)
if result == -1:
return tmp2
else:
return self._buscar(valor, tmp2.hijos[result])
def separar_nodo(self, tmp):
n1 = NodoB(self.grado)
n2 = NodoB(self.grado)
n3 = NodoB(self.grado)
return self._separar_nodo(tmp,n1,n2,n3)
    def _separar_nodo(self, tmp, nodo_p, nodo_i, nodo_d): # Three new nodes are created to perform the node split
if len(tmp.llaves)+ 1 <= self.grado:
return 0
padre = tmp.padre
enmedio = self.enmedio
center = tmp.llaves[enmedio]
for i in range(0,enmedio):
            nodo_i.llaves.append(tmp.llaves[i]) # Fill the left node
for i in range(enmedio+1, len(tmp.llaves)):
            nodo_d.llaves.append(tmp.llaves[i]) # Fill the right node
if tmp.hijos != []:
for i in range(enmedio+1):
                nodo_i.hijos.append(tmp.hijos[i]) # Assign the left children
for i in range(enmedio+1, len(tmp.hijos)):
                nodo_d.hijos.append(tmp.hijos[i]) # Assign the right children
i = 0
while(i < enmedio+1):
                tmp.hijos[i].padre = nodo_i # Set the left node as the children's parent
i += 1
while(i < self.grado +1):
                tmp.hijos[i].padre = nodo_d # Set the right node as the children's parent
i += 1
if not padre:
padre = nodo_p
padre.llaves.append(center)
padre.hijos.insert(0, nodo_i)
padre.hijos.insert(1, nodo_d)
nodo_i.padre = padre
nodo_d.padre = padre
self.root = padre
return 0
nodo_i.padre = padre
nodo_d.padre = padre
padre.insertar(center)
index = padre.hijos.index(tmp)
padre.hijos.pop(index)
padre.hijos.insert(index, nodo_i)
padre.hijos.insert(index + 1, nodo_d)
return self.separar_nodo(padre)
def insertar(self, *valores):
for valor in valores:
tmp = self.buscar(valor[0])
self._insertar(valor, tmp)
def _insertar(self, valor, tmp):
length = tmp.insertar(valor)
if length == self.grado:
self.separar_nodo(tmp)
    # UTILITIES
    # RETURNS A LIST WITH THE DATA OF ALL INSERTED NODES
def registros(self):
global l
l = list()
return self._registros(self.root)
def _registros(self, tmp):
if tmp:
for i in tmp.llaves:
l.append(i[1])
for j in tmp.hijos:
self._registros(j)
return l
    # RETURNS A LIST WITH THE PRIMARY KEY OF ALL INSERTED NODES
def Keys(self):
global l
l = list()
return self._Keys(self.root)
def _Keys(self, tmp):
if tmp:
for i in tmp.llaves:
l.append(i[0])
for j in tmp.hijos:
self._Keys(j)
return l
    # ADDS ONE MORE COLUMN TO ALL NODES
def agregarValor(self, valor):
self.root = self._agregarValor(self.root, valor)
def _agregarValor(self, tmp, valor):
if tmp:
for i in tmp.llaves:
i[1].append(valor)
for j in tmp.hijos:
self._agregarValor(j,valor)
return tmp
def update(self, valor, llave):
self._update(self.root, valor, llave)
def _update(self, tmp, valor, llave):
if tmp:
for i in tmp.llaves:
if str(i[0]) == str(llave):
i[1] = valor
i[0] = llave
for j in tmp.hijos:
self._update(j, valor, llave)
return tmp
    # REMOVES A COLUMN FROM ALL NODES
def eliminarValor(self, valor):
self.root = self._eliminarValor(self.root, valor)
def _eliminarValor(self, tmp, valor):
if tmp:
for i in tmp.llaves:
i[1].pop(valor)
for j in tmp.hijos:
self._eliminarValor(j,valor)
return tmp
    # DELETES A NODE FROM THE TREE
def _del(self, llave):
tmp = self.buscar(llave)
posicion = self.posicion(tmp, llave)
tmp.llaves.pop(posicion)
self.estructurar(tmp, posicion)
    # RETURNS THE POSITION OF A TUPLE WITHIN A NODE
def posicion(self, nodo, llave):
for i in range(len(nodo.llaves)):
if str(nodo.llaves[i][0]) == str(llave):
return i
    # RETURNS THE VALUE AT A GIVEN POSITION
def valor_buscar(self, nodo, llave):
for i in range(len(nodo.llaves)):
if str(nodo.llaves[i][0]) == str(llave):
return nodo.llaves[i]
    # RESTRUCTURES THE TREE AGAIN
def estructurar(self, tmp, posicion):
if tmp.hijos == []:
return self.unir(tmp, tmp.posicionNodo())
siguiente = tmp.hijos[posicion + 1]
tmp.insertar(siguiente.llaves.pop(0))
return self.estructurar(siguiente, 0)
    # MERGES THE CHILDREN INTO THE PARENT TO RESTORE THE IDEAL SHAPE
def unir(self, tmp, pos):
if not tmp.padre:
return 0
if len(tmp.llaves) >= self.enmedio:
return 0
padre = tmp.padre
if pos:
pre = padre.llaves[pos-1]
tmp2 = padre.hijos[pos-1]
else:
pre = None
tmp2 = padre.hijos[1]
if len(tmp2.llaves) > self.enmedio:
return self.rotar(tmp, tmp2, padre, pre)
if not pre:
tmp.insertar(padre.llaves.pop(0))
tmp2.hijos = tmp.hijos + tmp2.hijos
else:
tmp.insertar(padre.llaves.pop(pos-1))
tmp2.hijos = tmp.hijos + tmp2.hijos
tmp2.llaves += tmp.llaves
tmp2.ordenar_llaves()
padre.hijos.remove(tmp)
if len(padre.llaves) == 0 and not padre.padre:
self.root = tmp2
return 0
if len(padre.llaves) < self.enmedio:
return self.unir(padre, padre.posicionNodo())
def rotar(self, nodo, tmp, padre, pre):
if not pre:
            nodo.insertar(padre.llaves.pop(0)) # Left rotation
padre.insertar(tmp.llaves.pop(0))
return 0
        pos = nodo.posicionNodo() # Right rotation
nodo.insertar(padre.llaves.pop(pos-1))
padre.insertar(tmp.llaves.pop(-1))
return 0
def graficar(self):
f = open('archivo.dot', 'w',encoding='utf-8')
f.write("digraph dibujo{\n")
f.write('graph [ordering="out"];')
f.write('rankdir=TB;\n')
global t
t = 0
f = self._graficar(f,self.root)
f.write('}')
f.close()
os.system('dot -Tpng archivo.dot -o salida.png')
def _graficar(self, f, temp):
global t
if temp:
nombre = "Nodo"+str(t)
t+=1
f.write(nombre+' [ label = "'+", ".join(str(x[0]) for x in temp.llaves)+'",shape = box];\n')
for c in temp.hijos:
nombre2 = "Nodo"+str(t)
f = self._graficar(f, c)
f.write(nombre+'->'+ nombre2+';\n')
t+=1
return f
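# Illustrative usage sketch (not part of the original file): every value
# inserted into arbolB is a [primary_key, record] pair, mirroring how
# comparar() reads valor[0] and registros() reads valor[1].
def _example_arbol_b():
    arbol = arbolB(3)
    arbol.insertar([10, ['10', 'first']],
                   [20, ['20', 'second']],
                   [5, ['5', 'third']],
                   [15, ['15', 'fourth']])
    # Keys() lists the primary keys in level order; registros() the records.
    return arbol.Keys(), arbol.registros()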
| 31.715942
| 151
| 0.497258
|
1b09f75771ea9be531db48aab3c804d21b7db7eb
| 11,293
|
py
|
Python
|
examples/ex15.py
|
mfem/PyMFEM
|
b7b7c3d3de1082eac1015e3a313cf513db06fd7b
|
[
"BSD-3-Clause"
] | 93
|
2017-03-01T16:45:33.000Z
|
2022-03-27T22:10:33.000Z
|
examples/ex15.py
|
GabrielJie/PyMFEM
|
fa654447ac6819c5aa0341397b91a299f4ce5492
|
[
"BSD-3-Clause"
] | 64
|
2017-03-15T21:47:31.000Z
|
2022-03-31T23:59:00.000Z
|
examples/ex15.py
|
GabrielJie/PyMFEM
|
fa654447ac6819c5aa0341397b91a299f4ce5492
|
[
"BSD-3-Clause"
] | 32
|
2017-03-02T22:13:38.000Z
|
2022-03-26T13:09:31.000Z
|
'''
MFEM example 15
How to run:
python <arguments>
Example of arguments:
ex15.py
ex15.py -o 1 -y 0.4
ex15.py -o 4 -y 0.1
ex15.py -n 5
ex15.py -p 1 -n 3
Other meshes:
ex15.py -m square-disc-nurbs.mesh
ex15.py -m disc-nurbs.mesh
ex15.py -m fichera.mesh -tf 0.3
ex15.py -m ball-nurbs.mesh -tf 0.3
ex15.py -m mobius-strip.mesh
ex15.py -m amr-quad.mesh
Conforming meshes (no derefinement):
ex15.py -m square-disc.mesh
ex15.py -m escher.mesh -r 2 -tf 0.3
'''
import sys
from mfem.common.arg_parser import ArgParser
from os.path import expanduser, join, dirname
import numpy as np
from numpy import cos, sin, pi, exp, sqrt, arctan
import mfem.ser as mfem
from mfem.ser import intArray
parser = ArgParser(description='Ex15')
parser.add_argument('-m', '--mesh',
default='star-hilbert.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument("-p", "--problem",
action='store', default=0, type=int,
help="Problem setup to use: 0 = spherical front, 1 = ball.")
parser.add_argument("-n", "--nfeatures",
action='store', default=1, type=int,
help="Number of solution features (fronts/balls).")
parser.add_argument('-o', '--order',
action='store', default=2, type=int,
help="Finite element order (polynomial degree)")
parser.add_argument("-e", "--max-err",
action='store', default=5e-3, type=float,
help="Maximum element error")
parser.add_argument("-y", "--hysteresis",
action='store', default=0.15, type=float,
help="Derefinement safety coefficient.")
parser.add_argument('-r', '--ref-levels',
action='store', default=0, type=int,
help="Number of inital uniform refinement")
parser.add_argument("-l", "--nc-limit",
action='store', default=3, type=int,
help="Maximum level of hanging nodes.")
parser.add_argument('-tf', '--t-final',
action='store', default=1.0, type=float,
help="Final time; start time is 0.")
parser.add_argument('-vis', '--visualization',
action='store_true', default=True,
help='Enable GLVis visualization')
args = parser.parse_args()
ref_levels = args.ref_levels
problem = args.problem
nfeatures = args.nfeatures
order = args.order
max_elem_error = args.max_err
hysteresis = args.hysteresis
nc_limit = args.nc_limit
t_final = args.t_final
visualization = args.visualization
parser.print_options(args)
alpha = 0.02
def front(x, y, z, t, dim):
r = sqrt(x**2 + y**2 + z**2)
return exp(-0.5 * ((r - t)/alpha) ** 2)
def ball(x, y, z, t, dim):
r = sqrt(x**2 + y**2 + z**2)
    return -arctan(2.*(r - t)/alpha)
def front_laplace(x, y, z, t, dim):
x2 = x**2
y2 = y**2
z2 = z**2
t2 = t**2
a2 = alpha**2
a4 = a2**2
r = sqrt(x2 + y2 + z2)
ret = (-exp(- 0.5 * ((r - t)/alpha)**2) / a4 *
(-2.*t*(x2 + y2 + z2 - (dim-1)*a2/2.)/r + x2 + y2 + z2 + t2 - dim*a2))
return ret
def ball_laplace(x, y, z, t, dim):
x2 = x**2
y2 = y**2
z2 = z**2
t2 = t**2
a2 = alpha**2
a4 = a2**2
r = sqrt(x2 + y2 + z2)
den = (-a2 - 4.*(x2 + y2 + z2 - 2*r*t) - t2)**2
if dim == 2:
return 2*alpha*(a2 + t2 - 4*x2 - 4*y2)/r/den
return 4*alpha*(a2 + t2 - 4.*r*t)/r/den
def composite_func(pt, t, f0, f1):
dim = len(pt)
x = pt[0]
y = pt[1]
z = 0.0
if dim == 3:
z = pt[2]
if (problem == 0):
if (nfeatures <= 1):
return f0(x, y, z, t, dim)
else:
i = np.arange(nfeatures)
x0 = 0.5*cos(2 * pi * i / nfeatures)
y0 = 0.5*sin(2 * pi * i / nfeatures)
return np.sum(f0(x - x0, y - y0, z, t, dim))
else:
i = np.arange(nfeatures)
x0 = 0.5*cos(2 * pi * i / nfeatures + pi*t)
y0 = 0.5*sin(2 * pi * i / nfeatures + pi*t)
return np.sum(f1(x - x0, y - y0, z, 0.25, dim))
class BdrCoefficient(mfem.PyCoefficientT):
def EvalValue(self, pt, t):
return composite_func(pt, t, front, ball)
class RhsCoefficient(mfem.PyCoefficientT):
def EvalValue(self, pt, t):
# print 'rhs', composite_func(pt, t, front_laplace, ball_laplace)
return composite_func(pt, t, front_laplace, ball_laplace)
def UpdateProblem(mesh, fespace, x, a, b):
# Update the space: recalculate the number of DOFs and construct a matrix
# that will adjust any GridFunctions to the new mesh state.
fespace.Update()
# Interpolate the solution on the new mesh by applying the transformation
# matrix computed in the finite element space. Multiple GridFunctions could
# be updated here.
x.Update()
# Free any transformation matrices to save memory.
fespace.UpdatesFinished()
# Inform the linear and bilinear forms that the space has changed.
a.Update()
b.Update()
# 2. Read the mesh from the given mesh file on all processors. We can handle
# triangular, quadrilateral, tetrahedral, hexahedral, surface and volume
# meshes with the same code
meshfile = expanduser(join(dirname(__file__), '..', 'data', args.mesh))
mesh = mfem.Mesh(meshfile, 1, 1)
dim = mesh.Dimension()
sdim = mesh.SpaceDimension()
# 3. Project a NURBS mesh to a piecewise-quadratic curved mesh. Make sure
# that the mesh is non-conforming if it has quads or hexes and refine it
if (mesh.NURBSext):
mesh.UniformRefinement()
if ref_levels > 0:
ref_levels = ref_levels-1
mesh.SetCurvature(2)
mesh.EnsureNCMesh()
for l in range(ref_levels):
mesh.UniformRefinement()
# 4. All boundary attributes will be used for essential (Dirichlet) BC
ess_bdr = intArray(mesh.bdr_attributes.Max())
ess_bdr.Assign(1)
# 5. Define a finite element space on the mesh. The polynomial order is one
# (linear) by default, but this can be changed on the command line.
fec = mfem.H1_FECollection(order, dim)
fespace = mfem.FiniteElementSpace(mesh, fec)
# 6. As in Example 1p, we set up bilinear and linear forms corresponding to
# the Laplace problem -\Delta u = 1. We don't assemble the discrete
# problem yet, this will be done in the inner loop.
a = mfem.BilinearForm(fespace)
b = mfem.LinearForm(fespace)
one = mfem.ConstantCoefficient(1.0)
bdr = BdrCoefficient()
rhs = RhsCoefficient()
integ = mfem.DiffusionIntegrator(one)
a.AddDomainIntegrator(integ)
b.AddDomainIntegrator(mfem.DomainLFIntegrator(rhs))
# 7. The solution vector x and the associated finite element grid function
# will be maintained over the AMR iterations.
x = mfem.GridFunction(fespace)
# 8. Connect to GLVis.
if visualization:
sout = mfem.socketstream("localhost", 19916)
sout.precision(8)
# 9. As in Example 6, we set up a Zienkiewicz-Zhu estimator that will be
# used to obtain element error indicators. The integrator needs to
# provide the method ComputeElementFlux. The smoothed flux space is a
# vector valued H1 space here.
flux_fespace = mfem.FiniteElementSpace(mesh, fec, sdim)
# own_flux_fes = False indicates that flux_fespace is passed by reference;
# this is the default behavior, but it is set explicitly here for the sake of
# explanation. If you want to pass ownership of the pointer, use own_flux_fes = True.
estimator = mfem.ZienkiewiczZhuEstimator(integ, x, flux_fespace,
own_flux_fes=False)
# 10. As in Example 6, we also need a refiner. This time the refinement
# strategy is based on a fixed threshold that is applied locally to each
# element. The global threshold is turned off by setting the total error
# fraction to zero. We also enforce a maximum refinement ratio between
# adjacent elements.
refiner = mfem.ThresholdRefiner(estimator)
refiner.SetTotalErrorFraction(0.0)
refiner.SetLocalErrorGoal(max_elem_error)
refiner.PreferConformingRefinement()
refiner.SetNCLimit(nc_limit)
# 11. A derefiner selects groups of elements that can be coarsened to form
# a larger element. A conservative enough threshold needs to be set to
# prevent derefining elements that would immediately be refined again.
derefiner = mfem.ThresholdDerefiner(estimator)
derefiner.SetThreshold(hysteresis * max_elem_error)
derefiner.SetNCLimit(nc_limit)
# 12. The outer time loop. In each iteration we update the right hand side,
# solve the problem on the current mesh, visualize the solution and
# refine the mesh as many times as necessary. Then we derefine any
# elements which have very small errors.
x.Assign(0.0)
time = 0.0
while (time < t_final + 1e-10):
print("Time " + str(time) + "\n\nRefinement:")
bdr.SetTime(time)
rhs.SetTime(time)
# Make sure errors will be recomputed in the following.
refiner.Reset()
derefiner.Reset()
# 13. The inner refinement loop. At the end we want to have the current
# time step resolved to the prescribed tolerance in each element.
ref_it = 0
while(True):
ref_it = ref_it + 1
print("Iteration: " + str(ref_it) + ", number of unknowns: "
+ str(fespace.GetVSize()))
# 14. Recompute the field on the current mesh: assemble the stiffness
# matrix and the right-hand side.
a.Assemble()
b.Assemble()
# 15. Project the exact solution to the essential boundary DOFs.
x.ProjectBdrCoefficient(bdr, ess_bdr)
# 16. Create and solve the linear system.
ess_tdof_list = intArray()
fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list)
A = mfem.OperatorPtr()
B = mfem.Vector()
X = mfem.Vector()
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B)
AA = mfem.OperatorHandle2SparseMatrix(A)
M = mfem.GSSmoother(AA)
mfem.PCG(AA, M, B, X, 0, 500, 1e-12, 0.0)
# 17. Extract the local solution on each processor.
a.RecoverFEMSolution(X, b, x)
# 18. Send the solution by socket to a GLVis server
if visualization:
sout.precision(8)
sout.send_solution(mesh, x)
# 19. Apply the refiner on the mesh. The refiner calls the error
# estimator to obtain element errors, then it selects elements to
# be refined and finally it modifies the mesh. The Stop() method
# determines if all elements satisfy the local threshold.
refiner.Apply(mesh)
if refiner.Stop():
break
# 20. Update the space and interpolate the solution.
UpdateProblem(mesh, fespace, x, a, b)
# 21. Use error estimates from the last inner iteration to check for
# possible derefinements. The derefiner works similarly as the
# refiner. The errors are not recomputed because the mesh did not
# change (and also the estimator was not Reset() at this time).
if derefiner.Apply(mesh):
print("Derefined elements.")
# 22. Update the space and interpolate the solution.
UpdateProblem(mesh, fespace, x, a, b)
a.Update()
b.Update()
time = time + 0.01
| 34.117825
| 81
| 0.635526
|
851dfa43090f410aea68d07d8df17b0273e3e853
| 2,782
|
py
|
Python
|
utils/cv_sentlen_results.py
|
duanbiao-nlp/DESC_MOL-DDIE
|
add555a7136e4be13f369320fd694a9aae11eb4f
|
[
"MIT"
] | 14
|
2020-11-03T13:53:23.000Z
|
2022-03-23T07:26:50.000Z
|
utils/cv_sentlen_results.py
|
duanbiao-nlp/DESC_MOL-DDIE
|
add555a7136e4be13f369320fd694a9aae11eb4f
|
[
"MIT"
] | 1
|
2022-02-09T09:56:39.000Z
|
2022-02-09T09:56:39.000Z
|
utils/cv_sentlen_results.py
|
duanbiao-nlp/DESC_MOL-DDIE
|
add555a7136e4be13f369320fd694a9aae11eb4f
|
[
"MIT"
] | 4
|
2020-11-04T04:31:05.000Z
|
2021-11-05T07:49:27.000Z
|
import sys
import os
import pickle
import statistics
import numpy as np
from metrics_ddie import ddie_compute_metrics
from scipy.special import softmax
from transformers import BertTokenizer
_, tsv_dir, cv_dir, k = sys.argv
k = int(k)
tokenizer = BertTokenizer.from_pretrained('/mnt/model/scibert_scivocab_uncased', do_lower_case=True)
sentence_lengths = [[] for i in range(k)]
for i in range(k):
with open(os.path.join(tsv_dir, str(i+1), 'tsv', 'dev.tsv'), 'r') as f:
lines = f.read().strip().split('\n')
length_list = []
for idx, line in enumerate(lines):
sent = line.split('\t')[0]
tokenized_sent = tokenizer.tokenize(sent)
sentence_length = len(tokenized_sent)
sentence_lengths[i].append(sentence_length)
interval = 20
N = 128 // interval + 1
indices = [[[] for i in range(N)] for j in range(k)]
for i in range(k):
for idx, length in enumerate(sentence_lengths[i]):
if length > 128:
div = 128 // interval
else:
div = length // interval
indices[i][div].append(idx)
cnt = {}
for x in sentence_lengths:
for xx in x:
key = xx // interval * interval
if key in cnt:
cnt[key] += 1
else:
cnt[key] = 1
print(cnt)
#for x in indices:
# for i,xx in enumerate(x):
# print(i,len(xx))
#paths = ['cls', 'cnn', 'rad0', 'rad1', 'rad2', 'desc']
paths = ['cnn', 'rad1', 'desc']
model_list = ('cnn', 'desc', 'rad1', 'ensemble')
# Ensemble
ensembled_fscores = []
sentence_fscores = [[] for i in range(N)]
for model_name in model_list:
print(model_name)
for i in range(k):
if model_name == 'ensemble':
rad_preds = np.load(os.path.join(cv_dir, str(i+1), 'rad1', 'preds.npy'))
desc_preds = np.load(os.path.join(cv_dir, str(i+1), 'desc', 'preds.npy'))
preds = rad_preds + desc_preds
labels = np.load(os.path.join(cv_dir, str(i+1), 'rad1', 'labels.npy'))
else:
            preds = np.load(os.path.join(cv_dir, str(i+1), model_name, 'preds.npy'))
            labels = np.load(os.path.join(cv_dir, str(i+1), model_name, 'labels.npy'))
for j in range(N):
if len(indices[i][j]) == 0:
continue
div_preds = preds[np.array(indices[i][j])]
div_labels = labels[np.array(indices[i][j])]
div_result = ddie_compute_metrics('ddie', np.argmax(div_preds, axis=1), div_labels, every_type=False)
div_fscore = div_result['microF']
#print(j, div_fscore)
sentence_fscores[j].append(div_fscore)
for x in sentence_fscores:
#print(statistics.mean(x), '\t', statistics.stdev(x))
print(sum(x) / len(x), '\t', statistics.stdev(x))
| 31.258427
| 113
| 0.595255
|
763f1d8b60e4c9db3f2df63fb793e8973bc0c24a
| 4,226
|
py
|
Python
|
pymanopt/manifolds/positive_definite.py
|
navigator8972/pymanopt
|
b9f53fa2d187c22ae75f65c71aeeb2bfa8b9c37f
|
[
"BSD-3-Clause"
] | null | null | null |
pymanopt/manifolds/positive_definite.py
|
navigator8972/pymanopt
|
b9f53fa2d187c22ae75f65c71aeeb2bfa8b9c37f
|
[
"BSD-3-Clause"
] | null | null | null |
pymanopt/manifolds/positive_definite.py
|
navigator8972/pymanopt
|
b9f53fa2d187c22ae75f65c71aeeb2bfa8b9c37f
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from pymanopt.manifolds.manifold import RiemannianSubmanifold
from pymanopt.tools.multi import (
multiexpm,
multilogm,
multiqr,
multisym,
multitransp,
)
class SymmetricPositiveDefinite(RiemannianSubmanifold):
"""Manifold of symmetric positive definite matrices.
Points on the manifold and tangent vectors are represented as arrays of
shape ``k x n x n`` if ``k > 1``, and ``n x n`` if ``k == 1``.
Args:
n: The size of matrices in the manifold, i.e., the number of rows and
columns of each element.
k: The number of elements in the product geometry.
Note:
The geometry is based on the discussion in chapter 6 of [Bha2007]_.
Also see [SH2015]_ for more details.
The second-order retraction is taken from [JVV2012]_.
"""
def __init__(self, n: int, *, k: int = 1):
self._n = n
self._k = k
if k == 1:
name = f"Manifold of positive definite {n}x{n} matrices"
else:
name = (
f"Product manifold of {k} positive definite {n}x{n} matrices"
)
dimension = int(k * n * (n + 1) / 2)
super().__init__(name, dimension)
@property
def typical_dist(self):
return np.sqrt(self.dim)
def dist(self, point_a, point_b):
c = np.linalg.cholesky(point_a)
c_inv = np.linalg.inv(c)
logm = multilogm(
c_inv @ point_b @ multitransp(c_inv),
positive_definite=True,
)
return np.linalg.norm(logm)
def inner_product(self, point, tangent_vector_a, tangent_vector_b):
p_inv_tv_a = np.linalg.solve(point, tangent_vector_a)
if tangent_vector_a is tangent_vector_b:
p_inv_tv_b = p_inv_tv_a
else:
p_inv_tv_b = np.linalg.solve(point, tangent_vector_b)
return np.tensordot(
p_inv_tv_a, multitransp(p_inv_tv_b), axes=tangent_vector_a.ndim
)
def projection(self, point, vector):
return multisym(vector)
to_tangent_space = projection
def euclidean_to_riemannian_gradient(self, point, euclidean_gradient):
return point @ multisym(euclidean_gradient) @ point
def euclidean_to_riemannian_hessian(
self, point, euclidean_gradient, euclidean_hessian, tangent_vector
):
return point @ multisym(euclidean_hessian) @ point + multisym(
tangent_vector @ multisym(euclidean_gradient) @ point
)
def norm(self, point, tangent_vector):
return np.sqrt(
self.inner_product(point, tangent_vector, tangent_vector)
)
def random_point(self):
# Generate eigenvalues between 1 and 2.
d = 1.0 + np.random.uniform(size=(self._k, self._n, 1))
# Generate an orthogonal matrix.
q, _ = multiqr(np.random.normal(size=(self._n, self._n)))
point = q @ (d * multitransp(q))
if self._k == 1:
return point[0]
return point
def random_tangent_vector(self, point):
k = self._k
n = self._n
if k == 1:
tangent_vector = multisym(np.random.normal(size=(n, n)))
else:
tangent_vector = multisym(np.random.normal(size=(k, n, n)))
return tangent_vector / self.norm(point, tangent_vector)
def transport(self, point_a, point_b, tangent_vector_a):
return tangent_vector_a
def exp(self, point, tangent_vector):
p_inv_tv = np.linalg.solve(point, tangent_vector)
return point @ multiexpm(p_inv_tv, symmetric=False)
def retraction(self, point, tangent_vector):
p_inv_tv = np.linalg.solve(point, tangent_vector)
return multisym(point + tangent_vector + tangent_vector @ p_inv_tv / 2)
def log(self, point_a, point_b):
c = np.linalg.cholesky(point_a)
c_inv = np.linalg.inv(c)
logm = multilogm(
c_inv @ point_b @ multitransp(c_inv),
positive_definite=True,
)
return c @ logm @ multitransp(c)
def zero_vector(self, point):
k = self._k
n = self._n
if k == 1:
return np.zeros((n, n))
return np.zeros((k, n, n))
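# Hedged usage sketch (added for illustration; not part of the upstream file):
# a minimal exercise of the class defined above, guarded so nothing runs on
# import. All calls used here are defined in this module.
if __name__ == "__main__":
    manifold = SymmetricPositiveDefinite(3)        # 3x3 SPD matrices, k == 1
    p = manifold.random_point()                    # random SPD point
    q = manifold.random_point()
    v = manifold.random_tangent_vector(p)          # unit-norm tangent vector at p
    print("dist(p, q):", manifold.dist(p, q))      # affine-invariant geodesic distance
    print("norm of v :", manifold.norm(p, v))      # ~1.0 by construction
    print("log/exp roundtrip error:",
          np.linalg.norm(manifold.exp(p, manifold.log(p, q)) - q))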
| 31.774436
| 79
| 0.614056
|
52873ec2168c8a0c7d713b3189edb3abbb3602c3
| 2,381
|
py
|
Python
|
drdown/appointments/urls.py
|
fga-gpp-mds/2018.1-Cris-Down
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 11
|
2018-03-11T01:21:43.000Z
|
2018-06-19T21:51:33.000Z
|
drdown/appointments/urls.py
|
fga-gpp-mds/2018.1-Grupo12
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 245
|
2018-03-13T19:07:14.000Z
|
2018-07-07T22:46:00.000Z
|
drdown/appointments/urls.py
|
fga-gpp-mds/2018.1-Grupo12
|
3423374360105b06ac2c57a320bf2ee8deaa08a3
|
[
"MIT"
] | 12
|
2018-08-24T13:26:04.000Z
|
2021-03-27T16:28:22.000Z
|
from django.conf.urls import url
from drdown.appointments.views.view_appointment import (
AppointmentListView,
AppointmentCreateView,
AppointmentUpdateView,
AppointmentMonthArchiveView,
AppointmentUpdateStatusView,
AppointmentFromRequestCreateView,
)
from drdown.appointments.views.view_request import (
RequestListView,
RequestCreateView,
RequestUpdateView,
RequestDeleteView,
RequestUpdateStatusView,
RequestAfterResultDeleteView,
load_doctors
)
app_name = 'appointments'
urlpatterns = [
url(
regex=r'^$',
view=AppointmentListView.as_view(),
name='list_appointments'
),
url(
regex=r'^request/new/$',
view=RequestCreateView.as_view(),
name='create_request'
),
url(
regex=r'^requests/$',
view=RequestListView.as_view(),
name='list_requests'
),
url(
regex=r'^request/update/(?P<request_pk>\d+)/$',
view=RequestUpdateView.as_view(),
name='update_request'
),
url(
regex=r'^request/delete/(?P<request_pk>\d+)/$',
view=RequestDeleteView.as_view(),
name='delete_request'
),
url(
regex=r'^request/result/delete/(?P<request_pk>\d+)/$',
view=RequestAfterResultDeleteView.as_view(),
name='delete_request_after_result'
),
url(
regex=r'^new/$',
view=AppointmentCreateView.as_view(),
name='create_appointment'
),
url(
regex=r'^new/(?P<request_pk>\d+)/$',
view=AppointmentFromRequestCreateView.as_view(),
name='create_from_request'
),
url(
regex=r'^(?P<year>\d{4})/(?P<month>\d+)/$',
view=AppointmentMonthArchiveView.as_view(month_format='%m'),
name="archive_month"
),
url(
regex=r'^update/(?P<appointment_pk>\d+)/$',
view=AppointmentUpdateView.as_view(),
name='update_appointment'
),
url(
regex=r'^cancel/(?P<appointment_pk>\d+)/$',
view=AppointmentUpdateStatusView.as_view(),
name='update_status_appointment'
),
url(
regex=r'^request/cancel/(?P<request_pk>\d+)/$',
view=RequestUpdateStatusView.as_view(),
name='update_status_request'
),
url(
regex=r'^ajax/load-doctors/$',
view=load_doctors,
name='ajax_load_doctors'
),
]
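# Hedged usage sketch (illustration only; not part of the upstream file):
# resolving one of the named routes above with Django's reverse(). The pk
# value and the path prefix are assumptions, not part of this app.
#
#   from django.urls import reverse
#   reverse('appointments:update_request', kwargs={'request_pk': 1})
#   # -> '.../request/update/1/' under wherever this app's URLs are included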
| 26.455556
| 68
| 0.616128
|
fd1687f7539b1658fe752cd075a60ec7e20e5b21
| 5,156
|
py
|
Python
|
fkie_node_manager/src/fkie_node_manager/editor/yaml_highlighter.py
|
JOiiNT-LAB/multimaster_fkie
|
8d77d75f57da16ad81c01069e6f616e8d7121ff7
|
[
"BSD-3-Clause"
] | 194
|
2015-01-21T12:46:42.000Z
|
2022-03-29T08:22:22.000Z
|
fkie_node_manager/src/fkie_node_manager/editor/yaml_highlighter.py
|
JOiiNT-LAB/multimaster_fkie
|
8d77d75f57da16ad81c01069e6f616e8d7121ff7
|
[
"BSD-3-Clause"
] | 146
|
2015-01-13T23:02:24.000Z
|
2022-03-23T05:54:02.000Z
|
fkie_node_manager/src/fkie_node_manager/editor/yaml_highlighter.py
|
JOiiNT-LAB/multimaster_fkie
|
8d77d75f57da16ad81c01069e6f616e8d7121ff7
|
[
"BSD-3-Clause"
] | 103
|
2015-02-08T23:20:02.000Z
|
2022-03-27T12:45:48.000Z
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# based on code of Timo Roehling
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QRegExp, Qt
from python_qt_binding.QtGui import QColor, QFont, QSyntaxHighlighter, QTextCharFormat
class YamlHighlighter(QSyntaxHighlighter):
'''
    Enables syntax highlighting for YAML files.
'''
def __init__(self, parent=None):
QSyntaxHighlighter.__init__(self, parent)
self.rules = []
self.commentStart = QRegExp("#")
self.commentEnd = QRegExp("\n|\r")
self.default_format = QTextCharFormat()
self.default_format.setForeground(QColor(24, 24, 24))
self.commentFormat = QTextCharFormat()
self.commentFormat.setFontItalic(True)
self.commentFormat.setForeground(Qt.darkGray)
tagList = ["\\btrue\\b", "\\bfalse\\b"]
# create patterns for tags
for tag in tagList:
self.rules.append((self._create_regexp(tag), self._create_format(Qt.blue)))
# create pattern for digits
self.rules.append((self._create_regexp("\\d+"), self._create_format(QColor(127, 64, 127))))
# create pattern for params
self.rules.append((self._create_regexp("\s*[_.\w]*\s*:"), self._create_format(Qt.darkBlue)))
# create pattern for params
self.rules.append((self._create_regexp(":\s*:[_\.\w]*$|:\s*\@[_\.\w]*$"), self._create_format(Qt.darkBlue)))
# create pattern for list signes
self.rules.append((self._create_regexp("^\s*-"), self._create_format(Qt.darkRed, 'bold')))
        # create pattern for the YAML document separator (---)
self.rules.append((self._create_regexp("^---$"), self._create_format(Qt.darkRed)))
# create pattern for braces
self.rules.append((self._create_regexp("[\[\]\{\}\,]"), self._create_format(Qt.darkGreen)))
# create patterns for strings
self.rules.append((self._create_regexp("\".*\"|\'.*\'"), self._create_format(Qt.blue)))
# create patterns for substitutions
self.rules.append((self._create_regexp("\\$\\(.*\\)"), self._create_format(QColor(127, 64, 127))))
# create patterns for DOCTYPE
self.rules.append((self._create_regexp("<!DOCTYPE.*>"), self._create_format(Qt.lightGray)))
self.rules.append((self._create_regexp("<\\?xml.*\\?>"), self._create_format(Qt.lightGray)))
def highlightBlock(self, text):
self.setFormat(0, len(text), self.default_format)
for pattern, form in self.rules:
index = pattern.indexIn(text)
while index >= 0:
length = pattern.matchedLength()
self.setFormat(index, length, form)
index = pattern.indexIn(text, index + length)
# mark comment blocks
self.setCurrentBlockState(0)
startIndex = 0
if self.previousBlockState() != 1:
startIndex = self.commentStart.indexIn(text)
if startIndex >= 0:
commentLength = len(text) - startIndex
self.setFormat(startIndex, commentLength, self.commentFormat)
def _create_regexp(self, pattern=''):
_regexp = QRegExp()
_regexp.setMinimal(True)
_regexp.setPattern(pattern)
return _regexp
def _create_format(self, color, style=''):
_format = QTextCharFormat()
_format.setForeground(color)
if 'bold' in style:
_format.setFontWeight(QFont.Bold)
else:
_format.setFontWeight(QFont.Normal)
if 'italic' in style:
_format.setFontItalic(True)
return _format
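# Hedged usage sketch (illustration only; not part of the upstream file):
# the highlighter attaches to a QTextDocument, e.g. the one behind a text
# editor widget. Assumes a Qt 5 style binding that exposes QtWidgets and a
# running QApplication; the widget choice is only an example.
if __name__ == "__main__":
    import sys
    from python_qt_binding.QtWidgets import QApplication, QTextEdit
    app = QApplication(sys.argv)
    editor = QTextEdit()
    YamlHighlighter(editor.document())  # re-highlights as the document changes
    editor.setPlainText("# comment\nparams: [1, 2, true]\n---\nname: node\n")
    editor.show()
    sys.exit(app.exec_())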
| 41.918699
| 116
| 0.67339
|
b94eb564d968ac85a04a1d9191b04daba4d3e127
| 3,142
|
py
|
Python
|
swagger_client/models/delete_fleets_fleet_id_members_member_id_not_found.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
swagger_client/models/delete_fleets_fleet_id_members_member_id_not_found.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
swagger_client/models/delete_fleets_fleet_id_members_member_id_not_found.py
|
rseichter/bootini-star
|
a80258f01a05e4df38748b8cb47dfadabd42c20d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeleteFleetsFleetIdMembersMemberIdNotFound(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str'
}
attribute_map = {
'error': 'error'
}
def __init__(self, error=None): # noqa: E501
"""DeleteFleetsFleetIdMembersMemberIdNotFound - a model defined in Swagger""" # noqa: E501
self._error = None
self.discriminator = None
if error is not None:
self.error = error
@property
def error(self):
"""Gets the error of this DeleteFleetsFleetIdMembersMemberIdNotFound. # noqa: E501
Not found message # noqa: E501
:return: The error of this DeleteFleetsFleetIdMembersMemberIdNotFound. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this DeleteFleetsFleetIdMembersMemberIdNotFound.
Not found message # noqa: E501
:param error: The error of this DeleteFleetsFleetIdMembersMemberIdNotFound. # noqa: E501
:type: str
"""
self._error = error
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteFleetsFleetIdMembersMemberIdNotFound):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.321739
| 99
| 0.574157
|
e007f455c9f147e015610e9d17125a02432f3926
| 17,994
|
py
|
Python
|
tests/integration/sts/topology/base_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/sts/topology/base_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/sts/topology/base_test.py
|
jhall11/sts
|
b484f184824c9fe59864103f24fdfa24ff8bcdcd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Ahmed El-Hassany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import socket
import unittest
from pox.openflow.libopenflow_01 import ofp_phy_port
from pox.lib.util import connect_socket_with_backoff
from sts.topology.graph import TopologyGraph
from sts.topology.base import Topology, TopologyCapabilities
from sts.topology.controllers_manager import ControllersManager
from sts.entities.hosts import Host
from sts.entities.hosts import HostInterface
from sts.entities.sts_entities import AccessLink
from sts.entities.sts_entities import Link
from sts.entities.base import BiDirectionalLinkAbstractClass
from sts.entities.sts_entities import FuzzSoftwareSwitch
from sts.topology.sts_hosts_manager import STSHostsManager
from sts.topology.sts_switches_manager import STSSwitchesManager
from sts.topology.sts_patch_panel import STSPatchPanel
from sts.topology.hosts_manager import mac_addresses_generator
from sts.topology.hosts_manager import ip_addresses_generator
from sts.topology.hosts_manager import interface_names_generator
from sts.topology.dp_buffer import BufferedPatchPanel
from sts.entities.sts_entities import DeferredOFConnection
from sts.openflow_buffer import OpenFlowBuffer
from sts.util.io_master import IOMaster
from sts.util.deferred_io import DeferredIOWorker
class TopologyTest(unittest.TestCase):
def initialize_io_loop(self):
io_master = IOMaster()
return io_master
def create_connection(self, controller_info, switch):
"""Connect switches to controllers. May raise a TimeoutError"""
    max_backoff_seconds = 1024
socket_ctor = socket.socket
sock = connect_socket_with_backoff(controller_info.config.address,
controller_info.config.port,
max_backoff_seconds=max_backoff_seconds,
socket_ctor=socket_ctor)
# Set non-blocking
sock.setblocking(0)
io_worker = DeferredIOWorker(self.io_master.create_worker_for_socket(sock))
connection = DeferredOFConnection(io_worker, controller_info.cid,
switch.dpid, self.openflow_buffer)
return connection
def sts_topology_type_factory(self, is_host=None, is_switch=None,
is_network_link=None, is_access_link=None,
is_host_interface=None, is_port=None):
"""
Fills in the parameters needed for default behavior as STS topology.
Returns Topology class init with some of the fields already filled in.
"""
is_host_lambda = lambda x: isinstance(x, Host)
is_switch_lambda = lambda x: hasattr(x, 'dpid')
    is_network_link_lambda = lambda x: isinstance(x, Link)
is_access_link_lambda = lambda x: isinstance(x, AccessLink)
is_host_interface_lambda = lambda x: isinstance(x, HostInterface)
is_port_lambda = lambda x: isinstance(x, ofp_phy_port)
is_host = is_host or is_host_lambda
is_switch = is_switch or is_switch_lambda
is_network_link = is_network_link or is_network_link_lambda
is_access_link = is_access_link or is_access_link_lambda
is_host_interface = is_host_interface or is_host_interface_lambda
is_port = is_port or is_port_lambda
return functools.partial(Topology, hosts_manager=STSHostsManager(),
switches_manager=STSSwitchesManager(self.create_connection),
controllers_manager=ControllersManager(),
dp_buffer=BufferedPatchPanel(),
is_host=is_host, is_switch=is_switch,
is_network_link=is_network_link,
is_access_link=is_access_link,
is_host_interface=is_host_interface,
is_port=is_port)
def setUp(self):
self.io_master = self.initialize_io_loop()
self.openflow_buffer = OpenFlowBuffer()
@unittest.skip
def test_build(self):
# Arrange
if1 = dict(hw_addr='00:00:00:00:00:01', ips='192.168.56.21')
if2 = dict(hw_addr='00:00:00:00:00:02', ips='192.168.56.22')
topo_cls = self.sts_topology_type_factory()
topo = TopologyGraph()
h1 = topo.add_host(interfaces=[if1, if2], name='h1')
# Act
net = topo_cls(topo_graph=topo, patch_panel=TestPatchPanel(),
capabilities=TopologyCapabilities())
net.build()
# Assert
self.assertEquals(h1, 'h1')
self.assertEquals(len(topo._g.vertices), 3)
self.assertEquals(list(topo.hosts_iter()), [h1])
self.assertEquals(list(topo.interfaces_iter()), ['h1-eth0', 'h1-eth1'])
self.assertEquals(len(topo.get_host_info(h1)['interfaces']), 2)
self.assertEquals(topo.get_host_info(h1)['name'], h1)
def test_create_interface(self):
# Arrange
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
iface1 = topo.create_interface(hw_addr="00:00:00:00:00:11",
ip_or_ips="1.2.3.4", name="eth1")
iface2 = topo.create_interface(hw_addr="00:00:00:00:00:12",
ip_or_ips="1.2.3.5", name="eth2")
# Assert
self.assertIsNotNone(iface1)
self.assertIsNotNone(iface2)
def test_create_host(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
h1 = topo.create_host(1, "h1", h1_eth1)
h2 = topo.create_host(2, "h2", h2_eth1)
# Assert
self.assertIsNotNone(h1)
self.assertIsNotNone(h2)
self.assertItemsEqual([h1_eth1], h1.interfaces)
self.assertItemsEqual([h2_eth1], h2.interfaces)
self.assertTrue(topo.graph.has_host(h1))
self.assertTrue(topo.graph.has_host(h2))
self.assertTrue(h1 in topo.hosts_manager.live_hosts)
self.assertTrue(h2 in topo.hosts_manager.live_hosts)
def test_create_host_with_interfaces(self):
# Arrange
mac_gen = mac_addresses_generator()
ip_gen = ip_addresses_generator()
name_gen = interface_names_generator()
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
h1 = topo.create_host_with_interfaces(1, "h1", 2, mac_gen, ip_gen, name_gen)
h2 = topo.create_host_with_interfaces(2, "h2", 3, mac_gen, ip_gen, name_gen)
# Assert
self.assertIsNotNone(h1)
self.assertIsNotNone(h2)
self.assertEquals(len(h1.interfaces), 2)
self.assertEquals(len(h2.interfaces), 3)
self.assertTrue(topo.graph.has_host(h1))
self.assertTrue(topo.graph.has_host(h2))
self.assertTrue(h1 in topo.hosts_manager.live_hosts)
self.assertTrue(h2 in topo.hosts_manager.live_hosts)
def test_add_host(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host(h1_eth1, hid=1)
h2 = Host(h2_eth1, hid=2)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
topo.add_host(h1)
topo.add_host(h2)
duplicate_add = lambda: topo.add_host(h1)
wrong_type = lambda: topo.add_host("Dummy")
topo._can_add_hosts = False
immutable_add = lambda: topo.add_host(h1)
# Assert
self.assertEquals(len(list(topo.graph.hosts_iter())), 2)
self.assertRaises(AssertionError, duplicate_add)
self.assertRaises(AssertionError, wrong_type)
self.assertRaises(AssertionError, immutable_add)
self.assertEquals(len(list(topo.graph.hosts_iter())), 2)
self.assertTrue(topo.graph.has_host(h1))
self.assertTrue(topo.graph.has_host(h2))
self.assertTrue(topo.graph.has_host(h1.name))
def test_remove_host(self):
# Arrange
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host(h1_eth1, hid=1)
h2 = Host(h2_eth1, hid=2)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_host(h1)
topo.add_host(h2)
# Act
topo.remove_host(h1)
# Assert
self.assertFalse(topo.graph.has_host(h1))
self.assertTrue(topo.graph.has_host(h2))
def test_create_switch(self):
# Arrange
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
switch = topo.create_switch(1, 2, True)
# Assert
self.assertTrue(topo.graph.has_switch(switch))
def test_add_switch(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
# Act
topo.add_switch(s1)
topo.add_switch(s2)
duplicate_add = lambda: topo.add_switch(s1)
wrong_type = lambda: topo.add_switch("Dummy")
topo._can_add_hosts = False
immutable_add = lambda: topo.add_switch(s1)
# Assert
self.assertEquals(len(list(topo.graph.switches_iter())), 2)
self.assertRaises(AssertionError, duplicate_add)
self.assertRaises(AssertionError, wrong_type)
self.assertRaises(AssertionError, immutable_add)
self.assertEquals(len(list(topo.graph.switches_iter())), 2)
self.assertTrue(topo.graph.has_switch(s1))
self.assertTrue(topo.graph.has_switch(s2))
self.assertFalse(topo.graph.has_switch('s3'))
def test_remove_switch(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_switch(s2)
# Act
topo.remove_switch(s1)
# Assert
self.assertFalse(topo.graph.has_switch(s1))
self.assertTrue(topo.graph.has_switch(s2))
def test_create_network_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_switch(s2)
# Act
l1 = topo.create_network_link(s1, s1.ports[1], s2, s2.ports[1])
# Assert
self.assertEquals(l1.start_node, s1)
self.assertEquals(l1.start_port, s1.ports[1])
self.assertEquals(l1.end_node, s2)
self.assertEquals(l1.end_port, s2.ports[1])
def test_add_network_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
l1 = Link(s1, s1.ports[1], s2, s2.ports[1])
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_switch(s2)
# Act
link = topo.add_network_link(l1)
# Assert
self.assertEquals(link, l1)
self.assertTrue(topo.graph.has_link(link))
self.assertIn(l1, topo.patch_panel.network_links)
def test_add_bidir_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=1)
s2 = FuzzSoftwareSwitch(2, 's2', ports=1)
l1 = BiDirectionalLinkAbstractClass(s1, s1.ports[1], s2, s2.ports[1])
topo_cls = self.sts_topology_type_factory(
is_network_link=lambda x: isinstance(x, BiDirectionalLinkAbstractClass))
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
#topo = Topology(patch_panel=TestPatchPanel(),
# link_cls=BiDirectionalLinkAbstractClass)
topo.add_switch(s1)
topo.add_switch(s2)
# Act
link = topo.add_network_link(l1)
# Assert
self.assertEquals(link, l1)
self.assertTrue(topo.graph.has_link(link))
def test_create_access_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h1 = Host([h1_eth1], name='h1', hid=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_host(h1)
# Act
l1 = topo.create_access_link(h1, h1_eth1, s1, s1.ports[1])
# Assert
self.assertEquals(l1.host, h1)
self.assertEquals(l1.interface, h1_eth1)
self.assertEquals(l1.switch, s1)
self.assertEquals(l1.switch_port, s1.ports[1])
def test_add_access_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h1 = Host([h1_eth1], name='h1', hid=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_host(h1)
l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1])
# Act
l1 = topo.add_access_link(l1)
# Assert
self.assertIn(l1, topo.patch_panel.access_links)
self.assertTrue(topo.graph.has_link(l1))
def test_remove_access_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=2)
h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1')
h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2')
h1 = Host([h1_eth1, h1_eth2], name='h1', hid=1)
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_host(h1)
l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1])
l2 = AccessLink(h1, h1_eth2, s1, s1.ports[2])
topo.add_network_link(l1)
topo.add_network_link(l2)
# Act
topo.remove_access_link(l1)
# Assert
self.assertFalse(topo.graph.has_link(l1))
self.assertNotIn(l1, topo.patch_panel.access_links)
self.assertTrue(topo.graph.has_link(l2))
self.assertIn(l2, topo.patch_panel.access_links)
def test_remove_network_link(self):
# Arrange
s1 = FuzzSoftwareSwitch(1, 's1', ports=3)
s2 = FuzzSoftwareSwitch(2, 's2', ports=3)
l1 = Link(s1, s1.ports[1], s2, s2.ports[1])
l2 = Link(s1, s1.ports[2], s2, s2.ports[2])
l3 = Link(s1, s1.ports[3], s2, s2.ports[3])
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
topo.add_switch(s1)
topo.add_switch(s2)
topo.add_network_link(l1)
topo.add_network_link(l2)
topo.add_network_link(l3)
# Act
topo.remove_network_link(l1)
# Assert
self.assertFalse(topo.graph.has_link(l1))
self.assertNotIn(l1, topo.patch_panel.network_links)
self.assertTrue(topo.graph.has_link(l2))
self.assertIn(l2, topo.patch_panel.network_links)
def test_crash_switch(self):
# Arrange
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
s1 = FuzzSoftwareSwitch(1, 's1', ports=0)
s2 = FuzzSoftwareSwitch(2, 's2', ports=0)
topo.add_switch(s1)
topo.add_switch(s2)
# Act
topo.switches_manager.crash_switch(s1)
# Assert
self.assertEquals(len(topo.switches_manager.failed_switches), 1)
self.assertIn(s1, topo.switches_manager.failed_switches)
self.assertEquals(topo.switches_manager.live_switches, set([s2]))
def test_recover_switch(self):
# Arrange
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
s1 = FuzzSoftwareSwitch(1, 's1', ports=0)
s2 = FuzzSoftwareSwitch(2, 's2', ports=0)
topo.add_switch(s1)
topo.add_switch(s2)
topo.switches_manager.crash_switch(s1)
topo.switches_manager.crash_switch(s2)
# Act
topo.switches_manager.recover_switch(s1)
# Assert
self.assertEquals(len(topo.switches_manager.failed_switches), 1)
self.assertIn(s2, topo.switches_manager.failed_switches)
self.assertEquals(topo.switches_manager.live_switches, set([s1]))
def test_live_edge_switches(self):
# Arrange
topo_cls = self.sts_topology_type_factory()
topo = topo_cls(patch_panel=STSPatchPanel(),
capabilities=TopologyCapabilities())
s1 = FuzzSoftwareSwitch(1, 's1', ports=0)
s2 = FuzzSoftwareSwitch(2, 's2', ports=0)
topo.add_switch(s1)
topo.add_switch(s2)
topo.switches_manager.crash_switch(s1)
# Act
live_edge = topo.switches_manager.live_edge_switches
# Assert
self.assertEquals(len(topo.switches_manager.failed_switches), 1)
self.assertIn(s1, topo.switches_manager.failed_switches)
self.assertEquals(topo.switches_manager.live_switches, set([s2]))
self.assertItemsEqual(live_edge, [s2])
| 39.634361
| 89
| 0.694843
|
2378e828acdc06e96075f80b4d479eb2f9609c90
| 260
|
py
|
Python
|
psx/_dump_/25/_dump_ida_/overlay_3/set_vars.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 15
|
2018-06-28T01:11:25.000Z
|
2021-09-27T15:57:18.000Z
|
psx/_dump_/25/_dump_ida_/overlay_3/set_vars.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 7
|
2018-06-29T04:08:23.000Z
|
2019-10-17T13:57:22.000Z
|
psx/_dump_/25/_dump_ida_/overlay_3/set_vars.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 7
|
2018-06-28T01:11:34.000Z
|
2020-05-23T09:21:48.000Z
|
del_items(0x800A0E14)
SetType(0x800A0E14, "char StrDate[12]")
del_items(0x800A0E20)
SetType(0x800A0E20, "char StrTime[9]")
del_items(0x800A0E2C)
SetType(0x800A0E2C, "char *Words[118]")
del_items(0x800A1004)
SetType(0x800A1004, "struct MONTH_DAYS MonDays[12]")
| 28.888889
| 52
| 0.792308
|
8b215a3e58508968d735b24e38eee0e3fe6db24d
| 5,745
|
py
|
Python
|
tftpy/TftpClient.py
|
pucgenie/tftpy
|
40146f92fabe872b1d942bd7695cb2cde308ae4a
|
[
"MIT"
] | null | null | null |
tftpy/TftpClient.py
|
pucgenie/tftpy
|
40146f92fabe872b1d942bd7695cb2cde308ae4a
|
[
"MIT"
] | null | null | null |
tftpy/TftpClient.py
|
pucgenie/tftpy
|
40146f92fabe872b1d942bd7695cb2cde308ae4a
|
[
"MIT"
] | null | null | null |
# vim: ts=4 sw=4 et ai:
# -*- coding: utf8 -*-
"""This module implements the TFTP Client functionality. Instantiate an
instance of the client, and then use its upload or download method. Logging is
performed via a standard logging object set in TftpShared."""
import types
import logging
from .TftpShared import *
from .TftpPacketTypes import *
from .TftpContexts import TftpContextClientDownload, TftpContextClientUpload
log = logging.getLogger('tftpy.TftpClient')
class TftpClient(TftpSession):
"""This class is an implementation of a tftp client. Once instantiated, a
download can be initiated via the download() method, or an upload via the
upload() method."""
def __init__(self, host, port=69, options={}, localip = ""):
TftpSession.__init__(self)
self.context = None
self.host = host
self.iport = port
self.filename = None
self.options = options
self.localip = localip
if 'blksize' in self.options:
size = self.options['blksize']
tftpassert(int == type(size), "blksize must be an int")
if size < MIN_BLKSIZE or size > MAX_BLKSIZE:
raise TftpException("Invalid blksize: %d" % size)
def download(self, filename, output, packethook=None, timeout=SOCK_TIMEOUT, retries=DEF_TIMEOUT_RETRIES):
"""This method initiates a tftp download from the configured remote
host, requesting the filename passed. It writes the file to output,
which can be a file-like object or a path to a local file. If a
packethook is provided, it must be a function that takes a single
parameter, which will be a copy of each DAT packet received in the
form of a TftpPacketDAT object. The timeout parameter may be used to
override the default SOCK_TIMEOUT setting, which is the amount of time
that the client will wait for a receive packet to arrive.
        The retries parameter may be used to override the default DEF_TIMEOUT_RETRIES
        setting, which is the number of retransmission attempts the client will make
        after encountering a timeout.
Note: If output is a hyphen, stdout is used."""
# We're downloading.
log.debug("Creating download context with the following params:")
log.debug("host = %s, port = %s, filename = %s" % (self.host, self.iport, filename))
log.debug("options = %s, packethook = %s, timeout = %s" % (self.options, packethook, timeout))
self.context = TftpContextClientDownload(self.host,
self.iport,
filename,
output,
self.options,
packethook,
timeout,
retries=retries,
localip=self.localip)
self.context.start()
# Download happens here
self.context.end()
metrics = self.context.metrics
log.info('')
log.info("Download complete.")
if metrics.duration == 0:
log.info("Duration too short, rate undetermined")
else:
log.info("Downloaded %.2f bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
log.info("Average rate: %.2f kbps" % metrics.kbps)
log.info("%.2f bytes in resent data" % metrics.resent_bytes)
log.info("Received %d duplicate packets" % metrics.dupcount)
def upload(self, filename, input, packethook=None, timeout=SOCK_TIMEOUT, retries=DEF_TIMEOUT_RETRIES):
"""This method initiates a tftp upload to the configured remote host,
uploading the filename passed. It reads the file from input, which
can be a file-like object or a path to a local file. If a packethook
is provided, it must be a function that takes a single parameter,
which will be a copy of each DAT packet sent in the form of a
TftpPacketDAT object. The timeout parameter may be used to override
the default SOCK_TIMEOUT setting, which is the amount of time that
the client will wait for a DAT packet to be ACKd by the server.
        The retries parameter may be used to override the default DEF_TIMEOUT_RETRIES
        setting, which is the number of retransmission attempts the client will make
        after encountering a timeout.
Note: If input is a hyphen, stdin is used."""
self.context = TftpContextClientUpload(self.host,
self.iport,
filename,
input,
self.options,
packethook,
timeout,
retries=retries,
localip=self.localip)
self.context.start()
# Upload happens here
self.context.end()
metrics = self.context.metrics
log.info('')
log.info("Upload complete.")
if metrics.duration == 0:
log.info("Duration too short, rate undetermined")
else:
log.info("Uploaded %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration))
log.info("Average rate: %.2f kbps" % metrics.kbps)
log.info("%.2f bytes in resent data" % metrics.resent_bytes)
log.info("Resent %d packets" % metrics.dupcount)
| 49.525862
| 109
| 0.584334
|
9e762225da9ca9d3712266725ae137a7b263d362
| 156
|
py
|
Python
|
Exercicios-Desafios python/Desafios/desafio019.py
|
ThiagoPereira232/python-curso-em-video-mundo01
|
9cecf3f91672ec5a1fa85ca5d018e158b0d31bae
|
[
"MIT"
] | null | null | null |
Exercicios-Desafios python/Desafios/desafio019.py
|
ThiagoPereira232/python-curso-em-video-mundo01
|
9cecf3f91672ec5a1fa85ca5d018e158b0d31bae
|
[
"MIT"
] | null | null | null |
Exercicios-Desafios python/Desafios/desafio019.py
|
ThiagoPereira232/python-curso-em-video-mundo01
|
9cecf3f91672ec5a1fa85ca5d018e158b0d31bae
|
[
"MIT"
] | null | null | null |
import random
alunos = input('Digite os alunos separados por ; ').split(";")
escolhido = random.choice(alunos)
print(f'O aluno escolhido foi {escolhido}')
| 26
| 62
| 0.730769
|
0fde9497eb43844aa9202c7402fccf8be3ba3ef0
| 1,610
|
py
|
Python
|
ccdproc/tests/pytest_fixtures.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
ccdproc/tests/pytest_fixtures.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
ccdproc/tests/pytest_fixtures.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function
import numpy as np
from astropy.tests.helper import pytest
from astropy import units as u
from astropy.utils import NumpyRNGContext
from ..ccddata import CCDData
# If additional pytest markers are defined the key in the dictionary below
# should be the name of the marker.
DEFAULTS = {
'seed': 123,
'data_size': 100,
'data_scale': 1.0,
'data_mean': 0.0
}
DEFAULT_SEED = 123
DEFAULT_DATA_SIZE = 100
DEFAULT_DATA_SCALE = 1.0
def value_from_markers(key, request):
try:
val = request.keywords[key].args[0]
except KeyError:
val = DEFAULTS[key]
return val
@pytest.fixture
def ccd_data(request):
"""
Return a CCDData object with units of ADU.
The size of the data array is 100x100 but can be changed using the marker
@pytest.mark.data_size(N) on the test function, where N should be the
desired dimension.
Data values are initialized to random numbers drawn from a normal
distribution with mean of 0 and scale 1.
    The scale can be changed with the marker @pytest.mark.data_scale(s) on the
    test function, where s is the desired scale.
    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
"""
size = value_from_markers('data_size', request)
scale = value_from_markers('data_scale', request)
mean = value_from_markers('data_mean', request)
with NumpyRNGContext(DEFAULTS['seed']):
data = np.random.normal(loc=mean, size=[size, size], scale=scale)
return CCDData(data, unit=u.adu)
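# Hedged usage sketch (illustration only; not part of the upstream file):
# a test consuming the fixture above, overriding the defaults through the
# markers described in its docstring. The test name is just an example.
@pytest.mark.data_size(10)
@pytest.mark.data_scale(5.0)
def test_ccd_data_markers(ccd_data):
    # The fixture builds a square array of the requested size, in ADU.
    assert ccd_data.data.shape == (10, 10)
    assert ccd_data.unit == u.adu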
| 27.288136
| 77
| 0.711801
|
084939578ee7787dc63d202219cddb044567601a
| 637
|
py
|
Python
|
thevoid/whispers/migrations/0002_auto_20180926_0203.py
|
CBR0MS/telematicEnvironment
|
6b3130347cad06c6b3aa453010c91d9990bc9cb8
|
[
"MIT"
] | null | null | null |
thevoid/whispers/migrations/0002_auto_20180926_0203.py
|
CBR0MS/telematicEnvironment
|
6b3130347cad06c6b3aa453010c91d9990bc9cb8
|
[
"MIT"
] | 2
|
2020-06-05T19:00:38.000Z
|
2021-06-10T20:51:00.000Z
|
thevoid/whispers/migrations/0002_auto_20180926_0203.py
|
cbroms/telematicEnvironment
|
6b3130347cad06c6b3aa453010c91d9990bc9cb8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.7 on 2018-09-26 02:03
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('whispers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='whisper',
name='display_text',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='whisper',
name='id',
field=models.CharField(blank=True, default=uuid.uuid4, max_length=100, primary_key=True, serialize=False, unique=True),
),
]
| 25.48
| 131
| 0.605965
|
1909ab484d482a8566794cbe612e4029f9d013d4
| 5,714
|
py
|
Python
|
sdk/python/pulumi_azure_native/authorization/v20161201/get_policy_definition.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/authorization/v20161201/get_policy_definition.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/authorization/v20161201/get_policy_definition.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetPolicyDefinitionResult',
'AwaitableGetPolicyDefinitionResult',
'get_policy_definition',
]
@pulumi.output_type
class GetPolicyDefinitionResult:
"""
The policy definition.
"""
def __init__(__self__, description=None, display_name=None, id=None, metadata=None, mode=None, name=None, parameters=None, policy_rule=None, policy_type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
pulumi.set(__self__, "metadata", metadata)
if mode and not isinstance(mode, str):
raise TypeError("Expected argument 'mode' to be a str")
pulumi.set(__self__, "mode", mode)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if parameters and not isinstance(parameters, dict):
raise TypeError("Expected argument 'parameters' to be a dict")
pulumi.set(__self__, "parameters", parameters)
if policy_rule and not isinstance(policy_rule, dict):
raise TypeError("Expected argument 'policy_rule' to be a dict")
pulumi.set(__self__, "policy_rule", policy_rule)
if policy_type and not isinstance(policy_type, str):
raise TypeError("Expected argument 'policy_type' to be a str")
pulumi.set(__self__, "policy_type", policy_type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The policy definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The display name of the policy definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the policy definition.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy definition metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def mode(self) -> Optional[str]:
"""
The policy definition mode. Possible values are NotSpecified, Indexed, and All.
"""
return pulumi.get(self, "mode")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the policy definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
Required if a parameter is used in policy rule.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyRule")
def policy_rule(self) -> Optional[Any]:
"""
The policy rule.
"""
return pulumi.get(self, "policy_rule")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[str]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
class AwaitableGetPolicyDefinitionResult(GetPolicyDefinitionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPolicyDefinitionResult(
description=self.description,
display_name=self.display_name,
id=self.id,
metadata=self.metadata,
mode=self.mode,
name=self.name,
parameters=self.parameters,
policy_rule=self.policy_rule,
policy_type=self.policy_type)
def get_policy_definition(policy_definition_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyDefinitionResult:
"""
The policy definition.
:param str policy_definition_name: The name of the policy definition to get.
"""
__args__ = dict()
__args__['policyDefinitionName'] = policy_definition_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:authorization/v20161201:getPolicyDefinition', __args__, opts=opts, typ=GetPolicyDefinitionResult).value
return AwaitableGetPolicyDefinitionResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
metadata=__ret__.metadata,
mode=__ret__.mode,
name=__ret__.name,
parameters=__ret__.parameters,
policy_rule=__ret__.policy_rule,
policy_type=__ret__.policy_type)
| 34.215569
| 163
| 0.641232
|
d81251b788e15d60102454e6d8285e021a3b61cf
| 9,491
|
py
|
Python
|
Scripts/sims4communitylib/utils/sims/common_gender_utils.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 118
|
2019-08-31T04:33:18.000Z
|
2022-03-28T21:12:14.000Z
|
Scripts/sims4communitylib/utils/sims/common_gender_utils.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 15
|
2019-12-05T01:29:46.000Z
|
2022-02-18T17:13:46.000Z
|
Scripts/sims4communitylib/utils/sims/common_gender_utils.py
|
ColonolNutty/Sims4CommunityLibrary
|
684f28dc3c7deb4d9fd520e21e63942b65a91d31
|
[
"CC-BY-4.0"
] | 28
|
2019-09-07T04:11:05.000Z
|
2022-02-07T18:31:40.000Z
|
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Union
from sims.sim_info import SimInfo
from sims.sim_info_types import Gender
from sims4communitylib.enums.common_gender import CommonGender
from sims4communitylib.enums.traits_enum import CommonTraitId
from sims4communitylib.utils.cas.common_outfit_utils import CommonOutfitUtils
from sims4communitylib.utils.sims.common_sim_voice_utils import CommonSimVoiceUtils
from sims4communitylib.utils.sims.common_trait_utils import CommonTraitUtils
class CommonGenderUtils:
"""Utilities for manipulating Genders of Sims.
"""
@staticmethod
def get_gender(sim_info: SimInfo) -> Union[Gender, None]:
"""get_gender(sim_info)
Retrieve the Gender of a Sim.
:param sim_info: The Sim to retrieve the gender of.
:type sim_info: SimInfo
:return: The Gender of the Sim or None if a problem occurs.
:rtype: Union[Gender, None]
"""
if sim_info is None:
return None
if hasattr(sim_info, 'gender'):
# noinspection PyPropertyAccess
return sim_info.gender
if hasattr(sim_info, 'sim_info') and hasattr(sim_info.sim_info, 'gender'):
return sim_info.sim_info.gender
return None
@staticmethod
def set_gender(sim_info: SimInfo, gender: Union[Gender, CommonGender, int]) -> bool:
"""set_gender(sim_info, gender)
Set the Gender of a Sim.
:param sim_info: The Sim to set the Gender of.
:type sim_info: SimInfo
:param gender: The Gender to set the Sim to.
:type gender: Union[Gender, CommonGender, int]
:return: True, if the Gender of the Sim was set successfully. False, if not.
:rtype: bool
"""
gender = CommonGender.convert_to_vanilla(gender)
if gender is None:
return False
sim_info.gender = gender
if gender == Gender.MALE:
new_trait_id = CommonTraitId.GENDER_MALE
CommonTraitUtils.remove_trait(sim_info, CommonTraitId.GENDER_FEMALE)
else:
new_trait_id = CommonTraitId.GENDER_FEMALE
CommonTraitUtils.remove_trait(sim_info, CommonTraitId.GENDER_MALE)
CommonTraitUtils.add_trait(sim_info, new_trait_id)
from sims4communitylib.events.sim.common_sim_event_dispatcher import CommonSimEventDispatcherService
CommonSimEventDispatcherService()._on_sim_change_gender(sim_info)
return True
@staticmethod
def swap_gender(sim_info: SimInfo, update_gender_options: bool=True, update_voice: bool=True, update_outfits: bool=True) -> bool:
"""swap_gender(sim_info, update_gender_options=True, update_voice=True, update_outfits=True)
        Swap the Gender of a Sim to its opposite, i.e. change a Sim from Male to Female or from Female to Male.
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:param update_gender_options: If True, gender option traits such as Toilet Usage, Clothing Preference, Pregnancy, and Body Frame will be updated to reflect the vanilla settings for each gender\
For example, if a Human Sim is swapping from Female to Male, their gender options will be updated to Toilet Standing, Cannot Be Impregnated, Can Impregnate, Mens Wear clothing preference, and Masculine Frame.\
If False, gender option traits will not be updated.\
Default is True.
:type update_gender_options: bool, optional
:param update_voice: If True, the voice of the Sim will be updated to a default voice for the gender being swapped to. If False, the voice of the Sim will remain unchanged. Default is True.
:type update_voice: bool, optional
:param update_outfits: If True, the outfits of the Sim will be regenerated to match the gender options of the Sim. If False, the outfits of the Sim will not be regenerated. Default is True.
        :type update_outfits: bool, optional
:return: True, if the Gender of the Sim was swapped successfully. False, if not.
:rtype: bool
"""
from sims4communitylib.utils.sims.common_sim_gender_option_utils import CommonSimGenderOptionUtils
result = False
frame = CommonSimGenderOptionUtils.has_masculine_frame(sim_info)
prefers_menswear = CommonSimGenderOptionUtils.prefers_menswear(sim_info)
can_impregnate = CommonSimGenderOptionUtils.can_impregnate(sim_info)
can_be_impregnated = CommonSimGenderOptionUtils.can_be_impregnated(sim_info)
can_reproduce = CommonSimGenderOptionUtils.can_reproduce(sim_info)
voice_pitch = CommonSimVoiceUtils.get_voice_pitch(sim_info)
voice_actor = CommonSimVoiceUtils.get_voice_actor(sim_info)
uses_toilet_standing = CommonSimGenderOptionUtils.uses_toilet_standing(sim_info)
has_breasts = CommonSimGenderOptionUtils.has_breasts(sim_info)
saved_outfits = sim_info.save_outfits()
current_outfit = CommonOutfitUtils.get_current_outfit(sim_info)
if CommonGenderUtils.is_male(sim_info):
result = CommonGenderUtils.set_gender(sim_info, CommonGender.FEMALE)
if update_voice:
CommonSimVoiceUtils.set_to_default_female_voice(sim_info)
if update_gender_options:
CommonSimGenderOptionUtils.update_gender_options_to_vanilla_female(sim_info)
if update_outfits:
CommonOutfitUtils.regenerate_all_outfits(sim_info)
elif CommonGenderUtils.is_female(sim_info):
result = CommonGenderUtils.set_gender(sim_info, CommonGender.MALE)
if update_voice:
CommonSimVoiceUtils.set_to_default_male_voice(sim_info)
if update_gender_options:
CommonSimGenderOptionUtils.update_gender_options_to_vanilla_male(sim_info)
if update_outfits:
CommonOutfitUtils.regenerate_all_outfits(sim_info)
if not update_voice:
CommonSimVoiceUtils.set_voice_pitch(sim_info, voice_pitch)
CommonSimVoiceUtils.set_voice_actor(sim_info, voice_actor)
if not update_gender_options:
CommonSimGenderOptionUtils.update_body_frame(sim_info, frame)
CommonSimGenderOptionUtils.update_clothing_preference(sim_info, prefers_menswear)
CommonSimGenderOptionUtils.update_can_impregnate(sim_info, can_impregnate)
CommonSimGenderOptionUtils.update_can_be_impregnated(sim_info, can_be_impregnated)
CommonSimGenderOptionUtils.update_can_reproduce(sim_info, can_reproduce)
CommonSimGenderOptionUtils.update_toilet_usage(sim_info, uses_toilet_standing)
CommonSimGenderOptionUtils.update_has_breasts(sim_info, has_breasts)
if not update_outfits:
sim_info.load_outfits(saved_outfits)
CommonOutfitUtils.resend_outfits(sim_info)
CommonOutfitUtils.set_current_outfit(sim_info, current_outfit)
return result
@staticmethod
def are_same_gender(sim_info: SimInfo, other_sim_info: SimInfo) -> bool:
"""are_same_gender(sim_info, other_sim_info)
Determine if two Sims are the same Gender.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:param other_sim_info: The Sim to compare to.
:type other_sim_info: SimInfo
:return: True, if both Sims are the same Gender. False, if not.
:rtype: bool
"""
return int(CommonGenderUtils.get_gender(sim_info)) == int(CommonGenderUtils.get_gender(other_sim_info))
@staticmethod
def is_female_gender(gender: Union[Gender, CommonGender, int]) -> bool:
"""is_female_gender(gender)
Determine if a Gender is Female.
:param gender: The gender to check.
:type gender: Union[Gender, CommonGender, int]
:return: True, if the gender is female. False, if the gender is not female.
:rtype: bool
"""
return int(gender) == int(Gender.FEMALE)
@staticmethod
def is_male_gender(gender: Union[Gender, CommonGender, int]) -> bool:
"""is_male_gender(gender)
Determine if a Gender is Male.
:param gender: The gender to check.
:type gender: Union[Gender, CommonGender, int]
:return: True, if the gender is male. False, if the gender is not male.
:rtype: bool
"""
return int(gender) == int(Gender.MALE)
@staticmethod
def is_female(sim_info: SimInfo) -> bool:
"""is_female(sim_info)
Determine if a Sim is Female.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is female. False, if the Sim is not female.
:rtype: bool
"""
return CommonGenderUtils.is_female_gender(CommonGenderUtils.get_gender(sim_info))
@staticmethod
def is_male(sim_info: SimInfo) -> bool:
"""is_male(sim_info)
Determine if a Sim is Male.
:param sim_info: The Sim to check.
:type sim_info: SimInfo
:return: True, if the Sim is male. False, if the Sim is not male.
:rtype: bool
"""
return CommonGenderUtils.is_male_gender(CommonGenderUtils.get_gender(sim_info))
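    # Illustrative usage sketch (not part of the original utilities). The
    # SimInfo values below are placeholders for Sims obtained from the game
    # at runtime, and only functions defined above are referenced.
    #
    #     if CommonGenderUtils.are_same_gender(sim_info_a, sim_info_b):
    #         ...  # no gender-specific handling needed
    #     elif CommonGenderUtils.is_female(sim_info_a):
    #         ...  # sim_info_a is the female Sim of the pair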
| 46.072816
| 217
| 0.704035
|
107d460280d46044e03041a687e052a4f4899906
| 5,420
|
py
|
Python
|
experimentalTest/marketing_nn.py
|
ProjetEtudeMLFI/TensorFI
|
961a0205ec90935a238c58112e8119c34a70ba7c
|
[
"MIT"
] | null | null | null |
experimentalTest/marketing_nn.py
|
ProjetEtudeMLFI/TensorFI
|
961a0205ec90935a238c58112e8119c34a70ba7c
|
[
"MIT"
] | null | null | null |
experimentalTest/marketing_nn.py
|
ProjetEtudeMLFI/TensorFI
|
961a0205ec90935a238c58112e8119c34a70ba7c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
""" Neural Network.
A 2-Hidden-Layer Fully Connected Neural Network (a.k.a. Multilayer Perceptron)
implementation with TensorFlow. The network follows the original MNIST example
(http://yann.lecun.com/exdb/mnist/) but is applied here to the marketing
dataset loaded below.
Links:
[MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function
import TensorFI as ti
import tensorflow as tf
import math
import numpy, pandas
import preprocessing
import sys
logPath = sys.argv[1]
# Parameters
learning_rate = 0.1
num_steps = 300
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 20 # 20 features in marketing dataset
num_classes = 2  # whether a client will subscribe to a term deposit ==> 2 classes
######
data = pandas.read_csv("./experimentalTest/marketing.csv")
data = preprocessing.cleanDataForClassification(data, "class")
labels = []
for d in data['class']:
if int(d) == 0:
labels.append([0, 1])
else:
labels.append([1, 0])
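# The loop above one-hot encodes the binary target: class 0 becomes [0, 1]
# and class 1 becomes [1, 0], matching num_classes = 2.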
labels = pandas.DataFrame(labels).values
######
# tf Graph input
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
# Create model
def neural_net(x):
# Hidden fully connected layer with 256 neurons
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
# Hidden fully connected layer with 256 neurons
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
# Output fully connected layer with a neuron for each class
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.compat.v1.Session() as sess:
# Run the initializer
sess.run(init)
for step in range(1, num_steps + 1):
# batch_x, batch_y = mnist.train.next_batch(batch_size)
batch_xs = data.drop("class", axis=1).values
batch_ys = labels
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_xs, Y: batch_ys})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy],
feed_dict={
X: batch_xs,
Y: batch_ys
})
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
print("Training Finished!")
# Add the fault injection code here to instrument the graph
fi = ti.TensorFI(sess,
name="Perceptron",
logLevel=50,
disableInjections=True)
correctResult = sess.run(accuracy, feed_dict={X: batch_xs, Y: batch_ys})
print("Testing Accuracy:", correctResult)
diffFunc = lambda x: math.fabs(x - correctResult)
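    # diffFunc measures how far an accuracy obtained under fault injection
    # deviates from the fault-free accuracy computed above; it is only used
    # by the commented-out fi.pLaunch(...) call further below.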
# Make the log files in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, sess.graph)
###
print("Accuracy (with no injections):",
accuracy.eval({
X: batch_xs[:20000],
Y: batch_ys[:20000]
}))
orgAcy = accuracy.eval({X: batch_xs[:20000], Y: batch_ys[:20000]})
# Turn on TensorFI to inject faults in inference phase
fi.turnOnInjections()
print("Accuracy (with injections):",
accuracy.eval({
X: batch_xs[20000:],
Y: batch_ys[20000:]
}))
fiAcy = accuracy.eval({X: batch_xs[:20000], Y: batch_ys[:20000]})
###
with open(logPath, 'a') as of:
        of.write(str(orgAcy) + "," + str(fiAcy) + "," + str(orgAcy - fiAcy) +
                 '\n')
# Initialize the number of threads and injections
# numThreads = 5
# numInjections = 100
# Now start performing fault injections, and collect statistics
# myStats = []
# for i in range(numThreads):
# myStats.append( ti.FIStat("Perceptron") )
# Launch the fault injections in parallel
#fi.pLaunch( numberOfInjections = numInjections, numberOfProcesses = numThreads,
# computeDiff = diffFunc, collectStatsList = myStats, timeout = 100)
# Collate the statistics and print them
# print( ti.collateStats(myStats).getStats() )
| 30.971429
| 80
| 0.64428
|
7b2ce129be1bb99e2e28ab2868b52d70b370ea5c
| 3,424
|
py
|
Python
|
networkx/algorithms/approximation/tests/test_clique.py
|
rakschahsa/networkx
|
6cac55b1064c3c346665f9281680fa3b66442ad0
|
[
"BSD-3-Clause"
] | 445
|
2019-01-26T13:50:26.000Z
|
2022-03-18T05:17:38.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/tests/test_clique.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/approximation/tests/test_clique.py
|
fanglab/6mASCOPE
|
3f1fdcb7693ff152f17623ce549526ec272698b1
|
[
"BSD-3-Clause"
] | 31
|
2019-03-10T09:51:27.000Z
|
2022-02-14T23:11:12.000Z
|
# test_clique.py - unit tests for the approximation.clique module
#
# Copyright 2015 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
"""Unit tests for the :mod:`networkx.algorithms.approximation.clique`
module.
"""
from __future__ import division
from nose.tools import assert_greater
from nose.tools import assert_true
from nose.tools import assert_equal
import networkx as nx
from networkx.algorithms.approximation import max_clique
from networkx.algorithms.approximation import clique_removal
from networkx.algorithms.approximation import large_clique_size
def is_independent_set(G, nodes):
"""Returns True if and only if `nodes` is a clique in `G`.
`G` is a NetworkX graph. `nodes` is an iterable of nodes in
`G`.
"""
return G.subgraph(nodes).number_of_edges() == 0
def is_clique(G, nodes):
"""Returns True if and only if `nodes` is an independent set
in `G`.
`G` is an undirected simple graph. `nodes` is an iterable of
nodes in `G`.
"""
H = G.subgraph(nodes)
n = len(H)
return H.number_of_edges() == n * (n - 1) // 2
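# Quick illustration (not part of the original test module): in the path
# graph 0-1-2, the pair {0, 2} has no connecting edge, so it is an
# independent set, while the endpoints of a single edge form a clique.
#
#     G = nx.path_graph(3)
#     is_independent_set(G, {0, 2})  # True
#     is_clique(G, {0, 1})           # True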
class TestCliqueRemoval(object):
"""Unit tests for the
:func:`~networkx.algorithms.approximation.clique_removal` function.
"""
def test_trivial_graph(self):
G = nx.trivial_graph()
independent_set, cliques = clique_removal(G)
assert_true(is_independent_set(G, independent_set))
assert_true(all(is_clique(G, clique) for clique in cliques))
# In fact, we should only have 1-cliques, that is, singleton nodes.
assert_true(all(len(clique) == 1 for clique in cliques))
def test_complete_graph(self):
G = nx.complete_graph(10)
independent_set, cliques = clique_removal(G)
assert_true(is_independent_set(G, independent_set))
assert_true(all(is_clique(G, clique) for clique in cliques))
def test_barbell_graph(self):
G = nx.barbell_graph(10, 5)
independent_set, cliques = clique_removal(G)
assert_true(is_independent_set(G, independent_set))
assert_true(all(is_clique(G, clique) for clique in cliques))
class TestMaxClique(object):
"""Unit tests for the :func:`networkx.algorithms.approximation.max_clique`
function.
"""
def test_null_graph(self):
G = nx.null_graph()
assert_equal(len(max_clique(G)), 0)
def test_complete_graph(self):
graph = nx.complete_graph(30)
# this should return the entire graph
mc = max_clique(graph)
assert_equal(30, len(mc))
def test_maximal_by_cardinality(self):
"""Tests that the maximal clique is computed according to maximum
cardinality of the sets.
For more information, see pull request #1531.
"""
G = nx.complete_graph(5)
G.add_edge(4, 5)
clique = max_clique(G)
assert_greater(len(clique), 1)
G = nx.lollipop_graph(30, 2)
clique = max_clique(G)
assert_greater(len(clique), 2)
def test_large_clique_size():
G = nx.complete_graph(9)
nx.add_cycle(G, [9, 10, 11])
G.add_edge(8, 9)
G.add_edge(1, 12)
G.add_node(13)
assert_equal(large_clique_size(G), 9)
G.remove_node(5)
assert_equal(large_clique_size(G), 8)
G.remove_edge(2, 3)
assert_equal(large_clique_size(G), 7)
| 28.533333
| 78
| 0.677862
|
9863f8c14b2fb40467535f328a3f02dfadb46f00
| 6,380
|
py
|
Python
|
pynodegl-utils/pynodegl_utils/ui/gl_view.py
|
mrobertseidowsky-gpsw/node.gl.ci_test
|
e882d01fa9b91489b3325151c0c49052f6f04075
|
[
"Apache-2.0"
] | null | null | null |
pynodegl-utils/pynodegl_utils/ui/gl_view.py
|
mrobertseidowsky-gpsw/node.gl.ci_test
|
e882d01fa9b91489b3325151c0c49052f6f04075
|
[
"Apache-2.0"
] | null | null | null |
pynodegl-utils/pynodegl_utils/ui/gl_view.py
|
mrobertseidowsky-gpsw/node.gl.ci_test
|
e882d01fa9b91489b3325151c0c49052f6f04075
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QEvent
from seekbar import Seekbar
from pynodegl_utils import player
from pynodegl_utils import export
class _GLWidget(QtWidgets.QWidget):
on_player_available = QtCore.pyqtSignal(name='onPlayerAvailable')
def __init__(self, parent, config):
super(_GLWidget, self).__init__(parent)
self.setAttribute(Qt.WA_DontCreateNativeAncestors)
self.setAttribute(Qt.WA_NativeWindow)
self.setAttribute(Qt.WA_PaintOnScreen)
self.setMinimumSize(640, 360)
self._player = None
self._last_frame_time = 0.0
self._config = config
def paintEngine(self):
return None
def resizeEvent(self, event):
if not self._player:
return
size = event.size()
width = int(size.width() * self.devicePixelRatioF())
height = int(size.height() * self.devicePixelRatioF())
self._player.resize(width, height)
super(_GLWidget, self).resizeEvent(event)
def event(self, event):
if event.type() == QEvent.Paint:
if not self._player:
self._player = player.Player(
self.winId(),
self.width() * self.devicePixelRatioF(),
self.height() * self.devicePixelRatioF(),
self._config,
)
self._player.start()
self._player.onFrame.connect(self._set_last_frame_time)
self.onPlayerAvailable.emit()
else:
self._player.draw()
elif event.type() == QEvent.Close:
if self._player:
self._player.stop()
self._player.wait()
return super(_GLWidget, self).event(event)
@QtCore.pyqtSlot(int, float)
def _set_last_frame_time(self, frame_index, frame_time):
self._last_frame_time = frame_time
def get_last_frame_time(self):
return self._last_frame_time
def get_player(self):
return self._player
class GLView(QtWidgets.QWidget):
def __init__(self, get_scene_func, config):
super(GLView, self).__init__()
self._get_scene_func = get_scene_func
self._cfg = None
self._seekbar = Seekbar(config)
self._gl_widget = _GLWidget(self, config)
self._gl_widget.onPlayerAvailable.connect(self._connect_seekbar)
screenshot_btn = QtWidgets.QToolButton()
screenshot_btn.setText(u'📷')
toolbar = QtWidgets.QHBoxLayout()
toolbar.addWidget(self._seekbar)
toolbar.addWidget(screenshot_btn)
self._gl_layout = QtWidgets.QVBoxLayout(self)
self._gl_layout.addWidget(self._gl_widget, stretch=1)
self._gl_layout.addLayout(toolbar)
screenshot_btn.clicked.connect(self._screenshot)
@QtCore.pyqtSlot()
def _connect_seekbar(self):
player = self._gl_widget.get_player()
player.set_scene(self._cfg)
player.onPlay.connect(self._seekbar.set_play_state)
player.onPause.connect(self._seekbar.set_pause_state)
player.onSceneMetadata.connect(self._seekbar.set_scene_metadata)
player.onFrame.connect(self._seekbar.set_frame_time)
self._seekbar.seek.connect(player.seek)
self._seekbar.play.connect(player.play)
self._seekbar.pause.connect(player.pause)
self._seekbar.step.connect(player.step)
self._seekbar.stop.connect(player.reset_scene)
@QtCore.pyqtSlot()
def _screenshot(self):
filenames = QtWidgets.QFileDialog.getSaveFileName(self, 'Save screenshot file')
if not filenames[0]:
return
exporter = export.Exporter(
self._get_scene_func,
filenames[0],
self._gl_widget.width(),
self._gl_widget.height(),
['-frames:v', '1'],
self._gl_widget.get_last_frame_time()
)
exporter.start()
exporter.wait()
@QtCore.pyqtSlot(tuple)
def set_aspect_ratio(self, ar):
player = self._gl_widget.get_player()
if not player:
return
player.set_aspect_ratio(ar)
@QtCore.pyqtSlot(tuple)
def set_frame_rate(self, fr):
player = self._gl_widget.get_player()
if not player:
return
player.set_framerate(fr)
@QtCore.pyqtSlot(int)
def set_samples(self, samples):
player = self._gl_widget.get_player()
if not player:
return
player.set_samples(samples)
@QtCore.pyqtSlot(tuple)
def set_clear_color(self, color):
player = self._gl_widget.get_player()
if not player:
return
player.set_clear_color(color)
@QtCore.pyqtSlot(str)
def set_backend(self, backend):
player = self._gl_widget.get_player()
if not player:
return
player.set_backend(backend)
def enter(self):
self._cfg = self._get_scene_func()
if not self._cfg:
return
player = self._gl_widget.get_player()
if not player:
return
player.set_scene(self._cfg)
self._gl_widget.update()
def leave(self):
player = self._gl_widget.get_player()
if not player:
return
player.pause()
def closeEvent(self, close_event):
self._gl_widget.close()
self._seekbar.close()
super(GLView, self).closeEvent(close_event)
| 30.526316
| 87
| 0.642633
|
c728e9af64770faa186b859b7dd1c18e24c1a590
| 1,695
|
py
|
Python
|
setup.py
|
litecoinfinance/electrumx
|
8f71fa565ba30f3f77855c00a85829d26eedbbf3
|
[
"MIT"
] | null | null | null |
setup.py
|
litecoinfinance/electrumx
|
8f71fa565ba30f3f77855c00a85829d26eedbbf3
|
[
"MIT"
] | null | null | null |
setup.py
|
litecoinfinance/electrumx
|
8f71fa565ba30f3f77855c00a85829d26eedbbf3
|
[
"MIT"
] | 1
|
2021-08-05T09:04:08.000Z
|
2021-08-05T09:04:08.000Z
|
import setuptools
version = '1.16.0'
setuptools.setup(
name='e-x',
version=version,
scripts=['electrumx_server', 'electrumx_rpc', 'electrumx_compact_history'],
python_requires='>=3.7',
install_requires=['aiorpcX[ws]>=0.18.5,<0.19', 'attrs',
'plyvel', 'pylru', 'aiohttp>=3.3,<4'],
extras_require={
'rapidjson': ['python-rapidjson>=0.4.1,<2.0'],
'rocksdb': ['python-rocksdb>=0.6.9'],
'ujson': ['ujson>=2.0.0,<4.0.0'],
'uvloop': ['uvloop>=0.14'],
# For various coins
'blake256': ['blake256>=0.1.1'],
'crypto': ['pycryptodomex>=3.8.1'],
'groestl': ['groestlcoin-hash>=1.0.1'],
'tribushashm': ['tribushashm>=1.0.5'],
'xevan-hash': ['xevan-hash'],
'x11-hash': ['x11-hash>=1.4'],
'zny-yespower-0-5': ['zny-yespower-0-5'],
'bell-yespower': ['bell-yespower'],
'cpupower': ['cpupower'],
},
packages=setuptools.find_packages(include=('electrumx*',)),
description='ElectrumX Server',
author='Electrum developers',
author_email='electrumdev@gmail.com',
license='MIT Licence',
url='https://github.com/litecoinfinance/electrumx',
long_description='Server implementation for the Electrum protocol',
download_url=('https://github.com/litecoinfinance/electrumX/archive/'
f'{version}.tar.gz'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: AsyncIO',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
"Programming Language :: Python :: 3.7",
"Topic :: Database",
'Topic :: Internet',
],
)
| 36.847826
| 79
| 0.575811
|
30e3a64bedf5d972327d7eefc2652153a242917f
| 777
|
py
|
Python
|
src/services/cached.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
src/services/cached.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
src/services/cached.py
|
BoaVaga/boavaga_server
|
7d25a68832d3b9f4f5666d0a3d55c99025498511
|
[
"MIT"
] | null | null | null |
from pymemcache import serde, PooledClient
class Cached:
def __init__(self, host: str, port: int):
self.client = PooledClient((host, port), serde=serde.pickle_serde)
def get(self, group: str, key: str):
return self.client.get(self._gen_key_name(group, key))
def contains(self, group: str, key: str):
return self.client.get(self._gen_key_name(group, key)) is not None
def set(self, group: str, key: str, value):
self.client.set(self._gen_key_name(group, key), value)
def remove(self, group: str, key: str):
self.client.delete(self._gen_key_name(group, key))
@staticmethod
def _gen_key_name(group: str, key: str) -> str:
return group + ':' + key
def close(self):
self.client.close()
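
# Illustrative usage sketch (not part of the original module); the host and
# port below are placeholders for a locally running memcached instance.
if __name__ == '__main__':
    cache = Cached('127.0.0.1', 11211)
    cache.set('users', '42', {'name': 'Ada'})
    assert cache.contains('users', '42')
    print(cache.get('users', '42'))
    cache.remove('users', '42')
    cache.close()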
| 29.884615
| 74
| 0.649936
|
35c2109304af9356b1c37410f9830e5db6953259
| 602
|
py
|
Python
|
tests.py
|
iSab01/deep-FinGAF
|
688a4ea0860b25e15847d7fb0e0bd4ff6d7f22d0
|
[
"MIT"
] | null | null | null |
tests.py
|
iSab01/deep-FinGAF
|
688a4ea0860b25e15847d7fb0e0bd4ff6d7f22d0
|
[
"MIT"
] | null | null | null |
tests.py
|
iSab01/deep-FinGAF
|
688a4ea0860b25e15847d7fb0e0bd4ff6d7f22d0
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from DataGeneration.GramianAngularField import normalize, polar_encoding, make_gaf
def test_normalize():
x = np.arange(100)
x = normalize(x)
assert (max(x), min(x)) == (1, -1)
def test_polar_encoding():
x = np.array([1, -1, 0, 0.5])
time_stamp = np.array([0, 1, 2, 3]) / 4
    assert (polar_encoding(x, time_stamp) == np.array([0, -1. / 4., 0, 1.5 / 4])).all()
@pytest.mark.parametrize("test_input,expected", [(np.array([1]), 1), (np.array([0]), -1)])
def test_make_gaf(test_input, expected):
assert make_gaf(test_input)[0, 0] == expected
| 28.666667
| 90
| 0.642857
|
8ba61bce3d0fe6d1377c618893ca5fb3ad407289
| 2,185
|
py
|
Python
|
SelectConsortiumArchitecture_2Ssakuranetine/Scripts/EcPp3_individualTestFLYCOP_v0_generalized.py
|
ivanmm25/FLYCOPtools
|
46e4b96d506e1e29e131f0d1b1cea6a7a10d6fdf
|
[
"MIT"
] | 1
|
2021-10-19T11:09:25.000Z
|
2021-10-19T11:09:25.000Z
|
SelectConsortiumArchitecture_2Ssakuranetine/Scripts/EcPp3_individualTestFLYCOP_v0_generalized.py
|
ivanmm25/FLYCOPtools
|
46e4b96d506e1e29e131f0d1b1cea6a7a10d6fdf
|
[
"MIT"
] | null | null | null |
SelectConsortiumArchitecture_2Ssakuranetine/Scripts/EcPp3_individualTestFLYCOP_v0_generalized.py
|
ivanmm25/FLYCOPtools
|
46e4b96d506e1e29e131f0d1b1cea6a7a10d6fdf
|
[
"MIT"
] | 1
|
2021-12-01T17:41:11.000Z
|
2021-12-01T17:41:11.000Z
|
#!/usr/bin/python3
############ FLYCOP ############
# Author: Beatriz García-Jiménez, Iván Martín Martín
# April 2018, June 2021
################################
"""
INDIVIDUAL TEST FILE for repeated execution of the optimal configuration found through FLYCOP.
Functions used in the current script:
* SelectConsortiumArchitecture(**args) from EcPp3_generalized.py
NOTE THAT the argument 'initial_biomass' is composed as a series of initial biomass
values returned as a string ('initial_biomass_str'), further splitted to be given
to the last function as a list.
"""
print("\nInitialize individualTest execution\n")
import sys
sys.path.append('../../Scripts')
# import os
import EcPp3_generalized
# Number of args by command line
n_line_args = len(sys.argv)
# OTHER VARIABLES
sd_cutoff = 0.1
maxCycles = 240
# STUDY PARAMETERS OPTIMIZED BY SMAC
# ------------------------------------------
# UPTAKE RATES
sucr1 = float(sys.argv[1])
frc2 = float(sys.argv[2])
nh4_Ec = float(sys.argv[3])
nh4_KT = float(sys.argv[4])
# FVA ratios for the iEC1364 model
FVApCA = float(sys.argv[5])
FVAfru = float(sys.argv[6])
FVAMetNar = float(sys.argv[7])
FVANar = float(sys.argv[8])
# CONSORTIUM ARCH AND FITNESS VALUES
consortium_arch = sys.argv[9]
fitness = sys.argv[n_line_args-1]
# BIOMASSES
# The second argument, from the end, corresponds to the biomass_string in the FLYCOPAnalyzingResults file, individualTestFLYCOP line
initial_biomass_str = sys.argv[n_line_args-2]
initial_biomass = [float(init_biomass) for init_biomass in initial_biomass_str.split()]
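# For illustration only (the values are hypothetical): an argument string
# such as "0.05 0.1 0.1" is parsed above into [0.05, 0.1, 0.1].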
# RUN EXECUTION
avgfitness, sdfitness, strains_list = EcPp3_generalized.SelectConsortiumArchitecture(
    sucr1, frc2, nh4_Ec, nh4_KT, FVApCA, FVAfru, FVAMetNar, FVANar,
    consortium_arch, initial_biomass,
    fitObj='MaxMetNar', maxCycles=maxCycles, dirPlot='', repeat=5,
    sd_cutoff=sd_cutoff, models_summary=True)
print("\nComplete individualTest execution\n")
| 30.347222
| 168
| 0.643021
|
0a4de3658870809d76535399c2e4b5b99a627cb7
| 18,552
|
py
|
Python
|
owslib/coverage/wcs100.py
|
bradh/OWSLib
|
38282ae839db47eab02e7fbcbce3199f4dfeea7b
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/coverage/wcs100.py
|
bradh/OWSLib
|
38282ae839db47eab02e7fbcbce3199f4dfeea7b
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/coverage/wcs100.py
|
bradh/OWSLib
|
38282ae839db47eab02e7fbcbce3199f4dfeea7b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os, errno
# function to save writing out WCS namespace in full each time
def ns(tag):
return '{http://www.opengis.net/wcs}'+tag
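# For example, ns('Service') evaluates to '{http://www.opengis.net/wcs}Service'.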
class WebCoverageService_1_0_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 1.0.0
Implements IWebCoverageService.
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError, "No content named %s" % name
def __init__(self,url,xml, cookies):
self.version='1.0.0'
self.url = url
self.cookies=cookies
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url)
# check for exceptions
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
#serviceIdentification metadata
subelem=self._capabilities.find(ns('Service'))
self.identification=ServiceIdentification(subelem)
#serviceProvider metadata
subelem=self._capabilities.find(ns('Service/')+ns('responsibleParty'))
self.provider=ServiceProvider(subelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find(ns('Capability/')+ns('Request'))[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata
self.contents={}
for elem in self._capabilities.findall(ns('ContentMetadata/')+ns('CoverageOfferingBrief')):
cm=ContentMetadata(elem, self)
self.contents[cm.id]=cm
#Some WCS servers (wrongly) advertise 'Content' OfferingBrief instead.
if self.contents=={}:
for elem in self._capabilities.findall(ns('ContentMetadata/')+ns('ContentOfferingBrief')):
cm=ContentMetadata(elem, self)
self.contents[cm.id]=cm
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def __makeString(self,value):
#using repr unconditionally breaks things in some circumstances if a value is already a string
if type(value) is not str:
sval=repr(value)
else:
sval = value
return sval
def getCoverage(self, identifier=None, bbox=None, time=None, format = None, crs=None, width=None, height=None, resx=None, resy=None, resz=None,parameter=None,method='Get',**kwargs):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with multi-version implementation
core keyword arguments should be supported cross version
example:
cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='cf-netcdf')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
"""
self.log.debug('WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s, parameter=%s, method=%s, other_arguments=%s'%(identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))
base_url = self.getOperationByName('GetCoverage').methods[method]['url']
self.log.debug('WCS 1.0.0 DEBUG: base url of server: %s'%base_url)
#process kwargs
request = {'version': self.version, 'request': 'GetCoverage', 'service':'WCS'}
assert len(identifier) > 0
request['Coverage']=identifier
#request['identifier'] = ','.join(identifier)
if bbox:
request['BBox']=','.join([self.__makeString(x) for x in bbox])
else:
request['BBox']=None
if time:
request['time']=','.join(time)
if crs:
request['crs']=crs
request['format']=format
if width:
request['width']=width
if height:
request['height']=height
if resx:
request['resx']=resx
if resy:
request['resy']=resy
if resz:
request['resz']=resz
#anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
#encode and request
data = urlencode(request)
self.log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s'%data)
u=openURL(base_url, data, method, self.cookies)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError, "No operation named %s" % name
class OperationMetadata(object):
"""Abstraction for WCS metadata.
Implements IMetadata.
"""
def __init__(self, elem):
"""."""
self.name = elem.tag.split('}')[1]
#self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')]
methods = []
for resource in elem.findall(ns('DCPType/')+ns('HTTP/')+ns('Get/')+ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
methods.append(('Get', {'url': url}))
for resource in elem.findall(ns('DCPType/')+ns('HTTP/')+ns('Post/')+ns('OnlineResource')):
url = resource.attrib['{http://www.w3.org/1999/xlink}href']
methods.append(('Post', {'url': url}))
self.methods = dict(methods)
class ServiceIdentification(object):
""" Abstraction for ServiceIdentification metadata """
def __init__(self,elem):
# properties
self.type='OGC:WCS'
self.version='1.0.0'
self.service = testXMLValue(elem.find(ns('name')))
self.abstract = testXMLValue(elem.find(ns('description')))
self.title = testXMLValue(elem.find(ns('name')))
self.keywords = [f.text for f in elem.findall(ns('keywords')+'/'+ns('keyword'))]
#note: differs from 'rights' in interface
self.fees=elem.find(ns('fees')).text
self.accessConstraints=elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
""" Abstraction for WCS ResponsibleParty
Implements IServiceProvider"""
def __init__(self,elem):
#it's not uncommon for the service provider info to be missing
#so handle case where None is passed in
if elem is None:
self.name=None
self.url=None
self.contact = None
else:
self.name=testXMLValue(elem.find(ns('organisationName')))
self.url=self.name #there is no definitive place for url WCS, repeat organisationName
self.contact=ContactMetadata(elem)
class ContactMetadata(object):
''' implements IContactMetadata'''
def __init__(self, elem):
try:
self.name = elem.find(ns('individualName')).text
except AttributeError:
self.name = None
try:
self.organization=elem.find(ns('organisationName')).text
except AttributeError:
self.organization = None
try:
self.address = elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('deliveryPoint')).text
except AttributeError:
self.address = None
try:
self.city= elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('city')).text
except AttributeError:
self.city = None
try:
self.region=elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('administrativeArea')).text
except AttributeError:
self.region = None
try:
self.postcode=elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('postalCode')).text
except AttributeError:
self.postcode=None
try:
self.country=elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('country')).text
except AttributeError:
self.country = None
try:
self.email=elem.find(ns('contactInfo')+'/'+ns('address')+'/'+ns('electronicMailAddress')).text
except AttributeError:
self.email = None
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
#TODO - examine the parent for bounding box info.
#self._parent=parent
self._elem=elem
self._service=service
self.id=elem.find(ns('name')).text
self.title = testXMLValue(elem.find(ns('label')))
self.abstract= testXMLValue(elem.find(ns('description')))
self.keywords = [f.text for f in elem.findall(ns('keywords')+'/'+ns('keyword'))]
self.boundingBox=None #needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns('lonLatEnvelope'))
if b is not None:
gmlpositions=b.findall('{http://www.opengis.net/gml}pos')
lc=gmlpositions[0].text
uc=gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]),float(lc.split()[1]),
float(uc.split()[0]), float(uc.split()[1]),
)
#others not used but needed for iContentMetadata harmonisation
self.styles=None
self.crsOptions=None
self.defaulttimeposition=None
#grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, 'descCov'):
self.descCov=self._service.getDescribeCoverage(self.id)
gridelem= self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}RectifiedGrid')
if gridelem is not None:
grid=RectifiedGrid(gridelem)
else:
gridelem=self.descCov.find(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}Grid')
grid=Grid(gridelem)
return grid
grid=property(_getGrid, None)
#timelimits are the start/end times, timepositions are all timepoints. WCS servers can declare one or both or neither of these.
def _getTimeLimits(self):
timepoints, timelimits=[],[]
b=self._elem.find(ns('lonLatEnvelope'))
if b is not None:
timepoints=b.findall('{http://www.opengis.net/gml}timePosition')
else:
#have to make a describeCoverage request...
if not hasattr(self, 'descCov'):
self.descCov=self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
timepoints.append(pos)
if timepoints:
timelimits=[timepoints[0].text,timepoints[1].text]
return timelimits
timelimits=property(_getTimeLimits, None)
def _getTimePositions(self):
timepositions=[]
if not hasattr(self, 'descCov'):
self.descCov=self._service.getDescribeCoverage(self.id)
for pos in self.descCov.findall(ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
timepositions.append(pos.text)
return timepositions
timepositions=property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
''' incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod.'''
bboxes=[]
if not hasattr(self, 'descCov'):
self.descCov=self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(ns('CoverageOffering/')+ns('domainSet/')+ns('spatialDomain/')+'{http://www.opengis.net/gml}Envelope'):
bbox = {}
bbox['nativeSrs'] = envelope.attrib['srsName']
gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
lc=gmlpositions[0].text.split()
uc=gmlpositions[1].text.split()
bbox['bbox'] = (
float(lc[0]),float(lc[1]),
float(uc[0]), float(uc[1])
)
bboxes.append(bbox)
return bboxes
boundingboxes=property(_getOtherBoundingBoxes,None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss=[]
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageOffering/')+ns('supportedCRSs/')+ns('responseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageOffering/')+ns('supportedCRSs/')+ns('requestResponseCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageOffering/')+ns('supportedCRSs/')+ns('nativeCRSs')):
for crs in elem.text.split(' '):
crss.append(Crs(crs))
return crss
supportedCRS=property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts =[]
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageOffering/')+ns('supportedFormats/')+ns('formats')):
frmts.append(elem.text)
return frmts
supportedFormats=property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
#gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs =[]
for elem in self._service.getDescribeCoverage(self.id).findall(ns('CoverageOffering/')+ns('rangeSet/')+ns('RangeSet/')+ns('axisDescription/')+ns('AxisDescription')):
axisDescs.append(AxisDescription(elem)) #create a 'AxisDescription' object.
return axisDescs
axisDescriptions=property(_getAxisDescriptionsProperty, None)
#Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
#(where cvg is a member of the contents dictionary)
#There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding, therefore this is beyond the current scope of owslib, so the representation here is purely to provide access to the information in the GML.
class Grid(object):
''' Simple grid class to provide axis and value information for a gml grid '''
def __init__(self, grid):
self.axislabels = []
self.dimension=None
self.lowlimits=[]
self.highlimits=[]
if grid is not None:
self.dimension=int(grid.get('dimension'))
self.lowlimits= grid.find('{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}low').text.split(' ')
self.highlimits = grid.find('{http://www.opengis.net/gml}limits/{http://www.opengis.net/gml}GridEnvelope/{http://www.opengis.net/gml}high').text.split(' ')
for axis in grid.findall('{http://www.opengis.net/gml}axisName'):
self.axislabels.append(axis.text)
class RectifiedGrid(Grid):
''' RectifiedGrid class, extends Grid with additional offset vector information '''
def __init__(self, rectifiedgrid):
super(RectifiedGrid,self).__init__(rectifiedgrid)
self.origin=rectifiedgrid.find('{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos').text.split()
self.offsetvectors=[]
for offset in rectifiedgrid.findall('{http://www.opengis.net/gml}offsetVector'):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
''' Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels'''
def __init__(self, axisdescElem):
self.name=self.label=None
self.values=[]
for elem in axisdescElem.getchildren():
if elem.tag == ns('name'):
self.name = elem.text
elif elem.tag == ns('label'):
self.label = elem.text
elif elem.tag == ns('values'):
for child in elem.getchildren():
self.values.append(child.text)
| 44.489209
| 323
| 0.610069
|
3ec7523cb9a2ca0c2804169920266a1a1d5c400b
| 9,266
|
py
|
Python
|
tests/pytest/fft_tests/test_fft_numpy1_18_1.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | 11
|
2020-07-31T02:21:55.000Z
|
2022-03-10T03:12:11.000Z
|
tests/pytest/fft_tests/test_fft_numpy1_18_1.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/pytest/fft_tests/test_fft_numpy1_18_1.py
|
SX-Aurora/nlcpy
|
0a53eec8778073bc48b12687b7ce37ab2bf2b7e0
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, absolute_import, print_function
import numpy
import nlcpy as np
import pytest
from nlcpy.random import random
from nlcpy.testing import ( # NOQA
assert_array_equal, assert_allclose)
from numpy.testing import assert_raises
# import threading
# import sys
# if sys.version_info[0] >= 3:
# import queue
# else:
# import Queue as queue
def fft1(x):
L = len(x)
phase = -2j * np.pi * (np.arange(L) / float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x * np.exp(phase), axis=1)
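# fft1 above is a direct O(N^2) evaluation of the DFT definition,
# X[k] = sum_n x[n] * exp(-2j*pi*k*n/L); the tests use it as a slow
# reference implementation against which np.fft.fft is checked.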
class TestFFTShift(object):
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(object):
# TODO
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j * random(maxlen)
# xr = random(maxlen) # local variable 'xr' is assigned to but never used
for i in range(1, maxlen):
assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
atol=1e-12)
# assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
# xr[0:i], atol=1e-12)
def test_fft(self):
x = random(30) + 1j * random(30)
assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
@pytest.mark.parametrize('norm', (None, 'ortho'))
def test_ifft(self, norm):
x = random(30) + 1j * random(30)
assert_allclose(
x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
# Ensure we get the correct error message
with pytest.raises(ValueError):
# ,match='Invalid number of FFT data points'):
np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j * random((30, 20))
assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x), atol=1e-6)
assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j * random((30, 20))
assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j * random((30, 20, 10))
assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x), atol=1e-6)
assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j * random((30, 20, 10))
assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2 * x.size]:
for norm in [None, 'ortho']:
assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)],
np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
def test_irfft(self):
x = random(30)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"), atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j * random(14)
x_herm = np.concatenate((random(1), x, random(1)))
# x = np.concatenate((x_herm, x[::-1].conj()))
x = np.concatenate((x_herm, np.conj(x[::-1])))
assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
    def test_ihfft(self):
x = random(14) + 1j * random(14)
x_herm = np.concatenate((random(1), x, random(1)))
# x = np.concatenate((x_herm, x[::-1].conj()))
x = np.concatenate((x_herm, np.conj(x[::-1])))
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
assert_allclose(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
norm="ortho"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def _test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_allclose(op_tr, tr_op, atol=1e-6)
# TODO
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
# x_norm = np.linalg.norm(x)
x_norm = numpy.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
# (np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
# (np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2 * x.size]:
for norm in [None, 'ortho']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_allclose(x_norm,
# np.linalg.norm(tmp),
numpy.linalg.norm(tmp),
atol=1e-6)
# TODO
@pytest.mark.parametrize("dtype", [np.single, # np.half,
np.double]) # numpy.longdouble
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted and internally
# converted to 64bit
# x = random(30).astype(dtype)
x = random(30).astype(dtype)
assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
# assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
# TODO
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128]
)
@pytest.mark.parametrize(
"order",
["F", "C"] # 'non-contiguous'
)
@pytest.mark.parametrize(
"fft",
[np.fft.fft, # np.fft.fft2, np.fft.fftn,
np.fft.ifft] # ,np.fft.ifft2, np.fft.ifftn
)
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non contiguous arrays
rng = np.random.RandomState(42)
# X = rng.rand(8, 7, 13).astype(dtype, copy=False)
X = rng.rand((8, 7, 13)).astype(dtype, copy=False)
# See discussion in pull/14178
_tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
# Y = np.asfortranarray(X)
Y = np.asarray(X, order='F')
else:
# Make a non contiguous array
# #Y = X[::-1]
# #X = np.ascontiguousarray(X[::-1])
Y = X[:-1]
X = np.asarray(X[:-1], order='C')
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
raise ValueError()
| 38.131687
| 85
| 0.529355
|
6c298c8e0fa8c5f6f57f978604a0c598bdf7f407
| 1,859
|
py
|
Python
|
tests/test_core_query_shell.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
tests/test_core_query_shell.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
tests/test_core_query_shell.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring
from __future__ import annotations
import pytest
from beancount.query.query import run_query
from .conftest import data_file
from fava.core import FavaLedger
from fava.helpers import FavaAPIException
LEDGER = FavaLedger(data_file("query-example.beancount"))
QUERY = LEDGER.query_shell
def run(query_string):
return QUERY.execute_query(query_string)
def run_text(query_string):
"""Run a query that should only return string contents."""
contents, types, result = run(query_string)
assert types is None
assert result is None
return contents
def test_query() -> None:
assert run_text("help")
assert (
run_text("help exit") == "Doesn't do anything in Fava's query shell."
)
assert run("lex select date, balance")[0] == "\n".join(
[
"LexToken(SELECT,'SELECT',1,0)",
"LexToken(ID,'date',1,7)",
"LexToken(COMMA,',',1,11)",
"LexToken(ID,'balance',1,13)",
]
)
assert run_text("run") == "custom_query\ncustom query with space"
bal = run("balances")
assert run("run custom_query") == bal
assert run("run 'custom query with space'") == bal
assert run("balances")[1:] == run_query(
LEDGER.entries, LEDGER.options, "balances"
)
assert (
run_text("asdf")
== "ERROR: Syntax error near 'asdf' (at 0)\n asdf\n ^"
)
def test_query_to_file(snapshot):
name, data = QUERY.query_to_file("run custom_query", "csv")
assert name == "custom_query"
name, data = QUERY.query_to_file("balances", "csv")
assert name == "query_result"
snapshot(data.getvalue())
with pytest.raises(FavaAPIException):
QUERY.query_to_file("select sdf", "csv")
with pytest.raises(FavaAPIException):
QUERY.query_to_file("run testsetest", "csv")
| 28.166667
| 77
| 0.651963
|
58b974e804197ce71d7ee508d32beb3e7163a314
| 22,730
|
py
|
Python
|
src/lib/2 Eye Tracking/preprocess_eye.py
|
galbiati/mnk-cleaning-analysis
|
d8e8b13b7a2c6431e453430588fa85fd694b3373
|
[
"MIT"
] | null | null | null |
src/lib/2 Eye Tracking/preprocess_eye.py
|
galbiati/mnk-cleaning-analysis
|
d8e8b13b7a2c6431e453430588fa85fd694b3373
|
[
"MIT"
] | null | null | null |
src/lib/2 Eye Tracking/preprocess_eye.py
|
galbiati/mnk-cleaning-analysis
|
d8e8b13b7a2c6431e453430588fa85fd694b3373
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Wei Ji Ma Lab, New York University Center for Neural Science
# By: Gianni Galbiati
# Standard Python Libraries (alphabetical order)
import os
# Scientific Python Libraries (alphabetical order)
import numpy as np
import pandas as pd
from scipy.stats import multivariate_normal
# Internal Python Libraries (alphabetical order)
# Set up directory and filepath references
data_dir = os.path.expanduser('~/Google Drive/Bas Zahy Gianni - Games/Data/2_eye/New')
game_dir = os.path.join(data_dir, 'game')
eyet_dir = os.path.join(data_dir, 'eyet')
mous_dir = os.path.join(data_dir, 'mous')
output_dir = os.path.expanduser('~/Google Drive/Bas Zahy Gianni - Games/Analysis/2_eye/histograms/temp')
os.makedirs(output_dir, exist_ok=True)
game_files = [os.path.join(game_dir, g) for g in os.listdir(game_dir) if g[-3:] == 'csv']
eyet_files = [os.path.join(eyet_dir, e) for e in os.listdir(eyet_dir) if e[-3:]=='csv']
mous_files = [os.path.join(mous_dir, m) for m in os.listdir(mous_dir) if m[-3:]=='csv']
# Get subject identifiers
subject_initial_map = [g[-6:-4] for g in game_files] # get alphabetical list of subject initials from filenames
subject_initial_map = dict(zip(subject_initial_map, np.arange(len(subject_initial_map))))
# Dimensions of board display in pixels
top = 192
bottom = 506
left = 177
right = 889
width = right - left
height = bottom - top
def mouse_x_to_tile(x):
"""Converts mouse x coordinates to board-space"""
return 9 * (x - left) / width
def mouse_y_to_tile(y):
"""Converts mouse y coordinates to board-space"""
return 4 * (y - top) / height
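# With the display bounds defined above, mouse_x_to_tile(left) == 0.0 and
# mouse_x_to_tile(right) == 9.0, while mouse_y_to_tile(top) == 0.0 and
# mouse_y_to_tile(bottom) == 4.0; points inside the board therefore map to
# fractional coordinates that later code truncates to integer tile indices.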
def expand_mouse_mt(row):
"""
Appends start time, end time to mouse timestamp records for a single record
For use with pd.DataFrame.apply()
"""
endtime = int(row['ts']) # get turn end timestamp
starttime = endtime - int(row['rt']) # get turn start from turn end and turn duration
# add start, end times to respective ends of record
if type(row['mt']) == str: # check if valid data
return str(starttime) + ',' + row['mt'] + ',' + str(endtime)
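# Illustrative example with made-up values: a record with ts=1000, rt=200 and
# mt='850,900' is expanded by expand_mouse_mt into '800,850,900,1000'.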
def expand_mouse_mx(row):
"""
Appends start time location, end time location to mouse spatial coordinates for a single record
For use with pd.DataFrame.apply()
"""
endtime = int(row['ts']) # get turn end timestamp
starttime = endtime - int(row['rt']) # get turn start from turn end and turn duration
if type(row['mx']) == str: # check if valid data
locs = row['mx'].split(';') # split record into (x, y) pair strings
endloc = locs[-1] # select first and last coordinate pairs
startloc = locs[0]
return startloc + ';' + row['mx'] + ';' + endloc # add start, end coords to respective ends of record
def fix_game_boards(row):
"""
Removes move from appropriate color board string representation for a single record
For use with pd.DataFrame.apply()
"""
bp, wp = row[['bp', 'wp']] # select board string reps
if row['color']==0: # if Black is player
p = list(bp) # convert board string to list (mutability)
p[int(row['zet'])] = '0' # set list at zet loc to be '0'
else: # if White is player, do the same thing for White's board
p = list(wp)
p[int(row['zet'])] = '0'
return ''.join(p) # rejoin into new string
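# Illustrative example with made-up values: for a record with color == 0 and
# zet == 5, fix_game_boards returns the Black board string 'bp' with the
# character at index 5 reset to '0'.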
def load_game_file(gf):
"""Loads and preprocesses data from game observations"""
# Labels for fields
gfnames = [
'idx', 'id', 'color', 'gi', 'mi', 'status',
'bp', 'wp', 'zet', 'rt',
'ts', 'mt', 'mx'
]
# Load csv into a dataframe
D = pd.read_csv(gf, names=gfnames)
# Remove convenience records by AI
readyfilter = D['status'] == 'ready' # filter on convenience records
rtfilter = D['rt'] == 0 # filter on AI moves
aifilter = ~(readyfilter & rtfilter) # filter out convenience AI records
D = D.loc[aifilter] # apply filter
# Make necessary data corrections
D.loc[readyfilter, 'rt'] = 0 # set human convenience records rt field to 0
D['subject'] = gf[-6:-4] # set subject field to initials in game file name
D['human'] = D['subject'].map(subject_initial_map) # set human field to be subject index (alphabetical)
D['move_start_ts'] = (D['ts'] - D['rt']).shift(-1) # set move_start_timestamp field to turn beginning
tsfilter = rtfilter | readyfilter # filter on ai OR convenience records
D.loc[tsfilter, 'ts'] = D.loc[tsfilter, 'move_start_ts'] # replace invalid timestamps with inferred move start
D['mx'] = D.apply(expand_mouse_mx, axis=1) # append move start and end mouse spatial coords
D['mt'] = D.apply(expand_mouse_mt, axis=1) # append move start and end timestamps to mouse timestamps
D['is human'] = 1 # initialize human player indicator variable
playfilter = D['status'].isin(['playing', 'win', 'draw']) # filter on non-convenience records
D.loc[playfilter & rtfilter, 'is human'] = 0 # set human player indicator to 0 on AI records
endfilter = D['status'].isin(['win', 'draw']) # filter on game end records
idx = D.loc[endfilter].index # get indices for game end filter application
if D.loc[idx[-1], 'rt'] != 0: # if human player ended last game
D.loc[idx[-1], 'gi'] = D.loc[idx[-1], 'gi'] - 1 # subtract 1 from game index (why? probably a data error)
bpfilter = D['color'] == 0 # filter on player colors
wpfilter = D['color'] == 1
# Apply filters and remove last move from board
D.loc[bpfilter, 'bp'] = D.loc[bpfilter].apply(fix_game_boards, axis=1)
D.loc[wpfilter, 'wp'] = D.loc[wpfilter].apply(fix_game_boards, axis=1)
return D.set_index('ts') # set index to timestamps
def load_mouse_file(mf):
"""Loads and preprocesses mouse tracking data"""
mfnames = [
'idx', 'id', 'color', 'gi', 'mi',
'status', 'bp', 'wp', 'zet',
'rt', 'ts', 'mt', 'mx'
] # names for columns
D = pd.read_csv(mf, names=mfnames) # load csv into pandas dataframe
D['mx'] = D.apply(expand_mouse_mx, axis=1) # append start and end mouse spatial coords
D['mt'] = D.apply(expand_mouse_mt, axis=1) # append start and end mouse timestamps
D = D[['mt', 'mx']] # lose the fluff
valid = pd.notnull(D['mt']) # select records with valid mouse time series
m = (D.loc[valid, 'mt'] + ',').sum().split(',')[:-1] # combine all mouse timestamp records
x = [tuple(xy.split(',')) for xy in (D.loc[valid, 'mx'] + ';').sum().split(';')][:-1]
# combine all mouse coordinate records
M = pd.DataFrame(index=m, data=x, columns=['x', 'y']) # new dataframe with timestamp index and coordinates
M['subject'] = mf[-6:-4] # set subject field to initials
M['human'] = M['subject'].map(subject_initial_map) # set human field to subject ordinal index
M.index = M.index.astype(np.int64) # cast timestamp index to integers
return M
def load_eyetracker_file(ef):
"""Loads and preprocesses eyetracker data"""
D = pd.read_csv(ef) # load EL data into pandas dataframe
D['subject'] = ef[-6:-4] # set subject field to initials
D['human'] = D['subject'].map(subject_initial_map) # set human field to subject ordinal index
# Set start and end fields to ms resolution, integers
D[['start', 'end']] = (D[['start', 'end']]*1000).astype(np.int64)
# Lose the fluff; index by start timestamp
return D[['start', 'end', 'transx', 'transy', 'human']].set_index('start')
def make_tidy(e, m, g):
"""Produces a combined dataframe of mouse and eye coordinates, indexed by timestamp"""
start_time = int(e.index[0]) # get the eyetracker start time
end_time = int(e.loc[e.index.values[-1], 'end']) # get the eyetracker end time
mbounds = (m.index >= start_time) & (m.index <= end_time) # filter on mouse records within EL record bounds
m = m.loc[mbounds] # apply filter
idx = np.arange(start_time, end_time, 1) # prepare index for new dataframe
D = pd.DataFrame(index=idx) # new dataframe for tidy timeseries
D.loc[e.index, 'eyex'] = e['transx'].astype(float) # get valid eye coordinates in board space
D.loc[e.index, 'eyey'] = e['transy'].astype(float)
D.loc[e.index, 'eyeflag'] = 1 # indicator for eye event
D.loc[m.index, 'moux'] = m['x'].astype(float).map(mouse_x_to_tile) # get valid mouse coords and map to board space
D.loc[m.index, 'mouy'] = m['y'].astype(float).map(mouse_y_to_tile)
D.loc[m.index, 'mouflag'] = 1 # indicator for mouse event
_sl = g.loc[g.index > start_time, :] # selector for valid game events
D.loc[_sl.index, 'turn'] = 100*_sl['gi'] + _sl['mi'] # unique id for turns for valid game events
D.loc[_sl.index, 'task'] = _sl['status'] # task indicator
D = D.dropna(how='all') # shrink dataframe by pruning all event-less records
# Fill fields forward
fillcols = ['eyex', 'eyey', 'moux', 'mouy', 'turn', 'task']
D[fillcols] = D[fillcols].fillna(method='ffill')
D['ts'] = D.index # convenience field of timestamps
# Set duration for each event of each type
D.loc[D['eyeflag'] == 1, 'eyedur'] = D.loc[D['eyeflag'] ==1, 'ts'].diff(periods=1)
D.loc[D['mouflag'] == 1, 'moudur'] = D.loc[D['mouflag'] == 1, 'ts'].diff(periods=1)
# Convert board coordinates to tile index
D['eyetile'] = D['eyex'].astype(np.int64) + 9*D['eyey'].astype(np.int64)
mouvalid = ~pd.isnull(D['moux'])
D.loc[mouvalid, 'moutile'] = D.loc[mouvalid, 'moux'].astype(np.int64) + 9*D.loc[mouvalid, 'mouy'].astype(np.int64)
# Cast valid tile vals to int (np.nan is float)
    D.loc[D['eyeflag'] == 1, 'eyetile'] = D.loc[D['eyeflag'] == 1, 'eyetile'].astype(np.int64)
    D.loc[D['mouflag'] == 1, 'moutile'] = D.loc[D['mouflag'] == 1, 'moutile'].astype(np.int64)
return D
def mouse_hist(m, g):
"""Modifies mousetracking data to produce histograms over tile indices"""
g['turn'] = 100*g['gi'] + g['mi'] # add unique turn ids
turnfilter = g['status'].isin(['playing', 'draw', 'win'])
# filter on non-convenience records
gp = g.loc[turnfilter] # apply filter
m['turn'] = np.nan # initialize helper fields
m['turnstart'] = np.nan
m['turnend'] = np.nan
m['ts'] = m.index
m['xtile'] = np.nan
m['ytile'] = np.nan
m['tile'] = np.nan
m['dur'] = np.nan
m['is human'] = np.nan
m = m.drop_duplicates(subset='ts') # get rid of duplicate timestamps
m.loc[gp.index, 'turn'] = gp['turn'] # add helper data to mouse df
m.loc[gp.index, 'turnstart'] = gp.index - gp['rt']
m.loc[gp.index, 'turnend'] = gp.index
m.loc[gp.index, 'is human'] = gp['is human']
m = m.sort_index() # sort mouse data by timestamp
fillthese = ['turn', 'turnstart', 'turnend', 'is human']
# helper columns to fill
m[fillthese] = m[fillthese].fillna(method='bfill') # backfill missing data
m['dur'] = m.index
m['dur'] = m['dur'].diff(periods=1) # compute duration of each event
eventbounds = (m.index > m['turnstart']) & (m.index <= m['turnend'])
# filter on mouse data within player turn
m = m.loc[eventbounds] # apply filter
m['xtile'] = m['x'].astype(float).map(mouse_x_to_tile) # map mouse coords to board coords
m['ytile'] = m['y'].astype(float).map(mouse_y_to_tile)
m['tile'] = (m['xtile'].astype(np.int64) + 9*m['ytile'].astype(np.int64)) # compute mouse tile
humanfilter = m['is human'] == 1 # filter on human moves (mouse df)
mpvt = m.loc[humanfilter].pivot_table(index='turn', columns='tile', values='dur', aggfunc=np.sum)
# pivot human trials duration per tile idx
mpvt['rt'] = mpvt.sum(axis=1) # recalculate rt for verification
# Get column names for locations off the board
offboard = [i for i in mpvt.columns if (i not in list(range(36)) and type(i)==int)]
mpvt[999] = mpvt[offboard].sum(axis=1) # combine all offboard durations
humanfilter = g['is human'] == 1 # filter on human moves (game df)
gt = g.loc[turnfilter & humanfilter].set_index('turn') # get non-convenience human records
mpvt.loc[gt.index, 'true rt'] = gt['rt'] # set 'true rt' for verification
mpvt = mpvt.fillna(value=0) # nan values mean 0 duration
# print('Mouse dif from true rt:', np.abs(mpvt['rt'] - mpvt['true rt']).sum())
for c in ['bp', 'wp', 'zet']:
mpvt.loc[gt.index, c] = gt[c] # set other game info fields on hist records
for c in range(36):
if c not in mpvt.columns: # set all nonvisited trials to 0 dur
mpvt[c] = 0
return m, mpvt
def eye_hist(e, g):
"""
Modifies eyetracking data to produce histograms per trial
    note: eye_hist requires including the 'ready' markers to attribute observations to turns correctly, due to latency etc.
(mousetracking does not record until after ready stops)
"""
print('epiv fns')
# Get identifier for each turn
g['turn'] = 100*g['gi'] + g['mi']
# Filter for valid game status records
turn_filter = g['status'].isin(['ready', 'playing', 'draw', 'win'])
gp = g.loc[turn_filter]
# Set turn start and turn end timestamps
gp['turnstart'] = gp.index - gp['rt']
gp['turnend'] = gp.index
# Initialize fields in eyetracking dataframe
e['turnstart'] = np.nan
e['turnend'] = np.nan
e['ts'] = e.index
e['tile'] = np.nan
e['dur'] = np.nan
# Drop duplicate timestamps from e
e = e.drop_duplicates(subset='ts')
# Insert rows from game data at timestamps and sort by time
e = e.append(gp[['turn', 'is human', 'turnstart', 'turnend']])
e = e.sort_index()
# Fill appropriate records backwards in time (game records submitted at END of turn)
fillthese = ['turn', 'turnstart', 'turnend', 'is human']
e[fillthese] = e[fillthese].fillna(method='bfill')
# Convert translated coordinates to tile indices
evalid = ~pd.isnull(e['transx'])
e.loc[evalid, 'tile'] = e.loc[evalid, 'transx'].astype(np.int64) + 9*e.loc[evalid, 'transy'].astype(np.int64)
e['tile'] = e['tile'].fillna(method='ffill')
tilefilter = pd.notnull(e['tile'])
e.loc[tilefilter, 'tile'] = e.loc[tilefilter, 'tile'].astype(np.int64)
# Calculate observation durations
e['dur'] = e.index
e['dur'] = e['dur'].diff(periods=1)
# Filter out observations that don't happen during turn
eyebounds = (e.index >= e['turnstart']) & (e.index <= e['turnend'])
e = e.loc[eyebounds]
# Get total duration per turn for human trials
ehumanfilter = e['is human'] == 1
eendfilter = pd.notnull(e['end'])
good_turns = e.loc[eendfilter, 'turn']
epvt = e.loc[ehumanfilter & eendfilter].pivot_table(
index='turn', columns='tile', values='dur', aggfunc=np.sum
)
epvt.columns = epvt.columns.astype(np.int64)
# Calculate response time
epvt['rt'] = epvt.sum(axis=1)
# Combine off-board locations into single location
offboard = [
i for i in epvt.columns
if (i not in list(range(36)) and type(i) == int)
]
epvt[999] = epvt[offboard].sum(axis=1)
# Drop convenience records
turn_filter = g.status.isin(['playing', 'draw', 'win'])
g_human_filter = g['is human'] == 1
g_good_turn_filter = g['turn'].isin(good_turns)
# Get a view of game data indexed by turn (same as epvt)
gt = g.loc[turn_filter & g_human_filter & g_good_turn_filter].set_index('turn')
epvt.loc[gt.index, 'true rt'] = gt['rt']
# Set game data values on epvt
for c in ['bp', 'wp', 'zet']:
epvt.loc[gt.index, c] = gt[c]
# Get rid of entries where gi == mi == 0 and fill in zeros at all missing values
# ? - don't remember why I did this, but probably was due to the way timestamps recorded when AI moved first?
epvt = epvt.loc[(epvt.index % 100) != 0].fillna(value=0)
# Make sure all columns have values
for c in range(36):
if c not in epvt.columns:
epvt[c] = 0
# print(np.abs(epvt['rt'] - epvt['true rt']).sum())
return e, epvt
# Get a grid for norm binning
grid = np.dstack(np.mgrid[0:4, 0:9])
def gausshist(row, cov=1):
"""
Compute a multivariate normal distribution and filter location
For use with np.apply_along_axis()
"""
p = multivariate_normal.pdf(grid, mean=row[:2], cov=cov)
p *= row[2]
return p.reshape(36)
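# Illustrative usage sketch (not part of the original analysis; the rows below are
# made-up (y, x, duration) fixations): np.apply_along_axis turns each row into a
# 36-bin Gaussian-weighted histogram, mirroring how filtermove() uses gausshist().
def _demo_gausshist():
    rows = np.array([[1.0, 4.0, 120.0],
                     [2.5, 7.0, 80.0]])
    return np.apply_along_axis(gausshist, 1, rows)  # shape (2, 36)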
def filtermove(df, cov=1):
"""Apply Gaussian filter to all moves"""
df_ = df.loc[pd.notnull(df['end'])] # & (df['tile'] >= 0) & (df['tile'] < 36)]
vals = df_.loc[:, ['transy', 'transx', 'dur']].values
gh = lambda x: gausshist(x, cov=cov)
h = np.apply_along_axis(gh, axis=1, arr=vals)
h = h.sum(axis=0)
h = h / h.sum()
return h
def filterhalf(row, which='first'):
"""
Retrieves only half of an observation
For use with pd.DataFrame.apply()
"""
halfway = row['turnstart'] + (row['turnend'] - row['turnstart']) / 2
if which == 'first':
return row.name <= halfway
else:
return row.name > halfway
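# Illustrative usage sketch (not part of the original analysis; the timestamps and
# turn bounds are made up): filterhalf() is applied row-wise, with the dataframe
# index playing the role of the event timestamp.
def _demo_filterhalf():
    df = pd.DataFrame({'turnstart': [0, 0, 0, 0], 'turnend': [100, 100, 100, 100]},
                      index=[10, 40, 60, 90])
    first = df.apply(filterhalf, axis=1)   # True for timestamps <= halfway point (50)
    return df.loc[first], df.loc[~first]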
def make_filtered_hist(groupeddf, g, filterfunc=filtermove):
"""
Filter an entire histogram
For use with pd.DataFrame.groupby()
"""
filtered = groupeddf.apply(filterfunc)
filtered = pd.DataFrame(index=filtered.index, data=np.stack(filtered.values))
for c in ['bp', 'wp', 'zet']:
filtered[c] = g.loc[filtered.index, c]
return filtered
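# Illustrative usage sketch (not part of the original analysis; the observations and
# game fields below are synthetic placeholders): two fixations grouped under a single
# turn id are collapsed into one Gaussian-filtered 36-bin histogram, then annotated
# with bp/wp/zet looked up from g.
def _demo_make_filtered_hist():
    obs = pd.DataFrame({
        'turn': [101, 101],
        'end': [1.0, 2.0],          # non-null 'end' marks usable observations
        'transy': [1.0, 2.0],       # board-space row coordinate (0-3)
        'transx': [3.0, 5.0],       # board-space column coordinate (0-8)
        'dur': [120.0, 80.0],       # observation durations in ms
    })
    g = pd.DataFrame({'bp': ['<bp>'], 'wp': ['<wp>'], 'zet': [17]}, index=[101])
    return make_filtered_hist(obs.groupby('turn'), g)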
def main():
# Get a list of dataframes for each kind of data
e_list = [load_eyetracker_file(e) for e in eyet_files]
m_list = [load_mouse_file(m) for m in mous_files]
g_list = [load_game_file(g) for g in mous_files]
# create tidy dfs per subject along timestamp index
# t = [make_tidy(e_list[i], m_list[i], g_list[i]) for i in range(len(e_list))]
# Create holding lists for histograms
mpivs = []
epivs = []
fepivs = []
fepivs_wide = []
fepivs_narrow = []
fepivs_half0 = []
fepivs_half1 = []
    # For each subject, generate histograms
for i in range(len(m_list)):
g = g_list[i]
# MOUSE HISTOGRAMS
m_list[i], mpvt = mouse_hist(m_list[i], g)
mpivs.append(mpvt)
# EYE HISTOGRAMS
e_list[i], epvt = eye_hist(e_list[i], g)
epivs.append(epvt)
print("epiv len", len(epvt))
# FILTERED EYE HISTOGRAMS
e = e_list[i]
eendfilter = pd.notnull(e['end'])
ehumanfilter = e['is human'] == 1
eclean = e.loc[eendfilter & ehumanfilter]
half0 = eclean.apply(filterhalf, axis=1)
eclean_half0 = eclean.loc[half0]
eclean_half1 = eclean.loc[~half0]
# good_turns = e.loc[eendfilter, 'turn']
# turn_filter = g.status.isin(['playing', 'draw', 'win'])
# g_human_filter = g['is human'] == 1
# g_good_turn_filter = g['turn'].isin(good_turns)
# gt = g.loc[turn_filter & g_human_filter & g_good_turn_filter].set_index('turn')
grouped = eclean.groupby('turn')
widefunc = lambda x: filtermove(x, cov=[[1.5, 0], [0, 1.5]])
narrowfunc = lambda x: filtermove(x, cov=[[.66, 0], [0, .66]])
filtered = make_filtered_hist(grouped, g)
filtered_wide = make_filtered_hist(grouped, g, filterfunc=widefunc)
filtered_narrow = make_filtered_hist(grouped, g, filterfunc=narrowfunc)
filtered_half0 = make_filtered_hist(eclean_half0.groupby('turn'), g)
filtered_half1 = make_filtered_hist(eclean_half1.groupby('turn'), g)
print('filtered len', len(filtered))
fepivs.append(filtered)
fepivs_wide.append(filtered_wide)
fepivs_narrow.append(filtered_narrow)
fepivs_half0.append(filtered_half0)
fepivs_half1.append(filtered_half1)
export_cols = list(range(36)) + [999, 'bp', 'wp', 'zet']
fil_cols = list(range(36)) + ['bp', 'wp', 'zet']
# EXPORT FILES FOR EACH SUBJECT, HIST TYPE
for i, mp in enumerate(mpivs):
ep = epivs[i]
fep = fepivs[i]
fepw = fepivs_wide[i]
fepn = fepivs_narrow[i]
feph0 = fepivs_half0[i]
feph1 = fepivs_half1[i]
mp[export_cols].to_csv(os.path.join(output_dir, 'mouse {}.csv'.format(i)))
ep[export_cols].to_csv(os.path.join(output_dir, 'eye {}.csv'.format(i)))
fep[fil_cols].to_csv(os.path.join(output_dir, 'filtered eye {}.csv'.format(i)))
fepw[fil_cols].to_csv(os.path.join(output_dir, 'filtered eye wide {}.csv'.format(i)))
fepn[fil_cols].to_csv(os.path.join(output_dir, 'filtered eye narrow {}.csv'.format(i)))
feph0[fil_cols].to_csv(os.path.join(output_dir, 'filtered eye half0 {}.csv').format(i))
feph1[fil_cols].to_csv(os.path.join(output_dir, 'filtered eye half1 {}.csv').format(i))
return None
if __name__ == '__main__':
main()
| 41.630037
| 118
| 0.578003
|
5f989bf661f0036ae5041ebc6f5f97c40af1c7e5
| 5,105
|
py
|
Python
|
tests/unit/test_oneview_appliance_device_snmp_v3_users.py
|
SHANDCRUZ/test-codecov
|
f7aca851423641570fa86d3f8233235d14c71756
|
[
"Apache-2.0"
] | 10
|
2020-12-14T19:06:14.000Z
|
2022-03-24T15:36:49.000Z
|
tests/unit/test_oneview_appliance_device_snmp_v3_users.py
|
SHANDCRUZ/test-codecov
|
f7aca851423641570fa86d3f8233235d14c71756
|
[
"Apache-2.0"
] | 40
|
2020-10-12T11:45:25.000Z
|
2022-03-29T14:43:47.000Z
|
tests/unit/test_oneview_appliance_device_snmp_v3_users.py
|
SHANDCRUZ/test-codecov
|
f7aca851423641570fa86d3f8233235d14c71756
|
[
"Apache-2.0"
] | 16
|
2020-10-05T14:48:56.000Z
|
2022-03-11T12:52:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
# Copyright (2021) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import mock
import pytest
from ansible_collections.hpe.oneview.tests.unit.utils.hpe_test_utils import OneViewBaseTest
from ansible_collections.hpe.oneview.tests.unit.utils.oneview_module_loader import ApplianceDeviceSnmpV3UsersModule, OneViewModuleException
ERROR_MSG = 'Fake message error'
DEFAULT_PARAMS = dict(
userName='testUser666',
securityLevel='Authentication',
authenticationProtocol='SHA256'
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
name=DEFAULT_PARAMS['userName'],
data=DEFAULT_PARAMS,
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
name=DEFAULT_PARAMS['userName'],
data=dict(userName=DEFAULT_PARAMS['userName'],
authenticationProtocol='SHA512'),
)
PARAMS_FOR_SET_PASSWORD = dict(
config='config.json',
state='present',
name=DEFAULT_PARAMS['userName'],
data=dict(userName=DEFAULT_PARAMS['userName'],
authenticationPassphrase='NewPass1234')
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
name=DEFAULT_PARAMS['userName'])
@pytest.mark.resource(TestApplianceDeviceSnmpV3UsersModule='appliance_device_snmp_v3_users')
class TestApplianceDeviceSnmpV3UsersModule(OneViewBaseTest):
def test_should_raise_exception_when_api_is_lower_than_600(self):
self.mock_ov_client.api_version = 400
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(
exception=mock.ANY,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_API_VERSION_ERROR
)
@pytest.fixture(autouse=True)
def specific_set_up(self, setUp):
self.mock_ov_client.api_version = 600
def test_should_create_new_snmp_v3_user(self):
self.resource.get_by_name.return_value = None
self.resource.data = DEFAULT_PARAMS
self.resource.create.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_CREATED,
ansible_facts=dict(appliance_device_snmp_v3_users=DEFAULT_PARAMS)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.data = DEFAULT_PARAMS
self.resource.get_by_name.return_value = self.resource
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(appliance_device_snmp_v3_users=DEFAULT_PARAMS)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = DEFAULT_PARAMS.copy()
self.resource.data = DEFAULT_PARAMS
self.resource.get_by_name.return_value = self.resource
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_UPDATED,
ansible_facts=dict(appliance_device_snmp_v3_users=data_merged)
)
def test_should_remove_snmp_v3_user(self):
self.resource.get_by.return_value = [DEFAULT_PARAMS]
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_DELETED
)
def test_should_do_nothing_when_snmp_v3_user_not_exist(self):
self.resource.get_by_name.return_value = []
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
ApplianceDeviceSnmpV3UsersModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=ApplianceDeviceSnmpV3UsersModule.MSG_ALREADY_ABSENT
)
if __name__ == '__main__':
pytest.main([__file__])
| 33.149351
| 139
| 0.736141
|
c5ad2a0a8d271fe47fa77dbc1cee0234beb54ace
| 99
|
py
|
Python
|
locke/patterns/__init__.py
|
ciphertechsolutions/locke
|
848f68d34332c9ced32826216f912ee1db2610d0
|
[
"BSD-3-Clause"
] | null | null | null |
locke/patterns/__init__.py
|
ciphertechsolutions/locke
|
848f68d34332c9ced32826216f912ee1db2610d0
|
[
"BSD-3-Clause"
] | null | null | null |
locke/patterns/__init__.py
|
ciphertechsolutions/locke
|
848f68d34332c9ced32826216f912ee1db2610d0
|
[
"BSD-3-Clause"
] | null | null | null |
from locke.patterns.manager import Manager
from locke.patterns.pattern_plugin import PatternPlugin
| 33
| 55
| 0.878788
|
61aec8ce08e19eea3241fb12d9ad23acf441a14c
| 9,762
|
py
|
Python
|
examples/song_recommendation.py
|
hcook/gmm
|
2f858619c01b02c3e961e988037670e14276abcb
|
[
"BSD-3-Clause"
] | 34
|
2015-04-16T00:27:50.000Z
|
2019-08-20T05:19:22.000Z
|
examples/song_recommendation.py
|
hcook/gmm
|
2f858619c01b02c3e961e988037670e14276abcb
|
[
"BSD-3-Clause"
] | null | null | null |
examples/song_recommendation.py
|
hcook/gmm
|
2f858619c01b02c3e961e988037670e14276abcb
|
[
"BSD-3-Clause"
] | 11
|
2015-09-03T02:05:13.000Z
|
2020-04-27T18:51:22.000Z
|
import unittest
import pylab as pl
import matplotlib as mpl
import itertools
import sys
import math
import timeit
import copy
import time
import struct
import scipy.stats.mstats as stats
import ConfigParser
import os
import getopt
import h5py
import random as rnd
import cPickle as pickle
import operator
from gmm.gmm_specializer import *
def get_song_dict():
fileList = []
rootdir = '/disk1/home_user/egonina/asp/MSD/MillionSongSubset/data/'
for root, subFolders, files in os.walk(rootdir):
for file in files:
fileList.append(os.path.join(root,file))
file_tag_dict = {}
for file in fileList:
print file
f = h5py.File(file, 'r')
mbtags = f['musicbrainz']['artist_mbtags']
list = []
for t in mbtags:
list.append(t)
tags = f['metadata']['artist_terms']
tag_freq = f['metadata']['artist_terms_freq']
tags_dict = {}
for t in range(len(tags)):
tags_dict[tags[t]] = tag_freq[t]
file_id = str(f['analysis']['songs']['track_id'][0])
file_tag_dict[file_id] = {}
file_tag_dict[file_id]['artist_mbtags'] = list
file_tag_dict[file_id]['artist_terms'] = tags_dict
file_tag_dict[file_id]['artist_name'] = str(f['metadata']['songs']['artist_name'][0])
file_tag_dict[file_id]['title'] = str(f['metadata']['songs']['title'][0])
file_tag_dict[file_id]['segments_timbre'] = np.array(f['analysis']['segments_timbre'], dtype=np.float32)
file_tag_dict[file_id]['duration'] = float(f['analysis']['songs']['duration'][0])
file_tag_dict[file_id]['tempo'] = float(f['analysis']['songs']['tempo'][0])
file_tag_dict[file_id]['time_signature'] = float(f['analysis']['songs']['time_signature'][0])
file_tag_dict[file_id]['segments_start'] = np.array(f['analysis']['segments_start'], dtype=np.float32)
f.close()
p = open("/disk1/home_user/egonina/asp/MSD/all_file_dict_dump.pkl", "wb")
pickle.dump(file_tag_dict, p, True)
p.close()
return file_tag_dict
def count_songs_by_tag(tags_file_name, output_file_name, fileDict):
tags_file = open(tags_file_name, 'r')
tag_dict = {}
for tag in tags_file:
        tag = tag[:len(tag)-1] # delete end-of-line character
tag_dict[tag] = 0
#---------- READ FILES -----------
start = time.time()
for file in fileDict.keys():
tags = fileDict[file]
if tag in tags:
tag_dict[tag]+=1
total = time.time() - start
print "songs with keyword [" + tag + "]: "+ str(tag_dict[tag])
print "total time: ", total
tag_out = open(output_file_name, 'w')
for tag in tag_dict.keys():
tag_out.write(tag+"\t"+str(tag_dict[tag])+"\n")
tag_out.close()
if __name__ == '__main__':
total_start_time = time.time()
freq_threshold = 0.8
M = 32
category_tag = "metal"
rnd.seed(42)
print "Reading Files"
#song_dict = get_song_dict()
st = time.time()
# assume the dictionary has been already read in and pickled
p = open("/disk1/home_user/egonina/asp/MSD/all_file_dict_dump.pkl", "rb")
song_dict = pickle.load(p)
p.close()
print "--- File Reading:\t", time.time() - st, " -----"
st = time.time()
# collect songs
songs_with_tag = {}
songs_without_tag = {}
song_with_tag_count = 0
song_without_tag_count = 0
for song in song_dict.keys():
if category_tag in song_dict[song]['artist_terms'].keys(): #the song's tag list contains the tag we're looking for
if song_dict[song]['artist_terms'][category_tag] > freq_threshold:
songs_with_tag[song] = song_dict[song]
song_with_tag_count += 1
else:
songs_without_tag[song] = song_dict[song]
song_without_tag_count += 1
print "--- Collecting songs for the tag time:\t", time.time() - st, " ----- "
print "INFO: songs with tag count:", song_with_tag_count
print "INFO: songs without tag count: ", song_without_tag_count
st = time.time()
# get indices for various sets of songs
all_positive_indices = range(song_with_tag_count-1)
all_negative_indices = range(song_without_tag_count-1)
all_indices = range(len(song_dict.keys()))
#split songs with tag into training/testing sets (70/30)
training_sample_indices = np.array(rnd.sample(all_positive_indices, int(song_with_tag_count*0.7)))
testing_sample_indices = np.delete(all_positive_indices, training_sample_indices)
negative_sample_indices = all_negative_indices
print "INFO: number of training indices:", len(training_sample_indices)
print "INFO: testing indices:", len(testing_sample_indices)
print "INFO: negative testing indices:", len(negative_sample_indices)
# get song keys for the:
# - 70% of total songs for training
# - 30% of total songs for testing
# - (total songs - songs with tag) for negative testing
# - 30% of all song features for UBM model
song_keys = np.array(songs_with_tag.keys())
song_neg_keys = np.array(songs_without_tag.keys())
all_song_keys = np.array(song_dict.keys())
# get the corresponding song keys for each of the sets
training_song_keys = song_keys[training_sample_indices]
testing_song_keys = song_keys[testing_sample_indices]
negative_song_keys = song_neg_keys[negative_sample_indices]
# collect features for positive GMM training
first_song = True
for song in training_song_keys:
feats = songs_with_tag[song]['segments_timbre']
if first_song:
total_features = feats
first_song = False
else:
total_features = np.concatenate((total_features, feats))
print "--- Collecting training features time:\t", time.time() - st, " ----- "
print "INFO: total features: ", total_features.shape
# collect features for UBM training
st = time.time()
p = open("/disk1/home_user/egonina/asp/MSD/ubm_features_all.pkl", "rb")
total_ubm_features = np.array(pickle.load(p))
p.close()
# train the UBM on 30% of the total features from all songs
training_ubm_features = np.array(rnd.sample(total_ubm_features, int(len(total_ubm_features)*0.3)))
print "--- Collecting ubm features time:\t", time.time() - st, " -----"
print "INFO: total ubm features: ", total_ubm_features.shape, " 30%: ", training_ubm_features.shape
# train UBM on features
D = total_ubm_features.shape[1]
ubm = GMM(M,D,cvtype='diag')
train_st = time.time()
ubm.train(training_ubm_features)
train_total = time.time() - train_st
print "--- UBM training time:\t", train_total, " -----"
# train positive GMM on features
D = total_features.shape[1]
gmm = GMM(M, D, means=np.array(ubm.components.means), covars=np.array(ubm.components.covars), weights=np.array(ubm.components.weights), cvtype='diag')
train_st = time.time()
gmm.train(total_features)
train_total = time.time() - train_st
print "--- GMM training time:\t", train_total, " -----"
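    # Ranking criterion used below: for each test song, the mean log-likelihood under
    # the tag GMM minus the mean log-likelihood under the UBM (the third element of the
    # stored tuple); a larger difference means the song fits the tag model better than
    # the background model, and the top-20 lists at the end are sorted by it.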
print "--- Testing Labeled Examples ---"
# testing the labeled test files
test_st = time.time()
labeled_songs = {}
unlabeled_songs = {}
for test_song in testing_song_keys:
test_feats = songs_with_tag[test_song]['segments_timbre']
all_lklds = gmm.score(test_feats)
all_ubm_lklds = ubm.score(test_feats)
avg_lkld = np.average(all_lklds)
avg_ubm_lkld = np.average(all_ubm_lklds)
sum_lkld = np.sum(all_lklds)
labeled_songs[str(songs_with_tag[test_song]['artist_name']+ " - "+songs_with_tag[test_song]['title'])] = (avg_lkld, avg_ubm_lkld, avg_lkld - avg_ubm_lkld)
print "--- Testing Unlabeled Examples ---"
test_st = time.time()
count = 0
# testing the unlabeled test files
for test_song in negative_song_keys:
count+=1
print count
test_feats = songs_without_tag[test_song]['segments_timbre']
all_lklds = gmm.score(test_feats)
all_ubm_lklds = ubm.score(test_feats)
avg_lkld = np.average(all_lklds)
avg_ubm_lkld = np.average(all_ubm_lklds)
sum_lkld = np.sum(all_lklds)
unlabeled_songs[str(songs_without_tag[test_song]['artist_name'] + " - " + songs_without_tag[test_song]['title'])] = (avg_lkld, avg_ubm_lkld, avg_lkld - avg_ubm_lkld)
test_total = time.time() - test_st
print "--- Total testing time:\t", test_total, " -----"
#print out top 20 labeled suggestions and unlabeled recommendations
print "======================================================================"
print "=================== TOP 20 LABELED SAMPLES ==========================="
print "======================================================================"
sorted_lab_samples = sorted(labeled_songs.iteritems(), key=lambda k: k[1][2], reverse=True)
for p in range(20):
print sorted_lab_samples[p]
print "======================================================================"
print "=================== TOP 20 UNLABELED SAMPLES ========================="
print "======================================================================"
sorted_unlab_samples = sorted(unlabeled_songs.iteritems(), key=lambda k: k[1][2], reverse=True)
for p in range(20):
print sorted_unlab_samples[p]
print "-------------- DONE ---------------"
print "--- Total time: ", time.time() - total_start_time, " ---"
print "-----------------------------------"
| 36.561798
| 173
| 0.61555
|
63ddb31cc7dde2ddc532470bd973f4f926f234fa
| 889
|
py
|
Python
|
tests/networking/shared_vpc_gke/test_plan.py
|
sekhon67/cloud-foundation-fabric
|
7e429425fe8a3598a38b490f35a6359ba13a25ff
|
[
"Apache-2.0"
] | null | null | null |
tests/networking/shared_vpc_gke/test_plan.py
|
sekhon67/cloud-foundation-fabric
|
7e429425fe8a3598a38b490f35a6359ba13a25ff
|
[
"Apache-2.0"
] | null | null | null |
tests/networking/shared_vpc_gke/test_plan.py
|
sekhon67/cloud-foundation-fabric
|
7e429425fe8a3598a38b490f35a6359ba13a25ff
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_resources(e2e_plan_runner):
"Test that plan works and the numbers of resources is as expected."
modules, resources = e2e_plan_runner(FIXTURES_DIR)
assert len(modules) == 10
assert len(resources) == 45
| 31.75
| 74
| 0.758155
|
25fb964e620aa8c797f5ee59273c7000ed359165
| 16,134
|
py
|
Python
|
app/R64/_GPIO.py
|
JonathanArrance/speedbot
|
33cadaae157ccfb35688147c4662eaa7383e3f02
|
[
"MIT"
] | null | null | null |
app/R64/_GPIO.py
|
JonathanArrance/speedbot
|
33cadaae157ccfb35688147c4662eaa7383e3f02
|
[
"MIT"
] | null | null | null |
app/R64/_GPIO.py
|
JonathanArrance/speedbot
|
33cadaae157ccfb35688147c4662eaa7383e3f02
|
[
"MIT"
] | 1
|
2020-10-30T21:14:25.000Z
|
2020-10-30T21:14:25.000Z
|
#!/usr/bin/env python
# Allison Creely, 2018, LGPLv3 License
# Rock 64 GPIO Library for Python
# Import modules
import os.path
from multiprocessing import Process, Value
from time import time
from time import sleep
# Define static module variables
var_gpio_root = '/sys/class/gpio'
ROCK = 'ROCK'
BOARD = 'BOARD'
BCM = 'BCM'
HIGH = 1
LOW = 0
OUT = 'out'
IN = 'in'
RISING = 'rising'
FALLING = 'falling'
BOTH = 'both'
PUD_UP = 0
PUD_DOWN = 1
VERSION = '0.6.3'
RPI_INFO = {'P1_REVISION': 3, 'RAM': '1024M', 'REVISION': 'a22082', 'TYPE': 'Pi 3 Model B', 'PROCESSOR': 'BCM2837', 'MANUFACTURER': 'Embest'}
# Define GPIO arrays
ROCK_valid_channels = [27, 32, 33, 34, 35, 36, 37, 38, 60, 64, 65, 67, 68, 69, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 96, 97, 98, 100, 101, 102, 103, 104]
BOARD_to_ROCK = [0, 0, 0, 89, 0, 88, 0, 60, 64, 0, 65, 0, 67, 0, 0, 100, 101, 0, 102, 97, 0, 98, 103, 96, 104, 0, 76, 68, 69, 0, 0, 0, 38, 32, 0, 33, 37, 34, 36, 0, 35, 0, 0, 81, 82, 87, 83, 0, 0, 80, 79, 85, 84, 27, 86, 0, 0, 0, 0, 0, 0, 89, 88]
BCM_to_ROCK = [68, 69, 89, 88, 81, 87, 83, 76, 104, 98, 97, 96, 38, 32, 64, 65, 37, 80, 67, 33, 36, 35, 100, 101, 102, 103, 34, 82]
# Define dynamic module variables
gpio_mode = None
warningmode = 1
# GPIO Functions
def setmode(mode):
if mode in ['ROCK','BOARD','BCM']:
global gpio_mode
gpio_mode = mode
else:
print("An invalid mode ({}) was passed to setmode(). Use one of the following: ROCK, BOARD, BCM".format(mode))
def getmode():
if gpio_mode in ['ROCK','BOARD','BCM']:
return gpio_mode
else:
print("Error: An invalid mode ({}) is currently set".format(gpio_mode))
def get_gpio_number(channel):
if gpio_mode in ['ROCK','BOARD','BCM']:
# Convert to ROCK GPIO
if gpio_mode == BOARD:
channel_new = BOARD_to_ROCK[channel]
if gpio_mode == BCM:
channel_new = BCM_to_ROCK[channel]
if gpio_mode == ROCK:
channel_new = channel
# Check that the GPIO is valid
if channel_new in ROCK_valid_channels:
return channel_new
else:
print("Error: GPIO not supported on {0} {1}".format(gpio_mode, channel))
return None
else:
print("RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.ROCK), GPIO.setmode(GPIO.BOARD), or GPIO.setmode(GPIO.BCM)")
return None
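# Illustrative usage sketch (not part of the library; physical pin 7 is chosen purely
# for illustration): the lookup tables above translate a BOARD pin number into the
# ROCK (sysfs) GPIO number that the rest of the module reads and writes.
def _demo_pin_translation():
    setmode(BOARD)
    return get_gpio_number(7)   # -> 60, i.e. /sys/class/gpio/gpio60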
def gpio_function(channel):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
if channel_int == None:
return
# Get direction of requested GPIO
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'r') as file:
direction = file.read(1)
except:
return "GPIO.UNKNOWN"
if direction == 'i':
return "GPIO.INPUT"
elif direction == 'o':
return "GPIO.OUTPUT"
else:
return "GPIO.UNKNOWN"
def setwarnings(state=True):
if state in [0,1]:
global warningmode
warningmode = state
else:
print("Error: {} is not a valid warning mode. Use one of the following: True, 1, False, 0".format(state))
def validate_direction(channel, validation_type='both'):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
if channel_int == None:
return
# Get direction of requested GPIO
if validation_type in ['input', 'output', 'both']:
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'r') as file:
direction = file.read(1)
except:
direction = 'none'
# Perform sanity checks
if (validation_type == 'input') and (direction != 'i'):
print("You must setup() the GPIO channel ({0} {1}) as an input first".format(gpio_mode, channel))
return 0
elif (validation_type == 'output') and (direction != 'o'):
print("You must setup() the GPIO channel ({0} {1}) as an output first".format(gpio_mode, channel))
return 0
elif ((validation_type == 'both') and (direction not in ['i', 'o'])) or (direction == 'none'):
print("You must setup() the GPIO channel ({0} {1}) first".format(gpio_mode, channel))
return 0
else:
return 1
else:
        print("Error: {} is not a valid direction. Use one of the following: input, output, both".format(validation_type))
return
def setup(channel, direction, pull_up_down=PUD_DOWN, initial=LOW):
    # If channel is an integer, convert it to a list
if isinstance(channel, int) == True:
channel = [channel]
    # Iterate through channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
if channel_int == None:
return
# Check if GPIO export already exists
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
if warningmode == 1:
print("This channel ({0} {1}) is already in use, continuing anyway. Use GPIO.setwarnings(False) to disable warnings.".format(gpio_mode, channel[index]))
# Export GPIO if an export doesn't already exist
else:
try:
var_gpio_filepath = var_gpio_root + "/export"
with open(var_gpio_filepath, 'w') as file:
file.write(str(channel_int))
except:
print("Error: Unable to export GPIO")
# Set GPIO direction (in/out)
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'w') as file:
file.write(str(direction))
except:
print("Error: Unable to set GPIO direction")
return
# If GPIO direction is out, set initial value of the GPIO (high/low)
if direction == 'out':
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'w') as file:
                    # If multiple initial values, iterate through initial values
if isinstance(initial, int) == False:
file.write(str(initial[index]))
else:
file.write(str(initial))
except:
print("Error: Unable to set GPIO initial state")
# If GPIO direction is in, set the state of internal pullup (high/low)
if direction == 'in':
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/active_low"
with open(var_gpio_filepath, 'w') as file:
file.write(str(pull_up_down))
except:
print("Error: Unable to set internal pullup resistor state")
def output(channel, value):
    # If channel is an integer, convert it to a list
if isinstance(channel, int) == True:
channel = [channel]
    # Iterate through channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel[index], 'output') == 0:
return
# Set the value of the GPIO (high/low)
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'w') as file:
                # If multiple states, iterate through states
if isinstance(value, int) == False:
file.write(str(value[index]))
else:
file.write(str(value))
except:
print("Error: Unable to set GPIO output state")
def input(channel):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'both') == 0:
return
# Get the value of the GPIO
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
return int(file.read(1))
except:
print("Error: Unable to get GPIO value")
def wait_for_edge(channel, edge, bouncetime='none', timeout='none'):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'input') == 0:
return
if edge not in [RISING, FALLING, BOTH]:
print("The edge must be set to GPIO.RISING, GPIO.FALLING or GPIO.BOTH")
return
if (bouncetime != 'none') and (bouncetime <= 0):
print("Bouncetime must be greater than 0")
return
if (timeout != 'none') and (timeout <= 0):
print("timeout must be greater than 0")
return
# Set the edge state to trigger on
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/edge"
with open(var_gpio_filepath, 'w') as file:
file.write(str(edge))
except:
print("Error: Unable to set GPIO edge state")
return
# Get current state of the input
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
original_value = file.read(1)
except:
print("Error: Unable to get GPIO value")
return
# convert times from ms to fractions of a second
if timeout != 'none':
timeout = timeout / 1000.0
if bouncetime != 'none':
bouncetime = bouncetime / 1000.0
# Wait for interrupt (10ms resolution)
while True:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
new_value = file.read(1)
if new_value != original_value:
if bouncetime != 'none':
sleep(bouncetime)
return True
sleep(0.01)
if timeout != 'none':
timeout -= 0.01
if timeout <= 0.0:
return None
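# Illustrative usage sketch (not part of the library; BCM pin 18 and the timings are
# assumed values): block for up to 5 s waiting for any debounced level change.
def _demo_wait_for_edge():
    setmode(BCM)
    setup(18, IN, pull_up_down=PUD_DOWN)
    return wait_for_edge(18, BOTH, bouncetime=50, timeout=5000)  # True on change, None on timeout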
def event_detected(channel):
print("Error: GPIO.event_detected() Not implemented")
def add_event_detect(gpio, edge, callback='none', bouncetime='none'):
print("Error: GPIO.add_event_detect() Not implemented")
#p = Process(target=wait_for_edge, args=(gpio, edge), name='eventdetect_process')
#p.start()
def add_event_callback(gpio, callback):
print("Error: GPIO.add_event_callback() Not implemented")
def remove_event_detect(gpio):
print("Error: GPIO.remove_event_detect() Not implemented")
def cleanup(channel='none'):
# If no channel is specified...
if channel == 'none':
# Cleanup all GPIOs
var_gpio_cleared = 0
for gpio_index in range(105):
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(gpio_index) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
try:
var_gpio_filepath = var_gpio_root + "/unexport"
with open(var_gpio_filepath, 'w') as file:
file.write(str(gpio_index))
var_gpio_cleared = 1
except:
print("Error: Unknown failure while performing cleanup")
if (var_gpio_cleared == 0) and (warningmode == 1):
print("No channels have been set up yet - nothing to clean up! Try cleaning up at the end of your program instead!")
# If a channel is specified...
else:
        # If channel is an integer, convert it to a list
if isinstance(channel, int) == True:
channel = [channel]
        # Iterate through channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
if channel_int == None:
return
# Cleanup specified GPIO
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
try:
var_gpio_filepath = var_gpio_root + "/unexport"
with open(var_gpio_filepath, 'w') as file:
file.write(str(channel_int))
except:
print("Error: Unknown failure while performing cleanup")
# PWM Class
class PWM:
def __init__(self, channel, frequency):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'output') == 0:
return
if frequency <= 0.0:
print("frequency must be greater than 0.0")
return
self.freq = frequency
self.gpio = channel_int
self.state = 0
return
def start(self, dutycycle, pwm_precision=HIGH):
if (dutycycle < 0.0) or (dutycycle > 100.0):
print("dutycycle must have a value from 0.0 to 100.0")
return
self.precision = pwm_precision
self.dutycycle = dutycycle
self.pwm_calc()
self.p = Process(target=self.pwm_process, args=(self.gpio, self.sleep_high, self.sleep_low, self.precision), name='pwm_process')
self.p.start()
self.state = 1
def stop(self):
self.p.terminate()
self.p.join()
self.state = 0
@staticmethod
def pwm_busywait(wait_time):
current_time = time()
while (time() < current_time+wait_time):
pass
def pwm_calc(self):
self.sleep_low = (1.0 / self.freq) * ((100 - self.dutycycle) / 100.0)
self.sleep_high = (1.0 / self.freq) * ((100 - (100 - self.dutycycle)) / 100.0)
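    # Worked example (illustrative): with freq=50 Hz and dutycycle=25.0, pwm_calc()
    # gives sleep_high = 0.005 s and sleep_low = 0.015 s, i.e. a 20 ms period split
    # 25% high / 75% low.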
@staticmethod
def pwm_process(channel, sleep_high, sleep_low, precision=HIGH):
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel) + "/value"
# Note: Low precision mode greatly reduces CPU usage, but accuracy will depend upon your kernel.
# p.start(dutycycle, pwm_precision=GPIO.LOW)
try:
if precision == HIGH:
while True:
with open(var_gpio_filepath, 'w') as file:
file.write('1')
PWM.pwm_busywait(sleep_high)
with open(var_gpio_filepath, 'w') as file:
file.write('0')
PWM.pwm_busywait(sleep_low)
else:
while True:
with open(var_gpio_filepath, 'w') as file:
file.write('1')
sleep(sleep_high)
with open(var_gpio_filepath, 'w') as file:
file.write('0')
sleep(sleep_low)
except:
try:
with open(var_gpio_filepath, 'w') as file:
file.write('0')
except:
pass
print("Warning: PWM process ended prematurely")
def ChangeFrequency(self, frequency):
self.freq = frequency
if self.state == 1:
self.stop()
self.start(self.dutycycle)
def ChangeDutyCycle(self, dutycycle):
self.dutycycle = dutycycle
if self.state == 1:
self.stop()
self.start(self.dutycycle)
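# Illustrative usage sketch (not part of the library; BOARD pin 33 and the 50 Hz / 25%
# values are assumptions, not requirements of this module):
#   setmode(BOARD)
#   setup(33, OUT)
#   p = PWM(33, 50)          # 50 Hz software PWM on physical pin 33
#   p.start(25.0)            # 25% duty cycle; pass pwm_precision=LOW to lower CPU usage
#   sleep(5)
#   p.ChangeDutyCycle(60.0)
#   p.stop()
#   cleanup(33)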
| 39.160194
| 246
| 0.583364
|
991cec01a8b7a19069b00806947cc7fb2d59297a
| 34,507
|
py
|
Python
|
sensorml2iso/sensorml2iso.py
|
emiliom/sensorml2iso
|
1759aad507b18e56ac9f1030e92c3b001f61d924
|
[
"MIT"
] | null | null | null |
sensorml2iso/sensorml2iso.py
|
emiliom/sensorml2iso
|
1759aad507b18e56ac9f1030e92c3b001f61d924
|
[
"MIT"
] | null | null | null |
sensorml2iso/sensorml2iso.py
|
emiliom/sensorml2iso
|
1759aad507b18e56ac9f1030e92c3b001f61d924
|
[
"MIT"
] | null | null | null |
import os
import errno
import io
import sys
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from six import iteritems
try:
from urllib.parse import unquote, unquote_plus, urlencode, urlparse # Python 3
except ImportError:
from urllib import unquote, unquote_plus, urlencode # Python 2
from urlparse import urlparse
from collections import OrderedDict
from lxml import etree
from requests.exceptions import ConnectionError, ReadTimeout
# import numpy as np
import pandas as pd
# from shapely.geometry import Point
# import geopandas as gpd
from owslib.sos import SensorObservationService
from owslib.swe.sensor.sml import SensorML, Contact, Documentation
from owslib.util import testXMLValue, testXMLAttribute, nspath_eval, ServiceException
from owslib.namespaces import Namespaces
from pyoos.collectors.ioos.swe_sos import IoosSweSos
from pyoos.parsers.ioos.describe_sensor import IoosDescribeSensor
from pyoos.parsers.ioos.one.describe_sensor import ont
from jinja2 import Environment, PackageLoader
class Sensorml2Iso:
"""
Attributes
----------
service : str
An IOOS DMAC-compliant SOS service to parse for active sensors to generate metadata for.
active_station_days : int
Number of days before present to designate stations as 'active' for inclusion/exclusion purposes.
stations : str
List of station URNs to filter by for processing
getobs_req_hours : int
Number of hours from last valid station observation time to use in GetObservation request example URLs
response_formats : str
List of responseFormat values to include in GetObservation download links
sos_type : str
Name of SOS implementation type [ioos|ndbc|coops]
output_dir : str
Name of an output directory (relative) to output ISO 19115-2 XML metadata to
more : str
More class attributes...
"""
RESPONSE_FORMAT_TYPE_MAP = {
'application/json': 'application/json',
'application/zip; subtype=x-netcdf': 'application/x-netcdf',
'text/xml; subtype="om/1.0.0"': 'text/xml',
'text/xml; subtype="om/1.0.0/profiles/ioos_sos/1.0"': 'text/xml',
'text/xml;subtype="om/1.0.0/profiles/ioos_sos/1.0"': 'text/xml',
'text/xml;schema="ioos/0.6.1"': 'text/xml',
'application/ioos+xml;version=0.6.1': 'text/xml'
}
RESPONSE_FORMAT_NAME_MAP = {
'application/json': 'JSON',
'application/zip; subtype=x-netcdf': 'NetCDF',
'text/xml; subtype="om/1.0.0"': 'XML (O&M 1.0)',
'text/xml; subtype="om/1.0.0/profiles/ioos_sos/1.0"': 'XML (IOOS SOS v1.0 Profile)',
'text/xml;subtype="om/1.0.0/profiles/ioos_sos/1.0"': 'XML (IOOS SOS v1.0 Profile)',
'text/csv': 'CSV',
'text/tab-separated-values': 'TSV (tab-separated-values)',
'text/xml;schema="ioos/0.6.1"': 'XML (IOOS DIF SOS v0.6.1)',
'application/ioos+xml;version=0.6.1': 'XML (IOOS DIF SOS v0.6.1)'
}
def __init__(self, service=None, active_station_days=None, stations=None, getobs_req_hours=None, response_formats=None, sos_type=None, output_dir=None, verbose=False):
"""
"""
self.service = service
self.active_station_days = active_station_days
self.stations = stations
self.getobs_req_hours = getobs_req_hours
self.response_formats = response_formats
self.sos_type = sos_type
self.verbose = verbose
self.service_url = urlparse(self.service)
self.server_name = self.service_url.netloc
self.log = io.open('sensorml2iso.log', mode='wt', encoding='utf-8')
if output_dir is not None:
self.output_directory = output_dir
else:
self.output_directory = self.service_url.netloc
self.output_directory = self.output_directory.replace(":", "_")
if self.verbose:
self.print_debug_info()
try:
# self.csv = io.open('sensorml2iso.csv', mode='wt', encoding='utf-8')
self.csv = open('sensorml2iso.csv', mode='wt')
except OSError:
pass
if self.stations is not None:
self.log.write(u"Station URNs to filter by:\n")
print("Station URNs to filter by:")
for station in self.stations:
self.log.write(u"URN: {station}\n".format(station=station))
print("URN: {station}".format(station=station))
if os.path.exists(self.output_directory):
if not os.path.isdir(self.output_directory):
self.log.write(u"\nError: the configured output directory: {output_dir} exists, but is not a directory".format(output_dir=os.path.abspath(self.output_directory)))
sys.exit("Error: the configured output directory: {output_dir} exists, but is not a directory".format(output_dir=os.path.abspath(self.output_directory)))
else:
self.create_output_dir()
def run(self):
"""
"""
self.namespaces = self.get_namespaces()
# obtain the stations DataFrame:
stations_df = self.get_stations_df(self.service, self.stations)
if stations_df is None:
            self.log.write(u"\nNo valid SensorML documents obtained from SOS service. Verify service is compliant with the SOS profile [URL: {url}]".format(url=self.service))
            sys.exit("No valid SensorML documents obtained from SOS service. Verify service is compliant with the SOS profile [URL: {url}]".format(url=self.service))
# Assign EPSG:4326 CRS, retrieved from epsg.io
# The OGC WKT crs string is available directly at http://epsg.io/4326.wkt
# or http://spatialreference.org/ref/epsg/4326/ogcwkt/
# crs = '''GEOGCS["WGS 84",
# DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],
# AUTHORITY["EPSG","6326"]],
# PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],
# UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],
# AUTHORITY["EPSG","4326"]]'
# '''
# geometry = [Point(xy) for xy in zip(stations_df.lon, stations_df.lat)]
# self.stations_gdf = gpd.GeoDataFrame(stations_df, geometry=geometry, crs=crs)
# determine active/inactive stations (--active_station_days parameter if provided) and filter stations_df accordingly:
if self.active_station_days is not None:
station_active_date = datetime.now() - timedelta(days=self.active_station_days)
active_cnt = len(stations_df[stations_df.ending > station_active_date.isoformat()])
total_cnt = len(stations_df)
filtered_stations_df = stations_df.loc[stations_df.ending > station_active_date.isoformat()]
if self.verbose:
# print("Date for determining active/inactive stations in SOS service: {active_date}".format(active_date=active_date.strftime("%Y-%m-%d")))
print("Date for determining active/inactive stations in SOS service: {active_date:%Y-%m-%d}".format(active_date=station_active_date))
print("'Active' stations: %d / Total stations: %d" % (active_cnt, total_cnt))
print("DataFrame sizes: Original(stations_df): {len_stations_df:2.0f}, Filtered: {len_filtered_stations_df:2.0f}".format(len_stations_df=len(stations_df), len_filtered_stations_df=len(filtered_stations_df)))
self.log.write(u"\nDate for determining active/inactive stations in SOS service: {active_date:%Y-%m-%d}".format(active_date=station_active_date))
self.log.write(u"\n'Active' stations: %d / Total stations: %d" % (active_cnt, total_cnt))
self.log.write(u"\nDataFrame sizes: Original(stations_df): {len_stations_df:2.0f}, Filtered: {len_filtered_stations_df:2.0f}".format(len_stations_df=len(stations_df), len_filtered_stations_df=len(filtered_stations_df)))
if self.verbose:
# self.csv.write(unicode(stations_df[stations_df.ending > station_active_date.isoformat()].to_csv(encoding='utf-8')))
self.csv.write(stations_df[stations_df.ending > station_active_date.isoformat()].to_csv(encoding='utf-8'))
self.generate_iso(filtered_stations_df)
else:
self.generate_iso(stations_df)
return
# These functions are all from OWSLib, with minor adaptations
def get_namespaces(self):
"""
"""
n = Namespaces()
namespaces = n.get_namespaces(["sml", "gml", "xlink", "swe"])
namespaces["ism"] = "urn:us:gov:ic:ism:v2"
return namespaces
def nsp(self, path):
"""
"""
return nspath_eval(path, self.namespaces)
def get_stations_df(self, sos_url, station_urns_sel=None):
""" Returns a Pandas Dataframe
"""
        # oFrmts: IOOS SOS OutputFormat strings (first is compliant with the IOOS SOS spec, second is to accommodate NDBC). More info here:
# http://ioos.github.io/sos-guidelines/doc/wsdd/sos_wsdd_github_notoc/#describesensor-request:638e0b263020c13a76a55332bd966dbe
oFrmts = ['text/xml; subtype="sensorML/1.0.1/profiles/ioos_sos/1.0"', 'text/xml;subtype="sensorML/1.0.1"']
params = {'service': 'SOS', 'request': 'GetCapabilities', 'acceptVersions': '1.0.0'}
sos_url_params = sos_url + '?' + urlencode(params)
# sos_url_params_quoted = quote(sos_url_params,"/=:")
# sos_url_params_unquoted = unquote(sos_url_params)
try:
sosgc = SensorObservationService(sos_url_params)
except (ConnectionError, ReadTimeout) as e:
self.log.write(u"\nError: unable to connect to SOS service: {url} due to HTTP connection error.".format(url=sos_url_params))
self.log.write(u"\nHTTP connection error: {err}.".format(err=str(e)))
sys.exit("\nError: unable to connect to SOS service: {url}. \nUnderlying HTTP connection error: {err}".format(url=sos_url_params, err=str(e)))
# vars to store returns from sos_collector.metadata_plus_exceptions function:
sml_recs = {}
sml_errors = {}
describe_sensor_url = {}
# leverage Pyoos Collector to query for all available stations and obtain SensorML (if station subset not passed in --stations param)
if station_urns_sel is not None:
station_urns = station_urns_sel
else:
sos_collector = IoosSweSos(sos_url)
station_urns = [urn.name for urn in sos_collector.server.offerings
if 'network' not in urn.name.split(':')]
sos_collector.features = station_urns
# write out stations in SOS that will be handled:
if self.verbose:
self.log.write(u"\nStations to process for SOS: {sos}".format(sos=sos_url_params))
print("Stations to process for SOS: {sos}".format(sos=sos_url_params))
for feature in sos_collector.features:
self.log.write(u"\n - {feature}".format(feature=feature))
print(" - {feature}".format(feature=feature))
# iterate over possible oFrmts expected of the various SOS services (IOOS SOS 1.0, NDBC):
# for fmt in reversed(oFrmts):
for fmt in oFrmts:
try:
sml_recs, sml_errors = sos_collector.metadata_plus_exceptions(output_format=fmt, timeout=200)
# if no valid SensorML docs returned, try next oFrmt:
if not sml_recs:
continue
else:
                        # assign correct DescribeSensor url (use sos_collector.features rather than sml_recs.keys() to
# create DescribeSensor URLs for failures to record in logs):
for station in sos_collector.features:
describe_sensor_url[station] = self.generate_describe_sensor_url(sosgc, procedure=station, oFrmt=fmt)
# report on errors returned from metadata_plus_exceptions:
if sml_errors:
if self.verbose:
for station, msg in iteritems(sml_errors):
self.log.write(u"\nSOS DescribeSensor error returned for: {station}, skipping. Error msg: {msg}".format(station=station, msg=msg))
print("SOS DescribeSensor error returned for: {station}, skipping. Error msg: {msg}".format(station=station, msg=msg))
else:
self.log.write(u"\nSuccess, no errors returned from DescribeSensor requests in service: {sos}".format(sos=sos_url_params))
print("Success, no errors returned from DescribeSensor requests in service: {sos}".format(sos=sos_url_params))
break
# ServiceException shouldn't be thrown by metadata_plus_exceptions function, but handle regardless by attempting next oFrmt:
except ServiceException as e:
continue
station_recs = []
failures = []
# generate Pandas DataFrame by populating 'station_recs' list by parsing SensorML strings:
for station_idx, station_urn in enumerate(station_urns):
if station_urns_sel is not None:
# iterate oFrmts items for describe_sensor request (first is IOOS SOS spec-compliant, second is for NDBC SOS):
for fmt in oFrmts:
try:
describe_sensor_url[station_urn] = self.generate_describe_sensor_url(sosgc, procedure=station_urn, oFrmt=fmt)
sml_str = sosgc.describe_sensor(procedure=station_urn, outputFormat=fmt, timeout=200)
break
except ServiceException as e:
sml_errors[station_urn] = str(e)
continue
sml = SensorML(sml_str)
else:
# process valid SensorML responses, quietly pass on invalid stations (add to failures list for verbose reporting):
try:
sml = sml_recs[station_urn]
except KeyError:
self.log.write(u"\n\nStation: {station} failed (no SensorML in sml_recs dict). URL: {ds}".format(station=station_urn, ds=describe_sensor_url[station_urn].replace("&", "&")))
print("Station: {station} failed (no SensorML in sml_recs dict). URL: {ds}".format(station=station_urn, ds=describe_sensor_url[station_urn].replace("&", "&")))
failures.append(station_urn)
continue
try:
ds = IoosDescribeSensor(sml._root)
except AttributeError:
                self.log.write(u"\nInvalid SensorML passed to IoosDescribeSensor. Check DescribeSensor request for: {station}, URL: {ds}".format(station=station_urn, ds=describe_sensor_url[station_urn].replace("&", "&")))
                print("Invalid SensorML passed to IoosDescribeSensor. Check DescribeSensor request for: {station}, URL: {ds}".format(station=station_urn, ds=describe_sensor_url[station_urn].replace("&", "&")))
station = OrderedDict()
# debug:
if self.verbose:
self.log.write(u"\n\nProcessing station: {station}".format(station=station_urn))
print("Processing station: {station}".format(station=station_urn))
self.log.write("\n" + etree.tostring(sml._root).decode('utf-8'))
# assign 'pos' to GML point location (accommodate 'gml:coordinates' as used by NDBC if gml:Point not found):
try:
pos = testXMLValue(ds.system.location.find(self.nsp('gml:Point/gml:pos'))) if testXMLValue(ds.system.location.find(self.nsp('gml:Point/gml:pos'))) is not None else testXMLValue(ds.system.location.find(self.nsp('gml:Point/gml:coordinates')))
station['lon'] = float(pos.split()[1])
station['lat'] = float(pos.split()[0])
except AttributeError as e:
station['lon'] = None
station['lat'] = None
system_el = sml._root.findall(self.nsp('sml:member'))[0].find(self.nsp('sml:System'))
# Parse the DocumentList into a dict storing documents by index value 'name' (may cause index duplication
# errors but there is not enough information in SensorML for alternatives)
# Assume that member corresponds to xlink:arcrole="urn:ogc:def:role:webPage"
documents = system_el.findall(self.nsp('sml:documentation/sml:DocumentList/sml:member'))
documents_dct = {}
for d in documents:
document = Documentation(d)
name = testXMLAttribute(d, "name")
# url = document.documents[0].url
documents_dct[name] = document
            # obtain list of contacts (accommodate 'sml:contact' element repetition used by NDBC instead of ContactList):
contacts = system_el.findall(self.nsp('sml:contact/sml:ContactList/sml:member')) if system_el.findall(self.nsp('sml:contact/sml:ContactList/sml:member')) else system_el.findall(self.nsp('sml:contact'))
contacts_dct = {}
for c in contacts:
contact = Contact(c)
role = contact.role.split('/')[-1]
contacts_dct[role] = contact
# verify a 'publisher' Contact exists (template expects one):
if "publisher" not in contacts_dct.keys():
self.log.write(u"\n\nStation: {station} skipped. No \'http://mmisw.org/ont/ioos/definition/publisher\' Contact role defined in SensorML as required. Roles defined: [{roles}]".format(station=station_urn, roles=", ".join(contacts_dct.keys())))
print("Station: {station} skipped. No \'http://mmisw.org/ont/ioos/definition/publisher\' Contact role defined in SensorML as required. Roles defined: [{roles}]".format(station=station_urn, roles=", ".join(contacts_dct.keys())))
failures.append(station_urn)
continue
sweQuants = system_el.findall(self.nsp('sml:outputs/sml:OutputList/sml:output/swe:Quantity'))
quant_lst = [sweQuant.attrib['definition'] for sweQuant in sweQuants]
parameter_lst = [sweQuant.split('/')[-1] for sweQuant in quant_lst]
            # attempt to read beginPosition, if available; otherwise use the current date, because ISO requires a date value in the output location
            # in the template:
beginPosition = testXMLValue(system_el.find(self.nsp('sml:validTime/gml:TimePeriod/gml:beginPosition')))
try:
begin_service_date = parser.parse(beginPosition)
except (AttributeError, TypeError) as e:
begin_service_date = datetime.now(pytz.utc)
station['station_urn'] = station_urn
station['sos_url'] = sos_url_params
station['describesensor_url'] = describe_sensor_url[station_urn]
station['shortName'] = ds.shortName
station['longName'] = ds.longName
station['wmoID'] = ds.get_ioos_def('wmoID', 'identifier', ont)
station['serverName'] = self.server_name
# Some capabilities-level metadata:
station['title'] = sosgc.identification.title
station['abstract'] = sosgc.identification.abstract
station['keywords'] = sosgc.identification.keywords
station['begin_service_date'] = begin_service_date
# Beware that a station can have >1 classifier of the same type
# This code does not accommodate that possibility
station['platformType'] = ds.platformType
station['parentNetwork'] = ds.get_ioos_def('parentNetwork', 'classifier', ont)
station['sponsor'] = ds.get_ioos_def('sponsor', 'classifier', ont)
            # store some nested dictionaries in 'station' for appropriate SensorML sources:
station['contacts_dct'] = contacts_dct
station['documents_dct'] = documents_dct
station['starting'] = ds.starting
station['ending'] = ds.ending
# station['starting_isostr'] = datetime.isoformat(ds.starting)
# station['ending_isostr'] = datetime.isoformat(ds.ending)
station['parameter_uris'] = ','.join(quant_lst)
station['parameters'] = ','.join(parameter_lst)
station['variables'] = [var.split('/')[-1] for var in ds.variables]
if self.verbose:
for var in ds.variables:
self.log.write(u"\nvariable: {var}".format(var=var))
print("variable: {var}".format(var=var))
# print(sosgc.contents)
# for id, offering in sosgc.contents.iteritems():
# print("sosgc.contents: {item}".format(item=id))
# parse 'responseFormat' vals from SensorML:
# response_formats = sosgc.contents[station_urn].response_formats
response_formats = []
for id, sosgc.content in sosgc.contents.items():
if sosgc.content.name == station_urn:
response_formats = sosgc.content.response_formats
# response_formats = [ sosgc.content.response_formats for id, sosgc.content in sosgc.contents.items() if sosgc.content.name == station_urn ]
# match responseFormats from SensorML (response_formats) against those passed in --response_formats parameter to
# populate 'download_formats' list, that is then used to generate GetObservation requests for the template:
# (default --response_formats values are: 'application/json,application/zip; subtype=x-netcdf' )
download_formats = [response_format for response_format in response_formats if response_format in self.response_formats]
station['response_formats'] = response_formats
station['download_formats'] = download_formats
if self.verbose:
for format in response_formats:
self.log.write(u"\nresponseFormat: {format}".format(format=format))
print("responseFormat: {format}".format(format=format))
for format in download_formats:
self.log.write(u"\ndownloadFormats: {format}".format(format=format))
print("downloadFormats: {format}".format(format=format))
# calculate event_time using self.getobs_req_hours:
if ds.starting is not None and ds.ending is not None:
event_time = "{begin:%Y-%m-%dT%H:%M:%S}/{end:%Y-%m-%dT%H:%M:%S}".format(begin=ds.ending - timedelta(hours=self.getobs_req_hours), end=ds.ending)
if self.verbose:
self.log.write(u"\nUsing starting/ending times from SensorML for eventTime")
print("Using starting/ending times from SensorML for eventTime")
self.log.write(u"\nobservationTimeRange: starting: {start}, ending: {end}".format(start=ds.starting, end=ds.ending))
print("observationTimeRange: starting: {start}, ending: {end}".format(start=ds.starting, end=ds.ending))
else:
now = datetime.now(pytz.utc)
then = now - timedelta(hours=self.getobs_req_hours)
event_time = "{begin:%Y-%m-%dT%H:%M:%S}/{end:%Y-%m-%dT%H:%M:%S}".format(begin=then, end=now)
if self.verbose:
self.log.write(u"\nNo 'observationTimeRange' present in SensorML. Using present time for eventTime: then: {then:%Y-%m-%dT%H:%M:%S%z}, now: {now:%Y-%m-%dT%H:%M:%S%z}".format(then=then, now=now))
print("No 'observationTimeRange' present in SensorML. Using present time for eventTime: then: {then:%Y-%m-%dT%H:%M:%S%z}, now: {now:%Y-%m-%dT%H:%M:%S%z}".format(then=then, now=now))
if self.verbose:
self.log.write(u"\neventTime: {time}".format(time=event_time))
print("eventTime: {time}".format(time=event_time))
# create a dict to store parameters for valid example GetObservation requests for station:
getobs_req_dct = {}
# populate a parameters dictionary for download links for each 'observedProperty' type and secondly for each 'responseFormat' per observedProperty:
getobs_params_base = {'service': 'SOS', 'request': 'GetObservation', 'version': '1.0.0', 'offering': station_urn, 'eventTime': event_time}
for variable in ds.variables:
getobs_params = getobs_params_base.copy()
getobs_params['observedProperty'] = variable
variable = variable.split('/')[-1]
for format in download_formats:
getobs_params['responseFormat'] = format
getobs_request_url_encoded = sos_url + '?' + urlencode(getobs_params)
getobs_request_url = unquote(getobs_request_url_encoded)
getobs_req_dct[variable + '-' + format] = {
'variable': variable,
'url': getobs_request_url,
'format_type': self.RESPONSE_FORMAT_TYPE_MAP.get(format, format),
'format_name': self.RESPONSE_FORMAT_NAME_MAP.get(format, format)
}
if self.verbose:
self.log.write(u"\ngetobs_request_url (var: {variable}): {getobs_request_url}".format(variable=variable.split("/")[-1], getobs_request_url=getobs_request_url))
print("getobs_request_url (var: {variable}): {getobs_request_url}".format(variable=variable.split("/")[-1], getobs_request_url=getobs_request_url))
# ToDo: finish adding the 'getobs_req_dct' to the output template
station['getobs_req_dct'] = getobs_req_dct
station_recs.append(station)
# extra debug for failed stations in verbose mode:
if self.verbose:
self.log.write(u"\n\n\nSOS DescribeSensor request errors recap. Failed requests:")
print("SOS DescribeSensor request errors recap. Failed requests:")
for station_fail, msg in iteritems(sml_errors):
                self.log.write(u"\n{station} - {msg}. DescribeSensor URL: {ds}".format(station=station_fail, msg=msg, ds=describe_sensor_url[station_fail].replace("&amp;", "&")))
                print("{station} - {msg}. DescribeSensor URL: {ds}".format(station=station_fail, msg=msg, ds=describe_sensor_url[station_fail].replace("&amp;", "&")))
if failures:
self.log.write(u"\nStations in 'failures' list (should match DescribeSensor errors):")
print("Stations in 'failures' list (should match DescribeSensor errors):")
for station_fail in failures:
self.log.write(u"\n{station}".format(station=station_fail))
print("{station}".format(station=station_fail))
if station_recs:
stations_df = pd.DataFrame.from_records(station_recs, columns=station.keys())
stations_df.index = stations_df['station_urn']
return stations_df
else:
return None
def generate_iso(self, df):
"""
"""
# set up the Jinja2 template:
env = Environment(loader=PackageLoader('sensorml2iso', 'templates'), trim_blocks=True, lstrip_blocks=True, autoescape=True)
template = env.get_template('sensorml_iso.xml')
for idx, station in df.iterrows():
ctx = {}
# populate some general elements for the template:
# we can use format filters in the template to format dates...
# ctx['metadataDate'] = "{metadata_date:%Y-%m-%d}".format(metadata_date=datetime.today())
ctx['metadataDate'] = datetime.now()
# debug: get the first station:
# station = df.iloc[0]
ctx['identifier'] = station.station_urn
ctx['contacts_dct'] = station['contacts_dct']
ctx['documents_dct'] = station['documents_dct']
ctx['sos_url'] = station['sos_url']
ctx['describesensor_url'] = station['describesensor_url']
ctx['lon'] = station['lon']
ctx['lat'] = station['lat']
ctx['shortName'] = station.shortName
ctx['longName'] = station.longName
ctx['wmoID'] = station.wmoID
ctx['serverName'] = station.serverName
ctx['title'] = station.title
ctx['abstract'] = station.abstract
ctx['keywords'] = station.keywords
ctx['beginServiceDate'] = station.begin_service_date
ctx['platformType'] = station.platformType
ctx['parentNetwork'] = station.parentNetwork
ctx['sponsor'] = station.sponsor
ctx['starting'] = station.starting
ctx['ending'] = station.ending
ctx['parameter_uris'] = station.parameter_uris
            ctx['parameters'] = station.parameters
ctx['variables'] = station.variables
ctx['response_formats'] = station.response_formats
ctx['download_formats'] = station.download_formats
ctx['getobs_req_dct'] = station.getobs_req_dct
output_filename = os.path.join(self.output_directory, "{serverName}-{station}.xml".format(serverName=self.server_name, station=station.station_urn.replace(":", "_")))
try:
iso_xml = template.render(ctx)
output_file = io.open(output_filename, mode='wt', encoding='utf8')
output_file.write(iso_xml)
output_file.close()
if self.verbose:
self.log.write(u"\n\nMetadata for station: {station} written to output file: {out_file}".format(station=station.station_urn, out_file=os.path.abspath(output_filename)))
print("\nMetadata for station: {station} written to output file: {out_file}".format(station=station.station_urn, out_file=os.path.abspath(output_filename)))
except OSError as ex:
if ex.errno == errno.EEXIST:
if self.verbose:
self.log.write(u"\nWarning, output file: {out_file} already exists, and can't be written to, skipping.".format(out_file=output_filename))
print("Warning, output file: {out_file} already exists, and can't be written to, skipping.".format(out_file=output_filename))
else:
                    self.log.write(u"\nWarning: Unable to open output file: {out_file} for writing, skipping.".format(out_file=output_filename))
print("Warning: Unable to open output file: {out_file} for writing, skipping.".format(out_file=output_filename))
continue
def generate_describe_sensor_url(self, sos, procedure=None, oFrmt=None):
"""
"""
# generate a DescribeSensor request to include in the ISO output (lifted from OWSlib):
try:
base_url = next((m.get('url') for m in sos.getOperationByName('DescribeSensor').methods if m.get('type').lower() == "get"))
except StopIteration:
base_url = sos.url
if not base_url.endswith("?"):
base_url = base_url + "?"
params = {'service': 'SOS', 'version': sos.version, 'request': 'DescribeSensor', 'procedure': procedure, 'outputFormat': oFrmt}
return base_url + unquote_plus(urlencode(params))
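    # Added illustration for generate_describe_sensor_url above (hedged; the
    # endpoint and procedure URN are hypothetical, not taken from this repository):
    # for an SOS at https://sos.example.org/sos and procedure
    # urn:ioos:station:example:0001 it would return a GET URL roughly of the form
    #   https://sos.example.org/sos?service=SOS&version=1.0.0&request=DescribeSensor
    #     &procedure=urn:ioos:station:example:0001&outputFormat=text/xml;subtype="sensorML/1.0.1"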
def create_output_dir(self):
"""
"""
try:
os.makedirs(self.output_directory)
# test error handling:
# raise OSError
except OSError as ex:
if ex.errno == errno.EEXIST and os.path.isdir(self.output_directory):
self.log.write(u"\nWarning: the configured output directory: {output_dir} already exists. Files will be overwritten.".format(output_dir=os.path.abspath(self.output_directory)))
print("Warning: the configured output directory: {output_dir} already exists. Files will be overwritten.".format(output_dir=os.path.abspath(self.output_directory)))
# sys.exit("Error: the configured output directory: {output_dir} already exists.".format(output_dir=os.path.abspath(self.output_directory)))
else:
self.log.write(u"\nError: the configured output directory: {output_dir} was not able to be created.".format(output_dir=os.path.abspath(self.output_directory)))
sys.exit("Error: the configured output directory: {output_dir} was not able to be created.".format(output_dir=os.path.abspath(self.output_directory)))
def print_debug_info(self):
"""
"""
# just print out some parameter info:
if self.verbose:
self.log.write(u"sensorml2iso:\n______________\nService: {service}\nStations (--stations): {stations}\nActive Station Days (-d|--active_station_days): {active_station_days}\nGetObs Request Hours (--getobs_req_hours): {getobs_req_hours}\nResponse Formats (--response_formats): {response_formats}\nSOS Type (--sos_type): {sos_type}\nOutput Dir (--output_dir): {output_dir}\n______________\n\n".format(service=self.service, stations=self.stations, active_station_days=self.active_station_days, getobs_req_hours=self.getobs_req_hours, response_formats=self.response_formats, sos_type=self.sos_type, output_dir=os.path.abspath(self.output_directory)))
print("sensorml2iso:\n______________\nService: {service}\nStations (--stations): {stations}\nActive Station Days (-d|--active_station_days): {active_station_days}\nGetObs Request Hours (--getobs_req_hours): {getobs_req_hours}\nResponse Formats (--response_formats): {response_formats}\nSOS Type (--sos_type): {sos_type}\nOutput Dir (--output_dir): {output_dir}\n______________\n\n".format(service=self.service, stations=self.stations, active_station_days=self.active_station_days, getobs_req_hours=self.getobs_req_hours, response_formats=self.response_formats, sos_type=self.sos_type, output_dir=os.path.abspath(self.output_directory)))
| 58.885666
| 658
| 0.632422
|
01a4ab4758ad4615ae1d2fa82ff586dc380afd19
| 304
|
py
|
Python
|
fairscale/experimental/nn/__init__.py
|
zhaojuanmao/fairscale
|
61ece000bd1b70029270e2dccab66ffa2ca16d51
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1,662
|
2020-07-15T21:40:19.000Z
|
2022-03-31T10:45:12.000Z
|
fairscale/experimental/nn/__init__.py
|
zhaojuanmao/fairscale
|
61ece000bd1b70029270e2dccab66ffa2ca16d51
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 648
|
2020-07-21T19:00:32.000Z
|
2022-03-30T23:11:41.000Z
|
fairscale/experimental/nn/__init__.py
|
zhaojuanmao/fairscale
|
61ece000bd1b70029270e2dccab66ffa2ca16d51
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 170
|
2020-07-16T00:28:01.000Z
|
2022-03-15T19:39:21.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .offload import OffloadModel
from .sync_batchnorm import SyncBatchNorm
__all__: List[str] = []
| 25.333333
| 65
| 0.769737
|
16194eec1d4843138a1bc866b7081025b6d2c776
| 230
|
py
|
Python
|
src/aiquscanner.py
|
shlei/aiquFileTool
|
44a7f981fa9b665f6df67c9af226215df1b7914b
|
[
"MIT"
] | 1
|
2019-12-13T04:36:49.000Z
|
2019-12-13T04:36:49.000Z
|
src/aiquscanner.py
|
shlei/aiquFileTool
|
44a7f981fa9b665f6df67c9af226215df1b7914b
|
[
"MIT"
] | null | null | null |
src/aiquscanner.py
|
shlei/aiquFileTool
|
44a7f981fa9b665f6df67c9af226215df1b7914b
|
[
"MIT"
] | null | null | null |
from os.path import isdir as isPath
from os.path import isfile as isFile
from os.path import join as pathJoin
def dir_scan(root_dir, options):
print("dir scanning")
def file_scan(root_dir, options):
    print("file scanning")
| 25.555556
| 36
| 0.756522
|
7180680bc24b0095b472c7f2163ad5b951b1cbb9
| 7,428
|
py
|
Python
|
test/unit/test_graph_selection.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 1
|
2020-11-18T21:25:53.000Z
|
2020-11-18T21:25:53.000Z
|
test/unit/test_graph_selection.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 50
|
2021-11-02T06:20:50.000Z
|
2022-03-31T06:23:16.000Z
|
test/unit/test_graph_selection.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 1
|
2021-11-23T20:28:07.000Z
|
2021-11-23T20:28:07.000Z
|
import unittest
from unittest import mock
import pytest
import string
import dbt.exceptions
import dbt.graph.selector as graph_selector
import dbt.graph.cli as graph_cli
from dbt.node_types import NodeType
import networkx as nx
def _get_graph():
integer_graph = nx.balanced_tree(2, 2, nx.DiGraph())
package_mapping = {
i: 'm.' + ('X' if i % 2 == 0 else 'Y') + '.' + letter
for (i, letter) in enumerate(string.ascii_lowercase)
}
# Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
return graph_selector.Graph(nx.relabel_nodes(integer_graph, package_mapping))
def _get_manifest(graph):
nodes = {}
for unique_id in graph:
fqn = unique_id.split('.')
node = mock.MagicMock(
unique_id=unique_id,
fqn=fqn,
package_name=fqn[0],
tags=[],
resource_type=NodeType.Model,
empty=False,
config=mock.MagicMock(enabled=True),
)
nodes[unique_id] = node
nodes['m.X.a'].tags = ['abc']
nodes['m.Y.b'].tags = ['abc', 'bcef']
nodes['m.X.c'].tags = ['abc', 'bcef']
nodes['m.Y.d'].tags = []
nodes['m.X.e'].tags = ['efg', 'bcef']
nodes['m.Y.f'].tags = ['efg', 'bcef']
nodes['m.X.g'].tags = ['efg']
return mock.MagicMock(nodes=nodes)
@pytest.fixture
def graph():
return graph_selector.Graph(_get_graph())
@pytest.fixture
def manifest(graph):
return _get_manifest(graph)
def id_macro(arg):
if isinstance(arg, str):
return arg
try:
return '_'.join(arg)
except TypeError:
return arg
run_specs = [
# include by fqn
(['X.a'], [], {'m.X.a'}),
# include by tag
(['tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
# exclude by tag
(['*'], ['tag:abc'], {'m.Y.d', 'm.X.e', 'm.Y.f', 'm.X.g'}),
# tag + fqn
(['tag:abc', 'a'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'd'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.d'}),
# multiple node selection across packages
(['X.a', 'b'], [], {'m.X.a', 'm.Y.b'}),
(['X.a+'], ['b'], {'m.X.a','m.X.c', 'm.Y.d','m.X.e','m.Y.f','m.X.g'}),
# children
(['X.c+'], [], {'m.X.c', 'm.Y.f', 'm.X.g'}),
(['X.a+1'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['X.a+'], ['tag:efg'], {'m.X.a','m.Y.b','m.X.c', 'm.Y.d'}),
# parents
(['+Y.f'], [], {'m.X.c', 'm.Y.f', 'm.X.a'}),
(['1+Y.f'], [], {'m.X.c', 'm.Y.f'}),
# childrens parents
(['@X.c'], [], {'m.X.a', 'm.X.c', 'm.Y.f', 'm.X.g'}),
# multiple selection/exclusion
(['tag:abc', 'tag:bcef'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:abc', 'tag:bcef'], ['tag:efg'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'tag:bcef'], ['tag:efg', 'a'], {'m.Y.b', 'm.X.c'}),
# intersections
(['a,a'], [], {'m.X.a'}),
(['+c,c+'], [], {'m.X.c'}),
(['a,b'], [], set()),
(['tag:abc,tag:bcef'], [], {'m.Y.b', 'm.X.c'}),
(['*,tag:abc,a'], [], {'m.X.a'}),
(['a,tag:abc,*'], [], {'m.X.a'}),
(['tag:abc,tag:bcef'], ['c'], {'m.Y.b'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@b'], {'m.Y.f'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@a'], set()),
(['*,@a,+b'], ['*,tag:abc,tag:bcef'], {'m.X.a'}),
(['tag:bcef,tag:efg', '*,tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e', 'f'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef'], {'m.X.a', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef', 'tag:abc,a'], {'m.X.e', 'm.Y.f'})
]
@pytest.mark.parametrize('include,exclude,expected', run_specs, ids=id_macro)
def test_run_specs(include, exclude, expected):
graph = _get_graph()
manifest = _get_manifest(graph)
selector = graph_selector.NodeSelector(graph, manifest)
spec = graph_cli.parse_difference(include, exclude)
selected = selector.select_nodes(spec)
assert selected == expected
param_specs = [
('a', False, None, False, None, 'fqn', 'a', False),
('+a', True, None, False, None, 'fqn', 'a', False),
('256+a', True, 256, False, None, 'fqn', 'a', False),
('a+', False, None, True, None, 'fqn', 'a', False),
('a+256', False, None, True, 256, 'fqn', 'a', False),
('+a+', True, None, True, None, 'fqn', 'a', False),
('16+a+32', True, 16, True, 32, 'fqn', 'a', False),
('@a', False, None, False, None, 'fqn', 'a', True),
('a.b', False, None, False, None, 'fqn', 'a.b', False),
('+a.b', True, None, False, None, 'fqn', 'a.b', False),
('256+a.b', True, 256, False, None, 'fqn', 'a.b', False),
('a.b+', False, None, True, None, 'fqn', 'a.b', False),
('a.b+256', False, None, True, 256, 'fqn', 'a.b', False),
('+a.b+', True, None, True, None, 'fqn', 'a.b', False),
('16+a.b+32', True, 16, True, 32, 'fqn', 'a.b', False),
('@a.b', False, None, False, None, 'fqn', 'a.b', True),
('a.b.*', False, None, False, None, 'fqn', 'a.b.*', False),
('+a.b.*', True, None, False, None, 'fqn', 'a.b.*', False),
('256+a.b.*', True, 256, False, None, 'fqn', 'a.b.*', False),
('a.b.*+', False, None, True, None, 'fqn', 'a.b.*', False),
('a.b.*+256', False, None, True, 256, 'fqn', 'a.b.*', False),
('+a.b.*+', True, None, True, None, 'fqn', 'a.b.*', False),
('16+a.b.*+32', True, 16, True, 32, 'fqn', 'a.b.*', False),
('@a.b.*', False, None, False, None, 'fqn', 'a.b.*', True),
('tag:a', False, None, False, None, 'tag', 'a', False),
('+tag:a', True, None, False, None, 'tag', 'a', False),
('256+tag:a', True, 256, False, None, 'tag', 'a', False),
('tag:a+', False, None, True, None, 'tag', 'a', False),
('tag:a+256', False, None, True, 256, 'tag', 'a', False),
('+tag:a+', True, None, True, None, 'tag', 'a', False),
('16+tag:a+32', True, 16, True, 32, 'tag', 'a', False),
('@tag:a', False, None, False, None, 'tag', 'a', True),
('source:a', False, None, False, None, 'source', 'a', False),
('source:a+', False, None, True, None, 'source', 'a', False),
('source:a+1', False, None, True, 1, 'source', 'a', False),
('source:a+32', False, None, True, 32, 'source', 'a', False),
('@source:a', False, None, False, None, 'source', 'a', True),
]
@pytest.mark.parametrize(
'spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents',
param_specs,
ids=id_macro
)
def test_parse_specs(spec, parents, parents_depth, children, children_depth, filter_type, filter_value, childrens_parents):
parsed = graph_selector.SelectionCriteria.from_single_spec(spec)
assert parsed.parents == parents
assert parsed.parents_depth == parents_depth
assert parsed.children == children
assert parsed.children_depth == children_depth
assert parsed.method == filter_type
assert parsed.value == filter_value
assert parsed.childrens_parents == childrens_parents
invalid_specs = [
'@a+',
'@a.b+',
'@a.b*+',
'@tag:a+',
'@source:a+',
]
@pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k))
def test_invalid_specs(invalid):
with pytest.raises(dbt.exceptions.RuntimeException):
graph_selector.SelectionCriteria.from_single_spec(invalid)
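# Added summary note (not part of the original test module): the specs above
# exercise dbt's node-selection grammar as parsed by SelectionCriteria --
# '+model' walks ancestors, 'model+' walks descendants, 'N+model'/'model+N'
# bound the traversal depth, '@model' also pulls in the ancestors of the
# selected descendants, 'tag:'/'source:' switch the selection method, and
# 'a,b' intersects two selections. Expected sets are derived from the balanced
# binary tree built in _get_graph().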
| 37.515152
| 123
| 0.522348
|
4cc0ffa0912e3dcf4bf84e644a8108717a333bec
| 1,442
|
py
|
Python
|
jwt/tests/test_utils.py
|
dizballanze/python-jwt
|
a3feebcc6c4638a3e66dc78fca3c9c98f4fdbf4c
|
[
"Apache-2.0"
] | null | null | null |
jwt/tests/test_utils.py
|
dizballanze/python-jwt
|
a3feebcc6c4638a3e66dc78fca3c9c98f4fdbf4c
|
[
"Apache-2.0"
] | null | null | null |
jwt/tests/test_utils.py
|
dizballanze/python-jwt
|
a3feebcc6c4638a3e66dc78fca3c9c98f4fdbf4c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Gehirn Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jwt.utils import (
b64encode,
b64decode,
uint_b64encode,
uint_b64decode,
)
def test_b64encode():
ret = (b'{"iss":"joe",\r\n "exp":1300819380,\r\n '
b'"http://example.com/is_root":true}')
expected = ('eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQog'
'Imh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ')
assert b64encode(ret) == expected
def test_b64decode():
ret = ('eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQog'
'Imh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ')
expected = (b'{"iss":"joe",\r\n "exp":1300819380,\r\n '
b'"http://example.com/is_root":true}')
assert b64decode(ret) == expected
def test_uint_b64encode():
assert uint_b64encode(65537) == 'AQAB'
def test_uint_b64decode():
assert uint_b64decode('AQAB') == 65537
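# Added sketch (not part of the original tests): a round-trip check implied by
# the fixtures above. b64encode/b64decode are the unpadded URL-safe base64
# variant used by JOSE (RFC 7515), and 'AQAB' corresponds to 65537 == 0x010001,
# the usual RSA public exponent, via its big-endian bytes b'\x01\x00\x01'.
def test_b64_roundtrip():
    payload = b'\x01\x00\x01'
    assert b64decode(b64encode(payload)) == payload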
| 30.680851
| 74
| 0.700416
|
5206b84de62863846233c7deeb95c9ab3896de23
| 4,799
|
py
|
Python
|
atnlp/data/reuters.py
|
wedavey/atnlp
|
002497f27abfcdac9701aa324301d482dbf4df0e
|
[
"MIT"
] | null | null | null |
atnlp/data/reuters.py
|
wedavey/atnlp
|
002497f27abfcdac9701aa324301d482dbf4df0e
|
[
"MIT"
] | null | null | null |
atnlp/data/reuters.py
|
wedavey/atnlp
|
002497f27abfcdac9701aa324301d482dbf4df0e
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
reuters.py
~~~~~~~~~~
Functionality to read in Reuters corpus using the nltk module
"""
__author__ = "Will Davey"
__email__ = "wedavey@gmail.com"
__created__ = "2018-05-05"
__copyright__ = "Copyright 2018 Will Davey"
__license__ = "MIT https://opensource.org/licenses/MIT"
# standard imports
# third party imports
from nltk.corpus import reuters
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
# local imports
# globals
def get_data(cats=None, tokenize=None):
"""Return raw text data from Reuters corpus in (train, test) tuple
If *cats* is specified, data is filtered to only contain documents
from the specified categories.
If *tokenize* is specified, data is tokenized.
:param cats: categories
:param tokenize: tokenization function
:return: tuple of (train, test) data (each is list of strings)
"""
return (get_data_train(cats,tokenize=tokenize), get_data_test(cats,tokenize=tokenize))
def get_labels(cats=None):
"""Return topic labels (one-hot format) from Reuters corpus in (train, test) tuple
:param cats: categories
:return: tuple of (train, test) topic labels (one-hot format)
"""
return (get_labels_train(cats), get_labels_test(cats))
def get_data_train(cats=None, tokenize=None):
"""Return raw text training data (cf *get_data*)
:param cats: categories
:param tokenize: tokenization function
:return: train data (list of strings)
"""
return ReutersIter(train_filenames(cats=cats), tokenize=tokenize)
def get_data_test(cats=None, tokenize=None):
"""Return raw text testing data (cf *get_data*)
:param cats: categories
:param tokenize: tokenization function
:return: test data (list of strings)
"""
return ReutersIter(test_filenames(cats=cats), tokenize=tokenize)
def get_labels_train(cats=None):
"""Return training set topic labels (one-hot format) from Reuters corpus (cf *get_labels*)
:param cats: categories
:return: train topic labels (one-hot format)
"""
return labels(train_filenames(cats), cats=cats)
def get_labels_test(cats=None):
"""Return testing set topic labels (one-hot format) from Reuters corpus (cf *get_labels*)
:param cats: categories
:return: test topic labels (one-hot format)
"""
return labels(test_filenames(cats), cats=cats)
def get_topics(min_samples=None):
"""Return set of topics from Reuters corpus
If *min_samples* is specified, only topics with at
least that many examples are included.
:param min_samples: minimum number of example per topic
:return: list of topics
"""
cats = reuters.categories()
if min_samples is not None:
cats = [c for c in reuters.categories() if len(reuters.fileids(c)) >= min_samples]
return cats
def train_filenames(cats=None):
"""Return filenames of training examples
If *cats* is specified, filenames are filtered to only contain documents
from the specified categories.
:param cats: categories
:return: list of filenames
"""
return np.array([f for f in reuters.fileids(cats) if f.startswith('train')])
def test_filenames(cats=None):
"""Return filenames of testing examples
If *cats* is specified, filenames are filtered to only contain documents
from the specified categories.
:param cats: categories
:return: list of filenames
"""
return np.array([f for f in reuters.fileids(cats) if f.startswith('test')])
def labels(filenames, cats=None):
"""Return topic labels (one-hot format) for given files
:param filenames: selected files from Reuters dataset
:param cats: categories to filter (optional)
:return: topic labels (one-hot format)
"""
if cats is None: cats = reuters.categories()
data = [[c for c in reuters.categories(f) if c in cats] for f in filenames]
mb = MultiLabelBinarizer(classes = cats)
onehot = mb.fit_transform(data)
df = pd.DataFrame(onehot, columns=cats)
return df
class ReutersIter(object):
"""Reuters dataset iterator
    Implements a generator instead of reading the full dataset into memory.
    However, it's not strictly necessary because this dataset is small, and
most of the time we actually create a list from this anyway.
:param files: list of files to iterate over
:param tokenize: tokenization function (optional)
"""
def __init__(self, files, tokenize=None):
self.files = files
self.tokenize = tokenize
def __iter__(self):
for i in range(len(self.files)):
yield self[i]
def __getitem__(self, key):
data = reuters.raw(self.files[key])
if self.tokenize is None:
return data
return self.tokenize(data)
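# Illustrative usage sketch (added for documentation; not part of the original
# module). It assumes the NLTK Reuters corpus has already been downloaded,
# e.g. via nltk.download('reuters'), and the threshold of 100 documents per
# topic is just an example value.
if __name__ == "__main__":
    topics = get_topics(min_samples=100)            # topics with >= 100 documents
    train_docs, test_docs = get_data(cats=topics)   # lazy iterators over raw text
    y_train, y_test = get_labels(cats=topics)       # one-hot DataFrames of topic labels
    print("training documents:", len(list(train_docs)))
    print("label matrix shape:", y_train.shape)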
# EOF
| 28.39645
| 94
| 0.697645
|
bec8aa45fcf825c40b3c1f4634f5127be8d3d36a
| 8,579
|
py
|
Python
|
languages/fr.py
|
marian-vignau/OpenLex
|
e65a5ede23dacf356786aa32fbd70cee8a3743ce
|
[
"Linux-OpenIB"
] | 8
|
2018-12-14T15:58:23.000Z
|
2021-04-08T02:23:02.000Z
|
languages/fr.py
|
marian-vignau/OpenLex
|
e65a5ede23dacf356786aa32fbd70cee8a3743ce
|
[
"Linux-OpenIB"
] | null | null | null |
languages/fr.py
|
marian-vignau/OpenLex
|
e65a5ede23dacf356786aa32fbd70cee8a3743ce
|
[
"Linux-OpenIB"
] | 4
|
2019-01-29T03:35:16.000Z
|
2019-06-20T11:40:56.000Z
|
# coding: utf8
{'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Clés de cache',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugins',
'Controller': 'Contrôleur',
'Copyright': 'Copyright',
'Created By': 'Créé par',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'db': 'bdd',
'DB Model': 'Modèle BDD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'E-mail',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email et SMS',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group ID': 'Groupe ID',
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'Comment êtes-vous arrivé ici?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Introduction',
'Invalid email': 'E-mail invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Is Active': 'Est actif',
'Key': 'Clé',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Chat en direct',
'Live Chat': 'Chat en direct',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Gérer le Cache',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Object or table name': 'Objet ou nom de table',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Plugins': 'Plugins',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'Clés de cache de la RAM',
'Ram Cleared': 'Ram vidée',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'enregistrement",
'Registration key': "Clé d'enregistrement",
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Taille du cache:',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Support',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| 45.152632
| 297
| 0.629327
|
b723e8414b8341949fccc941923dfdf0bef93a80
| 164
|
py
|
Python
|
khatangatao/asgi.py
|
khatangatao/khatangatao
|
58ecf793f2dfdaceee90e5a2d6b60559f208a131
|
[
"MIT"
] | null | null | null |
khatangatao/asgi.py
|
khatangatao/khatangatao
|
58ecf793f2dfdaceee90e5a2d6b60559f208a131
|
[
"MIT"
] | null | null | null |
khatangatao/asgi.py
|
khatangatao/khatangatao
|
58ecf793f2dfdaceee90e5a2d6b60559f208a131
|
[
"MIT"
] | null | null | null |
import os
from channels.asgi import get_channel_layer
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "khatangatao.settings")
channel_layer = get_channel_layer()
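# Added note (not part of the original file): this is the Channels 1.x-style
# ASGI entry point; an ASGI server would typically be pointed at it, e.g.
#   daphne khatangatao.asgi:channel_layer
# (the command above is an assumed typical invocation, not taken from the repo).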
| 23.428571
| 71
| 0.835366
|