Dataset columns (⌀ marks columns that contain null values):

| Column | Type | Values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |
62ad2faaa4417f27b1e2dd75edf9e858d937f1c1 | 5,786 | bzl | Python | docs.bzl | es-ude/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | ["MIT"] | 3 | 2019-06-26T14:08:12.000Z | 2020-03-10T06:24:46.000Z | docs.bzl | es-ude/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | ["MIT"] | 31 | 2019-06-10T10:50:58.000Z | 2021-08-06T13:43:54.000Z | docs.bzl | es-uni-due/EmbeddedSystemsBuildScripts | 276c3ca78ba8285cd26c3c10443d89ccc403a69c | ["MIT"] | 5 | 2019-07-08T23:33:39.000Z | 2020-10-11T20:35:25.000Z |
def _doxygen_archive_impl(ctx):
"""Generate a .tar.gz archive containing documentation using Doxygen.
Args:
name: label for the generated rule. The archive will be "%{name}.tar.gz".
doxyfile: configuration file for Doxygen, @@OUTPUT_DIRECTORY@@ will be replaced with the actual output dir
srcs: source files the documentation will be generated from.
"""
doxyfile = ctx.file.doxyfile
out_file = ctx.outputs.out
out_dir_path = out_file.short_path[:-len(".tar.gz")]
commands = [
"mkdir -p %s" % out_dir_path,
"out_dir_path=$(cd %s; pwd)" % out_dir_path,
"pushd %s" % doxyfile.dirname,
"""sed -e \"s:@@OUTPUT_DIRECTORY@@:$out_dir_path/:\" <%s | doxygen -""" % doxyfile.basename,
"popd",
"tar czf %s -C %s ./" % (out_file.path, out_dir_path),
]
ctx.actions.run_shell(
inputs = ctx.files.srcs + [doxyfile],
outputs = [out_file],
use_default_shell_env = True,
command = " && ".join(commands),
)
doxygen_archive = rule(
implementation = _doxygen_archive_impl,
attrs = {
"doxyfile": attr.label(
mandatory = True,
allow_single_file = True,
),
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
),
},
outputs = {
"out": "%{name}.tar.gz",
},
)
def _sphinx_archive_impl(ctx):
"""
Generates a sphinx documentation archive (.tar.gz).
The output is called <name>.tar.gz, where <name> is the
name of the rule.
Args:
config_file: sphinx conf.py file
doxygen_xml_archive: an archive containing the generated doxygen
xml files to be consumed by the breathe sphinx plugin.
Setting this attribute automatically enables the breathe plugin
srcs: the *.rst files to consume
"""
out_file = ctx.outputs.sphinx
out_dir_path = out_file.short_path[:-len(".tar.gz")]
commands = ["mkdir _static"]
inputs = ctx.files.srcs
if ctx.attr.doxygen_xml_archive != None:
commands = commands + [
"mkdir xml",
"tar -xzf {xml} -C xml --strip-components=2".format(xml = ctx.file.doxygen_xml_archive.path),
]
inputs.append(ctx.file.doxygen_xml_archive)
commands = commands + [
"sphinx-build -M build ./ _build -q -b html -C {settings}".format(
settings = _sphinx_settings(ctx),
out_dir = out_dir_path,
),
]
commands = commands + [
"tar czf %s -C _build/build/ ./" % (out_file.path),
]
ctx.actions.run_shell(
use_default_shell_env = True,
outputs = [out_file],
inputs = inputs,
command = " && ".join(commands),
)
sphinx_archive = rule(
implementation = _sphinx_archive_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
),
"doxygen_xml_archive": attr.label(
default = None,
allow_single_file = True,
),
"master_doc": attr.string(default = "contents"),
"version": attr.string(
mandatory = True,
),
"project": attr.string(
default = "",
),
"copyright": attr.string(default = ""),
"extensions": attr.string_list(default = [
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
]),
"templates": attr.string_list(default = []),
"source_suffix": attr.string_list(default = [".rst"]),
"exclude_patterns": attr.string_list(default = ["_build", "Thumbs.db", ".DS_Store"]),
"pygments_style": attr.string(default = ""),
"language": attr.string(default = ""),
"html_theme": attr.string(default = "sphinx_rtd_theme"),
"html_theme_options": attr.string_dict(default = {}),
"html_static_path": attr.string_list(default = ["_static"]),
"html_sidebars": attr.string_dict(default = {}),
"intersphinx_mapping": attr.string_dict(default = {}),
},
outputs = {
"sphinx": "%{name}.tar.gz",
},
)
def add_option(settings, setting, value):
# only emit a -D override when a non-empty value was actually provided
if value != None and len(value) > 0:
settings = settings + ["-D {setting}={value}".format(setting = setting, value = value.replace(" ", "\ "))]
return settings
def _sphinx_settings(ctx):
settings = []
extensions = ctx.attr.extensions
settings = add_option(settings, "version", ctx.attr.version)
if ctx.attr.project == "":
settings = add_option(settings, "project", ctx.workspace_name)
else:
settings = add_option(settings, "project", ctx.attr.project)
if ctx.attr.doxygen_xml_archive != None:
extensions = extensions + ["breathe"]
settings = add_option(settings, "breathe_projects." + ctx.workspace_name, "xml")
settings = add_option(settings, "breathe_default_project", ctx.workspace_name)
settings = add_option(settings, "copyright", ctx.attr.copyright)
settings = add_option(settings, "master_doc", ctx.attr.master_doc)
for extension in extensions:
settings = add_option(settings, "extensions", extension)
for template in ctx.attr.templates:
settings = add_option(settings, "templates", template)
for suffix in ctx.attr.source_suffix:
settings = add_option(settings, "source_suffix", suffix)
for pattern in ctx.attr.exclude_patterns:
settings = add_option(settings, "exclude_patterns", pattern)
settings = add_option(settings, "html_theme", ctx.attr.html_theme)
for path in ctx.attr.html_static_path:
settings = add_option(settings, "html_static_path", path)
setting_string = " ".join(settings)
return setting_string
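# Usage sketch (not taken from the repository): how the two rules above might be wired
# up from a BUILD file. The load label, target names, and source globs are hypothetical.
#
# load("//:docs.bzl", "doxygen_archive", "sphinx_archive")
#
# doxygen_archive(
#     name = "doxygen_docs",
#     doxyfile = "Doxyfile",
#     srcs = glob(["src/**/*.h", "src/**/*.c"]),
# )
#
# sphinx_archive(
#     name = "sphinx_docs",
#     srcs = glob(["docs/**/*.rst"]),
#     version = "1.0",
#     doxygen_xml_archive = ":doxygen_docs",
# )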
| 37.816993 | 114 | 0.610093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,802 | 0.311441 |
62ae8dd259b43e9f8c27ede31598aad711abeea2 | 234 | py | Python | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | ["MIT"] | 13 | 2020-09-13T16:50:28.000Z | 2022-03-22T20:49:54.000Z | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | ["MIT"] | 10 | 2020-06-27T12:34:38.000Z | 2022-01-03T12:15:42.000Z | patches/reduceRNG.py | muffinjets/LADXR | bbd82a5b7bac015561bb6a4cfe1c5fa017f827f5 | ["MIT"] | 18 | 2020-05-29T17:48:04.000Z | 2022-02-08T03:36:08.000Z |
from assembler import ASM
def slowdownThreeOfAKind(rom):
rom.patch(0x06, 0x096B, ASM("ldh a, [$E7]\nand $0F"), ASM("ldh a, [$E7]\nand $3F"))
def fixHorseHeads(rom):
rom.patch(0x07, 0x3653, "00010400", "00010000")
| 23.4 | 88 | 0.636752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.282051 |
62b094d0827cf6c404c0fb38a86d155811976aaa | 5,577 | py | Python | jvd/ida/ida.py | L1NNA/JARV1S-Disassembler | 6bc9d9459bd5142406fdda0ed88ba636934c94c6 | ["Apache-2.0"] | 7 | 2020-12-19T18:56:23.000Z | 2021-11-21T20:29:58.000Z | jvd/ida/ida.py | L1NNA/JARV1S-Disassembler | 6bc9d9459bd5142406fdda0ed88ba636934c94c6 | ["Apache-2.0"] | 1 | 2020-12-20T07:57:37.000Z | 2020-12-28T18:10:11.000Z | jvd/ida/ida.py | L1NNA/JARV1S-Ghidra | 84b551b2a1266b6bcb9454aaa01b97b21d7d4d4f | ["Apache-2.0"] | 2 | 2020-12-20T11:32:20.000Z | 2021-03-17T15:36:16.000Z |
import sys
import os
import json
import hashlib
import logging
import base64
import shutil
from concurrent.futures import ProcessPoolExecutor
from subprocess import Popen, PIPE, STDOUT
from jvd.disassembler import DisassemblerAbstract
import logging as log
import traceback
from jvd.utils import read_gz_js, write_gz_js, which, check_output_ctx
import platform
from jvd.resources import require
import time
import threading
SRC = os.path.split(os.path.realpath(__file__))[0]
IDA_script = os.path.join(SRC, 'ida_script.py')
ida_available = which('ida64.exe' if platform.system()
== 'Windows' else 'ida64') != None
ida64 = 'ida64' if platform.system() == 'Windows' else 'idat64'
ida32 = 'ida' if platform.system() == 'Windows' else 'idat'
class IDA(DisassemblerAbstract):
def __init__(self):
pass
def _process(self, file, file_type, output_file_path, decompile=False, verbose=-1):
if not ida_available and 'idaapi' not in sys.modules:
raise FileNotFoundError('IDA is not found!')
log = None
program = ida64
extension = None
if file_type.startswith('IDA '):
# 32-bit database
program = ida32
extension = '.idb'
elif file_type.startswith('FoxPro FPT'):
# 64-bit database
program = ida64
extension = '.i64'
if extension:
db = file + extension
if not os.path.exists(db):
shutil.copyfile(file, db)
file = db
cmd = [program, '-A', '-S{}'.format(IDA_script), file]
# print(cmd)
sub_env = os.environ.copy()
sub_env["output_file_path"] = os.path.abspath(output_file_path)
# print(cmd)
# p = Popen(
# cmd,
# env=sub_env,
# stdout=PIPE,
# stderr=STDOUT)
# log, _ = p.communicate(timeout=self.timeout)
if verbose > 1:
print(' '.join(cmd))
with check_output_ctx(cmd, timeout=self.timeout, env=sub_env) as log:
if not log:
log = ''
if decompile:
# IDA's decompiler is assumed to be unavailable here, so decompile with
# Ghidra instead and transfer the decompiled code into the IDA output
jar = require('ghidrajar')
java = require('jdk')
from jvd.ghidra.decompiler import process as gh_process
obj = read_gz_js(output_file_path)
func_entries = [f['addr_start']-obj['bin']['base']
for f in obj['functions']]
output_file_path_gh = output_file_path + '.gh.gz'
gh_process(java, jar, file, output_file_path_gh,
decompile=True, func_entries=func_entries)
if os.path.exists(output_file_path_gh):
obj_gh = read_gz_js(output_file_path_gh)
src = obj_gh['functions_src']
base_diff = obj_gh['bin']['base'] - obj['bin']['base']
for f in src:
f['addr_start'] = f['addr_start'] - base_diff
obj['functions_src'] = src
write_gz_js(obj, output_file_path)
return output_file_path, log
def context_init(self):
if 'idaapi' in sys.modules:
import idaapi
self.f_current = None
def _check():
addr = idaapi.get_screen_ea()
f_current = idaapi.get_func(addr)
if f_current and f_current != self.f_current:
self.f_current = f_current
from jvd.client import search
search(self.context_function_info)
def _step():
idaapi.execute_sync(_check, idaapi.MFF_FAST)
tt = threading.Timer(.5, _step)
tt.daemon = True
tt.start()
_step()
return True
return False
def _get_all_wrapped(self, **kwargs):
from jvd.ida.ida_utils import get_all
import idaapi
# this import cannot be moved to the header since it can
# be only imported when running in context
_bin = {}
def _get():
_bin.update(get_all(**kwargs))
idaapi.execute_sync(_get, idaapi.MFF_FAST)
return _bin
def context_binary_info(self):
_bin_info = self._get_all_wrapped(
function_eas=None,
with_blocks=False)['bin']
return {
k: v for k, v in _bin_info.items() if k not in ['strings', 'data', ]
}
def context_function_info(self):
_all_info = self._get_all_wrapped(
function_eas=None,
with_blocks=True,
current_ea=True
)
refs = set()
for b in _all_info['blocks']:
for i in b.get('ins', []):
for r in i.get('dr', []) + i.get('cr', []):
refs.add(r)
_cleaned_bin = {
k: v for k, v in _all_info['bin'].items() if k not in [
'strings', 'data', 'import_functions', 'export_functions',
'import_modules', 'seg', 'entry_points']
}
_cleaned_bin['strings'] = {
k: v for k, v in _all_info['bin']['strings'].items() if k in refs
}
_cleaned_bin['data'] = {
k: v for k, v in _all_info['bin']['data'].items() if k in refs
}
return {
'bin': _cleaned_bin,
'functions': _all_info['functions'],
'blocks': _all_info['blocks'],
'comments': _all_info['comments'],
}
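# Usage sketch (hypothetical paths; the libmagic type string and timeout handling
# are assumptions, not taken from the project): running the headless pipeline above.
# disassembler = IDA()
# out_path, ida_log = disassembler._process(
#     "samples/app.exe", "PE32 executable (GUI)", "samples/app.exe.asm.json.gz",
#     decompile=False, verbose=0)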
| 32.614035 | 87 | 0.556392 | 4,811 | 0.86265 | 0 | 0 | 0 | 0 | 0 | 0 | 919 | 0.164784 |
62b0c55a01828adb8ecff345e701a1575e8cd8c1 | 3,203 | py | Python | scanapi/variable_parser.py | barbosa/scanapi | 82def9d7c9ef19a2b658d9aa6c973790e2c16ddc | ["MIT"] | null | null | null | scanapi/variable_parser.py | barbosa/scanapi | 82def9d7c9ef19a2b658d9aa6c973790e2c16ddc | ["MIT"] | null | null | null | scanapi/variable_parser.py | barbosa/scanapi | 82def9d7c9ef19a2b658d9aa6c973790e2c16ddc | ["MIT"] | null | null | null |
from enum import Enum
import logging
import os
import re
import sys
import yaml
from scanapi.errors import BadConfigurationError, InvalidPythonCodeError
# Available imports to be used dynamically in the api spec
import datetime
import math
import random
import time
import uuid
logger = logging.getLogger(__name__)
variable_pattern = re.compile("(\\w*)(\\${)(\\w*)(})(\\w*)") # ${<variable_name>}
python_code_pattern = re.compile("(^\\${{)(.*)(}}$)") # ${{<python_code>}}
responses = {}
class EvaluationType(Enum):
ENV_VAR = 1
CUSTOM_VAR = 2
PYTHON_CODE = 3
def evaluate(type, element, node=None):
if isinstance(element, dict):
return evaluate_dict(type, element, node)
if isinstance(element, list):
return evaluate_list(type, element, node)
if not isinstance(element, str):
return element
if type == EvaluationType.ENV_VAR:
try:
return evaluate_env_var(element)
except BadConfigurationError as e:
logger.error(e)
sys.exit()
if type == EvaluationType.CUSTOM_VAR:
return evaluate_custom_var(element, node)
if type == EvaluationType.PYTHON_CODE:
try:
return evaluate_python_code(element)
except InvalidPythonCodeError as e:
logger.error(e)
sys.exit()
return element
def evaluate_dict(type, element, node):
evaluated_dict = {}
for key, value in element.items():
evaluated_dict[key] = evaluate(type, value, node)
return evaluated_dict
def evaluate_list(type, elements, node):
evaluated_list = []
for item in elements:
evaluated_list.append(evaluate(type, item, node))
return evaluated_list
def evaluate_env_var(sequence):
matches = variable_pattern.finditer(sequence)
if not matches:
return sequence
for match in matches:
variable_name = match.group(3)
if variable_name.islower():
continue
try:
variable_value = os.environ[variable_name]
except KeyError as e:
raise BadConfigurationError(e)
sequence = evaluate_var(sequence, match.group(), variable_value)
return sequence
def evaluate_custom_var(sequence, node):
matches = variable_pattern.finditer(sequence)
if not matches or not node:
return sequence
for match in matches:
variable_name = match.group(3)
if variable_name.isupper():
continue
variable_value = evaluate(
EvaluationType.PYTHON_CODE, node.parent.custom_vars[match.group(3)]
)
sequence = evaluate_var(sequence, match.group(), variable_value)
return sequence
def evaluate_var(sequence, variable, variable_value):
variable = re.escape(variable)
return re.sub(variable, variable_value, sequence)
def evaluate_python_code(sequence):
match = python_code_pattern.search(sequence)
if not match:
return sequence
code = match.group(2)
try:
return str(eval(code))
except Exception as e:
raise InvalidPythonCodeError(str(e))
def save_response(request_id, response):
responses[request_id] = response
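# Illustration (not part of the original module): a minimal sketch of the two
# variable syntaxes handled above, assuming it is appended to this module.
# ${ENV_VAR} is resolved from the environment; ${{...}} is evaluated as Python.
if __name__ == "__main__":
    os.environ["API_HOST"] = "example.test"
    print(evaluate_env_var("https://${API_HOST}/v1"))  # -> https://example.test/v1
    print(evaluate_python_code("${{1 + 1}}"))  # -> 2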
| 23.043165 | 82 | 0.664689 | 82 | 0.025601 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.045582 |
62b2d24bf74949e44c4ce714693661230de5e646 | 18,720 | py | Python | python/londiste/setup.py | priitkustala/skytools-dev | 14bb378f95d2e5b82d01acf068377a660315b716 | ["0BSD"] | 1 | 2016-05-09T13:35:53.000Z | 2016-05-09T13:35:53.000Z | python/londiste/setup.py | priitkustala/skytools-dev | 14bb378f95d2e5b82d01acf068377a660315b716 | ["0BSD"] | null | null | null | python/londiste/setup.py | priitkustala/skytools-dev | 14bb378f95d2e5b82d01acf068377a660315b716 | ["0BSD"] | null | null | null |
#! /usr/bin/env python
"""Londiste setup and sanity checker.
"""
import sys, os, skytools
from pgq.cascade.admin import CascadeAdmin
__all__ = ['LondisteSetup']
class LondisteSetup(CascadeAdmin):
"""Londiste-specific admin commands."""
initial_db_name = 'node_db'
extra_objs = [ skytools.DBSchema("londiste", sql_file="londiste.sql") ]
provider_location = None
def __init__(self, args):
"""Londiste setup init."""
CascadeAdmin.__init__(self, 'londiste', 'db', args, worker_setup = True)
# compat
self.queue_name = self.cf.get('pgq_queue_name', '')
# real
if not self.queue_name:
self.queue_name = self.cf.get('queue_name')
self.set_name = self.queue_name
def connection_setup(self, dbname, db):
if dbname == 'db':
curs = db.cursor()
curs.execute("set session_replication_role = 'replica'")
db.commit()
def init_optparse(self, parser=None):
"""Add londiste switches to cascadeadmin ones."""
p = CascadeAdmin.init_optparse(self, parser)
p.add_option("--expect-sync", action="store_true", dest="expect_sync",
help = "no copy needed", default=False)
p.add_option("--skip-truncate", action="store_true", dest="skip_truncate",
help = "dont delete old data", default=False)
p.add_option("--force", action="store_true",
help="force", default=False)
p.add_option("--all", action="store_true",
help="include all tables", default=False)
p.add_option("--create", action="store_true",
help="include all tables", default=False)
p.add_option("--create-only",
help="pkey,fkeys,indexes")
return p
def extra_init(self, node_type, node_db, provider_db):
"""Callback from CascadeAdmin init."""
if not provider_db:
return
pcurs = provider_db.cursor()
ncurs = node_db.cursor()
# sync tables
q = "select table_name from londiste.get_table_list(%s)"
pcurs.execute(q, [self.set_name])
for row in pcurs.fetchall():
tbl = row['table_name']
q = "select * from londiste.global_add_table(%s, %s)"
ncurs.execute(q, [self.set_name, tbl])
# sync seqs
q = "select seq_name, last_value from londiste.get_seq_list(%s)"
pcurs.execute(q, [self.set_name])
for row in pcurs.fetchall():
seq = row['seq_name']
val = row['last_value']
q = "select * from londiste.global_update_seq(%s, %s, %s)"
ncurs.execute(q, [self.set_name, seq, val])
# done
node_db.commit()
provider_db.commit()
def cmd_add_table(self, *args):
"""Attach table(s) to local node."""
dst_db = self.get_database('db')
dst_curs = dst_db.cursor()
src_db = self.get_provider_db()
src_curs = src_db.cursor()
src_tbls = self.fetch_set_tables(src_curs)
dst_tbls = self.fetch_set_tables(dst_curs)
src_db.commit()
self.sync_table_list(dst_curs, src_tbls, dst_tbls)
dst_db.commit()
# don't check for exist/not here (root handling)
problems = False
for tbl in args:
tbl = skytools.fq_name(tbl)
if (tbl in src_tbls) and not src_tbls[tbl]:
self.log.error("Table %s does not exist on provider, need to switch to different provider" % tbl)
problems = True
if problems:
self.log.error("Problems, canceling operation")
sys.exit(1)
# pick proper create flags
create = self.options.create_only
if not create and self.options.create:
create = 'full'
fmap = {
"full": skytools.T_ALL,
"pkey": skytools.T_PKEY,
}
create_flags = 0
if create:
for f in create.split(','):
if f not in fmap:
raise Exception("bad --create-only flag: " + f)
create_flags += fmap[f]
# seems ok
for tbl in args:
tbl = skytools.fq_name(tbl)
self.add_table(src_db, dst_db, tbl, create_flags)
def add_table(self, src_db, dst_db, tbl, create_flags):
src_curs = src_db.cursor()
dst_curs = dst_db.cursor()
tbl_exists = skytools.exists_table(dst_curs, tbl)
if create_flags:
if tbl_exists:
self.log.info('Table %s already exist, not touching' % tbl)
else:
if not skytools.exists_table(src_curs, tbl):
# table not present on provider - nowhere to get the DDL from
self.log.warning('Table "%s" missing on provider, skipping' % tbl)
return
s = skytools.TableStruct(src_curs, tbl)
src_db.commit()
s.create(dst_curs, create_flags, log = self.log)
elif not tbl_exists:
self.log.warning('Table "%s" missing on subscriber, use --create if necessary' % tbl)
return
q = "select * from londiste.local_add_table(%s, %s)"
self.exec_cmd(dst_curs, q, [self.set_name, tbl])
dst_db.commit()
def sync_table_list(self, dst_curs, src_tbls, dst_tbls):
for tbl in src_tbls.keys():
q = "select * from londiste.global_add_table(%s, %s)"
if tbl not in dst_tbls:
self.log.info("Table %s info missing from subscriber, adding" % tbl)
self.exec_cmd(dst_curs, q, [self.set_name, tbl])
dst_tbls[tbl] = False
for tbl in dst_tbls.keys():
q = "select * from londiste.global_remove_table(%s, %s)"
if tbl not in src_tbls:
self.log.info("Table %s gone but exists on subscriber, removing")
self.exec_cmd(dst_curs, q, [self.set_name, tbl])
del dst_tbls[tbl]
def fetch_set_tables(self, curs):
q = "select table_name, local from londiste.get_table_list(%s)"
curs.execute(q, [self.set_name])
res = {}
for row in curs.fetchall():
res[row[0]] = row[1]
return res
def cmd_remove_table(self, *args):
"""Detach table(s) from local node."""
q = "select * from londiste.local_remove_table(%s, %s)"
db = self.get_database('db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_add_seq(self, *args):
"""Attach seqs(s) to local node."""
dst_db = self.get_database('db')
dst_curs = dst_db.cursor()
src_db = self.get_provider_db()
src_curs = src_db.cursor()
src_seqs = self.fetch_seqs(src_curs)
dst_seqs = self.fetch_seqs(dst_curs)
src_db.commit()
self.sync_seq_list(dst_curs, src_seqs, dst_seqs)
dst_db.commit()
# pick proper create flags
create = self.options.create_only
if not create and self.options.create:
create = 'full'
fmap = {
"full": skytools.T_SEQUENCE,
}
create_flags = 0
if create:
for f in create.split(','):
if f not in fmap:
raise Exception("bad --create-only flag: " + f)
create_flags += fmap[f]
# seems ok
for seq in args:
seq = skytools.fq_name(seq)
self.add_seq(src_db, dst_db, seq, create_flags)
dst_db.commit()
def add_seq(self, src_db, dst_db, seq, create_flags):
src_curs = src_db.cursor()
dst_curs = dst_db.cursor()
seq_exists = skytools.exists_sequence(dst_curs, seq)
if create_flags:
if seq_exists:
self.log.info('Sequence %s already exist, not creating' % seq)
else:
if not skytools.exists_sequence(src_curs, seq):
# sequence not present on provider - nowhere to get the DDL from
self.log.warning('Sequence "%s" missing on provider, skipping' % seq)
return
s = skytools.SeqStruct(src_curs, seq)
src_db.commit()
s.create(dst_curs, create_flags, log = self.log)
elif not seq_exists:
self.log.warning('Sequence "%s" missing on subscriber, use --create if necessary' % seq)
return
q = "select * from londiste.local_add_seq(%s, %s)"
self.exec_cmd(dst_curs, q, [self.set_name, seq])
def fetch_seqs(self, curs):
q = "select seq_name, last_value, local from londiste.get_seq_list(%s)"
curs.execute(q, [self.set_name])
res = {}
for row in curs.fetchall():
res[row[0]] = row
return res
def sync_seq_list(self, dst_curs, src_seqs, dst_seqs):
for seq in src_seqs.keys():
q = "select * from londiste.global_update_seq(%s, %s, %s)"
if seq not in dst_seqs:
self.log.info("Sequence %s info missing from subscriber, adding" % seq)
self.exec_cmd(dst_curs, q, [self.set_name, seq, src_seqs[seq]['last_value']])
tmp = src_seqs[seq].copy()
tmp['local'] = False
dst_seqs[seq] = tmp
for seq in dst_seqs.keys():
q = "select * from londiste.global_remove_seq(%s, %s)"
if seq not in src_seqs:
self.log.info("Sequence %s gone but exists on subscriber, removing")
self.exec_cmd(dst_curs, q, [self.set_name, seq])
del dst_seqs[seq]
def cmd_remove_seq(self, *args):
"""Detach seqs(s) from local node."""
q = "select * from londiste.local_remove_seq(%s, %s)"
db = self.get_database('db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_resync(self, *args):
"""Reload data from provider node.."""
# fixme
q = "select * from londiste.node_resync_table(%s, %s)"
db = self.get_database('db')
self.exec_cmd_many(db, q, [self.set_name], args)
def cmd_tables(self):
"""Show attached tables."""
q = "select table_name, local, merge_state from londiste.get_table_list(%s)"
db = self.get_database('db')
self.display_table(db, "Tables on node", q, [self.set_name])
def cmd_seqs(self):
"""Show attached seqs."""
q = "select seq_name, local, last_value from londiste.get_seq_list(%s)"
db = self.get_database('db')
self.display_table(db, "Sequences on node", q, [self.set_name])
def cmd_missing(self):
"""Show missing tables on local node."""
# fixme
q = "select * from londiste.node_show_missing(%s)"
db = self.get_database('db')
self.display_table(db, "Missing objects on node", q, [self.set_name])
def cmd_check(self):
"""TODO: check if structs match"""
pass
def cmd_fkeys(self):
"""TODO: show removed fkeys."""
pass
def cmd_triggers(self):
"""TODO: show removed triggers."""
pass
def cmd_execute(self, *files):
db = self.get_database('db')
curs = db.cursor()
for fn in files:
fname = os.path.basename(fn)
sql = open(fn, "r").read()
q = "select * from londiste.execute_start(%s, %s, %s, true)"
self.exec_cmd(db, q, [self.queue_name, fname, sql], commit = False)
for stmt in skytools.parse_statements(sql):
curs.execute(stmt)
q = "select * from londiste.execute_finish(%s, %s)"
self.exec_cmd(db, q, [self.queue_name, fname], commit = False)
db.commit()
def get_provider_db(self):
if not self.provider_location:
db = self.get_database('db')
q = 'select * from pgq_node.get_node_info(%s)'
res = self.exec_cmd(db, q, [self.queue_name], quiet = True)
self.provider_location = res[0]['provider_location']
return self.get_database('provider_db', connstr = self.provider_location)
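# Usage sketch: assuming the usual skytools convention that each cmd_* method above is
# exposed as a CLI subcommand (cmd_add_table -> "add-table"), a session against a
# hypothetical node.ini config might look like:
#   londiste.py node.ini add-table public.orders --create
#   londiste.py node.ini add-seq public.orders_id_seq
#   londiste.py node.ini tables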
#
# Old commands
#
#class LondisteSetup_tmp(LondisteSetup):
#
# def find_missing_provider_tables(self, pattern='*'):
# src_db = self.get_database('provider_db')
# src_curs = src_db.cursor()
# q = """select schemaname || '.' || tablename as full_name from pg_tables
# where schemaname not in ('pgq', 'londiste', 'pg_catalog', 'information_schema')
# and schemaname !~ 'pg_.*'
# and (schemaname || '.' || tablename) ~ %s
# except select table_name from londiste.provider_get_table_list(%s)"""
# src_curs.execute(q, [glob2regex(pattern), self.queue_name])
# rows = src_curs.fetchall()
# src_db.commit()
# list = []
# for row in rows:
# list.append(row[0])
# return list
#
# def admin(self):
# cmd = self.args[2]
# if cmd == "tables":
# self.subscriber_show_tables()
# elif cmd == "missing":
# self.subscriber_missing_tables()
# elif cmd == "add":
# self.subscriber_add_tables(self.args[3:])
# elif cmd == "remove":
# self.subscriber_remove_tables(self.args[3:])
# elif cmd == "resync":
# self.subscriber_resync_tables(self.args[3:])
# elif cmd == "register":
# self.subscriber_register()
# elif cmd == "unregister":
# self.subscriber_unregister()
# elif cmd == "install":
# self.subscriber_install()
# elif cmd == "check":
# self.check_tables(self.get_provider_table_list())
# elif cmd in ["fkeys", "triggers"]:
# self.collect_meta(self.get_provider_table_list(), cmd, self.args[3:])
# elif cmd == "seqs":
# self.subscriber_list_seqs()
# elif cmd == "add-seq":
# self.subscriber_add_seq(self.args[3:])
# elif cmd == "remove-seq":
# self.subscriber_remove_seq(self.args[3:])
# elif cmd == "restore-triggers":
# self.restore_triggers(self.args[3], self.args[4:])
# else:
# self.log.error('bad subcommand: ' + cmd)
# sys.exit(1)
#
# def collect_meta(self, table_list, meta, args):
# """Display fkey/trigger info."""
#
# if args == []:
# args = ['pending', 'active']
#
# field_map = {'triggers': ['table_name', 'trigger_name', 'trigger_def'],
# 'fkeys': ['from_table', 'to_table', 'fkey_name', 'fkey_def']}
#
# query_map = {'pending': "select %s from londiste.subscriber_get_table_pending_%s(%%s)",
# 'active' : "select %s from londiste.find_table_%s(%%s)"}
#
# table_list = self.clean_subscriber_tables(table_list)
# if len(table_list) == 0:
# self.log.info("No tables, no fkeys")
# return
#
# dst_db = self.get_database('subscriber_db')
# dst_curs = dst_db.cursor()
#
# for which in args:
# union_list = []
# fields = field_map[meta]
# q = query_map[which] % (",".join(fields), meta)
# for tbl in table_list:
# union_list.append(q % skytools.quote_literal(tbl))
#
# # use union as fkey may appear in duplicate
# sql = " union ".join(union_list) + " order by 1"
# desc = "%s %s" % (which, meta)
# self.display_table(desc, dst_curs, fields, sql)
# dst_db.commit()
#
# def check_tables(self, table_list):
# src_db = self.get_database('provider_db')
# src_curs = src_db.cursor()
# dst_db = self.get_database('subscriber_db')
# dst_curs = dst_db.cursor()
#
# failed = 0
# for tbl in table_list:
# self.log.info('Checking %s' % tbl)
# if not skytools.exists_table(src_curs, tbl):
# self.log.error('Table %s missing from provider side' % tbl)
# failed += 1
# elif not skytools.exists_table(dst_curs, tbl):
# self.log.error('Table %s missing from subscriber side' % tbl)
# failed += 1
# else:
# failed += self.check_table_columns(src_curs, dst_curs, tbl)
#
# src_db.commit()
# dst_db.commit()
#
# return failed
#
# def check_table_columns(self, src_curs, dst_curs, tbl):
# src_colrows = find_column_types(src_curs, tbl)
# dst_colrows = find_column_types(dst_curs, tbl)
#
# src_cols = make_type_string(src_colrows)
# dst_cols = make_type_string(dst_colrows)
# if src_cols.find('k') < 0:
# self.log.error('provider table %s has no primary key (%s)' % (
# tbl, src_cols))
# return 1
# if dst_cols.find('k') < 0:
# self.log.error('subscriber table %s has no primary key (%s)' % (
# tbl, dst_cols))
# return 1
#
# if src_cols != dst_cols:
# self.log.warning('table %s structure is not same (%s/%s)'\
# ', trying to continue' % (tbl, src_cols, dst_cols))
#
# err = 0
# for row in src_colrows:
# found = 0
# for row2 in dst_colrows:
# if row2['name'] == row['name']:
# found = 1
# break
# if not found:
# err = 1
# self.log.error('%s: column %s on provider not on subscriber'
# % (tbl, row['name']))
# elif row['type'] != row2['type']:
# err = 1
# self.log.error('%s: pk different on column %s'
# % (tbl, row['name']))
#
# return err
#
# def find_missing_subscriber_tables(self, pattern='*'):
# src_db = self.get_database('subscriber_db')
# src_curs = src_db.cursor()
# q = """select schemaname || '.' || tablename as full_name from pg_tables
# where schemaname not in ('pgq', 'londiste', 'pg_catalog', 'information_schema')
# and schemaname !~ 'pg_.*'
# and schemaname || '.' || tablename ~ %s
# except select table_name from londiste.provider_get_table_list(%s)"""
# src_curs.execute(q, [glob2regex(pattern), self.queue_name])
# rows = src_curs.fetchall()
# src_db.commit()
# list = []
# for row in rows:
# list.append(row[0])
# return list
#
| 38.439425 | 113 | 0.554594 | 12,056 | 0.644017 | 0 | 0 | 0 | 0 | 0 | 0 | 9,620 | 0.513889 |
62b30ce5a1ecf3197e59646a2a71b9143771e4fd | 4,929 | py | Python | tests/tests.py | SherineAwad/ribofilio | 4dea38692e7715f07df3ee074e2adc5380f4d6e9 | ["MIT"] | null | null | null | tests/tests.py | SherineAwad/ribofilio | 4dea38692e7715f07df3ee074e2adc5380f4d6e9 | ["MIT"] | null | null | null | tests/tests.py | SherineAwad/ribofilio | 4dea38692e7715f07df3ee074e2adc5380f4d6e9 | ["MIT"] | null | null | null |
import pytest
import screed
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import cv2
import skimage.measure as measure
path = os.getcwd()
path = os.path.join(path,"src")
sys.path.append(path)
print(path)
import ribofilio as rb
def test_get_genes():
path = os.getcwd ()
path = os.path.join(path, "tests/test-data","transcripts_sample.fa")
max_gene_length, gene_length = rb.get_genes(path)
assert (max_gene_length == 60 )
assert (gene_length == {"YBR024W":16, "YDL245C":60,"YBR021W":8})
def test_get_subset_genes():
path = os.getcwd ()
file1 = os.path.join(path, "tests/test-data","transcripts_sample.fa")
file2 = os.path.join(path, "tests/test-data","subset.txt")
max_gene_length, gene_length = rb.get_subset_genes(file1,file2)
assert (max_gene_length ==16)
assert (gene_length == {"YBR024W":16,"YBR021W":8})
def test_get_reads():
path = os.getcwd()
path = os.path.join(path, "tests/test-data", "sample.bed")
coverage = rb.get_reads(path, {"YBR024W":52,"YBR021W":45})
assert (coverage == {'YBR021W': [39,44], "YBR024W": [49, 52]})
def test_get_gene_coverage_at_bin():
path = os.getcwd()
genes_length = {"YBR024W":52,"YBR021W":45}
max_gene_length = 52
bin_size =50
assert(rb.get_gene_coverage_at_bin(max_gene_length, bin_size, genes_length) == [2,1])
def test_get_gene_coverage_at_pos():
max_gene_length = 6
coverage = {"YBR024W":[1,2,3], "YBR021W":[5,6,7]}
gene_coverage_at_pos = [0] * (max_gene_length +1)
genes_length = {"YBR024W":6,"YBR021W":4, "YKL152C":23}
assert (rb.get_gene_coverage_at_pos(max_gene_length, coverage, genes_length) == [0,2,2,2,2,1,1])
def test_fill_positions ():
max_gene_length = 9
positions = [0] * (max_gene_length + 1 )
coverage = {"YKL152C":[5,7,9], "YBR021W":[3,6,7], "YBR024W":[1,2,3]}
assert(rb.fill_positions(coverage, max_gene_length) == [0,1,1,2,0,1,1,2,0,1] )
def test_binning():
positions =[0,2/3, 1,2/3]
gene_coverage_at_pos = [0,2,3,2]
max_gene_length = 3
genes_bin = rb.binning(2,positions, gene_coverage_at_pos, max_gene_length)
round_genes_bin = np.round(genes_bin, 6)
assert (round_genes_bin == [0.333334, 0.166668]).all()
def test_regression():
output ="test"
num_bins = 18
all_bins = [6612, 6612, 6569, 6481, 6384, 6251, 6130, 5757, 5446, 5226, 5039, 4888, 4716, 4550, 4393, 4233, 4105, 3969]
binsize = 2
plot = 0
gene_coverage_at_bin = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
ylogmin =-2
ylogmax=2
dropoff_rate, dropoff_codon, stand_error, margin_error, rmse, rsquare, tscore, pvalue = rb.regression(output, num_bins, all_bins,
binsize, ylogmin, ylogmax,
gene_coverage_at_bin, plot)
assert(dropoff_rate == -0.0334)
assert(dropoff_codon ==[[-0.0506]])
assert(stand_error == [0.0013])
assert(margin_error == [0.0028])
assert(rmse == 0.0007)
assert(rsquare == 0.9759)
assert(np.round(tscore[0],4) == -25.4522)
assert(pvalue == [0.])
def test_plot_regression():
x_value = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(-1, 1)
y_value = np.array(np.log([1,2,4,6,9,13,15,16,18,20])).reshape(-1, 1)
y_predicted = np.array([1,2,4,6,10,12,14,16,18,20]).reshape(-1, 1)
dropoff_rate= -0.0051
dropoff_codon = -0.0003
rsquare = 0.4907
stand_error = 0.001
output ="test"
rmse =0.1
norm_weight = [1,1,1,1,1,1,1,1,1,1]
rb.plot_regression(x_value, y_value, y_predicted, norm_weight, dropoff_rate, dropoff_codon, rmse, rsquare, stand_error, output, -3, 2)
imageA = cv2.imread("test.Log.WLR.png")
imageB = cv2.imread("tests/test-data/test.png")
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
(score, diff) = measure.compare_ssim(grayA, grayB, full=True)
print (score, diff)
assert(score > 0.995)
def test_normalize():
ribosomes = [6,6,6,6,6]
mRNA = [2,2,2,2,2]
gene_bins = rb.normalize( ribosomes, mRNA, 5)
assert(gene_bins ==[3,3,3,3,3])
def test_call_mRNA_1():
rna_gene_bins = []
genes_length = {"YBR024W":52,"YBR021W":45}
max_gene_length = 52
binsize = 5
rna_gene_bins = rb.call_mRNA("NULL", genes_length, max_gene_length, binsize)
assert(rna_gene_bins ==[])
def test_call_mRNA_2():
genes_length = {"YBR024W":52,"YBR021W":45}
max_gene_length = 52
binsize = 70
rna_gene_bins = rb.call_mRNA("tests/test-data/sample.bed", genes_length, max_gene_length, binsize)
assert(np.round(rna_gene_bins, 4) == [0.0429])
def test_call_footprints():
genes_length = {"YBR024W":52,"YBR021W":45}
max_gene_length = 52
binsize = 70
fp_gene_bins = rb.call_footprints("tests/test-data/sample.bed", genes_length, max_gene_length, binsize)
assert(np.round(fp_gene_bins, 4) == [0.0429])
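# Note (sketch): this suite is typically run as `pytest tests/tests.py` from the repo
# root. test_plot_regression uses skimage.measure.compare_ssim, which was removed in
# scikit-image 0.18, so an older scikit-image release (plus opencv-python, screed,
# numpy and matplotlib) is assumed.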
| 34.957447 | 139 | 0.656523 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.0986 |
62b452aca3bac882d562e1e0dd344def55e8b870 | 6,208 | py | Python | envoy.code.check/envoy/code/check/abstract/flake8.py | Nordix/pytooling | b48e70e9098c283d0f17db8016d2f32a2b103a57 | ["Apache-2.0"] | null | null | null | envoy.code.check/envoy/code/check/abstract/flake8.py | Nordix/pytooling | b48e70e9098c283d0f17db8016d2f32a2b103a57 | ["Apache-2.0"] | null | null | null | envoy.code.check/envoy/code/check/abstract/flake8.py | Nordix/pytooling | b48e70e9098c283d0f17db8016d2f32a2b103a57 | ["Apache-2.0"] | null | null | null |
import io
import logging
import os
import pathlib
from functools import cached_property, lru_cache
from typing import List, Set, Tuple
from flake8.main.application import Application # type:ignore
from flake8 import ( # type:ignore
utils as flake8_utils,
checker as flake8_checker)
import abstracts
from aio.core.functional import async_property
from aio.core.directory.utils import directory_context
from envoy.code.check import abstract, typing
FLAKE8_CONFIG = '.flake8'
# Workaround for https://github.com/PyCQA/flake8/issues/1390
logging.getLogger("flake8.options.manager").setLevel(logging.ERROR)
class Flake8Application(Application):
"""Subclassed flake8.Application to capture output."""
@cached_property
def output_fd(self) -> io.StringIO:
return io.StringIO()
def make_formatter(self) -> None:
# ~Hacky workaround to capture flake8 output
super().make_formatter()
self.formatter.output_fd = self.output_fd
self._formatter_stop = self.formatter.stop
self.formatter.stop = self._stop
def _stop(self) -> None:
self.output_fd.seek(0)
self._results: List[str] = [
x
for x
in self.output_fd.read().strip().split("\n")
if x]
self._formatter_stop()
class Flake8App:
"""Wrapper around `flake8.main.application.Application`
Provides optimized file discovery using app's lookup tools.
"""
def __init__(self, path: str, args: Tuple[str, ...]) -> None:
self.path = path
self.args = args
@cached_property
def app(self) -> Flake8Application:
"""Flake8 Application."""
flake8_app = Flake8Application()
flake8_app.initialize(self.args)
return flake8_app
@property
def manager(self) -> flake8_checker.Manager:
"""Flake8 file checker manager."""
return self.app.file_checker_manager
def include_file(self, path: str) -> bool:
"""Include file according to flake8 config."""
path = os.path.join(self.path, path)
return (
self._filename_matches(path)
and self._include_directory(os.path.dirname(path))
and not self._is_excluded(path))
def include_files(self, files: Set[str]) -> Set[str]:
"""Figure out whether to include a file for checking."""
return set(
path
for path
in files
if self.include_file(os.path.join(self.path, path)))
def run_checks(self, paths: Set[str]) -> List[str]:
"""Run flake8 checks."""
with directory_context(self.path):
self.app.run_checks(files=paths)
self.app.report()
return self.app._results
@cached_property
def _excluded_paths(self) -> Set[str]:
return set()
def _filename_matches(self, path: str) -> bool:
return flake8_utils.fnmatch(
path,
self.app.options.filename)
@lru_cache
def _include_directory(self, path: str) -> bool:
while True:
if path == self.path:
return True
if not self._include_path(path):
return False
path = os.path.dirname(path)
@lru_cache
def _include_path(self, path: str) -> bool:
exclude = (
any(path.startswith(x) for x in self._excluded_paths)
or self._is_excluded(path))
if exclude:
self._excluded_paths.add(path)
return not exclude
def _is_excluded(self, path: str) -> bool:
return self.manager.is_path_excluded(path)
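# Usage sketch (paths are hypothetical): discover and lint files with the wrapper above,
# using the same args layout that flake8_args builds below ("--config", <path>, <dir>).
# app = Flake8App("/repo", ("--config", "/repo/.flake8", "/repo"))
# for problem in app.run_checks(app.include_files({"pkg/module.py", "docs/conf.py"})):
#     print(problem)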
class AFlake8Check(abstract.ACodeCheck, metaclass=abstracts.Abstraction):
"""Flake8 check for a fileset."""
@classmethod
def check_flake8_files(
cls,
path: str,
args: Tuple[str, ...],
files: Set[str]) -> List[str]:
"""Flake8 checker."""
return Flake8App(
path,
args).run_checks(files)
@classmethod
def filter_flake8_files(
cls,
path: str,
args: Tuple[str, ...],
files: Set[str]) -> Set[str]:
"""Flake8 file discovery."""
return Flake8App(
path,
args).include_files(files)
@async_property
async def checker_files(self) -> Set[str]:
return await self.execute(
self.filter_flake8_files,
self.directory.absolute_path,
self.flake8_args,
await self.directory.files)
@property
def flake8_args(self) -> Tuple[str, ...]:
"""Flake configuration args."""
return (
"--config",
str(self.flake8_config_path),
str(self.directory.path))
@property
def flake8_config_path(self) -> pathlib.Path:
"""Path to flake8 configuration."""
return self.directory.path.joinpath(FLAKE8_CONFIG)
@async_property
async def flake8_errors(self) -> List[str]:
"""Flake8 error list for check files."""
# Important: don't send an empty set to the flake8 checker,
# as flake8 would then check every file in the path.
return (
await self.execute(
self.check_flake8_files,
self.directory.absolute_path,
self.flake8_args,
await self.files)
if await self.files
else [])
@async_property(cache=True)
async def problem_files(self) -> typing.ProblemDict:
"""Discovered flake8 errors."""
return self.handle_errors(await self.flake8_errors)
def handle_errors(self, errors: List[str]) -> typing.ProblemDict:
"""Turn flake8 error list -> `ProblemDict`."""
flake8_errors: typing.ProblemDict = {}
for error in errors:
path, message = self._parse_error(error)
flake8_errors[path] = flake8_errors.get(path, [])
flake8_errors[path].append(message)
return flake8_errors
def _parse_error(self, error: str) -> Tuple[str, str]:
path = error.split(":")[0]
return (
path,
f"{path}: {error.split(':', 1)[1]}")
| 29.846154 | 73 | 0.602932 | 5,578 | 0.898518 | 0 | 0 | 2,882 | 0.46424 | 845 | 0.136115 | 946 | 0.152384 |
62b63fa1744965ed736f83868f1e02cf4c32335f | 16,566 | py | Python | szndaogen/data_access/manager_base.py | seznam/szndaogen | e33436893d9d933bee81c0cfb9a0ca4ce4d261b5 | ["MIT"] | 3 | 2021-07-20T14:10:22.000Z | 2022-03-21T10:28:15.000Z | szndaogen/data_access/manager_base.py | seznam/szndaogen | e33436893d9d933bee81c0cfb9a0ca4ce4d261b5 | ["MIT"] | null | null | null | szndaogen/data_access/manager_base.py | seznam/szndaogen | e33436893d9d933bee81c0cfb9a0ca4ce4d261b5 | ["MIT"] | null | null | null |
import typing
from ..tools.log import Logger
from .db import DBI
from .model_base import ModelBase
from ..config import Config
class ManagerException(BaseException):
pass
class ViewManagerBase:
MODEL_CLASS = ModelBase
def __init__(self, dbi: DBI = None):
"""
Init function of base model manager class
:param dbi: Instance of the database connector. If empty, one is created automatically. A DBI instance is usually used in combination with the transaction wrapper @DBI.transaction("dbi")
"""
self.dbi = DBI() if dbi is None else dbi
self.bulk_insert_buffer_size = 50
self.bulk_insert_sql_statement = ""
self.bulk_insert_values_buffer = []
@classmethod
def create_model_instance(cls, init_data: dict = None) -> ModelBase:
if init_data is None:
init_data = {}
return cls.MODEL_CLASS(init_data)
def select_one(
self,
*args,
condition: str = "1",
condition_params: typing.Tuple = (),
projection: typing.Tuple = (),
order_by: typing.Tuple = (),
) -> ModelBase:
"""
Select one row from DB table or View
:param projection: sql projection - default *
:param args: Primary keys or condition and condition_params if there are no primary keys
:param condition: SQL Condition (Will be used if there are no positional args from primary keys)
:param condition_params: Positional params for SQL condition
(Will be used if there are no positional args from primary keys)
:param order_by: Params for SQL order by statement
"""
base_condition = self.MODEL_CLASS.Meta.SQL_STATEMENT_WHERE_BASE
if args:
condition = self._prepare_primary_sql_condition()
condition_params = args
projection_statement = ", ".join(projection) if projection else "*"
order_by_sql_format = ", ".join(order_by)
limit = 1
if base_condition == "1":
where_statement = f"WHERE ({condition})" if condition else ""
else:
where_statement = f"WHERE {base_condition} AND ({condition})" if condition else f"WHERE {base_condition}"
order_by_statement = f"ORDER BY {order_by_sql_format}" if order_by else ""
limit_statement = f"LIMIT {limit}" if limit else ""
sql = self.MODEL_CLASS.Meta.SQL_STATEMENT.format(
PROJECTION=projection_statement,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
OFFSET="",
)
Logger.log.info("ViewManagerBase.select_one.sql", manager=self.__class__.__name__)
result = self.dbi.fetch_one(sql, condition_params)
Logger.log.info("ViewManagerBase.select_one.result", result=result, manager=self.__class__.__name__)
if Config.MANAGER_AUTO_MAP_MODEL_ATTRIBUTES:
return self.MODEL_CLASS(result).map_model_attributes() if result else None
return self.MODEL_CLASS(result) if result else None
def select_all(
self,
condition: str = "1",
condition_params: typing.Tuple = (),
projection: typing.Tuple = (),
order_by: typing.Tuple = (),
limit: int = 0,
offset: int = 0,
) -> typing.List[ModelBase]:
"""
Select all rows matching the condition
:param offset: SQL offset
:param projection: sql projection - default *
:param condition: SQL condition
:param condition_params: Positional params for SQL condition
:param order_by: Params for SQL order by statement
:param limit: Params for SQL limit statement
"""
base_condition = self.MODEL_CLASS.Meta.SQL_STATEMENT_WHERE_BASE
projection_statement = ", ".join(projection) if projection else "*"
if base_condition == "1":
where_statement = f"WHERE ({condition})" if condition else ""
else:
where_statement = f"WHERE {base_condition} AND ({condition})" if condition else f"WHERE {base_condition}"
order_by_sql_format = ", ".join(order_by)
if len(order_by) > 0:
order_by_statement = f"ORDER BY {order_by_sql_format}"
else:
if self.MODEL_CLASS.Meta.SQL_STATEMENT_ORDER_BY_DEFAULT:
order_by_statement = f"ORDER BY {self.MODEL_CLASS.Meta.SQL_STATEMENT_ORDER_BY_DEFAULT}"
else:
order_by_statement = ""
limit_statement = f"LIMIT {limit}" if limit else ""
offset_statement = f"OFFSET {offset}" if offset else ""
sql = self.MODEL_CLASS.Meta.SQL_STATEMENT.format(
PROJECTION=projection_statement,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
OFFSET=offset_statement,
)
Logger.log.info("ViewManagerBase.select_all.sql", manager=self.__class__.__name__)
results = self.dbi.fetch_all(sql, condition_params)
Logger.log.info("ViewManagerBase.select_all.result", result=results, manager=self.__class__.__name__)
if Config.MANAGER_AUTO_MAP_MODEL_ATTRIBUTES:
Logger.log.debug("ViewManagerBase.select_all.result.list.automapped")
return [self.MODEL_CLASS(result).map_model_attributes() for result in results]
Logger.log.debug("ViewManagerBase.select_all.result.list")
return [self.MODEL_CLASS(result) for result in results]
@staticmethod
def models_into_dicts(result: typing.List[ModelBase]) -> typing.List[typing.Dict]:
"""
Convert result of select_all into list of dicts
:param result: List of models
"""
return [item.to_dict() for item in result]
@classmethod
def _prepare_primary_sql_condition(cls):
args = ["{} = %s".format(primary_key) for primary_key in cls.MODEL_CLASS.Meta.PRIMARY_KEYS]
return " AND ".join(args)
@classmethod
def _prepare_primary_sql_condition_params(cls, model_instance: ModelBase):
return [model_instance.__getattribute__(attribute_name) for attribute_name in cls.MODEL_CLASS.Meta.PRIMARY_KEYS]
class TableManagerBase(ViewManagerBase):
def update_one(self, model_instance: ModelBase, exclude_none_values: bool = False, exclude_columns: list = None) -> int:
"""
Update one database record based on model attributes
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from update statement
:param exclude_columns: You can exclude columns names from update statement
:return: Number of affected rows
"""
exclude_columns = exclude_columns or []
if not self.MODEL_CLASS.Meta.PRIMARY_KEYS:
raise ManagerException("Can't update record based on model instance. There are no primary keys specified.")
set_prepare = []
set_prepare_params = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
set_prepare.append("`{}` = %s".format(attribute_name))
set_prepare_params.append(value)
condition_prepare = self._prepare_primary_sql_condition()
condition_prepare_params = self._prepare_primary_sql_condition_params(model_instance)
sql = "UPDATE `{}` SET {} WHERE {} LIMIT 1".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(set_prepare), condition_prepare
)
Logger.log.info("TableManagerBase.update_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, set_prepare_params + condition_prepare_params)
Logger.log.info("TableManagerBase.update_one.result", result=result, manager=self.__class__.__name__)
return result
def insert_one(
self,
model_instance: ModelBase,
exclude_none_values: bool = False,
exclude_columns: list = None,
use_on_duplicate_update_statement: bool = False,
use_insert_ignore_statement: bool = False,
) -> int:
"""
Insert one record into database based on model attributes
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from insert statement
:param exclude_columns: You can exclude columns names from insert statement
:param use_on_duplicate_update_statement: Use ON DUPLICATE KEY UPDATE statement
:param use_insert_ignore_statement: Use INSERT IGNORE statement
:return: Last inserted id if it is possible
"""
exclude_columns = exclude_columns or []
insert_prepare = []
insert_prepare_values = []
insert_prepare_params = []
update_prepare = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
insert_prepare.append("`{}`".format(attribute_name))
insert_prepare_values.append("%s")
insert_prepare_params.append(value)
if use_on_duplicate_update_statement:
update_prepare.append("`{0}` = VALUES(`{0}`)".format(attribute_name))
if use_on_duplicate_update_statement:
sql = "INSERT INTO `{}` ({}) VALUES ({}) ON DUPLICATE KEY UPDATE {}".format(
self.MODEL_CLASS.Meta.TABLE_NAME,
", ".join(insert_prepare),
", ".join(insert_prepare_values),
", ".join(update_prepare),
)
elif use_insert_ignore_statement:
sql = "INSERT IGNORE INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
else:
sql = "INSERT INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
Logger.log.info("TableManagerBase.insert_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, insert_prepare_params)
# set primary key value
if (
result
and len(self.MODEL_CLASS.Meta.PRIMARY_KEYS) == 1
and self.MODEL_CLASS.Meta.ATTRIBUTE_TYPES[self.MODEL_CLASS.Meta.PRIMARY_KEYS[0]] == int
):
model_instance.__setattr__(self.MODEL_CLASS.Meta.PRIMARY_KEYS[0], result)
Logger.log.info("TableManagerBase.insert_one.result", result=result, manager=self.__class__.__name__)
return result
def insert_one_bulk(
self,
model_instance: ModelBase,
exclude_none_values: bool = False,
exclude_columns: list = None,
use_on_duplicate_update_statement: bool = False,
use_insert_ignore_statement: bool = False,
auto_flush: bool = True,
) -> int:
"""
Insert more records in one bulk.
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from insert statement
:param exclude_columns: You can exclude columns names from insert statement
:param use_on_duplicate_update_statement: Use ON DUPLICATE KEY UPDATE statement
:param use_insert_ignore_statement: Use INSERT IGNORE statement
:param auto_flush: Auto flush bulks from buffer after N records (defined in self.bulk_insert_buffer_size)
:return: Number of items in buffer
"""
exclude_columns = exclude_columns or []
insert_prepare = []
insert_prepare_values = []
insert_prepare_params = []
update_prepare = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
insert_prepare.append("`{}`".format(attribute_name))
insert_prepare_values.append("%s")
insert_prepare_params.append(value)
if use_on_duplicate_update_statement:
update_prepare.append("`{0}` = VALUES(`{0}`)".format(attribute_name))
if not self.bulk_insert_sql_statement:
if use_on_duplicate_update_statement:
self.bulk_insert_sql_statement = "INSERT INTO `{}` ({}) VALUES ({}) ON DUPLICATE KEY UPDATE {}".format(
self.MODEL_CLASS.Meta.TABLE_NAME,
", ".join(insert_prepare),
", ".join(insert_prepare_values),
", ".join(update_prepare),
)
elif use_insert_ignore_statement:
self.bulk_insert_sql_statement = "INSERT IGNORE INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
else:
self.bulk_insert_sql_statement = "INSERT INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
self.bulk_insert_values_buffer.append(insert_prepare_params)
buffer_len = len(self.bulk_insert_values_buffer)
if auto_flush and buffer_len >= self.bulk_insert_buffer_size:
self.insert_bulk_flush()
return buffer_len
def insert_bulk_flush(self) -> int:
"""
Flush prepared inserts from buffer
:return: Number of inserted rows
"""
result = None
if self.bulk_insert_values_buffer:
result = self.dbi.execute_many(self.bulk_insert_sql_statement, self.bulk_insert_values_buffer)
Logger.log.info(
"TableManagerBase.insert_one_bulk_flush.result",
result=result,
inserted_count=len(self.bulk_insert_values_buffer),
manager=self.__class__.__name__,
)
self.bulk_insert_sql_statement = ""
self.bulk_insert_values_buffer = []
return result
def delete_one(self, model_instance: ModelBase) -> int:
"""
Delete one row matching primary key condition.
:param model_instance: Instance of model
:return: Number of affected rows
"""
condition_prepare = self._prepare_primary_sql_condition()
condition_prepare_params = self._prepare_primary_sql_condition_params(model_instance)
sql_statement = "DELETE FROM `{}` WHERE {} LIMIT 1"
sql = sql_statement.format(self.MODEL_CLASS.Meta.TABLE_NAME, condition_prepare)
Logger.log.info("TableManagerBase.delete_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, condition_prepare_params)
Logger.log.info(f"TableManagerBase.delete_one.result", result=result, manager=self.__class__.__name__)
return result
def delete_all(
self, condition: str, condition_params: typing.Tuple = (), order_by: typing.Tuple = (), limit: int = 0
) -> int:
"""
Delete all table rows matching condition.
:param condition: SQL condition statement
:param condition_params: SQL condition position params
:param order_by: SQL order statement
:param limit: SQL limit statement
:return: Number of affected rows
"""
where_statement = f"WHERE {condition}"
order_by_sql_format = ", ".join(order_by)
order_by_statement = f"ORDER BY {order_by_sql_format}" if order_by else ""
limit_statement = f"LIMIT {limit}" if limit else ""
sql_statement = "DELETE FROM `{TABLE}` {WHERE} {ORDER_BY} {LIMIT}"
sql = sql_statement.format(
TABLE=self.MODEL_CLASS.Meta.TABLE_NAME,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
)
Logger.log.info("TableManagerBase.delete_all.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, condition_params)
Logger.log.info("TableManagerBase.delete_all.result", result=result, manager=self.__class__.__name__)
return result
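# Usage sketch: UserManager / UserModel below are hypothetical generated classes; only
# the manager methods defined above are assumed to exist.
# manager = UserManager()
# user = manager.select_one(42)  # positional args are matched against Meta.PRIMARY_KEYS
# adults = manager.select_all(condition="age >= %s", condition_params=(18,), limit=10)
# user.email = "new@example.com"
# manager.update_one(user, exclude_none_values=True)
# manager.delete_one(user)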
| 41.72796 | 189 | 0.651093 | 16,428 | 0.99167 | 0 | 0 | 857 | 0.051732 | 0 | 0 | 4,990 | 0.301219 |
62b641168ce9b71ea5015d7584d8b7ff3788ad8f | 1,271 | py | Python | setup.py | may-ank/hocr-tools | 3ad9748e85360a327161ab562445ec5171e3366a | ["Apache-2.0"] | 200 | 2015-01-09T03:34:39.000Z | 2020-07-28T17:12:40.000Z | setup.py | may-ank/hocr-tools | 3ad9748e85360a327161ab562445ec5171e3366a | ["Apache-2.0"] | 141 | 2015-01-09T03:49:21.000Z | 2020-06-12T19:14:39.000Z | setup.py | may-ank/hocr-tools | 3ad9748e85360a327161ab562445ec5171e3366a | ["Apache-2.0"] | 55 | 2015-03-03T18:59:49.000Z | 2020-07-02T08:18:04.000Z |
#!/usr/bin/env python
__version__ = '1.3.0'
import glob
from setuptools import setup
setup(
name="hocr-tools",
version=__version__,
description='Advanced tools for hOCR integration',
author='Thomas Breuel',
maintainer='Konstantin Baierer',
maintainer_email='konstantin.baierer@gmail.com',
url='https://github.com/tmbdev/hocr-tools',
download_url='https://github.com/tmbdev/hocr-tools/tarball/v'
+ __version__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Utilities',
],
install_requires=[
'Pillow',
'lxml',
'reportlab',
],
    scripts=glob.glob("hocr-*")
)
| 31.775
| 65
| 0.608969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 783
| 0.61605
|
62b64285802deea7a9e5bd1076a5f0e456274316
| 2,237
|
py
|
Python
|
src/reporter.py
|
serhankk/Device-Reporter
|
bcf98fbbe38d6c116853556f185e3709269f1a81
|
[
"MIT"
] | null | null | null |
src/reporter.py
|
serhankk/Device-Reporter
|
bcf98fbbe38d6c116853556f185e3709269f1a81
|
[
"MIT"
] | null | null | null |
src/reporter.py
|
serhankk/Device-Reporter
|
bcf98fbbe38d6c116853556f185e3709269f1a81
|
[
"MIT"
] | null | null | null |
# Imports
import socket
import subprocess
import os
import requests
# from prettytable import PrettyTable
import getpass
import CONFIG
def send_message(text):
try:
requests.post('https://slack.com/api/chat.postMessage', {
'token': CONFIG.SLACK_TOKEN,
'channel': CONFIG.SLACK_CHANNEL_INFO,
'text': text,
'username': CONFIG.SLACK_BOT_NAME,
})
    except requests.exceptions.ConnectionError:
        exit("Connection Error.")
def get_username():
return getpass.getuser()
def get_hostname():
return socket.gethostname()
def get_local_ip():
local_ip_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
local_ip_socket.connect(('10.255.255.255', 1))
local_ip_address = local_ip_socket.getsockname()[0]
local_ip_socket.close()
return local_ip_address
def get_connected_network():
    output = subprocess.check_output(['iwgetid']).decode()
    network = output.split('"')[1]
    return network
def get_using_interface():
    output = subprocess.check_output(['iwgetid']).decode()
    interface = output.split(' ')[0]
    return interface
def get_device_uptime():
uptime_data = os.popen('uptime -p').read()[:-1]
uptime_data = [f'{x.capitalize()} ' for x in uptime_data.split(' ')]
uptime_data = ''.join(uptime_data).rstrip()
return uptime_data
def get_ram_usage():
total_m = os.popen('free -h').readlines()[1].split()[1]
    used_m = os.popen('free -h').readlines()[1].split()[2]
return f'{used_m} of {total_m}'
username = get_username()
hostname = get_hostname()
local_ip = get_local_ip()
wifi = get_connected_network()
interface = get_using_interface()
device_uptime = get_device_uptime()
ram = get_ram_usage()
ssh_port = '*under_construction*'
INFORMATION = '''USERNAME: "{}"
HOSTNAME: "{}"
LOCAL IP: "{}"
CONNECTED NETWORK: "{}"
USING NETWORK INTERFACE: "{}"
DEVICE UPTIME: "{}"
RAM USAGE: "{}"
SSH PORT: "{}"'''.format(username, hostname, local_ip, wifi, interface, device_uptime, ram, ssh_port)
def make_table():
# table = PrettyTable(['Hostname', 'Local IP', 'Wi-Fi', 'Interface', 'Uptime', 'RAM'])
# data = ([hostname, local_ip, wifi, interface, device_uptime, ram])
# table.add_row(data)
# print(table)
pass
send_message(INFORMATION)
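The script above imports a CONFIG module that is not included in this record. A plausible minimal CONFIG.py, sketched from the attribute names used above; all values are placeholders, not real credentials.
# CONFIG.py -- hypothetical companion module; values are placeholders only
SLACK_TOKEN = "xoxb-0000000000-placeholder"   # bot token passed to chat.postMessage
SLACK_CHANNEL_INFO = "#device-reports"        # channel the report is posted to
SLACK_BOT_NAME = "device-reporter"            # username shown for the posted message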
| 26.630952
| 101
| 0.682164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 621
| 0.277604
|
62b72fe4dc07715ae87e1325abafd6e9ec329431
| 2,985
|
py
|
Python
|
pyflarum/extensions.py
|
CWKevo/pyFlarum
|
2c4e17a16b00367f140c3436f7a9148072ddd2d3
|
[
"MIT"
] | 1
|
2022-02-07T10:40:46.000Z
|
2022-02-07T10:40:46.000Z
|
pyflarum/extensions.py
|
CWKevo/pyFlarum
|
2c4e17a16b00367f140c3436f7a9148072ddd2d3
|
[
"MIT"
] | 1
|
2022-02-07T10:49:09.000Z
|
2022-02-07T12:25:25.000Z
|
pyflarum/extensions.py
|
CWKevo/pyFlarum
|
2c4e17a16b00367f140c3436f7a9148072ddd2d3
|
[
"MIT"
] | null | null | null |
import typing as t
import warnings
from .error_handler import MissingExtensionError, MissingExtensionWarning
class ExtensionMixin:
"""
A base class for mixing in custom classes (extensions) into another classes.
"""
AUTHOR = "unknown"
NAME = "unknown"
ID = f"{AUTHOR}-{NAME}"
SOFT_DEPENDENCIES = []
HARD_DEPENCENDIES = []
@classmethod
def get_dependencies(cls) -> t.Dict[str, t.List[object]]:
"""
This should return the following `dict`:
```python
{
"hard": [<class>, <class>, ...],
"soft": [<class>, <class>, ...]
}
```
        A dependency is anything that you can pass into `FlarumUser(extensions=[...])` (e.g. an extension class).
#### Hard-dependencies:
- Will raise an error when they're not found. It is impossible for the extension to function without these.
#### Soft-dependencies:
- Will raise just a warning. It is possible for the extension to function without these, although with limitations
(such that some functions might be unavailable).
"""
return {
"soft": cls.SOFT_DEPENDENCIES,
"hard": cls.HARD_DEPENCENDIES
}
@classmethod
def mixin(cls, class_to_patch: object, class_to_mix_in: object, skip_protected: bool=True):
"""
A function to mix-in/merge properties, methods, functions, etc... of one class into another.
This skips all functions and properties starting with `__` (double underscore), unless `skip_protected` is False.
This sets/overwrites attributes of `class_to_patch` to attributes of `class_to_mix_in` (monkey-patch).
### Example:
```python
extension.mixin(myclass, pyflarum_class)
```
"""
        for attribute, value in vars(class_to_mix_in).items():
            if attribute.startswith('__') and skip_protected:
                continue
            setattr(class_to_patch, attribute, value)
def mixin_extensions(extensions: t.List[t.Type[ExtensionMixin]]) -> None:
for extension in extensions:
dependencies = extension.get_dependencies()
hard = dependencies.get("hard", None)
soft = dependencies.get("soft", None)
if hard and len(hard) > 0:
for hard_dependency in hard:
if hard_dependency not in extensions:
                    raise MissingExtensionError(f'`{extension}` has a hard dependency on `{hard_dependency}`. Please include that extension in your extension list too.')
extension.mixin()
if soft and len(soft) > 0:
for soft_dependency in soft:
if soft_dependency not in extensions:
                    warnings.warn(f'`{extension}` has a soft dependency on `{soft_dependency}`. Some features might be unavailable.', MissingExtensionWarning)
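A short, hypothetical sketch of the pattern `ExtensionMixin` and `mixin_extensions` support, assuming the package is importable as `pyflarum.extensions`: an extension overrides `mixin()` with no arguments (as `mixin_extensions` expects) and uses the base helper to monkey-patch a target class. `Discussion` and `DiscussionShout` are stand-ins for illustration, not part of pyflarum.
from pyflarum.extensions import ExtensionMixin, mixin_extensions
class Discussion:                       # stand-in for a class to be patched (assumed)
    pass
class DiscussionShout:                  # behaviour we want to graft onto Discussion
    def shout(self):
        return "patched in by ShoutExtension"
class ShoutExtension(ExtensionMixin):
    AUTHOR = "example"
    NAME = "shout"
    @classmethod
    def mixin(cls):
        # copy DiscussionShout's members onto Discussion via the base helper
        super().mixin(Discussion, DiscussionShout)
mixin_extensions([ShoutExtension])      # verifies dependencies, then calls ShoutExtension.mixin()
print(Discussion().shout())             # -> "patched in by ShoutExtension"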
| 33.166667
| 163
| 0.603015
| 2,010
| 0.673367
| 0
| 0
| 1,746
| 0.584925
| 0
| 0
| 1,603
| 0.537018
|
62b95299da78a40aaf85180de76adaf63b33b8e6
| 3,695
|
py
|
Python
|
ComRISB/pyglib/pyglib/dft/eos.py
|
comscope/comsuite
|
d51c43cad0d15dc3b4d1f45e7df777cdddaa9d6c
|
[
"BSD-3-Clause"
] | 18
|
2019-06-15T18:08:21.000Z
|
2022-01-30T05:01:29.000Z
|
ComRISB/pyglib/pyglib/dft/eos.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | null | null | null |
ComRISB/pyglib/pyglib/dft/eos.py
|
comscope/Comsuite
|
b80ca9f34c519757d337487c489fb655f7598cc2
|
[
"BSD-3-Clause"
] | 11
|
2019-06-05T02:57:55.000Z
|
2021-12-29T02:54:25.000Z
|
import numpy as np
import h5py
import pyglib.basic.units as units
import pyglib.basic.splot as splot
'''
Equation of state.
'''
def Murnaghan(parameters, vol):
'''
Given a vector of parameters and volumes, return a vector of energies.
equation From PRB 28,5480 (1983)
'''
E0 = parameters[0]
B0 = parameters[1]
BP = parameters[2]
V0 = parameters[3]
return E0 + B0 * vol / BP * (((V0 / vol)**BP) / \
(BP - 1) + 1) - V0 * B0 / (BP - 1.0)
def Murnaghan_pv(parameters, vol):
'''
function P(V).
'''
B0 = parameters[1]
BP = parameters[2]
V0 = parameters[3]
return B0 / BP * ((V0 / vol)**BP - 1.0)
def eos_fit_fun(pars, y, x):
'''
The objective function that will be minimized.
'''
return y - Murnaghan(pars, x)
def get_ev_fit(v, e):
'''
    Fitting the Murnaghan EOS to data. v in \A^3, e in eV.
Based on http://gilgamesh.cheme.cmu.edu/doc/software/jacapo/
appendices/appendix-eos.html
'''
from pylab import polyfit
from scipy.optimize import leastsq
# fit a parabola to the data
# y = ax^2 + bx + c
a, b, c = polyfit(v, e, 2)
'''The parabola does not fit the data very well, but we can use it to get
some analytical guesses for other parameters.
V0 = minimum energy volume, or where dE/dV=0
E = aV^2 + bV + c
dE/dV = 2aV + b = 0
V0 = -b/2a
E0 is the minimum energy, which is:
E0 = aV0^2 + bV0 + c
B is equal to V0*d^2E/dV^2, which is just 2a*V0
and from experience we know Bprime_0 is usually a small number like 4
'''
# now here are our initial guesses.
v0 = -b / (2 * a)
e0 = a * v0**2 + b * v0 + c
b0 = 2 * a * v0
bP = 4
# initial guesses in the same order used in the Murnaghan function
x0 = [e0, b0, bP, v0]
murnpars, ier = leastsq(eos_fit_fun, x0, args=(e, v))
return murnpars
def h5get_mfit_ev(nmesh_fac=10, fsave='results.h5', path='/lapw'):
'''Calculate and save Murnaghan fiting results in fsave.
Interpolated e-v and p-v data on volume mesh with a factor a
nmesh_fac of the original one are also stored.
'''
# Get e,v data.
with h5py.File(fsave, 'r') as f:
e_list = f[path+'/etot_list'][...]
v_list = f['/vol_list'][...]
# fitting
murnpars = get_ev_fit(v_list, e_list)
vh = np.linspace(v_list[0], v_list[-1], nmesh_fac * len(v_list) - 1)
eh = Murnaghan(murnpars, vh)
ph = Murnaghan_pv(murnpars, vh)*units.eVA_GPa
with h5py.File(fsave, 'a') as f:
if path+'/eosfit' in f:
del f[path+'/eosfit']
f[path+'/eosfit/e0'] = murnpars[0]
f[path+'/eosfit/b0'] = murnpars[1]
f[path+'/eosfit/bp'] = murnpars[2]
f[path+'/eosfit/v0'] = murnpars[3]
f[path+'/eosfit/v_list'] = vh
f[path+'/eosfit/e_list'] = eh
f[path+'/eosfit/p_list'] = ph
splot.xy2_plot([v_list, vh], [e_list, eh], ['o', '-'], ['raw', 'fitting'],
xlabel='V ($\AA^3$/primitive cell)',
ylabel='E (eV/primitive cell)', fsave=path+'_evfit.pdf')
splot.xy_plot(vh, ph, xlabel='V ($\AA^3$/primitive cell)',
ylabel='P (GPa)', fsave=path+'_pvfit.pdf')
def eos_spline(v, e, tol):
'''
Get volume, energy, pressure, and bulk modulus using spline, given
v in \A^3 and e in eV.
'''
from scipy.interpolate import UnivariateSpline
s = UnivariateSpline(v, e, k=3, s=tol)
vh = np.linspace(v[0], v[-1], 10 * len(v) - 1)
eh = [s.derivatives(i)[0] for i in vh]
ph = [-s.derivatives(i)[1] * units.eVA_GPa for i in vh]
    bh = [s.derivatives(x)[2] * x * units.eVA_GPa for x in vh]
return vh, eh, ph, bh
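A small usage sketch, assuming the pyglib package from this repository (plus numpy, scipy and matplotlib's pylab) is importable: generate synthetic E(V) data from known Murnaghan parameters and recover them with get_ev_fit. The parameter values are made up for illustration.
import numpy as np
from pyglib.dft.eos import Murnaghan, get_ev_fit
v = np.linspace(60.0, 80.0, 11)                      # volumes in A^3 (made-up)
true_pars = [-10.0, 0.6, 4.0, 70.0]                  # E0 (eV), B0 (eV/A^3), B', V0 (A^3)
e = Murnaghan(true_pars, v) + np.random.normal(0.0, 1e-4, v.size)
fit_pars = get_ev_fit(v, e)                          # returns [E0, B0, BP, V0]
print("fitted: E0=%.4f eV, V0=%.3f A^3" % (fit_pars[0], fit_pars[3]))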
| 30.791667
| 78
| 0.586198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,602
| 0.433559
|
62b99b8da2aecb88766819c7135ff9c55eef6434
| 1,808
|
py
|
Python
|
src/users/actions.py
|
josue0ghost/Python-and-MySQL-console-application
|
c82641c5ccaae3eb526decd2c96baa4457613a2a
|
[
"MIT"
] | null | null | null |
src/users/actions.py
|
josue0ghost/Python-and-MySQL-console-application
|
c82641c5ccaae3eb526decd2c96baa4457613a2a
|
[
"MIT"
] | null | null | null |
src/users/actions.py
|
josue0ghost/Python-and-MySQL-console-application
|
c82641c5ccaae3eb526decd2c96baa4457613a2a
|
[
"MIT"
] | null | null | null |
import users.user as user
import grades.actions as grade
class Actions:
def signup(self):
print("Selected item: signup")
name = input("Your name: ")
lastname = input("Your last name: ")
email = input("Your email: ")
password = input("Choose a password: ")
newUser = user.User(name, lastname, email, password)
reg = newUser.register()
if reg[0] >= 1:
print(f"{reg[1].name}, you've been registered with email {reg[1].email}")
else:
print("Registration failed")
def signin(self):
try:
email = input("Email: ")
password = input("Password: ")
existingUser = user.User('', '', email, password)
login = existingUser.identify()
# id | name | lastname | email | password | date
if email == login[3]:
print(f"Welcome, {login[1]}")
self.mainMenu(login)
except Exception as e:
print(type(e))
print(type(e).__name__)
print("Login failed")
def mainMenu(self, user):
print("""
Available options:
- Create grade (create)
- Show grades (show)
- Delete grade (delete)
- Log out (exit)
""")
action = input("What do you want to do?: ")
gradeActions = grade.Actions()
if action == "create":
gradeActions.create(user)
self.mainMenu(user)
elif action == "show":
gradeActions.show(user)
self.mainMenu(user)
elif action == "delete":
gradeActions.delete(user)
self.mainMenu(user)
elif action == "exit":
exit()
| 28.25
| 85
| 0.499447
| 1,750
| 0.96792
| 0
| 0
| 0
| 0
| 0
| 0
| 526
| 0.290929
|
62b9b66788e4870e77759cfd4f12b782254dda87
| 102
|
py
|
Python
|
python/src/pdef/version.py
|
pdef/pdef-python
|
09c6e6424ad141b40310eeea53c1f8b6e79be560
|
[
"Apache-2.0"
] | 2
|
2020-03-15T03:22:59.000Z
|
2020-03-15T04:37:23.000Z
|
python/src/pdef/version.py
|
pdef/pdef-python
|
09c6e6424ad141b40310eeea53c1f8b6e79be560
|
[
"Apache-2.0"
] | null | null | null |
python/src/pdef/version.py
|
pdef/pdef-python
|
09c6e6424ad141b40310eeea53c1f8b6e79be560
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
'''Pdef version in a separate module to simplify setup.py.'''
__version__ = '1.2.0'
| 25.5
| 61
| 0.696078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.833333
|
62bac4b95d046b26eb393d4a8ce42aab15524930
| 438
|
py
|
Python
|
hardhat/recipes/python/curtsies.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/python/curtsies.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/python/curtsies.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
from .base import PipBaseRecipe
class CurtsiesRecipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(CurtsiesRecipe, self).__init__(*args, **kwargs)
self.sha256 = '431631b9c1417b2ae8156d0bb6d7c3ce' \
'0c97941413717ed6713a9a9c60e9576e'
self.depends = ['ncurses']
self.pydepends = ['blessings', 'wcwidth']
self.name = 'curtsies'
self.version = '0.2.6'
| 31.285714
| 61
| 0.630137
| 403
| 0.920091
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.260274
|
62bafbcb01ba35806246e96f56398067276ef692
| 688
|
py
|
Python
|
topics/migrations/0001_initial.py
|
codingforentrepreneurs/Autogenerate-Django-Models-
|
95f3ffc2ad6714a02ea16b124ae075dd7ff218c2
|
[
"MIT"
] | 28
|
2020-11-08T21:04:00.000Z
|
2021-09-29T06:56:11.000Z
|
topics/migrations/0001_initial.py
|
codingforentrepreneurs/Autogenerate-Django-Models-
|
95f3ffc2ad6714a02ea16b124ae075dd7ff218c2
|
[
"MIT"
] | null | null | null |
topics/migrations/0001_initial.py
|
codingforentrepreneurs/Autogenerate-Django-Models-
|
95f3ffc2ad6714a02ea16b124ae075dd7ff218c2
|
[
"MIT"
] | 9
|
2020-11-11T13:47:32.000Z
|
2021-08-24T11:31:53.000Z
|
# Generated by Django 3.1.3 on 2020-11-08 19:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Topics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(blank=True, max_length=120, null=True)),
('count', models.BigIntegerField(blank=True, null=True)),
('percent', models.DecimalField(blank=True, decimal_places=5, max_digits=10, null=True)),
],
),
]
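For reference, a model definition that would generate a migration of this shape; a hedged reconstruction, since the project's actual topics/models.py is not part of this record (the `id` AutoField is added by Django automatically).
# topics/models.py -- hypothetical model matching the CreateModel operation above
from django.db import models
class Topics(models.Model):
    tag = models.CharField(max_length=120, blank=True, null=True)
    count = models.BigIntegerField(blank=True, null=True)
    percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)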
| 28.666667
| 114
| 0.59157
| 595
| 0.864826
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.122093
|
62bb8acaace74c492d28ddb4b4b9013124472c19
| 3,821
|
py
|
Python
|
utils.py
|
ChaosForge/shoot_tracer_test
|
e731ad2093b7d413430a03b37186e0787ccdda45
|
[
"MIT"
] | null | null | null |
utils.py
|
ChaosForge/shoot_tracer_test
|
e731ad2093b7d413430a03b37186e0787ccdda45
|
[
"MIT"
] | null | null | null |
utils.py
|
ChaosForge/shoot_tracer_test
|
e731ad2093b7d413430a03b37186e0787ccdda45
|
[
"MIT"
] | null | null | null |
import png
import numpy
import pprint
import math
import re
def gen_background(width, height, mag, b_col):
bg = numpy.zeros((width * mag, height * mag, 4), dtype=numpy.uint8)
for y in range(0, height * mag, mag):
for x in range(0, width * mag):
bg[y][x] = b_col
for x in range(0, width * mag, mag):
for y in range(0, height * mag):
bg[y][x] = b_col
for y in range(-1, height * mag, mag):
if y < 0: continue
for x in range(0, width * mag):
bg[y][x] = b_col
for x in range(-1, width * mag, mag):
if x < 0: continue
for y in range(0, height * mag):
bg[y][x] = b_col
return bg
class picture(object):
def __init__(self, width, height):
self.__array = numpy.full((height,width*4), 220, dtype=numpy.uint8)
self.__view = self.__array.view().reshape(-1,4)
self.__width = width
self.__height = height
self.__mag = 32
bg = gen_background(width, height, self.__mag, numpy.array((192, 192, 192, 64), dtype=numpy.uint8))
self.__dst_rgb = bg[..., :3].astype(numpy.float32) / 255.0
def put_pixel(self, x, y, color):
row = y
col = x
c = numpy.array(color)
sa_01 = c[3] / 255.0
o_m_a = 1.0 - sa_01
sc = c * sa_01
idx = self.__width * y + x
self.__view[idx] = sc + o_m_a * self.__view[idx]
def save(self, filename):
mag = self.__mag
v = self.__array.view().reshape(self.__height, self.__width, 4)
a = v.repeat(mag, axis=0).repeat(mag, axis=1)
src_rgb = a[..., :3].astype(numpy.float32) / 255.0
src_a = a[..., 3].astype(numpy.float32) / 255.0
out_a = src_a.view()
out_rgb = (src_rgb * src_a[..., None] + self.__dst_rgb * (1.0 - src_a[..., None]))
out = numpy.zeros_like(a)
out[..., :3] = out_rgb * 255
out[..., 3] = 255
sv = out.view().reshape(self.__height * mag, self.__width * mag * 4)
png.from_array(sv, mode='RGBA').save(filename)
TRUE_RE = re.compile(".*True.*")
def load_map(filename, canvas_size):
c_width, c_height = canvas_size
player_pos = (0,0)
enemy_pos = (0,0)
expected_result = False
lines = None
with open(filename, "r") as f:
lines = f.readlines()
expected_result = TRUE_RE.match(lines[0]) is not None
lines = lines[1:]
height = len(lines)
width = max([len(w.rstrip("\n")) for w in lines])
if height > c_height:
print("Map {0} height dimension doesn't fit the canvas!".format(filename))
exit(1)
if width > c_width:
print("Map {0} width dimenstion doesn't fit the canvas!".format(filename))
exit(1)
# let's calculate canvas padding
x_pad = (c_width - width) // 2
y_pad = (c_height - height) // 2
m = numpy.zeros((c_height,c_width),numpy.uint8)
for y,l in enumerate(lines):
for x,c in enumerate(l):
if c == "\n":
continue
if c == "#":
m[y+y_pad][x+x_pad] = 1
if c == "@":
m[y+y_pad][x+x_pad] = 2
player_pos = (x+x_pad,y+y_pad)
if c== "h":
m[y+y_pad][x+x_pad] = 3
enemy_pos = (x+x_pad,y+y_pad)
return (m,player_pos,enemy_pos,expected_result,width,height)
def draw_map(m,p):
for y,r in enumerate(m):
for x,c in enumerate(r):
if c == 1:
p.put_pixel(x,y,(64, 64, 64, 220))
if c == 2:
p.put_pixel(x,y,(0, 255, 0, 220))
if c == 3:
p.put_pixel(x,y,(255, 0, 0, 220))
def draw_route(route,p,c):
for e in route:
x,y = e
p.put_pixel(x,y,c)
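A hypothetical end-to-end sketch of the helpers above, assuming this module is importable as `utils` and the pypng package is installed; the map file, its contents and the canvas size are made up for illustration.
from utils import picture, load_map, draw_map
with open("demo.map", "w") as f:          # first line is the expected-result flag parsed by TRUE_RE
    f.write("True\n#####\n#@.h#\n#####\n")
canvas_size = (10, 8)
m, player, enemy, expected, w, h = load_map("demo.map", canvas_size)
p = picture(*canvas_size)
draw_map(m, p)                            # walls grey, player green, enemy red
p.save("demo.png")                        # writes a magnified RGBA image over the grid background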
| 28.94697
| 107
| 0.526302
| 1,396
| 0.365349
| 0
| 0
| 0
| 0
| 0
| 0
| 168
| 0.043968
|
62bcad1c3d9d1d715a3613bffe731d335e4c1324
| 2,019
|
py
|
Python
|
draft/pendulum/cosine_prod/cosine_prod.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 14
|
2019-12-03T14:11:28.000Z
|
2022-03-15T13:44:06.000Z
|
draft/pendulum/cosine_prod/cosine_prod.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 118
|
2019-11-16T19:51:26.000Z
|
2022-03-26T13:52:00.000Z
|
draft/pendulum/cosine_prod/cosine_prod.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 9
|
2020-06-08T07:22:56.000Z
|
2021-03-21T14:12:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 23:58:37 2020
@author: manal
"""
import numpy as np
import GPy
from GPy.kern.src.stationary import Stationary
class Cosine_prod(Stationary):
"""
Cosine kernel:
Product of 1D Cosine kernels
.. math::
&k(x,x')_i = \sigma^2 \prod_{j=1}^{dimension} \cos(x_{i,j}-x_{i,j}')
&x,x' \in \mathcal{M}_{n,dimension}
&k \in \mathcal{M}_{n,n}
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Cosine_prod'):
super(Cosine_prod, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, dist):
n = dist.shape[2]
p = 1
# l = self.lengthscale
for k in range(n):
p*= np.cos(dist[:,:,k])#/l)
return self.variance * p
def K(self, X, X2):
dist = X[:,None,:]-X2[None,:,:]
return self.K_of_r(dist)
def dK_dr(self,dist,dimX):
n = dist.shape[2]
m = dist.shape[0]
# l = self.lengthscale
dK = np.zeros((m,m,n))
for i in range(n):
dK[:,:,i]= np.cos(dist[:,:,i])#/l)
dK[:,:,dimX] = -np.sin(dist[:,:,dimX])#/l)
return self.variance * np.prod(dK,2)#/l
def dK_dX(self, X, X2, dimX):
dist = X[:,None,:]-X2[None,:,:]
dK_dr = self.dK_dr(dist,dimX)
return dK_dr
def dK_dX2(self,X,X2,dimX2):
return -self.dK_dX(X,X2, dimX2)
def dK2_dXdX2(self, X, X2, dimX, dimX2):
dist = X[:,None,:]-X2[None,:,:]
K = self.K_of_r(dist)
n = dist.shape[2]
m = dist.shape[0]
# l = self.lengthscale
dK = np.zeros((m,m,n))
for i in range(n):
dK[:,:,i]= np.cos(dist[:,:,i])#/l)
dK[:,:,dimX] = np.sin(dist[:,:,dimX])#/l)
dK[:,:,dimX2] = np.sin(dist[:,:,dimX2])#/l)
return ((dimX==dimX2)*K - (dimX!=dimX2)*np.prod(dK,2))#/(l**2)
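A quick, hypothetical numerical check of the kernel definition in the docstring, assuming GPy and numpy are installed and this file is importable as `cosine_prod`: K(X, X) should equal the variance times the product over dimensions of cos(x_ij - x'_ij).
import numpy as np
from cosine_prod import Cosine_prod
X = np.random.rand(5, 3)
k = Cosine_prod(input_dim=3, variance=2.0)
K = k.K(X, X)
dist = X[:, None, :] - X[None, :, :]
K_ref = 2.0 * np.prod(np.cos(dist), axis=2)   # sigma^2 * prod_j cos(x_ij - x'_ij)
print(np.allclose(K, K_ref))                  # expected: True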
| 27.283784
| 114
| 0.510649
| 1,826
| 0.904408
| 0
| 0
| 0
| 0
| 0
| 0
| 476
| 0.23576
|
62bd6807be95587bd7a23aaac66d6f7511aacb65
| 156
|
py
|
Python
|
tensorflowonspark/__init__.py
|
DerekRen/TensorFlowOnSpark
|
52dda7b006f2dd0d98f0cc5d362de555263623fd
|
[
"Apache-2.0"
] | 1
|
2020-11-06T08:30:30.000Z
|
2020-11-06T08:30:30.000Z
|
tensorflowonspark/__init__.py
|
DerekRen/TensorFlowOnSpark
|
52dda7b006f2dd0d98f0cc5d362de555263623fd
|
[
"Apache-2.0"
] | null | null | null |
tensorflowonspark/__init__.py
|
DerekRen/TensorFlowOnSpark
|
52dda7b006f2dd0d98f0cc5d362de555263623fd
|
[
"Apache-2.0"
] | null | null | null |
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s (%(threadName)s-%(process)d) %(message)s")
__version__ = "2.2.0"
| 26
| 116
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.480769
|
62be0b337ff4bd9e1d305e934c2a552b0ef05ec1
| 791
|
py
|
Python
|
783-minimum-distance-between-bst-nodes/783-minimum-distance-between-bst-nodes.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2
|
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
783-minimum-distance-between-bst-nodes/783-minimum-distance-between-bst-nodes.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
783-minimum-distance-between-bst-nodes/783-minimum-distance-between-bst-nodes.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def minDiffInBST(self, root: Optional[TreeNode]) -> int:
output=[]
stack=[root]
while(stack):
cur = stack.pop(0)
output.append(cur.val)
if cur.left:
stack.append(cur.left)
if cur.right:
stack.append(cur.right)
sorted_output=sorted(output)
diff = sorted_output[1]-sorted_output[0]
for i in range(2,len(sorted_output)):
if sorted_output[i]-sorted_output[i-1]<diff:
diff=sorted_output[i]-sorted_output[i-1]
return diff
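A minimal, hypothetical driver for the solution above. On LeetCode, `TreeNode` and `Optional` are injected for you; run locally, the type annotation also requires `from typing import Optional` before the class definition.
class TreeNode:   # mirrors the commented-out LeetCode definition above
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))
print(Solution().minDiffInBST(root))   # smallest gap among 1, 2, 3, 4, 6 -> 1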
| 34.391304
| 60
| 0.558786
| 599
| 0.757269
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.235145
|
62bec360e9af625facdc5e2db7ded8db58128d8c
| 4,904
|
py
|
Python
|
Contents/Code/__init__.py
|
RussianPlex/plex-tvkultura
|
4522a9841a9c501b3f37bd1dfcb1a63f2cfb20bc
|
[
"MIT"
] | null | null | null |
Contents/Code/__init__.py
|
RussianPlex/plex-tvkultura
|
4522a9841a9c501b3f37bd1dfcb1a63f2cfb20bc
|
[
"MIT"
] | null | null | null |
Contents/Code/__init__.py
|
RussianPlex/plex-tvkultura
|
4522a9841a9c501b3f37bd1dfcb1a63f2cfb20bc
|
[
"MIT"
] | null | null | null |
PREFIX = "/video/tvkultura"
NAME = "TVKultura.Ru"
ICON = "tvkultura.png"
ART = "tvkultura.jpg"
BASE_URL = "https://tvkultura.ru/"
BRAND_URL = BASE_URL+"brand/"
# Channel initialization
def Start():
ObjectContainer.title1 = NAME
HTTP.Headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'
# Main menu
@handler(PREFIX, NAME, thumb=ICON, art=ART)
def MainMenu():
brands = SharedCodeService.vgtrk.brand_menu(BRAND_URL)
oc = ObjectContainer(title1=NAME)
for brand in brands.list:
oc.add(DirectoryObject(
key=Callback(BrandMenu, url=brand.href),
title=brand.title,
summary=brand.about + ("\n\n[" + brand.schedule + "]" if brand.schedule else ''),
thumb=Resource.ContentsOfURLWithFallback(url=brand.big_thumb, fallback=brand.small_thumb),
))
return oc
@route(PREFIX+'/brand')
def BrandMenu(url):
brand = SharedCodeService.vgtrk.brand_detail(url)
if brand.video_href:
return VideoViewTypePictureMenu(brand.video_href)
@route(PREFIX+'/video/viewtype-picture')
def VideoViewTypePictureMenu(url, page=1, referer=None, page_title=None, next_title=None):
videos = SharedCodeService.vgtrk.video_menu(url, page=page, referer=referer, page_title=page_title, next_title=next_title)
video_items = videos.view_type('picture')
oc = ObjectContainer(title1=videos.title)
for video in video_items.list:
oc.add(MetadataRecordForItem(video))
next_page = video_items.next_page
if next_page is not None:
oc.add(NextPageObject(
key=Callback(
VideoViewTypePictureMenu,
url=next_page.href,
page=int(page) + 1,
referer=url if referer is None else referer,
page_title=videos.title,
next_title=next_page.title
),
title=next_page.title,
))
return oc
@route(PREFIX+'/video/viewtype-picture/children')
def VideoViewTypePictureChildren(url, referer=None, page_title=None):
video_items = SharedCodeService.vgtrk.video_children(url, referer=referer, page_title=page_title)
oc = ObjectContainer(title1=page_title)
for video in video_items.list:
oc.add(EpisodeObjectForItem(video))
return oc
def MetadataRecordForItem(video):
if video.has_children:
return DirectoryObject(
key=Callback(VideoViewTypePictureChildren, url=video.ajaxurl, referer=video.href, page_title=video.title),
title=video.title,
thumb=video.thumb,
)
return EpisodeObjectForItem(video)
def EpisodeObjectForItem(video):
callback = Callback(MetadataObjectForURL, href=video.href, thumb=video.thumb, title=video.title)
return EpisodeObject(
key=callback,
rating_key=video.href,
title=video.title,
thumb=video.thumb,
items=MediaObjectsForURL(callback),
)
def MetadataObjectForURL(href, thumb, title, **kwargs):
# This is a sort-of replacement for the similar method from the URL Services, just different parameters list.
page = SharedCodeService.vgtrk.video_page(href)
video_clip_object = VideoClipObject(
key=Callback(MetadataObjectForURL, href=href, thumb=thumb, title=title, **kwargs),
rating_key=href,
title=title,
thumb=thumb,
summary=page.full_text,
items=MediaObjectsForURL(
Callback(PlayVideo, href=href)
),
**kwargs
)
return ObjectContainer(
no_cache=True,
objects=[video_clip_object]
)
def MediaObjectsForURL(callback):
# This is a sort-of replacement for the similar method from the URL Services, just different parameters list.
return [
MediaObject(
container=Container.MP4,
video_codec=VideoCodec.H264,
audio_codec=AudioCodec.AAC,
parts=[
PartObject(key=callback)
]
)
]
@indirect
def PlayVideo(href):
page = SharedCodeService.vgtrk.video_page(href)
json = JSON.ObjectFromURL(page.datavideo_href, headers={'Referer': page.video_iframe_href})
medialist = json['data']['playlist']['medialist']
if len(medialist) > 1:
raise RuntimeWarning('More than one media found, each should have been set as a PartObject!')
quality = str(json['data']['playlist']['priority_quality'])
transport = 'http'
if 'sources' not in medialist[0] and medialist[0]['errors']:
raise Ex.PlexNonCriticalError(2005, medialist[0]['errors'])
video_url = medialist[0]['sources'][transport][quality]
Log('Redirecting to video URL: %s' % video_url)
return IndirectResponse(
VideoClipObject,
key=video_url,
http_headers={'Referer': page.video_iframe_href},
metadata_kwargs={'summary': page.full_text}
)
| 34.055556
| 126
| 0.668638
| 0
| 0
| 0
| 0
| 2,837
| 0.578507
| 0
| 0
| 750
| 0.152936
|
62bf318fcce84f085eb558f2ffb4dc78820b46cc
| 3,399
|
py
|
Python
|
pexp/management/commands/p2cmd.py
|
bconstantin/django_polymorphic
|
2c47db8fcc284a92d2c9769ba503603fbea92660
|
[
"BSD-3-Clause"
] | 27
|
2015-06-24T20:29:20.000Z
|
2021-04-18T15:38:15.000Z
|
pexp/management/commands/p2cmd.py
|
bconstantin/django_polymorphic
|
2c47db8fcc284a92d2c9769ba503603fbea92660
|
[
"BSD-3-Clause"
] | 1
|
2015-10-04T14:34:26.000Z
|
2015-10-04T14:34:26.000Z
|
pexp/management/commands/p2cmd.py
|
bconstantin/django_polymorphic
|
2c47db8fcc284a92d2c9769ba503603fbea92660
|
[
"BSD-3-Clause"
] | 3
|
2015-11-10T21:36:10.000Z
|
2020-06-22T01:51:39.000Z
|
# -*- coding: utf-8 -*-
"""
This module is a scratchpad for general development, testing & debugging
Well, even more so than pcmd.py. You best ignore p2cmd.py.
"""
import uuid
from django.core.management.base import NoArgsCommand
from django.db.models import connection
from pprint import pprint
import settings
import time,sys
from pexp.models import *
def reset_queries():
connection.queries=[]
def show_queries():
print; print 'QUERIES:',len(connection.queries); pprint(connection.queries); print; connection.queries=[]
def print_timing(func, message='', iterations=1):
def wrapper(*arg):
results=[]
reset_queries()
for i in xrange(iterations):
t1 = time.time()
x = func(*arg)
t2 = time.time()
results.append((t2-t1)*1000.0)
res_sum=0
for r in results: res_sum +=r
median = res_sum / len(results)
print '%s%-19s: %.4f ms, %i queries (%i times)' % (
message,func.func_name,
res_sum,
len(connection.queries),
iterations
)
sys.stdout.flush()
return wrapper
class Command(NoArgsCommand):
help = ""
def handle_noargs(self, **options):
print 'polycmd - sqlite test db is stored in:',settings.SQLITE_DB_PATH
print
if False:
ModelA.objects.all().delete()
a=ModelA.objects.create(field1='A1')
b=ModelB.objects.create(field1='B1', field2='B2')
c=ModelC.objects.create(field1='C1', field2='C2', field3='C3')
reset_queries()
print ModelC.base_objects.all();
show_queries()
if False:
ModelA.objects.all().delete()
for i in xrange(1000):
a=ModelA.objects.create(field1=str(i%100))
b=ModelB.objects.create(field1=str(i%100), field2=str(i%200))
c=ModelC.objects.create(field1=str(i%100), field2=str(i%200), field3=str(i%300))
if i%100==0: print i
f=print_timing(poly_sql_query,iterations=1000)
f()
f=print_timing(poly_sql_query2,iterations=1000)
f()
return
nModelA.objects.all().delete()
a=nModelA.objects.create(field1='A1')
b=nModelB.objects.create(field1='B1', field2='B2')
c=nModelC.objects.create(field1='C1', field2='C2', field3='C3')
qs=ModelA.objects.raw("SELECT * from pexp_modela")
for o in list(qs): print o
from django.db import connection, transaction
from random import Random
rnd=Random()
def poly_sql_query():
cursor = connection.cursor()
cursor.execute("""
SELECT id, pexp_modela.field1, pexp_modelb.field2, pexp_modelc.field3
FROM pexp_modela
LEFT OUTER JOIN pexp_modelb
ON pexp_modela.id = pexp_modelb.modela_ptr_id
LEFT OUTER JOIN pexp_modelc
ON pexp_modelb.modela_ptr_id = pexp_modelc.modelb_ptr_id
WHERE pexp_modela.field1=%i
ORDER BY pexp_modela.id
""" % rnd.randint(0,100) )
#row=cursor.fetchone()
return
def poly_sql_query2():
cursor = connection.cursor()
cursor.execute("""
SELECT id, pexp_modela.field1
FROM pexp_modela
WHERE pexp_modela.field1=%i
ORDER BY pexp_modela.id
""" % rnd.randint(0,100) )
#row=cursor.fetchone()
return
| 30.621622
| 109
| 0.611356
| 1,363
| 0.401
| 0
| 0
| 0
| 0
| 0
| 0
| 899
| 0.26449
|
62c2495191e9820c3997816cec2ee39d380c6cb2
| 13,997
|
py
|
Python
|
njunmt/utils/misc.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | 1
|
2018-10-27T12:04:03.000Z
|
2018-10-27T12:04:03.000Z
|
njunmt/utils/misc.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | null | null | null |
njunmt/utils/misc.py
|
whr94621/NJUNMT-tf
|
29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Natural Language Processing Group, Nanjing University, zhaocq.nlp@gmail.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Define utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import os
import socket
import numpy
import tensorflow as tf
from tensorflow import gfile
from tensorflow.python.client import device_lib
from njunmt.utils.configurable import ModelConfigs
from njunmt.utils.constants import Constants
from njunmt.utils.constants import concat_name
def open_file(filename, encoding="utf-8", mode="r"):
""" Opens file using codecs module.
Args:
filename: A string.
encoding: A string specifies the encoding which is to be used for the
file.
        mode: A string that specifies the opening mode.
Returns: A file descriptor.
"""
if mode == "r" and not gfile.Exists(filename):
raise OSError("File: \"{}\" not exists.".format(filename))
return codecs.open(filename, mode=mode, encoding=encoding)
def close_file(fp):
""" Closes a file descriptor.
Args:
fp: A file descriptor.
"""
if not fp.closed:
fp.close()
def compute_non_padding_num(input_fields, name_prefix):
""" Computes non-padding num and total tokens num.
Args:
input_fields: A dict of placeholders.
name_prefix: The key prefix name, Constants.FEATURE_NAME_PREFIX
or Constants.LABEL_NAME_PREFIX
Returns: A tuple (non-padding tokens num, total tokens num)
"""
length = input_fields[concat_name(name_prefix, Constants.LENGTH_NAME)]
ids = input_fields[concat_name(name_prefix, Constants.IDS_NAME)]
nonpadding_tokens_num = tf.reduce_sum(length)
shape = tf.shape(ids)
total_tokens_num = shape[0] * shape[1]
return nonpadding_tokens_num, total_tokens_num
def port_is_open(host):
""" Checks whether the port is open.
Args:
host: A string has format "ip:port".
Returns: True if the port is open, False otherwise.
"""
ip, port = host.strip().split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
# print '%d is open' % port
return True
except:
# print '%d is down' % port
return False
def create_ps_worker(ps_hosts, worker_hosts, task_index, ps):
""" Creates tf ps and workers.
Args:
ps_hosts: A list of host strings with format "ip:port".
worker_hosts: A list of worker strings with format "ip:port".
task_index: The task index.
ps: Whether it is a parameter server.
    Returns: A tuple `(server, cluster, num_workers, gpu_options)`.
"""
ps_hosts = ps_hosts
worker_hosts = worker_hosts
num_workers = len(worker_hosts)
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
gpu_options = tf.GPUOptions(allocator_type='BFC', allow_growth=True)
if ps:
for host in ps_hosts:
if port_is_open(host):
raise ValueError("Error with ps_hosts: %s, the port %s is already occupied." \
% (host, host.split(":")[1]))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(),
job_name="ps",
task_index=task_index,
default_session_config=tf.ConfigProto(gpu_options=gpu_options,
device_count={"GPU": 0}),
protocol="grpc")
else:
host = worker_hosts[task_index]
if port_is_open(host):
raise ValueError("Error with worker_hosts: %s, the port %s is already occupied." \
% (host, host.split(":")[1]))
server_def = tf.train.ServerDef(cluster=cluster.as_cluster_def(),
job_name="worker",
task_index=task_index,
default_session_config=tf.ConfigProto(gpu_options=gpu_options),
protocol="grpc")
server = tf.train.Server(server_def)
return server, cluster, num_workers, gpu_options
def dump_model_analysis(model_dir):
""" Dumps detailed model size.
Args:
model_dir: The directory name to save to.
"""
# Dump to file on the chief worker
filename = os.path.join(model_dir, Constants.MODEL_ANALYSIS_FILENAME)
profile_opt_builder = tf.profiler.ProfileOptionBuilder
opts = profile_opt_builder.trainable_variables_parameter()
opts["output"] = "file:outfile={}".format(filename)
param_stats = tf.profiler.profile(tf.get_default_graph(), options=opts)
# following APIs are deprecated
# opts = tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS
# opts['dump_to_file'] = os.path.abspath(filename)
# tf.contrib.tfprof.model_analyzer.print_model_analysis(
# tf.get_default_graph(), tfprof_options=opts)
# Print the model analysis
with gfile.GFile(filename) as file:
tf.logging.info(file.read())
def get_available_gpus():
"""Returns a list of available GPU devices names. """
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == "GPU"]
def get_available_devices():
""" Returns aa list of """
gpus = get_available_gpus()
if len(gpus) == 0:
return ["/cpu:0"]
return ["/gpu:{}".format(i) for i, _ in enumerate(gpus)]
def label_smoothing(labels, vocab_size, epsilon=0.1):
"""Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
labels: A 2d tensor with shape of [N, T].
vocab_size: The size of vocabulary.
epsilon: Smoothing rate.
Returns: The smoothed labels.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```
"""
confidence = 1. - epsilon
low_confidence = epsilon / tf.to_float(vocab_size - 1)
normalizing = -(confidence * tf.log(confidence)
+ tf.to_float(vocab_size - 1) * low_confidence
* tf.log(low_confidence + 1e-20))
soft_targets = tf.one_hot(
indices=labels,
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
return soft_targets, normalizing
def get_model_top_scope_name(model_name, problem_name):
""" Returns the top scope name of all models.
Args:
model_name: The model string.
problem_name: The problem name.
Returns: A str.
"""
if model_name is None:
model_name = "SequenceToSequence"
return problem_name or model_name.split(".")[-1]
def load_pretrain_model(model_name, pretrain_model_dir, problem_name):
""" Loads pretrained model.
Args:
model_name: The name of the model.
pretrain_model_dir: The pretrained model dir.
problem_name: The problem name.
Returns:
A list of assign ops.
"""
top_scope_name = get_model_top_scope_name(model_name, problem_name)
pt_model_configs = ModelConfigs.load(pretrain_model_dir)
pt_model_top_scope_name = get_model_top_scope_name(pt_model_configs["model"], pt_model_configs["problem_name"])
tf.logging.info("loading variables from {}".format(pretrain_model_dir))
assign_op = []
for var_name, _ in tf.contrib.framework.list_variables(pretrain_model_dir):
if var_name.startswith("OptimizeLoss"):
continue
if tf.GraphKeys.GLOBAL_STEP in var_name or "learning_rate" in var_name or "lr" in var_name:
tf.logging.info("Pretrain: ignore {}".format(var_name))
continue
tf.logging.info("Pretrain: reload {}".format(var_name))
var = tf.contrib.framework.load_variable(pretrain_model_dir, var_name)
with tf.variable_scope(top_scope_name, reuse=True):
v = tf.get_variable(name=var_name[len(pt_model_top_scope_name) + 1:],
shape=var.shape, dtype=var.dtype)
assign_op.append(v.assign(var))
return assign_op
def padding_batch_data(seqs_x, padding_x):
""" Creates batch data tensor.
Args:
seqs_x: A list of word sequence ids. Each word sequence is also
a list.
padding_x: The symbol id to be added to empty position.
Returns: A tuple `(seqs, seq_lengths)`, where `seqs` is a 2-d
numpy.ndarray with shape [len(seqs_x), max_seq_len] and
`seq_lengths` is a 1-d numpy.ndarray with shape [len(seqs_x), ].
"""
lengths_x = [len(s) for s in seqs_x]
max_len_x = numpy.max(lengths_x)
n_samples = len(seqs_x)
x = numpy.full([n_samples, max_len_x], padding_x, numpy.int32)
for idx, s_x in enumerate(seqs_x):
x[idx, :lengths_x[idx]] = s_x
return x, numpy.array(lengths_x, dtype=numpy.int32)
def add_dict_to_collection(collection_name, dict_):
""" Adds a dictionary to a graph collection.
Args:
collection_name: The name of the collection to add the dictionary to.
dict_: A dictionary of string keys to tensor values.
"""
key_collection = collection_name + "_keys"
value_collection = collection_name + "_values"
for key, value in dict_.items():
tf.add_to_collection(key_collection, key)
tf.add_to_collection(value_collection, value)
def get_dict_from_collection(collection_name):
""" Gets a dictionary from a graph collection.
Args:
collection_name: A collection name to read a dictionary from.
Returns: A dictionary with string keys and tensor values
"""
key_collection = collection_name + "_keys"
value_collection = collection_name + "_values"
keys = tf.get_collection(key_collection)
values = tf.get_collection(value_collection)
return dict(zip(keys, values))
def deprecated(obj):
"""This is a decorator which can be used to mark functions or classes
    as deprecated. It will result in a warning being emitted
when the function/class is used."""
def new_obj(*args, **kwargs):
tf.logging.info("Call to deprecated function/class %s." % obj.__name__)
tf.logging.warn("Call to deprecated function/class %s." % obj.__name__)
return obj(*args, **kwargs)
return new_obj
def shuffle_data(from_binding, to_binding):
""" Calls njunmt/tools/shuffle.py to shuffle data.
Args:
from_binding: The original data files with same number of lines.
to_binding: The files to save to.
"""
cmd = "python {script} {from_} {to_}".format(
script="njunmt/tools/shuffle.py",
from_=",".join(from_binding),
to_=",".join(to_binding))
os.system(cmd)
def get_labels_files(labels_file):
""" Gets the list of labels file.
Args:
labels_file: A string, the prefix of the labels file.
Returns: A list or None.
"""
if labels_file is None:
return None
ret = []
if gfile.Exists(labels_file):
ret.append(labels_file)
else:
idx = 0
while gfile.Exists(labels_file + str(idx)):
ret.append(labels_file + str(idx))
idx += 1
return ret
def inspect_varname_prefix(var_name):
""" Returns the top variable scope name. """
# empirical
keywords = "/input_symbol_modality"
if keywords in var_name:
return var_name[:var_name.index(keywords)]
keywords = "/symbol_modality_"
if keywords in var_name:
return var_name[:var_name.index(keywords)]
return None
def set_fflayers_layer_norm(layer_norm=False):
""" Set laye norm flag. """
from njunmt.layers import common_layers
common_layers.FFLAYERS_LAYER_NORM = layer_norm
def get_saver_or_default(**kwargs):
""" Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`CheckpointSaverHook`.
This method is modified from tensorflow.python.training.saver._get_saver_or_default.
Args:
kwargs: Parameters passed to tf.train.Saver.
Returns: `Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items.
"""
collection_key = tf.GraphKeys.SAVERS
savers = tf.get_collection(collection_key)
if savers:
if len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor.".
format(collection_key))
return savers[0]
saver = tf.train.Saver(sharded=True, allow_empty=True, **kwargs)
if saver is not None:
tf.add_to_collection(collection_key, saver)
return saver
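A tiny example of `padding_batch_data` from above, assuming the module is importable as `njunmt.utils.misc` (which pulls in TensorFlow); three sequences are right-padded with 0 to the longest length.
from njunmt.utils.misc import padding_batch_data
seqs = [[2, 5, 7], [3], [4, 4, 9, 1]]
x, lengths = padding_batch_data(seqs, padding_x=0)
print(x)        # [[2 5 7 0] [3 0 0 0] [4 4 9 1]]
print(lengths)  # [3 1 4]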
| 34.139024
| 115
| 0.650139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,177
| 0.441309
|
62c315af896205c5035b0984b4c54070e53199e5
| 4,381
|
py
|
Python
|
src/manager.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | null | null | null |
src/manager.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | 3
|
2021-11-12T17:13:45.000Z
|
2021-11-23T14:07:50.000Z
|
src/manager.py
|
advancedbioimagingcenter/opticalaberrations
|
80e642925bdc907d135717499e15d3217b5c6a0a
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import time
from pathlib import Path
from subprocess import call
import cli
def parse_args(args):
parser = cli.argparser()
subparsers = parser.add_subparsers(
help="Arguments for specific action.", dest="dtype"
)
subparsers.required = True
slurm = subparsers.add_parser("slurm", help='use SLURM to submit jobs')
slurm.add_argument(
"script", type=str,
help='path to script to run'
)
slurm.add_argument(
"--python", default=f'{Path.home()}/anaconda3/envs/deep/bin/python', type=str,
help='path to ext python to run program with'
)
slurm.add_argument(
"--task", action='append',
help='any additional flags you want to run the script with'
)
slurm.add_argument(
"--taskname", action='append',
        help='alias name for each task'
)
slurm.add_argument(
"--outdir", default='/clusterfs/fiona/thayer/opticalaberrations/models', type=str,
help='output directory'
)
slurm.add_argument(
"--partition", default='abc', type=str,
)
slurm.add_argument(
"--qos", default='abc_high', type=str,
help='using `abc_high` for unlimited runtime',
)
slurm.add_argument(
"--gpus", default=1, type=int,
help='number of GPUs to use for this job'
)
slurm.add_argument(
"--cpus", default=5, type=int,
help='number of CPUs to use for this job'
)
slurm.add_argument(
"--mem", default='160G', type=str,
help='requested RAM to use for this job'
)
slurm.add_argument(
"--name", default='train', type=str,
        help='alias name for this job'
)
slurm.add_argument(
"--job", default='job.slm', type=str,
help='path to slurm job template'
)
slurm.add_argument(
"--constraint", default=None, type=str,
help='select a specific node type eg. titan'
)
default = subparsers.add_parser("default", help='run a job using default python')
default.add_argument(
"script", type=str,
help='path to script to run'
)
default.add_argument(
"--python", default=f'{Path.home()}/anaconda3/envs/deep/bin/python', type=str,
help='path to ext python to run program with'
)
default.add_argument(
"--flags", default='', type=str,
help='any additional flags you want to run the script with'
)
default.add_argument(
"--outdir", default='/clusterfs/fiona/thayer/opticalaberrations/models', type=str,
help='output directory'
)
default.add_argument(
"--name", default='train', type=str,
        help='alias name for this job'
)
return parser.parse_args(args)
def main(args=None):
args = parse_args(args)
outdir = Path(f"{args.outdir}/{args.name}")
outdir.mkdir(exist_ok=True, parents=True)
profiler = f"/usr/bin/time -v -o {outdir}/{args.script.split('.')[0]}_profile.log "
if args.dtype == 'default':
sjob = profiler
sjob += f"{args.python} "
sjob += f"{args.script} "
sjob += f" --outdir {outdir} {args.flags} 2>&1 | tee {outdir}/{args.script.split('.')[0]}.log"
call([sjob], shell=True)
elif args.dtype == 'slurm':
sjob = '/usr/bin/sbatch '
sjob += f' --qos={args.qos} '
sjob += f' --partition={args.partition} '
if args.constraint is not None:
sjob += f" -C '{args.constraint}' "
sjob += f' --gres=gpu:{args.gpus} '
sjob += f' --cpus-per-task={args.cpus} '
sjob += f" --mem='{args.mem}' "
sjob += f" --job-name={args.name} "
sjob += f" --output={outdir}/{args.script.split('.')[0]}.log"
sjob += f" --export=ALL,"
sjob += f"PROFILER='{profiler}',"
sjob += f"SCRIPT='{args.script}',"
sjob += f"PYTHON='{args.python}',"
sjob += f"JOBS='{len(args.task)}',"
for i, (t, n) in enumerate(zip(args.task, args.taskname)):
sjob += f"TASK_{i + 1}='{profiler} {args.python} {args.script} --cpu_workers -1 --gpu_workers -1 --outdir {outdir/n} {t}'"
sjob += ',' if i < len(args.task)-1 else ' '
sjob += args.job
call([sjob], shell=True)
else:
logging.error('Unknown action')
if __name__ == "__main__":
main()
| 27.904459
| 134
| 0.577494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,863
| 0.425245
|
62c3b75f8adcffa947ee4bcc6c76cec4ce476e9e
| 1,127
|
py
|
Python
|
src/aiographql/client/response.py
|
ehtec/aiographql-client
|
66b135ee08a1c4e3c3d25e63db91e7713a99501e
|
[
"MIT"
] | 18
|
2019-12-08T23:38:21.000Z
|
2021-04-14T17:40:34.000Z
|
src/aiographql/client/response.py
|
ehtec/aiographql-client
|
66b135ee08a1c4e3c3d25e63db91e7713a99501e
|
[
"MIT"
] | 134
|
2019-07-30T04:51:44.000Z
|
2021-05-24T07:07:02.000Z
|
src/aiographql/client/response.py
|
ehtec/aiographql-client
|
66b135ee08a1c4e3c3d25e63db91e7713a99501e
|
[
"MIT"
] | 7
|
2019-09-26T10:14:58.000Z
|
2021-01-01T06:09:11.000Z
|
from dataclasses import dataclass, field
from typing import Any, Dict, List
from aiographql.client.error import GraphQLError
from aiographql.client.request import GraphQLRequestContainer
@dataclass(frozen=True)
class GraphQLBaseResponse(GraphQLRequestContainer):
json: Dict[str, Any] = field(default_factory=dict)
@dataclass(frozen=True)
class GraphQLResponse(GraphQLBaseResponse):
"""
GraphQL Response object wrapping response data and any errors. This object also
    contains a copy of the :class:`GraphQLRequest` that produced this response.
"""
@property
def errors(self) -> List[GraphQLError]:
"""
A list of :class:`GraphQLError` objects if server responded with query errors.
"""
return [GraphQLError.load(error) for error in self.json.get("errors", list())]
@property
def data(self) -> Dict[str, Any]:
"""The data payload the server responded with."""
return self.json.get("data", dict())
@property
def query(self) -> str:
"""The query string used to produce this response."""
return self.request.query
| 31.305556
| 86
| 0.697427
| 885
| 0.785271
| 0
| 0
| 933
| 0.827862
| 0
| 0
| 397
| 0.352263
|
62c3efcf40a53a46324b9e3f1578e57e7300a9cb
| 21
|
py
|
Python
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 43
|
2019-12-13T06:13:40.000Z
|
2021-07-25T06:29:17.000Z
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 2
|
2020-12-05T14:24:17.000Z
|
2020-12-24T09:47:10.000Z
|
lib/utils/__init__.py
|
jwyang/C3Net.pytorch
|
70026fc80c5427484268c428a9dcd4cde2e8197f
|
[
"MIT"
] | 4
|
2019-12-16T20:25:20.000Z
|
2020-06-23T08:45:17.000Z
|
from .verbo import *
| 10.5
| 20
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62c415a1f0ebcba4e884791640c35ed88c5b14d6
| 698
|
py
|
Python
|
Populating Next Right Pointers in Each Node II.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
Populating Next Right Pointers in Each Node II.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
Populating Next Right Pointers in Each Node II.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
if root == None:
return
p = root.next
while p:
if p.left != None:
p = p.left
break
elif p.right != None:
p = p.right
break
p = p.next
if (root.right != None):
root.right.next = p
if(root.left != None):
if root.right != None:
root.left.next = root.right
else:
root.left.next = p
self.connect(root.right)
self.connect(root.left)
| 24.928571
| 43
| 0.41404
| 697
| 0.998567
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.103152
|
62c4af423f4d437ce0fc13458b7ee5066a241ce5
| 2,602
|
py
|
Python
|
federatedml/feature/feature_selection/variance_coe_filter.py
|
yzjba/FATE
|
9a6d252da637b2583a0f8a51f6cb4c615850bab9
|
[
"Apache-2.0"
] | 32
|
2020-06-12T08:39:58.000Z
|
2022-03-20T06:57:08.000Z
|
federatedml/feature/feature_selection/variance_coe_filter.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 10
|
2020-11-13T18:55:48.000Z
|
2022-02-10T02:00:12.000Z
|
federatedml/feature/feature_selection/variance_coe_filter.py
|
ErikSun2020/FATE
|
bdda535c7d8a974fc2c43102837964b7da199730
|
[
"Apache-2.0"
] | 16
|
2020-06-12T06:51:46.000Z
|
2022-03-29T10:23:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arch.api.utils import log_utils
from federatedml.feature.feature_selection.filter_base import BaseFilterMethod
from federatedml.statistic.statics import MultivariateStatisticalSummary
from federatedml.param.feature_selection_param import VarianceOfCoeSelectionParam
from federatedml.protobuf.generated import feature_selection_meta_pb2
from federatedml.util import consts
import math
LOGGER = log_utils.getLogger()
class VarianceCoeFilter(BaseFilterMethod):
"""
Filter the columns if coefficient of variance is less than a threshold.
"""
def __init__(self, filter_param: VarianceOfCoeSelectionParam):
super().__init__(filter_param)
self.statics_obj = None
def _parse_filter_param(self, filter_param):
self.value_threshold = filter_param.value_threshold
def set_statics_obj(self, statics_obj):
self.statics_obj = statics_obj
def fit(self, data_instances, suffix):
if self.statics_obj is None:
self.statics_obj = MultivariateStatisticalSummary(data_instances)
std_var = self.statics_obj.get_std_variance()
mean_value = self.statics_obj.get_mean()
for col_name in self.selection_properties.select_col_names:
s_v = std_var.get(col_name)
m_v = mean_value.get(col_name)
if math.fabs(m_v) < consts.FLOAT_ZERO:
m_v = consts.FLOAT_ZERO
coeff_of_var = math.fabs(s_v / m_v)
if coeff_of_var >= self.value_threshold:
self.selection_properties.add_left_col_name(col_name)
self.selection_properties.add_feature_value(col_name, coeff_of_var)
self._keep_one_feature(pick_high=True)
return self
def get_meta_obj(self, meta_dicts):
result = feature_selection_meta_pb2.VarianceOfCoeSelectionMeta(value_threshold=self.value_threshold)
meta_dicts['variance_coe_meta'] = result
return meta_dicts
| 38.264706
| 108
| 0.734051
| 1,516
| 0.582629
| 0
| 0
| 0
| 0
| 0
| 0
| 750
| 0.28824
|
62c4f0e22569d1378b2c2f18e1303c8af52e1edb
| 269
|
py
|
Python
|
hausa_text_corpus/tool.py
|
tunde99/AMMI-2020-SPEECH-COURSE
|
d1f6614804169a59a324c75c0b398c63af013d8c
|
[
"MIT"
] | 1
|
2020-08-24T21:00:01.000Z
|
2020-08-24T21:00:01.000Z
|
hausa_text_corpus/tool.py
|
tunde99/AMMI-2020-SPEECH-COURSE
|
d1f6614804169a59a324c75c0b398c63af013d8c
|
[
"MIT"
] | null | null | null |
hausa_text_corpus/tool.py
|
tunde99/AMMI-2020-SPEECH-COURSE
|
d1f6614804169a59a324c75c0b398c63af013d8c
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
word_counts = defaultdict(int)
for w in open('runbun_ilimi/runbin_ilimi.txt', encoding="utf-8").read().split():
word_counts[w.lower()] += 1
totalCount = 0
for w, c in word_counts.items():
totalCount += 1
print(totalCount)
| 24.454545
| 80
| 0.717472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.141264
|
62c6209c1d4244f1912d66e155942262d1b47bff
| 3,839
|
py
|
Python
|
S1/SI/S3/Test-Moteur-Elec-2/Test-Moteur-Elec/main.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/SI/S3/Test-Moteur-Elec-2/Test-Moteur-Elec/main.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
S1/SI/S3/Test-Moteur-Elec-2/Test-Moteur-Elec/main.py
|
HerbeMalveillante/ecole
|
bebbc73cd678c58c9cd40389ea1cf229a0200308
|
[
"MIT"
] | null | null | null |
# Main file for managing the robot's resources
from micropython import const
from machine import *
from DRV8833 import *
from BME280 import *
import pycom
import time
import os
# Global variables for the motors and the H-bridge
DRV8833_Sleep_pin = "P20" # SLEEP pin
DRV8833_AIN1 = "P22" # PWM input, motor A: AIN1
DRV8833_AIN2 = "P21" # PWM input, motor A: AIN2
DRV8833_BIN1 = "P19" # PWM input, motor B: BIN1
DRV8833_BIN2 = "P12" # PWM input, motor B: BIN2
# Wheel rotation speed
V_MAX = 1.0
V_MOYEN = 0.5
V_MIN = 0.25
# ---------------------------------------------------------------------------
# Robot movement routines
def Avancer(vitesse):
Moteur_Droit.Cmde_moteur(SENS_HORAIRE, vitesse)
Moteur_Gauche.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
def Reculer(vitesse):
Moteur_Droit.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, vitesse)
def Pivoter_droite(vitesse):
Moteur_Droit.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
Moteur_Gauche.Cmde_moteur(SENS_ANTI_HORAIRE, vitesse)
def Pivoter_gauche(vitesse):
Moteur_Droit.Cmde_moteur(SENS_HORAIRE, vitesse)
Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, vitesse)
def Arret():
Moteur_Droit.Cmde_moteur(SENS_HORAIRE, 0)
Moteur_Gauche.Cmde_moteur(SENS_HORAIRE, 0)
# ------------------------------------------------------------------------
# Motor initialisation
# IN1_pin : PWM input 1 of the DRV8833
# IN2_pin : PWM input 2 of the DRV8833
# sleep_pin : SLP pin used to disable the DRV8833 H-bridges
# timer_number : in [0,1,2,3]; timer used to generate the PWM signal
# freq : frequency of the PWM signal
# num_channel_pwm_In1 : ID of the PWM channel associated with pin In1_pin
# num_channel_pwm_In2 : ID of the PWM channel associated with pin In2_pin
# DRV8833 (In1_pin, In2_pin, sleep_pin, timer_number, freq, num_channel_pwm_In1, num_channel_pwm_In2)
Moteur_Gauche = DRV8833(
DRV8833_AIN1, DRV8833_AIN2, DRV8833_Sleep_pin, 1, 500, 0, 1
) # On connector Encoder1
Moteur_Droit = DRV8833(
DRV8833_BIN1, DRV8833_BIN2, DRV8833_Sleep_pin, 1, 500, 2, 3
) # On connector Encoder2
Arret()
bus_i2c = I2C()
bus_i2c.init(I2C.MASTER, baudrate=400000)
adr = bus_i2c.scan()
Id_BME280 = bus_i2c.readfrom_mem(BME280_I2C_ADR, BME280_CHIP_ID_ADDR, 1)
capteur_BME280 = BME280(BME280_I2C_ADR, bus_i2c) # -- Sensor calibration
capteur_BME280.Calibration_Param_Load()
rtc = RTC()
rtc.init((2020, 10, 26, 0, 0, 0, 0, 0))
jour = rtc.now()
date = "Date : " + str(jour[0]) + "/" + str(jour[1]) + "/" + str(jour[2])
print("L'adresse du périphérique I2C est :", adr)
print("Valeur ID BME280 :", hex(Id_BME280[0]))
while True:
jour = rtc.now()
temps = str(jour[3]) + "h " + str(jour[4]) + "m " + str(jour[5]) + "s"
temp = capteur_BME280.read_temp()
humi = capteur_BME280.read_humidity()
pres = capteur_BME280.read_pression()
print("-------------------------------------------------------------------")
print(
"Temps passé :",
temps,
"- Température :",
"%.2f" % temp,
"- Humidité :",
"%.2f" % humi,
"- Préssion :",
"%.2f" % pres,
)
print("--------------")
print("-> Démarage")
print("-Avancer")
Avancer(V_MIN)
time.sleep(2)
print("-Reculer")
Reculer(V_MIN)
time.sleep(2)
print("-Pivoter droite")
Pivoter_droite(V_MIN)
time.sleep(2)
print("-Pivoter gauche")
Pivoter_gauche(V_MIN)
time.sleep(2)
print("-> Arret")
Arret()
time.sleep(2)
"""
Index = 0
while True :
print('Index : ', Index)
# Definition of a movement sequence
time.sleep(0.25)
Index +=1
"""
| 29.530769
| 102
| 0.62386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,582
| 0.409208
|
62c68df32015d9517a46bfdec493cc8175c53e34
| 4,146
|
py
|
Python
|
evaluators/weighted_cross_entropy.py
|
adgilbert/med-seg
|
825ea068c6cf5328e437e3ba85b894aeae58cf25
|
[
"BSD-3-Clause"
] | null | null | null |
evaluators/weighted_cross_entropy.py
|
adgilbert/med-seg
|
825ea068c6cf5328e437e3ba85b894aeae58cf25
|
[
"BSD-3-Clause"
] | null | null | null |
evaluators/weighted_cross_entropy.py
|
adgilbert/med-seg
|
825ea068c6cf5328e437e3ba85b894aeae58cf25
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch import nn as nn
from torch.autograd import Variable
# from https://github.com/wolny/pytorch-3dunet/blob/master/pytorch3dunet/unet3d/losses.py
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
# number of channels
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1)
def expand_as_one_hot(input, C, ignore_index=None):
"""
Converts NxHxW label image to NxCxDxHxW, where each label gets converted to its corresponding one-hot vector
:param input: 4D input image (NxDxHxW)
:param C: number of channels/labels
:param ignore_index: ignore index to be kept during the expansion
:return: 5D output image (NxCxDxHxW)
"""
assert input.dim() == 3
# expand the input tensor to Nx1xHxW before scattering
input = input.unsqueeze(1)
# create result tensor shape (NxCxDxHxW)
shape = list(input.size())
shape[1] = C
if ignore_index is not None:
# create ignore_index mask for the result
mask = input.expand(shape) == ignore_index
# clone the src tensor and zero out ignore_index in the input
input = input.clone()
input[input == ignore_index] = 0
# scatter to get the one-hot tensor
result = torch.zeros(shape).to(input.device).scatter_(1, input, 1)
# bring back the ignore_index in the result
result[mask] = ignore_index
return result
else:
# scatter to get the one-hot tensor
return torch.zeros(shape).to(input.device).scatter_(1, input, 1)
class WeightedCrossEntropyLoss(nn.Module):
"""WeightedCrossEntropyLoss (WCE) as described in https://arxiv.org/pdf/1707.03237.pdf
"""
def __init__(self, ignore_index=0):
super(WeightedCrossEntropyLoss, self).__init__()
self.ignore_index = ignore_index
def forward(self, input, target):
weight = self._class_weights(input)
return F.cross_entropy(input, target.squeeze(), weight=weight, ignore_index=self.ignore_index)
@staticmethod
def _class_weights(input):
# normalize the input first
input = F.softmax(input, dim=1)
flattened = flatten(input)
nominator = (1. - flattened).sum(-1)
denominator = flattened.sum(-1)
class_weights = Variable(nominator / denominator, requires_grad=False)
return class_weights
class PixelWiseCrossEntropyLoss(nn.Module):
def __init__(self, class_weights=None, ignore_index=None):
super(PixelWiseCrossEntropyLoss, self).__init__()
self.register_buffer('class_weights', class_weights)
self.ignore_index = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input, target, weights):
assert target.size() == weights.size()
# normalize the input
log_probabilities = self.log_softmax(input)
# standard CrossEntropyLoss requires the target to be (NxDxHxW), so we need to expand it to (NxCxDxHxW)
target = expand_as_one_hot(target, C=input.size()[1], ignore_index=self.ignore_index)
# expand weights
weights = weights.unsqueeze(0)
weights = weights.expand_as(input)
# create default class_weights if None
if self.class_weights is None:
class_weights = torch.ones(input.size()[1]).float().to(input.device)
else:
class_weights = self.class_weights
# resize class_weights to be broadcastable into the weights
class_weights = class_weights.view(1, -1, 1, 1, 1)
# multiply weights tensor by class weights
weights = class_weights * weights
# compute the losses
result = -weights * target * log_probabilities
# average the losses
return result.mean()
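# --- Added usage sketch (not part of the original module) ---
# Minimal, illustrative example of WeightedCrossEntropyLoss on a toy batch;
# the shapes, class count and ignore_index value below are assumptions made
# for demonstration only.
if __name__ == "__main__":
    logits = torch.randn(2, 3, 8, 8)          # (N, C, H, W) raw network outputs
    labels = torch.randint(0, 3, (2, 8, 8))   # (N, H, W) integer class labels
    criterion = WeightedCrossEntropyLoss(ignore_index=0)
    loss = criterion(logits, labels)
    print("weighted cross entropy:", loss.item())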
| 36.690265
| 112
| 0.660637
| 2,218
| 0.534973
| 0
| 0
| 348
| 0.083936
| 0
| 0
| 1,454
| 0.350699
|
62c70fbd4dd1990a1151426895ed667c88dc7b19
| 1,020
|
py
|
Python
|
AlgorithmsPractice/python/20_simple_Valid Parenthese.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 58
|
2019-03-03T04:42:23.000Z
|
2022-01-13T04:36:31.000Z
|
AlgorithmsPractice/python/20_simple_Valid Parenthese.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | null | null | null |
AlgorithmsPractice/python/20_simple_Valid Parenthese.py
|
YangXiaoo/NoteBook
|
37056acad7a05b876832f72ac34d3d1a41e0dd22
|
[
"CNRI-Python",
"RSA-MD",
"CECILL-B"
] | 28
|
2019-08-11T01:25:00.000Z
|
2021-08-22T06:46:06.000Z
|
'''
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
'''
# 2018-6-17
# Valid Parenthese
# Stack
class Solution:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
pars = [None]
parmap = {')': '(', '}': '{', ']': '['}
for c in s:
print(c,pars)
if c in parmap and parmap[c] == pars[len(pars)-1]:
pars.pop()
else:
pars.append(c)
return len(pars) == 1
# test
s = ")(([])[]{}"
test = Solution()
res = test.isValid(s)
print(res)
| 18.888889
| 118
| 0.540196
| 392
| 0.383562
| 0
| 0
| 0
| 0
| 0
| 0
| 641
| 0.627202
|
62c980b1f8ae0f43cf9504c637fc1f567b5d9a10
| 3,440
|
py
|
Python
|
tests/test_verify_json_response.py
|
ambertide/flask-verify
|
4ad26e67cdd9a9775d4e6ed56a281825dbcaf1cf
|
[
"MIT"
] | null | null | null |
tests/test_verify_json_response.py
|
ambertide/flask-verify
|
4ad26e67cdd9a9775d4e6ed56a281825dbcaf1cf
|
[
"MIT"
] | null | null | null |
tests/test_verify_json_response.py
|
ambertide/flask-verify
|
4ad26e67cdd9a9775d4e6ed56a281825dbcaf1cf
|
[
"MIT"
] | null | null | null |
from json import dumps
from typing import Callable
from flask.json import jsonify
from flask.wrappers import Response
from flask_verify.verify_json import verify_json_response
from pytest import raises
@verify_json_response
def _view_function_response() -> Response:
"""
To test if an endpoint that already returns a response work.
Positive test case, should work just fine.
"""
return Response(dumps({"message": "This is a JSON."}),
status=200, content_type='application/json')
@verify_json_response
def _view_function_response_failure() -> Response:
"""
To test if an endpoint that already returns a malformed response work.
Negative test case, should raise an error that will result in a 500.
"""
return Response("This is obviously not JSON.", content_type='plain/text',
status=200)
@verify_json_response
def _view_function_tuple(dictionary: dict) -> tuple[dict, int]:
"""
To test if an endpoint that returns a tuple successfully get converted
to a Response.
"""
return dictionary, 200
@verify_json_response
def _view_function_tuple_failure() -> tuple[Callable, int]:
"""
To test if an endpoint that cannot be converted into a JSON
raises a TypeException.
"""
return lambda x: 1, 20
@verify_json_response
def _view_function_tuple_pack() -> tuple[dict, int, int]:
"""
To test if an endpoint that returns too many values raises
a TypeException.
"""
return {"msg": "This is a JSON."}, 200, 0
@verify_json_response
def _view_function_invalid_status() -> tuple[dict, str]:
"""
To test if an endpoint that does not return a status code
raises a TypeException.
"""
return {"msg": "This is okay."}, "This is not a status."
def test_already_response() -> None:
"""
Test if a view function that already returns a Response object
does not get corrupted.
"""
actual = _view_function_response()
expected = Response(dumps({"message": "This is a JSON."}),
status=200, content_type='application/json')
assert actual.response == expected.response
assert actual.status_code == expected.status_code
assert actual.content_type == expected.content_type
def test_non_json_response() -> None:
"""
Test if a view function whose Response is not of type JSON
successfully raises an exception.
"""
with raises(TypeError):
_view_function_response_failure()
def test_tuple_response() -> None:
"""
Test if a view function that returns a tuple automatically
gets converted to a JSON response.
"""
dictionary = {"message": "This should be converted to JSON."}
actual = _view_function_tuple(dictionary)
expected = Response(dumps(dictionary), status=200, content_type='application/json')
assert actual.content_type == expected.content_type
assert actual.status_code == expected.status_code
assert actual.response == expected.response
def test_tuple_response_fail() -> None:
"""
Test the fail conditions of the view functions that return
tuples.
"""
fail_conditions = (_view_function_invalid_status,
_view_function_tuple_failure,
_view_function_tuple_pack)
for fail_condition in fail_conditions:
with raises(TypeError):
fail_condition()
| 30.714286
| 87
| 0.68314
| 0
| 0
| 0
| 0
| 1,594
| 0.463372
| 0
| 0
| 1,399
| 0.406686
|
62c9b5e931b6417fe4d81185cc271efbd05d9b8d
| 1,266
|
py
|
Python
|
utils/loader.py
|
zhangcheng007/face_detection_base_on_mtcnn
|
7ac1890dca16784955911b9efd0fef2c8447b9cb
|
[
"MIT"
] | 1
|
2017-10-20T06:47:22.000Z
|
2017-10-20T06:47:22.000Z
|
utils/loader.py
|
zhangcheng007/face_detection_base_on_mtcnn
|
7ac1890dca16784955911b9efd0fef2c8447b9cb
|
[
"MIT"
] | null | null | null |
utils/loader.py
|
zhangcheng007/face_detection_base_on_mtcnn
|
7ac1890dca16784955911b9efd0fef2c8447b9cb
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
import cv2
sys.path.append("../")
from utils.config import config
class TestLoader:
def __init__(self, imdb, batch_size=1, shuffle=False):
self.imdb = imdb
self.batch_size = batch_size
self.shuffle = shuffle
self.size = len(imdb)#num of data
self.cur = 0
self.data = None
self.label = None
self.reset()
self.get_batch()
def reset(self):
self.cur = 0
if self.shuffle:
np.random.shuffle(self.imdb)
def iter_next(self):
return self.cur + self.batch_size <= self.size
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
if self.iter_next():
self.get_batch()
self.cur += self.batch_size
return self.data
else:
raise StopIteration
def getindex(self):
return self.cur / self.batch_size
def getpad(self):
if self.cur + self.batch_size > self.size:
return self.cur + self.batch_size - self.size
else:
return 0
def get_batch(self):
imdb = self.imdb[self.cur]
im = cv2.imread(imdb)
self.data = im
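# --- Added usage sketch (not part of the original module) ---
# Illustrative only: the image paths below are hypothetical (cv2.imread
# returns None for missing files, so this merely shows the iteration flow).
if __name__ == '__main__':
    loader = TestLoader(['img_0001.jpg', 'img_0002.jpg'], batch_size=1, shuffle=False)
    for im in loader:
        print(type(im))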
| 23.886792
| 58
| 0.562401
| 1,169
| 0.923381
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.013428
|
62ca4cc5761e9a0a5eb64bd672778ab82de9c1ca
| 1,676
|
py
|
Python
|
wmata/rail/station.py
|
emma-k-alexandra/pywmata
|
b11e851f864defc0bda84f012dbe2a2c31c202d1
|
[
"MIT"
] | 5
|
2019-12-28T20:18:22.000Z
|
2021-09-12T17:28:00.000Z
|
wmata/rail/station.py
|
emma-k-alexandra/pywmata
|
b11e851f864defc0bda84f012dbe2a2c31c202d1
|
[
"MIT"
] | null | null | null |
wmata/rail/station.py
|
emma-k-alexandra/pywmata
|
b11e851f864defc0bda84f012dbe2a2c31c202d1
|
[
"MIT"
] | 1
|
2021-06-28T16:08:08.000Z
|
2021-06-28T16:08:08.000Z
|
"""MetroRail Station related structures
"""
from enum import Enum
from typing import Any, Optional
class Station(Enum):
"""A MetroRail Station
"""
A01 = "A01"
A02 = "A02"
A03 = "A03"
A04 = "A04"
A05 = "A05"
A06 = "A06"
A07 = "A07"
A08 = "A08"
A09 = "A09"
A10 = "A10"
A11 = "A11"
A12 = "A12"
A13 = "A13"
A14 = "A14"
A15 = "A15"
B01 = "B01"
B02 = "B02"
B03 = "B03"
B04 = "B04"
B05 = "B05"
B06 = "B06"
B07 = "B07"
B08 = "B08"
B09 = "B09"
B10 = "B10"
B11 = "B11"
B35 = "B35"
C01 = "C01"
C02 = "C02"
C03 = "C03"
C04 = "C04"
C05 = "C05"
C06 = "C06"
C07 = "C07"
C08 = "C08"
C09 = "C09"
C10 = "C10"
C12 = "C12"
C13 = "C13"
C14 = "C14"
C15 = "C15"
D01 = "D01"
D02 = "D02"
D03 = "D03"
D04 = "D04"
D05 = "D05"
D06 = "D06"
D07 = "D07"
D08 = "D08"
D09 = "D09"
D10 = "D10"
D11 = "D11"
D12 = "D12"
D13 = "D13"
E01 = "E01"
E02 = "E02"
E03 = "E03"
E04 = "E04"
E05 = "E05"
E06 = "E06"
E07 = "E07"
E08 = "E08"
E09 = "E09"
E10 = "E10"
F01 = "F01"
F02 = "F02"
F03 = "F03"
F04 = "F04"
F05 = "F05"
F06 = "F06"
F07 = "F07"
F08 = "F08"
F09 = "F09"
F10 = "F10"
F11 = "F11"
G01 = "G01"
G02 = "G02"
G03 = "G03"
G04 = "G04"
G05 = "G05"
J02 = "J02"
J03 = "J03"
K01 = "K01"
K02 = "K02"
K03 = "K03"
K04 = "K04"
K05 = "K05"
K06 = "K06"
K07 = "K07"
K08 = "K08"
N01 = "N01"
N02 = "N02"
N03 = "N03"
N04 = "N04"
N06 = "N06"
| 16.115385
| 39
| 0.406325
| 1,575
| 0.939737
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.326969
|
62cd069a9b7cd2af7aa7c84f21bfa318e3d2f590
| 27,398
|
py
|
Python
|
tests/unit/core/test_models.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 10
|
2020-04-30T12:04:35.000Z
|
2021-07-21T12:48:55.000Z
|
tests/unit/core/test_models.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 1,461
|
2020-01-23T18:20:26.000Z
|
2022-03-31T08:05:56.000Z
|
tests/unit/core/test_models.py
|
uktrade/great-cms
|
f13fa335ddcb925bc33a5fa096fe73ef7bdd351a
|
[
"MIT"
] | 3
|
2020-04-07T20:11:36.000Z
|
2020-10-16T16:22:59.000Z
|
import time
from unittest import mock
import pytest
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.http import Http404
from django.test import RequestFactory, TestCase
from django.urls import reverse
from wagtail.admin.edit_handlers import ObjectList
from wagtail.core.blocks.stream_block import StreamBlockValidationError
from wagtail.core.models import Collection
from wagtail.images import get_image_model
from wagtail.images.tests.utils import get_test_image_file
from wagtail.tests.utils import WagtailPageTests, WagtailTestUtils
from wagtail_factories import ImageFactory
from core.mixins import AuthenticatedUserRequired
from core.models import (
AbstractObjectHash,
CaseStudyRelatedPages,
Country,
CuratedListPage,
DetailPage,
IndustryTag,
InterstitialPage,
LandingPage,
LessonPlaceholderPage,
ListPage,
MagnaPageChooserPanel,
Product,
Region,
Tag,
TopicPage,
case_study_body_validation,
)
from domestic.models import DomesticDashboard, DomesticHomePage, GreatDomesticHomePage
from tests.helpers import SetUpLocaleMixin, make_test_video
from tests.unit.core import factories
from .factories import (
CaseStudyFactory,
DetailPageFactory,
LessonPlaceholderPageFactory,
StructurePageFactory,
TopicPageFactory,
)
def test_object_hash():
mocked_file = mock.Mock()
mocked_file.read.return_value = b'foo'
hash = AbstractObjectHash.generate_content_hash(mocked_file)
assert hash == 'acbd18db4cc2f85cedef654fccc4a4d8'
@pytest.mark.django_db
def test_detail_page_can_mark_as_read(client, domestic_homepage, user, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
client.force_login(user)
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=True)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
# then the progress is saved
read_hit = detail_page.page_views.get()
assert read_hit.sso_id == str(user.pk)
assert read_hit.list_page == list_page
@pytest.mark.django_db
def test_detail_page_cannot_mark_as_read(client, domestic_homepage, user, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
client.force_login(user)
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
# then the progress is saved
assert detail_page.page_views.count() == 0
@pytest.mark.django_db
def test_detail_page_anon_user_not_marked_as_read(client, domestic_homepage, domestic_site, mock_get_user_profile):
# given the user has not read a lesson
clp = factories.CuratedListPageFactory(parent=domestic_homepage)
topic_page = factories.TopicPageFactory(parent=clp)
detail_page = factories.DetailPageFactory(parent=topic_page)
client.get(detail_page.url)
# then the progress is unaffected
assert detail_page.page_views.count() == 0
@pytest.mark.django_db
def test_curated_list_page_has_link_in_context_back_to_parent(
client,
domestic_homepage,
domestic_site,
mock_export_plan_detail_list,
patch_get_user_lesson_completed,
user,
mock_get_user_profile,
):
list_page = factories.ListPageFactory(
parent=domestic_homepage, record_read_progress=False, slug='example-learning-homepage'
)
curated_list_page = factories.CuratedListPageFactory(parent=list_page, slug='example-module')
expected_url = list_page.url
assert expected_url == '/example-learning-homepage/'
client.force_login(user) # because unauthed users get redirected
resp = client.get(curated_list_page.url)
# Make a more precise string to search for: one that's marked up as a
# hyperlink target, at least
expected_link_string = f'href="{expected_url}"'
assert expected_link_string.encode('utf-8') in resp.content
@pytest.mark.django_db
@pytest.mark.parametrize(
'querystring_to_add,expected_backlink_value',
(
('', None),
('?return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F', '/export-plan/1/about-your-business/'),
(
'?return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar',
),
(
'?bam=baz&return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar', # NB: bam=baz should not be here
),
('?bam=baz&return-link=example%2Fexport-plan%2Fpath%2F%3Ffoo%3Dbar', None),
(
(
'?bam=baz&return-link=https%3A%2F%2Fphishing.example.com'
'%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar'
),
None,
),
(
(
'?bam=baz&return-link=%3A%2F%2Fphishing.example.com'
'%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar'
),
None,
),
('?bam=baz', None),
(
'?bam=baz&return-link=%2Fexport-plan%2F1%2Fabout-your-business%2F%3Ffoo%3Dbar',
'/export-plan/1/about-your-business/?foo=bar',
),
),
ids=(
'no backlink querystring present',
'backlink querystring present without encoded querystring of its own',
'backlink querystring present WITH encoded querystring of its own',
'backlink querystring present WITH encoded querystring and other args',
'backlink querystring present WITH bad payload - path does not start with / ',
'backlink querystring present WITH bad payload - path is a full URL',
'backlink querystring present WITH bad payload - path is a URL with flexible proto',
'backlink querystring NOT present BUT another querystring is',
'backlink querystring present WITH OTHER QUERYSTRING TOO',
),
)
def test_detail_page_get_context_handles_backlink_querystring_appropriately(
rf, domestic_homepage, domestic_site, user, querystring_to_add, expected_backlink_value, export_plan_data
):
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page, template='learn/detail_page.html')
lesson_page_url = detail_page.url
if querystring_to_add:
lesson_page_url += querystring_to_add
request = rf.get(lesson_page_url)
request.user = user
context = detail_page.get_context(request)
if expected_backlink_value is None:
assert 'backlink' not in context
else:
assert context.get('backlink') == expected_backlink_value
@pytest.mark.django_db
@pytest.mark.parametrize(
'backlink_path,expected',
(
(None, None),
('', None),
('/export-plan/1/about-your-business/', 'About your business'),
('/export-plan/1/business-objectives/', 'Business objectives'),
('/export-plan/1/target-markets-research/', 'Target markets research'),
('/export-plan/1/adapting-your-product/', 'Adapting your product'),
('/export-plan/1/marketing-approach/', 'Marketing approach'),
('/export-plan/1/costs-and-pricing/', 'Costs and pricing'),
('/export-plan/1/funding-and-credit/', 'Funding and credit'),
('/export-plan/1/getting-paid/', 'Getting paid'),
('/export-plan/1/travel-plan/', 'Travel plan'),
('/export-plan/1/business-risk/', 'Business risk'),
('/export-plan/1/adapting-your-product/?foo=bar', 'Adapting your product'),
('/export-plan/', None),
('/path/that/will/not/match/anything/', None),
),
ids=(
'no backlink',
'empty string backlink',
'Seeking: About your business',
'Seeking: Business objectives',
'Seeking: Target markets research',
'Seeking: Adapting your product',
'Seeking: Marketing approach',
'Seeking: Costs and pricing',
'Seeking: Getting paid',
'Seeking: Funding and credit',
'Seeking: Travel plan',
'Seeking: Business risk',
'Valid backlink with querystring does not break name lookup',
'backlink for real page that is not an export plan step',
'backlink for a non-existent page',
),
)
def test_detail_page_get_context_gets_backlink_title_based_on_backlink(
backlink_path,
expected,
en_locale,
):
detail_page = factories.DetailPageFactory(template='learn/detail_page.html')
assert detail_page._get_backlink_title(backlink_path) == expected
@pytest.mark.django_db
def test_case_study__str_method():
case_study = CaseStudyFactory(title='', summary_context='Test Co')
assert f'{case_study}' == 'Test Co'
case_study = CaseStudyFactory(title='Alice and Bob export to every continent', summary_context='Test Co')
assert f'{case_study}' == 'Alice and Bob export to every continent'
@pytest.mark.django_db
def test_case_study__timestamps():
case_study = CaseStudyFactory(summary_context='Test Co')
created = case_study.created
modified = case_study.created
assert created == modified
time.sleep(1) # Forgive this - we need to have a real, later save
case_study.save()
case_study.refresh_from_db()
assert case_study.created == created
assert case_study.modified > modified
_case_study_top_level_error_message = (
'This block must contain one Media section (with one or two items in it) and one Text section.'
)
_case_study_one_video_only_error_message = 'Only one video may be used in a case study.'
_case_study_video_order_error_message = 'The video must come before a still image.'
@pytest.mark.django_db
@pytest.mark.parametrize(
'block_type_values,exception_message',
(
(['text'], _case_study_top_level_error_message),
([('media', ('video',))], _case_study_top_level_error_message),
([], None),
(['text', 'text'], _case_study_top_level_error_message),
(['text', ('media', ('video', 'image'))], _case_study_top_level_error_message),
([('media', ('video',)), ('media', ('video',))], _case_study_top_level_error_message),
(['text', ('media', ('video', 'image')), 'text'], _case_study_top_level_error_message),
([('media', ('video', 'image')), 'text', ('media', ('video', 'image'))], _case_study_top_level_error_message),
([('media', ('video', 'image')), 'text'], None),
([('media', ('video',)), 'text'], None),
([('media', ('image',)), 'text'], None),
([('media', ('image', 'image')), 'text'], None),
([('media', ('image', 'video')), 'text'], _case_study_video_order_error_message),
([('media', ('video', 'video')), 'text'], _case_study_one_video_only_error_message),
(['quote', ('media', ('video', 'image')), 'text'], None),
(['quote', 'quote', ('media', ('video', 'image')), 'text'], None),
),
ids=(
'1. Top-level check: text node only: not fine',
'2. Top-level check: media node only: not fine',
'3. Top-level check: no nodes: fine - requirement is done at a higher level',
'4. Top-level check: two text nodes: not fine',
'5. Top-level check: text before media: not fine',
'6. Top-level check: two media nodes: not fine',
'7. Top-level check: text, media, text: not fine',
'8. Top-level check: media, text, media: not fine',
'9. media node (video and image) and text node: fine',
'10. media node (video only) and text node: fine',
'11. media node (image only) and text node: fine',
'12. media node (two images) and text node: fine',
'13. media node (image before video) and text node: not fine',
'14. media node (two videos) and text node: not fine',
'15. quote node, media node (video and image) and text node: fine',
'16. 2 quote nodes, media node (video and image) and text node: fine',
),
)
def test_case_study_body_validation(block_type_values, exception_message):
def _create_block(block_type):
mock_block = mock.Mock()
mock_block.block_type = block_type
return mock_block
value = []
for block_spec in block_type_values:
if type(block_spec) == tuple:
parent_block = _create_block(block_spec[0])
children = []
for subblock_spec in block_spec[1]:
children.append(_create_block(subblock_spec))
parent_block.value = children
value.append(parent_block)
else:
value.append(_create_block(block_spec))
if exception_message:
with pytest.raises(StreamBlockValidationError) as ctx:
case_study_body_validation(value)
assert ctx.message == exception_message
else:
# should not blow up
case_study_body_validation(value)
class LandingPageTests(WagtailPageTests):
def test_can_be_created_under_homepage(self):
self.assertAllowedParentPageTypes(
LandingPage,
{
DomesticHomePage,
GreatDomesticHomePage,
},
)
def test_can_be_created_under_landing_page(self):
self.assertAllowedSubpageTypes(LandingPage, {ListPage, InterstitialPage, DomesticDashboard})
class ListPageTests(WagtailPageTests):
def test_can_be_created_under_landing_page(self):
self.assertAllowedParentPageTypes(ListPage, {LandingPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(ListPage, {CuratedListPage})
class CuratedListPageTests(WagtailPageTests):
def test_can_be_created_under_list_page(self):
self.assertAllowedParentPageTypes(CuratedListPage, {ListPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(CuratedListPage, {TopicPage})
@pytest.mark.django_db
def test_curatedlistpage_count_detail_pages(curated_list_pages_with_lessons):
data = curated_list_pages_with_lessons
clp_1 = data[0][0]
clp_2 = data[1][0]
assert clp_1.count_detail_pages == 2 # 2 pages, placeholder ignored
assert clp_2.count_detail_pages == 1 # 1 page only, no placeholders at all
class TopicPageTests(WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(TopicPage, {CuratedListPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(
TopicPage,
{
DetailPage,
LessonPlaceholderPage,
},
)
@pytest.mark.django_db
def test_topic_page_redirects_to_module(
rf,
domestic_homepage,
domestic_site,
):
# The topic pages should never render their own content - they are basically
# scaffolding to give us a sensible page tree. As such they shouldn't be
# rendered
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = TopicPageFactory(
parent=curated_list_page,
)
# Check that we have the page tree set up correctly, else this is None
assert curated_list_page.url is not None
for page_method in ('serve', 'serve_preview'):
request = rf.get(topic_page.url)
resp = getattr(topic_page, page_method)(request)
assert resp._headers['location'] == ('Location', curated_list_page.url)
class LessonPlaceholderPageTests(WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(LessonPlaceholderPage, {TopicPage})
def test_allowed_subtypes(self):
self.assertAllowedSubpageTypes(LessonPlaceholderPage, {})
@pytest.mark.django_db
def test_context_cms_generic_page(rf, domestic_homepage):
assert 'page' in domestic_homepage.get_context(rf)
@pytest.mark.django_db
def test_placeholder_page_redirects_to_module(
rf,
domestic_homepage,
domestic_site,
):
# The topic pages should never render their own content and instead redirect
list_page = factories.ListPageFactory(parent=domestic_homepage, record_read_progress=False)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = TopicPageFactory(
parent=curated_list_page,
)
placeholder_page = LessonPlaceholderPageFactory(parent=topic_page)
# Check that we have the page tree set up correctly, else this is None
assert curated_list_page.url is not None
for page_method in ('serve', 'serve_preview'):
request = rf.get(placeholder_page.url)
resp = getattr(placeholder_page, page_method)(request)
assert resp._headers['location'] == ('Location', curated_list_page.url)
@pytest.mark.django_db
def test_structure_page_redirects_to_http404(
rf,
domestic_homepage,
domestic_site,
):
# The structure pages should never render their own content and instead return Http404
structure_page = StructurePageFactory(parent=domestic_homepage)
for page_method in ('serve', 'serve_preview'):
request = rf.get('/foo/')
with pytest.raises(Http404):
getattr(structure_page, page_method)(request)
class DetailPageTests(SetUpLocaleMixin, WagtailPageTests):
def test_parent_page_types(self):
self.assertAllowedParentPageTypes(DetailPage, {TopicPage})
def test_detail_page_creation_for_single_hero_image(self):
detail_page = DetailPageFactory(hero=[('Image', ImageFactory())])
self.assert_(detail_page, True)
def test_validation_kick_for_multiple_hero_image(self):
with pytest.raises(ValidationError):
detail_page = DetailPageFactory(hero=[('Image', ImageFactory()), ('Image', ImageFactory())])
self.assert_(detail_page, None)
@pytest.mark.django_db
def test_redirection_for_unauthenticated_user(
client,
domestic_homepage,
domestic_site,
mock_export_plan_detail_list,
patch_get_user_lesson_completed,
user,
mock_get_user_profile,
):
landing_page = factories.LandingPageFactory(parent=domestic_homepage)
interstitial_page = factories.InterstitialPageFactory(parent=landing_page)
list_page = factories.ListPageFactory(parent=domestic_homepage)
curated_list_page = factories.CuratedListPageFactory(parent=list_page)
topic_page = factories.TopicPageFactory(parent=curated_list_page)
detail_page = factories.DetailPageFactory(parent=topic_page)
pages = [
landing_page,
interstitial_page,
list_page,
curated_list_page,
detail_page,
]
for page in pages:
assert isinstance(page, AuthenticatedUserRequired)
for page in pages:
response = client.get(page.url, follow=False)
assert response.status_code == 302
assert response._headers['location'] == ('Location', f'/signup/?next={page.url}')
# Show an authenticated user can still get in there
client.force_login(user)
for page in pages:
response = client.get(page.url, follow=False)
assert response.status_code == 200
class TestImageAltRendition(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
root_collection, _ = Collection.objects.get_or_create(name='Root', depth=0)
great_image_collection = root_collection.add_child(name='Great Images')
# Create an image with alt text
AltTextImage = get_image_model() # Noqa
self.image = AltTextImage.objects.create(
title='Test image', file=get_test_image_file(), alt_text='smart alt text', collection=great_image_collection
)
def test_image_alt_rendition(self):
rendition = self.image.get_rendition('width-100')
assert rendition.alt == 'smart alt text'
assert self.image.title != rendition.alt
class TestGreatMedia(TestCase):
def test_sources_mp4_with_no_transcript(self):
media = make_test_video()
self.assertEqual(
media.sources,
[
{
'src': '/media/movie.mp4',
'type': 'video/mp4',
'transcript': None,
}
],
)
def test_sources_mp4_with_transcript(self):
media = make_test_video(transcript='A test transcript text')
self.assertEqual(
media.sources,
[
{
'src': '/media/movie.mp4',
'type': 'video/mp4',
'transcript': 'A test transcript text',
}
],
)
def test_subtitles__present(self):
media = make_test_video()
media.subtitles_en = 'Dummy subtitles content'
media.save()
self.assertTrue(media.subtitles_en)
expected = [
{
'srclang': 'en',
'label': 'English',
'url': reverse('core:subtitles-serve', args=[media.id, 'en']),
'default': False,
},
]
self.assertEqual(media.subtitles, expected)
def test_subtitles__not_present(self):
media = make_test_video()
self.assertFalse(media.subtitles_en)
self.assertEqual(media.subtitles, [])
class TestSmallSnippets(TestCase):
# Most snippets are generally small models. Move them out of this test case
# into their own if/when they gain any custom methods beyond __str__
def test_region(self):
region = Region.objects.create(name='Test Region')
self.assertEqual(region.name, 'Test Region')
self.assertEqual(f'{region}', 'Test Region') # tests __str__
def test_country(self):
region = Region.objects.create(name='Test Region')
# NB: slugs are not automatically set.
# The SlugField is about valiation, not auto-population by default
country1 = Country.objects.create(
name='Test Country',
slug='test-country',
)
country2 = Country.objects.create(
name='Other Country',
slug='other-country',
region=region,
)
country_unicode = Country.objects.create(
name='Téßt Country',
slug='tt-country',
)
self.assertEqual(country1.name, 'Test Country')
self.assertEqual(country1.slug, 'test-country')
self.assertEqual(country1.region, None)
self.assertEqual(f'{country1}', 'Test Country') # tests __str__
self.assertEqual(country2.name, 'Other Country')
self.assertEqual(country2.slug, 'other-country')
self.assertEqual(country2.region, region)
self.assertEqual(country_unicode.name, 'Téßt Country')
# by default, ASCII only - https://docs.djangoproject.com/en/2.2/ref/utils/#django.utils.text.slugify
self.assertEqual(country_unicode.slug, 'tt-country')
self.assertEqual(country_unicode.region, None)
self.assertEqual(f'{country_unicode}', 'Téßt Country') # tests __str__
def test_country_sets_slug_on_save(self):
country = Country.objects.create(name='Test Country')
country.refresh_from_db()
self.assertEqual(country.slug, 'test-country')
# Slug is set only on first save, if not already set
country_2 = Country.objects.create(name='Another Country')
self.assertEqual(country_2.slug, 'another-country')
country_2.name = 'Changed country name'
country_2.save()
country_2.refresh_from_db()
self.assertEqual(
country_2.slug,
'another-country',
'Slug should not have changed',
)
# Can specify slug up-front
country_3 = Country.objects.create(
name='Country Three',
slug='somewhere',
)
country_3.refresh_from_db()
self.assertEqual(country_3.slug, 'somewhere')
# Can't reuse slug
with self.assertRaises(IntegrityError):
Country.objects.create(name='Test Country')
def test_product(self):
product = Product.objects.create(name='Test Product')
self.assertEqual(product.name, 'Test Product')
self.assertEqual(f'{product}', 'Test Product') # tests __str__
def test_tag(self):
tag = Tag.objects.create(name='Test Tag')
self.assertEqual(tag.name, 'Test Tag')
self.assertEqual(f'{tag}', 'Test Tag') # tests __str__
def test_industry_tag(self):
tag = IndustryTag.objects.create(name='Test IndustryTag')
self.assertEqual(tag.name, 'Test IndustryTag')
self.assertEqual(f'{tag}', 'Test IndustryTag') # tests __str__
class TestMagnaPageChooserPanel(SetUpLocaleMixin, TestCase):
def setUp(self):
self.request = RequestFactory().get('/')
user = AnonymousUser() # technically, Anonymous users cannot access the admin
self.request.user = user
model = CaseStudyRelatedPages # a model with a foreign key to Page which we want to render as a page chooser
# a MagnaPageChooserPanel class that works on CaseStudyRelatedPages's 'page' field
self.edit_handler = ObjectList(
[MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage])]
).bind_to(model=model, request=self.request)
self.my_page_chooser_panel = self.edit_handler.children[0]
# build a form class containing the fields that MyPageChooserPanel wants
self.PageChooserForm = self.edit_handler.get_form_class()
# a test instance of PageChooserModel, pointing to the 'christmas' page
self.detail_page = DetailPageFactory(slug='detail-page')
self.test_instance = model.objects.create(page=self.detail_page)
self.form = self.PageChooserForm(instance=self.test_instance)
self.page_chooser_panel = self.my_page_chooser_panel.bind_to(instance=self.test_instance, form=self.form)
def test_magna_page_chooser_panel_target_models(self):
result = (
MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage])
.bind_to(model=MagnaPageChooserPanel)
.target_models()
)
self.assertEqual(result, [DetailPage, CuratedListPage, TopicPage])
def test_magna_page_chooser_panel_render_as_empty_field(self):
test_instance = CaseStudyRelatedPages()
form = self.PageChooserForm(instance=test_instance)
page_chooser_panel = self.my_page_chooser_panel.bind_to(instance=test_instance, form=form, request=self.request)
result = page_chooser_panel.render_as_field()
self.assertIn('<span class="title"></span>', result)
self.assertIn('Choose a page', result)
| 37.377899
| 120
| 0.682495
| 9,818
| 0.35819
| 0
| 0
| 15,572
| 0.568114
| 0
| 0
| 7,548
| 0.275374
|
62ce269193d7705f35038bcd87a972dc46af569a
| 4,141
|
py
|
Python
|
polyadcirc/run_framework/no_ibrun.py
|
tmiesse/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2016-03-04T19:42:32.000Z
|
2022-01-20T15:39:25.000Z
|
polyadcirc/run_framework/no_ibrun.py
|
tmiesse/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2015-04-28T05:14:28.000Z
|
2017-01-19T12:54:59.000Z
|
polyadcirc/run_framework/no_ibrun.py
|
UT-CHG/PolyADCIRC
|
a4a31dda2c2dac4cd696c0f3827dbbcea7feab33
|
[
"BSD-3-Clause"
] | 5
|
2016-01-20T00:34:47.000Z
|
2022-01-02T11:00:56.000Z
|
# Copyright (C) 2013 Lindley Graham
"""
This file provides a mpirun work-around for clusters that do not have the ibrun
command.
"""
import os, stat
class random_manningsn(object):
"""
This class is an implementation of
:class:`polyadcirc.run_framework.random_manningsn` that provides a
``mpirun`` based work-around for clusters that do not have ibrun. It is
probably system dependent and might need to be modified.
"""
def __init__(self, script_name, fdir):
self.script_name = script_name
self.base_dir = fdir
self.rf_dirs = ['dirone', 'dirtwo', 'dirthree']
def write_run_script_no_ibrun(self, num_procs, num_jobs, procs_pnode, TpN,
screenout=True, num_writers=None):
"""
Creates a bash script called ``self.script_name`` in ``self.base_dir``
and a set of rankfiles named ``rankfile_n`` to run multiple
non-interacting parallel programs in parallel.
:type num_procs: int
:param num_procs: number of processes per job
:type num_jobs: int
:param num_jobs: number of jobs to run
:param int procs_pnode: number of processors per node
:param bool screenout: flag (True -- write ``ADCIRC`` output to
screen, False -- write ``ADCIRC`` output to temp file)
:param int num_writers: number of MPI processes to dedicate solely to
the task of writing ASCII files
:param int TpN: number of tasks (processors to use) per node (wayness)
:rtype: string
:returns: name of bash script for running a batch of jobs within our
processor allotment
"""
tmp_file = self.script_name.partition('.')[0]+'.tmp'
# num_nodes = int(math.ceil(num_procs*num_jobs/float(TpN)))
with open(os.path.join(self.base_dir, self.script_name), 'w') as f:
f.write('#!/bin/bash\n')
# change i to 2*i or something like that to no use all of the
# processors on a node?
for i in xrange(num_jobs):
# write the bash file containing mpi commands
#line = 'ibrun -n {:d} -o {:d} '.format(num_procs,
# num_procs*i*(procs_pnode/TpN))
rankfile = 'rankfile{:d}'.format(i)
line = 'mpirun -machinefile $TMP/machines -rf '
line += rankfile+' -np {:d} '.format(num_procs)
line += './padcirc -I {0} -O {0} '.format(self.rf_dirs[i])
if num_writers:
line += '-W '+str(num_writers)+' '
if not screenout:
line += '> '+tmp_file
line += ' &\n'
f.write(line)
# write the rankfile containing the bindings
with open(os.path.join(self.base_dir, rankfile), 'w') as frank:
for j in xrange(num_procs):
# rank, node_num, slot_nums
if TpN == procs_pnode:
line = 'rank {:d}=n+{:d} slot={:d}'.format(j,\
(i*num_procs+j)/procs_pnode,\
(i*num_procs+j)%procs_pnode)
else:
processors_per_process = procs_pnode/TpN
line = 'rank {:d}=n+{:d} slot={:d}-{:d}'.format(j,\
(i*num_procs+j)/TpN,\
((i*num_procs+j)*processors_per_process)\
%procs_pnode,\
((i*num_procs+j)*processors_per_process)\
%procs_pnode+processors_per_process-1)
if j < num_procs-1:
line += '\n'
frank.write(line)
f.write('wait\n')
curr_stat = os.stat(os.path.join(self.base_dir, self.script_name))
os.chmod(os.path.join(self.base_dir, self.script_name),
curr_stat.st_mode | stat.S_IXUSR)
return self.script_name
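# --- Added usage sketch (not part of the original module) ---
# Illustrative only (Python 2, like the module above): the script name and
# base directory are hypothetical and the directory must already exist.
if __name__ == '__main__':
    rm = random_manningsn('run_jobs.sh', '/tmp/runs')
    rm.write_run_script_no_ibrun(num_procs=4, num_jobs=3, procs_pnode=16, TpN=16)
    # For job 0 this writes rankfile0 with lines such as
    #   rank 0=n+0 slot=0
    #   rank 1=n+0 slot=1
    # and a script line such as
    #   mpirun -machinefile $TMP/machines -rf rankfile0 -np 4 ./padcirc -I dirone -O dirone &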
| 46.52809
| 79
| 0.530065
| 3,989
| 0.963294
| 0
| 0
| 0
| 0
| 0
| 0
| 1,898
| 0.458343
|
62cfcef9c0c1bac2152ebbbdc822957a7ae21154
| 3,185
|
py
|
Python
|
automated_codeforces_registration/auto_register.py
|
Asienwald/GCI-Fedora
|
378d70e97fb6fa57d127753d3bd3d6450e5a0381
|
[
"MIT"
] | null | null | null |
automated_codeforces_registration/auto_register.py
|
Asienwald/GCI-Fedora
|
378d70e97fb6fa57d127753d3bd3d6450e5a0381
|
[
"MIT"
] | null | null | null |
automated_codeforces_registration/auto_register.py
|
Asienwald/GCI-Fedora
|
378d70e97fb6fa57d127753d3bd3d6450e5a0381
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import datetime as dt
import sys
import getpass
import re
def start_registration(handle, email, pwd1, pwd2):
print("Starting registration, browser opening shortly...\n")
driver = webdriver.Chrome()
URL_TO_CONNECT = "https://codeforces.com/register"
driver.get(URL_TO_CONNECT)
handle_input = driver.find_element_by_name("handle")
email_input = driver.find_element_by_name("email")
pwd1_input = driver.find_element_by_name("password")
pwd2_input = driver.find_element_by_name("passwordConfirmation")
handle_input.send_keys(handle)
email_input.send_keys(email)
pwd1_input.send_keys(pwd1)
pwd2_input.send_keys(pwd2)
form = driver.find_element_by_id("registerForm")
form.submit()
try:
# wait for next page to load
WebDriverWait(driver, 10).until(EC.url_changes(URL_TO_CONNECT))
current_datetime = dt.datetime.now()
driver.save_screenshot(f"{current_datetime}.png")
driver.close()
print(f"Screenshot captured! Saved as {current_datetime}.png")
print("Exiting...")
sys.exit(1)
except Exception:
print("Session Timeout. Handle might already be taken.")
print("Exiting...")
driver.close()
sys.exit(1)
def main():
print('''
_________ .___ ___________ __________ .__ __ __ .__
\_ ___ \ ____ __| _/____\_ _____/__________ ____ ____ \______ \ ____ ____ |__| _______/ |_____________ _/ |_|__| ____ ____
/ \ \/ / _ \ / __ |/ __ \| __)/ _ \_ __ \_/ ___\/ __ \ | _// __ \ / ___\| |/ ___/\ __\_ __ \__ \\ __\ |/ _ \ / \
\ \___( <_> ) /_/ \ ___/| \( <_> ) | \/\ \__\ ___/ | | \ ___// /_/ > |\___ \ | | | | \// __ \| | | ( <_> ) | \
\______ /\____/\____ |\___ >___ / \____/|__| \___ >___ > |____|_ /\___ >___ /|__/____ > |__| |__| (____ /__| |__|\____/|___| /
\/ \/ \/ \/ \/ \/ \/ \/_____/ \/ \/ \/
''')
handle = input("Enter your username/handle to use: ")
while True:
email = input("Enter your email to use: ")
if re.match('.+@{1}.+[.]{1}.+', email):
break
else:
print("Please enter a valid email.\n")
while True:
pwd1 = getpass.getpass(prompt="Enter password: ")
pwd2 = getpass.getpass(prompt="Enter password again: ")
if pwd1 != pwd2:
print("Passwords don't match.\n")
elif not re.match("^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#\$%\^&]).{5,}$", pwd1):
# registration page checks for password strength
print("Password must be >5 in length, have lowercase, uppercase, numbers and special characters.\n")
else:
break
start_registration(handle, email, pwd1, pwd2)
if __name__ == '__main__':
main()
| 38.841463
| 149
| 0.546311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,607
| 0.504553
|
62d071b99f54f8dbc2410a08457bd117463691a6
| 575
|
py
|
Python
|
3/redis.py
|
dyygtfx/python-in-action
|
86e4fe71d801a0133e488b7eb914bd9766736959
|
[
"MIT"
] | null | null | null |
3/redis.py
|
dyygtfx/python-in-action
|
86e4fe71d801a0133e488b7eb914bd9766736959
|
[
"MIT"
] | null | null | null |
3/redis.py
|
dyygtfx/python-in-action
|
86e4fe71d801a0133e488b7eb914bd9766736959
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
#coding=utf-8
# Problem 0003: Save the 200 activation codes (or coupons) generated in Problem 0001 to a Redis NoSQL database.
import uuid
import redis
def create_code(num, length):
result = []
while True:
uuid_id = uuid.uuid1()
temp = str(uuid_id).replace('-', '')[:length]
if temp not in result:
result.append(temp)
if len(result) == num:
break
return result
def save_to_redis(num_list):
r = redis.Redis(host='localhost', port=6379, db=0)
for code in num_list:
r.lpush('code',code)
save_to_redis(create_code(200,20))
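# --- Added illustration (not part of the original script) ---
# Reading the stored codes back, assuming the same local Redis instance
# used above is reachable:
r = redis.Redis(host='localhost', port=6379, db=0)
codes = r.lrange('code', 0, -1)   # list of byte strings (lpush puts newest first)
print(len(codes))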
| 23
| 56
| 0.617391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.276295
|
62d30bbb6f283ca534cedc754312f5c27d2a329b
| 141
|
py
|
Python
|
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | 2
|
2021-08-11T15:53:16.000Z
|
2021-09-13T13:43:59.000Z
|
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | null | null | null |
Tuples.py
|
PiggyAwesome/Learn-Python-Full-Course-for-Beginners-Tutorial-code
|
c164492a757cb825b73af1014f95aef884ac49af
|
[
"Unlicense"
] | null | null | null |
# Tuples
coordinates = (4, 5) # Can't be changed or modified
print(coordinates[1])
# coordinates[1] = 10
# print(coordinates[1])
| 14.1
| 51
| 0.638298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.602837
|
62d38abd8cc901db862694dad66e79677fe1126b
| 620
|
py
|
Python
|
drift/tests/test_soakit.py
|
dgnorth/drift
|
d4f52726dad1e8a1aa25d9295dd898c5514f729f
|
[
"MIT"
] | 6
|
2016-09-24T13:40:12.000Z
|
2020-04-15T18:53:47.000Z
|
drift/tests/test_soakit.py
|
dgnorth/drift
|
d4f52726dad1e8a1aa25d9295dd898c5514f729f
|
[
"MIT"
] | 4
|
2016-11-15T10:40:04.000Z
|
2020-11-26T09:48:37.000Z
|
drift/tests/test_soakit.py
|
dgnorth/drift
|
d4f52726dad1e8a1aa25d9295dd898c5514f729f
|
[
"MIT"
] | 3
|
2016-10-31T09:48:02.000Z
|
2021-05-25T09:22:07.000Z
|
import unittest
import logging
from flask import Flask
@unittest.skip("needs refactoring")
class driftTestCase(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
logging.basicConfig(level="ERROR")
self.app.testing = True
self.test_client = self.app.test_client()
def tearDown(self):
pass
def test_flasksetup(self):
# Run minimal setup
# flasksetup(self.app, options=[])
pass
def test_all(self):
# Run with all options
# flasksetup(self.app)
pass
if __name__ == "__main__":
unittest.main()
| 18.787879
| 49
| 0.624194
| 475
| 0.766129
| 0
| 0
| 511
| 0.824194
| 0
| 0
| 133
| 0.214516
|
62d4d2b9bbdb7c26c851c4cf1142dbfca5ebcb07
| 4,603
|
py
|
Python
|
dir-stats-summary.py
|
rbrt-weiler/dir-stats
|
1f9d1bccd9eef41016f2dcf8dca584e193414fc7
|
[
"Zlib"
] | null | null | null |
dir-stats-summary.py
|
rbrt-weiler/dir-stats
|
1f9d1bccd9eef41016f2dcf8dca584e193414fc7
|
[
"Zlib"
] | null | null | null |
dir-stats-summary.py
|
rbrt-weiler/dir-stats
|
1f9d1bccd9eef41016f2dcf8dca584e193414fc7
|
[
"Zlib"
] | null | null | null |
#!/usr/bin/python
# vim: set sw=4 sts=4 ts=8 et ft=python fenc=utf8 ff=unix tw=74 :
#
# SYNOPSIS
# ========
# This script analyses an INI file created by dir-stats.py and displays
# directories containing at least a configurable number of bytes (see --limit).
#
# ARGUMENTS
# =========
# Call the script without any parameters to see a usage message.
#
# OUTPUT
# ======
# The script will print an INI style list of directory names and byte
# counts to stdout.
#
# HISTORY
# =======
# 2008-Jan-22 rbrt-weiler
# * Created the script.
#
import getopt
import os.path
import sys
import time
import ConfigParser
##########################################################################
SCRIPT_VERSION = '1.0.0'
opt_limit = 50000000
opt_style = 'win'
##########################################################################
class MyRawConfigParser(ConfigParser.RawConfigParser):
def optionxform(self, optionstr):
return str(optionstr)
##########################################################################
def main():
global opt_limit, opt_style
try:
opts, args = getopt.getopt(sys.argv[1:], 'hl:s:', [ 'help',
'limit=', 'style=' ])
except getopt.GetoptError:
usage()
sys.exit(1)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(1)
if o in ('-l', '--limit'):
opt_limit = int(a)
if o in ('-s', '--style'):
if a in ('win', 'unix'):
opt_style = a
else:
usage()
sys.exit(1)
if 0 == len(args):
usage()
sys.exit(1)
else:
for arg in args:
if not os.path.isfile(arg):
print 'Error: "' + arg + '" is no file.'
sys.exit(2)
summarize(args)
##########################################################################
def summarize(filenames):
if 'win' == opt_style:
cmt_char = ';'
kv_sep = ' = '
else:
cmt_char = '#'
kv_sep = ': '
summary = { }
print cmt_char + ' created ' + time.asctime() + ' by ' \
+ 'dir-stats-summary v' + SCRIPT_VERSION
print cmt_char + ' using a limit of ' + str(opt_limit) + ' bytes'
for filename in filenames:
cfg_parser = MyRawConfigParser()
try:
f_in = open(filename, 'r')
except:
print 'Error: Cannot read file "' + filename + '".'
sys.exit(3)
cfg_parser.readfp(f_in)
f_in.close()
sections = cfg_parser.sections()
for section in sections:
options = cfg_parser.options(section)
for option in options:
try:
size = cfg_parser.getint(section, option)
except ValueError:
size = 0
(basedir, basename) = os.path.split(option)
if summary.has_key(basedir):
summary[basedir] = summary[basedir] + size
else:
summary[basedir] = size
total_dirs = 0
total_size = 0
filename = os.path.basename(filename)
dirs = summary.keys()
dirs.sort()
print
print '[' + filename + ']'
for dir in dirs:
if summary[dir] >= opt_limit:
print dir + kv_sep + str(summary[dir])
total_dirs = total_dirs + 1
total_size = total_size + summary[dir]
print cmt_char + ' ' + filename + ': ' + str(total_dirs) \
+ ' directories with ' + str(total_size) + ' bytes'
cfg_parser = None
summary = { }
##########################################################################
def usage():
print 'dir-stats-summary v' + SCRIPT_VERSION + ' - released ' \
+ 'under the Zlib license'
print 'Usage: ' + os.path.basename(sys.argv[0]) + ' [options] ' \
+ 'filename [...]'
print
print 'Options:'
print ' -h, --help'
print ' Display this usage message and exit.'
print ' -l BYTES, --limit=BYTES'
print ' Set the minimum number of bytes that triggers reporting '
print ' of a directory.'
print ' The default limit is 50000000 bytes.'
print ' -s STYLE, --style=STYLE'
print ' Define the style of the output. Accepted values are ' \
+ '"win" and "unix".'
print ' The default value is "win".'
##########################################################################
if '__main__' == __name__:
main()
sys.exit(0)
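# --- Added illustration (not part of the original script) ---
# Hypothetical input INI as produced by dir-stats.py (sections group a scan,
# options are file paths, values are sizes in bytes):
#   [scan-2008-01-22]
#   /home/user/video/a.avi = 700000000
#   /home/user/video/b.avi = 350000000
#   /home/user/docs/notes.txt = 4096
# With the default 50000000-byte limit, only /home/user/video (1050000000
# bytes) would be reported, under a section named after the input file.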
| 28.067073
| 74
| 0.473604
| 122
| 0.026504
| 0
| 0
| 0
| 0
| 0
| 0
| 1,670
| 0.362807
|
62d4d43b9a1fc71daaf8cfbde1c3396de23d1c7b
| 602
|
py
|
Python
|
command_preprocessor.py
|
Polyhistorian/Pyt-wh-orstBot
|
3e02bf9f6772d0a21b1cb7a2b9e10c053598a5ec
|
[
"MIT"
] | null | null | null |
command_preprocessor.py
|
Polyhistorian/Pyt-wh-orstBot
|
3e02bf9f6772d0a21b1cb7a2b9e10c053598a5ec
|
[
"MIT"
] | null | null | null |
command_preprocessor.py
|
Polyhistorian/Pyt-wh-orstBot
|
3e02bf9f6772d0a21b1cb7a2b9e10c053598a5ec
|
[
"MIT"
] | null | null | null |
import command_processor as command
import discord
async def process(message: discord.Message, is_owner, client: discord.Client):
if message.author.id == client.user.id:
return
if not message.content.startswith('wh!'):
return
if message.channel.type != discord.ChannelType.text:
await message.channel.send('Please use this bot from a guild channel, many features depend on '
'it.')
return
message_string = message.content.lower()
await command.process(message, message_string, is_owner, client)
| 33.444444
| 103
| 0.652824
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.910299
| 78
| 0.129568
|
62d525e622ba5d66f4d11a820ba42088e87bc06b
| 13,294
|
py
|
Python
|
pybnn/svgd_.py
|
hssandriss/pybnn
|
e878553a24ce9ebdde9088f285c7f292e4ee8885
|
[
"BSD-3-Clause"
] | null | null | null |
pybnn/svgd_.py
|
hssandriss/pybnn
|
e878553a24ce9ebdde9088f285c7f292e4ee8885
|
[
"BSD-3-Clause"
] | null | null | null |
pybnn/svgd_.py
|
hssandriss/pybnn
|
e878553a24ce9ebdde9088f285c7f292e4ee8885
|
[
"BSD-3-Clause"
] | null | null | null |
import random
import time
import numpy as np
import theano
import theano.tensor as T
from scipy.spatial.distance import pdist, squareform
from tqdm import tqdm
'''
Sample code to reproduce our results for the Bayesian neural network example.
Our settings are almost the same as Hernandez-Lobato and Adams (ICML15) https://jmhldotorg.files.wordpress.com/2015/05/pbp-icml2015.pdf
Our implementation is also based on their Python code.
p(y | W, X, \gamma) = \prod_i^N N(y_i | f(x_i; W), \gamma^{-1})
p(W | \lambda) = \prod_i N(w_i | 0, \lambda^{-1})
p(\gamma) = Gamma(\gamma | a0, b0)
p(\lambda) = Gamma(\lambda | a0, b0)
The posterior distribution is as follows:
p(W, \gamma, \lambda) = p(y | W, X, \gamma) p(W | \lambda) p(\gamma) p(\lambda)
To avoid negative values of \gamma and \lambda, we update loggamma and loglambda instead.
Copyright (c) 2016, Qiang Liu & Dilin Wang
All rights reserved.
'''
class CyclicAnnealing:
def __init__(self, C, T) -> None:
"""Annealing Schedule for the driving force
Args:
C (int): Number of cycles
T (int): Number of iterations
"""
self.p = 0.1
self.C = C
self.T = T
self.cycle_len = int(T / C)
def getCoef(self, t):
return ((t % self.cycle_len) / self.cycle_len)**self.p
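# --- Added illustration (not part of the original module) ---
# With C=10 cycles over T=100 iterations (the values used below), cycle_len
# is 10 and the coefficient ramps from 0 towards 1 within each cycle:
#   getCoef(0)  -> 0.0
#   getCoef(5)  -> 0.5 ** 0.1  ~ 0.933
#   getCoef(9)  -> 0.9 ** 0.1  ~ 0.990
#   getCoef(10) -> 0.0   (start of the next cycle)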
class svgd_bayesnn:
'''
We define a one-hidden-layer-neural-network specifically. We leave extension of deep neural network as our future work.
Input
-- X_train: training dataset, features
-- y_train: training labels
-- batch_size: sub-sampling batch size
-- max_iter: maximum iterations for the training procedure
-- M: number of particles are used to fit the posterior distribution
-- n_hidden: number of hidden units
-- a0, b0: hyper-parameters of Gamma distribution
-- master_stepsize, auto_corr: parameters of adgrad
'''
def __init__(self, X_train, y_train, batch_size=100, max_iter=1000, M=1000, n_hidden=50, a0=1, b0=0.1, master_stepsize=1e-3, beta_1=0.9, beta_2=0.99):
self.n_hidden = n_hidden
self.d = X_train.shape[1] # number of data, dimension
self.M = M
num_vars = self.d * n_hidden + n_hidden * 2 + 3 # w1: d*n_hidden; b1: n_hidden; w2 = n_hidden; b2 = 1; 2 variances
self.theta = np.zeros([self.M, num_vars]) # particles, will be initialized later
self.annealer = CyclicAnnealing(C=10, T=100)
'''
We keep the last 10% (maximum 500) of training data points for model developing
'''
size_dev = min(int(np.round(0.1 * X_train.shape[0])), 500)
X_dev, y_dev = X_train[-size_dev:], y_train[-size_dev:]
X_train, y_train = X_train[:-size_dev], y_train[:-size_dev]
'''
The data sets are normalized so that the input features and the targets have zero mean and unit variance
'''
self.std_X_train = np.std(X_train, 0)
self.std_X_train[self.std_X_train == 0] = 1
self.mean_X_train = np.mean(X_train, 0)
self.mean_y_train = np.mean(y_train)
self.std_y_train = np.std(y_train)
'''
Theano symbolic variables
Define the neural network here
'''
X = T.matrix('X') # Feature matrix
y = T.vector('y') # labels
w_1 = T.matrix('w_1') # weights between input layer and hidden layer
b_1 = T.vector('b_1') # bias vector of hidden layer
w_2 = T.vector('w_2') # weights between hidden layer and output layer
b_2 = T.scalar('b_2') # bias of output
N = T.scalar('N') # number of observations
log_gamma = T.scalar('log_gamma') # variances related parameters
log_lambda = T.scalar('log_lambda')
###
prediction = T.dot(T.nnet.relu(T.dot(X, w_1) + b_1), w_2) + b_2
''' define the log posterior distribution '''
log_lik_data = -0.5 * X.shape[0] * (T.log(2 * np.pi) - log_gamma) - (T.exp(log_gamma) / 2) * T.sum(T.power(prediction - y, 2))
log_prior_data = (a0 - 1) * log_gamma - b0 * T.exp(log_gamma) + log_gamma
log_prior_w = -0.5 * (num_vars - 2) * (T.log(2 * np.pi) - log_lambda) - (T.exp(log_lambda) / 2) * ((w_1**2).sum() + (w_2**2).sum() + (b_1**2).sum() + b_2**2) \
+ (a0 - 1) * log_lambda - b0 * T.exp(log_lambda) + log_lambda
# sub-sampling mini-batches of data, where (X, y) is the batch data, and N is the number of whole observations
log_posterior = (log_lik_data * N / X.shape[0] + log_prior_data + log_prior_w)
dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda = T.grad(log_posterior, [w_1, b_1, w_2, b_2, log_gamma, log_lambda])
# automatic gradient
logp_gradient = theano.function(
inputs=[X, y, w_1, b_1, w_2, b_2, log_gamma, log_lambda, N],
outputs=[dw_1, db_1, dw_2, db_2, d_log_gamma, d_log_lambda]
)
# prediction function
self.nn_predict = theano.function(inputs=[X, w_1, b_1, w_2, b_2], outputs=prediction)
'''
Training with SVGD
'''
# normalization
X_train, y_train = self.normalization(X_train, y_train)
N0 = X_train.shape[0] # number of observations
''' initializing all particles '''
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.init_weights(a0, b0)
# use better initialization for gamma
ridx = np.random.choice(range(X_train.shape[0]), np.min([X_train.shape[0], 1000]), replace=False)
y_hat = self.nn_predict(X_train[ridx, :], w1, b1, w2, b2)
loggamma = -np.log(np.mean(np.power(y_hat - y_train[ridx], 2)))
self.theta[i, :] = self.pack_weights(w1, b1, w2, b2, loggamma, loglambda)
grad_theta = np.zeros([self.M, num_vars]) # gradient
        # Adam-style optimizer state (first and second moment estimates)
eps = 1e-6
v = 0
m = 0
pbar = tqdm(range(max_iter), ncols=150)
for iter in pbar:
# sub-sampling
batch = [i % N0 for i in range(iter * batch_size, (iter + 1) * batch_size)]
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
dw1, db1, dw2, db2, dloggamma, dloglambda = logp_gradient(X_train[batch, :], y_train[batch], w1, b1, w2, b2, loggamma, loglambda, N0)
grad_theta[i, :] = self.pack_weights(dw1, db1, dw2, db2, dloggamma, dloglambda)
# calculating the kernel matrix
kxy, dxkxy = self.svgd_kernel(h=-1)
current_coef = self.annealer.getCoef(iter)
grad_theta = (current_coef * np.matmul(kxy, grad_theta) + dxkxy) / self.M # \Phi(x)
            # Adam-style update with bias correction
if iter == 0:
m = m + grad_theta
v = v + np.multiply(grad_theta, grad_theta)
m_ = m
v_ = v
else:
m = beta_1 * m + (1 - beta_1) * grad_theta
v = beta_2 * v + (1 - beta_2) * np.multiply(grad_theta, grad_theta)
m_ = m / (1 - beta_1**iter)
v_ = v / (1 - beta_2**iter)
adj_grad = np.divide(m_, eps + np.sqrt(v_))
self.theta = self.theta + master_stepsize * adj_grad
pbar.set_description(f"Current Coef {current_coef:.3f}")
'''
Model selection by using a development set
'''
X_dev = self.normalization(X_dev)
val = [0 for _ in range(self.M)]
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
pred_y_dev = self.nn_predict(X_dev, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
# likelihood
def f_log_lik(loggamma):
return np.sum(np.log(np.sqrt(np.exp(loggamma)) / np.sqrt(2 * np.pi) * np.exp(-1 * (np.power(pred_y_dev - y_dev, 2) / 2) * np.exp(loggamma))))
# The higher probability is better
lik1 = f_log_lik(loggamma)
# one heuristic setting
loggamma = -np.log(np.mean(np.power(pred_y_dev - y_dev, 2)))
lik2 = f_log_lik(loggamma)
if lik2 > lik1:
self.theta[i, -2] = loggamma # update loggamma
def normalization(self, X, y=None):
X = (X - np.full(X.shape, self.mean_X_train)) / \
np.full(X.shape, self.std_X_train)
if y is not None:
y = (y - self.mean_y_train) / self.std_y_train
return (X, y)
else:
return X
'''
Initialize all particles
'''
def init_weights(self, a0, b0):
w1 = 1.0 / np.sqrt(self.d + 1) * np.random.randn(self.d, self.n_hidden)
b1 = np.zeros((self.n_hidden,))
w2 = 1.0 / np.sqrt(self.n_hidden + 1) * np.random.randn(self.n_hidden)
b2 = 0.
loggamma = np.log(np.random.gamma(a0, b0))
loglambda = np.log(np.random.gamma(a0, b0))
return (w1, b1, w2, b2, loggamma, loglambda)
'''
Calculate kernel matrix and its gradient: K, \nabla_x k
'''
def svgd_kernel(self, h=-1):
sq_dist = pdist(self.theta)
pairwise_dists = squareform(sq_dist)**2
if h < 0: # if h < 0, using median trick
h = np.median(pairwise_dists)
h = np.sqrt(0.5 * h / np.log(self.theta.shape[0] + 1))
# compute the rbf kernel
Kxy = np.exp(-pairwise_dists / h**2 / 2)
dxkxy = -np.matmul(Kxy, self.theta)
sumkxy = np.sum(Kxy, axis=1)
for i in range(self.theta.shape[1]):
dxkxy[:, i] = dxkxy[:, i] + np.multiply(self.theta[:, i], sumkxy)
dxkxy = dxkxy / (h**2)
return (Kxy, dxkxy)
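    # Note (added for clarity): when h < 0 the bandwidth follows the median trick,
    # h^2 = med / (2 * log(M + 1)) with med the median of the squared pairwise
    # distances; the loop above assembles the repulsive SVGD term
    # sum_j k(x_i, x_j) * (x_i - x_j) / h^2 for every particle x_i.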
'''
Pack all parameters in our model
'''
def pack_weights(self, w1, b1, w2, b2, loggamma, loglambda):
params = np.concatenate([w1.flatten(), b1, w2, [b2], [loggamma], [loglambda]])
return params
'''
Unpack all parameters in our model
'''
def unpack_weights(self, z):
w = z
w1 = np.reshape(w[:self.d * self.n_hidden], [self.d, self.n_hidden])
b1 = w[self.d * self.n_hidden:(self.d + 1) * self.n_hidden]
w = w[(self.d + 1) * self.n_hidden:]
w2, b2 = w[:self.n_hidden], w[-3]
# the last two parameters are log variance
loggamma, loglambda = w[-2], w[-1]
return (w1, b1, w2, b2, loggamma, loglambda)
'''
Evaluating testing rmse and log-likelihood, which is the same as in PBP
Input:
-- X_test: unnormalized testing feature set
-- y_test: unnormalized testing labels
'''
def evaluation(self, X_test, y_test):
# normalization
X_test = self.normalization(X_test)
# average over the output
pred_y_test = np.zeros([self.M, len(y_test)])
prob = np.zeros([self.M, len(y_test)])
'''
Since we have M particles, we use a Bayesian view to calculate rmse and log-likelihood
'''
for i in range(self.M):
w1, b1, w2, b2, loggamma, loglambda = self.unpack_weights(self.theta[i, :])
pred_y_test[i, :] = self.nn_predict(X_test, w1, b1, w2, b2) * self.std_y_train + self.mean_y_train
prob[i, :] = np.sqrt(np.exp(loggamma)) / np.sqrt(2 * np.pi) * np.exp(-1 *
(np.power(pred_y_test[i, :] - y_test, 2) / 2) * np.exp(loggamma))
pred = np.mean(pred_y_test, axis=0)
# evaluation
svgd_rmse = np.sqrt(np.mean((pred - y_test)**2))
svgd_ll = np.mean(np.log(np.mean(prob, axis=0)))
return (svgd_rmse, svgd_ll)
if __name__ == '__main__':
print('Theano', theano.version.version) # our implementation is based on theano 0.8.2
np.random.seed(1)
''' load data file '''
data = np.loadtxt('../data/boston_housing')
# Please make sure that the last column is the label and the other columns are features
X_input = data[:, range(data.shape[1] - 1)]
y_input = data[:, data.shape[1] - 1]
''' build the training and testing data set'''
train_ratio = 0.9 # We create the train and test sets with 90% and 10% of the data
permutation = np.arange(X_input.shape[0])
random.shuffle(permutation)
size_train = int(np.round(X_input.shape[0] * train_ratio))
index_train = permutation[0: size_train]
index_test = permutation[size_train:]
X_train, y_train = X_input[index_train, :], y_input[index_train]
X_test, y_test = X_input[index_test, :], y_input[index_test]
start = time.time()
''' Training Bayesian neural network with SVGD '''
batch_size, n_hidden, max_iter = 100, 50, 2000 # max_iter is a trade-off between running time and performance
svgd = svgd_bayesnn(X_train, y_train, batch_size=batch_size, n_hidden=n_hidden, max_iter=max_iter)
svgd_time = time.time() - start
svgd_rmse, svgd_ll = svgd.evaluation(X_test, y_test)
print('SVGD', svgd_rmse, svgd_ll, svgd_time)
| 40.407295
| 168
| 0.578682
| 10,976
| 0.825636
| 0
| 0
| 0
| 0
| 0
| 0
| 4,084
| 0.307206
|
62d7219219d48ca548a710f2a1aee166cd73d83e
| 3,172
|
py
|
Python
|
poi_mining/api/server.py
|
yummydeli/machine_learning
|
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
|
[
"MIT"
] | 1
|
2019-09-29T13:36:29.000Z
|
2019-09-29T13:36:29.000Z
|
poi_mining/api/server.py
|
yummydeli/machine_learning
|
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
|
[
"MIT"
] | null | null | null |
poi_mining/api/server.py
|
yummydeli/machine_learning
|
54471182ac21ef0eee26557a7bd6f3a3dc3a09bd
|
[
"MIT"
] | null | null | null |
#coding:utf-8
################################################################################
#
### Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved
#
##################################################################################
"""
This module provide configure file management service in i18n environment.
Authors: wangdia01(wangdian01@baidu.com)
Date: 2015/07/14
"""
import urllib2
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import datetime
from input import RequestData
from output import ResponseData
from processer import Processer
from tornado.options import define
from tornado.options import options
from log import EasyLog
define("port", default=8881, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
"""
    Service handler class.
"""
logger = EasyLog()
logger.init_log("../logs/poi")
def initialize(self):
"""
        Initialization.
        """
        # Initialize logging
        # Initialize data objects
self.request_data = RequestData()
self.response_data = ResponseData()
self.processer = Processer(self.logger)
def get(self):
"""
        Handle GET requests.
"""
return self.run()
def post(self):
"""
        Handle POST requests.
"""
return self.run()
def run(self):
"""
        Handle the request (shared by GET and POST).
"""
        # Parse the incoming parameters
self.request_data.reset()
self.request_data.aid = self.get_argument('globalId')
self.request_data.content = urllib2.unquote(str(self.get_argument('content')))
self.request_data.city_id = self.get_argument('city_id', default="")
self.request_data.city_name = self.get_argument('city_name', default="")
self.request_data.debug = False if self.get_argument('debug', "false") == 'false' else True
#self.log_req()
        # Build the outgoing response data
self.response_data.reset()
start_time = datetime.datetime.now()
self.processer.run(self.request_data, self.response_data)
end_time = datetime.datetime.now()
        run_time = "Running time: " + str(end_time - start_time)
MainHandler.logger.info(run_time)
#self.log_res()
self.write(self.response_data.package())
def log_req(self):
"""
        Log the request information.
"""
ip = self.request.remote_ip
path = self.request.uri
#body = self.request.body
body = self.request_data.tostr()
request = "Request[" + ip + "]" + "[" + path + "]" + "[" + body + "]"
MainHandler.logger.info(request)
def log_res(self):
"""
        Log the response information.
"""
ip = self.request.remote_ip
path = self.request.uri
body = self.response_data.tostr()
response = "Response[" + ip + "]" + "[" + path + "]" + "[" + body + "]"
MainHandler.logger.info(response)
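# Illustrative request (parameter values are made up, added for clarity):
#   curl "http://localhost:8881/feed/poiRecognize?globalId=1&content=some%20text"
# `globalId` and `content` are required by run(); `city_id`, `city_name` and
# `debug` are optional.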
if __name__ == "__main__":
tornado.options.parse_command_line()
Handlers=[(r"/feed/poiRecognize", MainHandler),]
application = tornado.web.Application(Handlers)
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| 28.836364
| 99
| 0.584489
| 2,213
| 0.670606
| 0
| 0
| 0
| 0
| 0
| 0
| 998
| 0.302424
|
62d7ffd0472b4eb45907da6224fc3b2b392b8416
| 238
|
py
|
Python
|
python/7kyu/even_numbers_in_an_array.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 3
|
2021-06-08T01:57:13.000Z
|
2021-06-26T10:52:47.000Z
|
python/7kyu/even_numbers_in_an_array.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | null | null | null |
python/7kyu/even_numbers_in_an_array.py
|
Sigmanificient/codewars
|
b34df4bf55460d312b7ddf121b46a707b549387a
|
[
"MIT"
] | 2
|
2021-06-10T21:20:13.000Z
|
2021-06-30T10:13:26.000Z
|
"""Kata url: https://www.codewars.com/kata/5a431c0de1ce0ec33a00000c."""
from typing import List
def even_numbers(arr: List[int], n: int) -> List[int]:
    evens: List[int] = [x for x in arr if not x % 2]
    return evens[len(evens) - n:]
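# Example (added for clarity): even_numbers([1, 2, 3, 4, 5, 6, 7, 8, 9], 3) == [4, 6, 8]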
| 26.444444
| 71
| 0.655462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.298319
|
62d8f79aae34a225a888d00382e4743afa82bca2
| 10,997
|
py
|
Python
|
backend/lambda_functions/arcgis_loader/arcgis_loader.py
|
GispoCoding/tarmo
|
064eead90991fb2836173b647282e044dfa06c5a
|
[
"MIT"
] | null | null | null |
backend/lambda_functions/arcgis_loader/arcgis_loader.py
|
GispoCoding/tarmo
|
064eead90991fb2836173b647282e044dfa06c5a
|
[
"MIT"
] | 92
|
2022-01-27T08:05:09.000Z
|
2022-03-31T06:54:46.000Z
|
backend/lambda_functions/arcgis_loader/arcgis_loader.py
|
GispoCoding/tarmo
|
064eead90991fb2836173b647282e044dfa06c5a
|
[
"MIT"
] | null | null | null |
import datetime
import json
from typing import Any, Dict, Optional
import requests
from shapely.geometry import (
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
shape,
)
from sqlalchemy.types import BOOLEAN, DATE
from .base_loader import (
LOGGER,
BaseLoader,
Event,
FeatureCollection,
KoosteBase,
Response,
base_handler,
)
class ArcGisLoader(BaseLoader):
# We must support multiple ArcGIS REST sources, with different urls and different
# layers to import from each service. Also, data from multiple layers might be
# joined to the same table, if the schemas fit.
TABLE_NAMES = {
"museovirastoarcrest_metadata": {
"WFS/MV_KulttuuriymparistoSuojellut:Muinaisjaannokset_piste": "museovirastoarcrest_muinaisjaannokset", # noqa
"WFS/MV_KulttuuriymparistoSuojellut:RKY_piste": "museovirastoarcrest_rkykohteet", # noqa
},
"syke_metadata": {
"SYKE/SYKE_SuojellutAlueet:Natura 2000 - SAC Manner-Suomi aluemaiset": "syke_natura2000", # noqa
"SYKE/SYKE_SuojellutAlueet:Natura 2000 - SPA Manner-Suomi": "syke_natura2000", # noqa
"SYKE/SYKE_SuojellutAlueet:Natura 2000 - SCI Manner-Suomi": "syke_natura2000", # noqa
"SYKE/SYKE_SuojellutAlueet:Valtion maiden luonnonsuojelualueet": "syke_valtionluonnonsuojelualueet", # noqa
},
}
# Each metadata table contains the URL of each arcgis source
METADATA_TABLE_NAME = list(TABLE_NAMES.keys())
FIELD_NAMES = {
"kohdenimi": "name",
"nimiSuomi": "name",
"Nimi": "name",
"TyyppiNimi": "infoFi",
"kunta": "cityName",
"tyyppi": "type_name",
"alatyyppi": "type_name",
"url": "www",
}
DEFAULT_PROJECTION = 4326
def __init__(
self, connection_string: str, layers_to_include: Optional[dict] = None, **kwargs
) -> None:
super().__init__(connection_string, **kwargs)
self.layers_to_include = layers_to_include if layers_to_include else {}
for metadata_table in self.METADATA_TABLE_NAME:
if not layers_to_include:
self.layers_to_include[metadata_table] = self.metadata_row[
metadata_table
].layers_to_include
LOGGER.debug("ArcGIS loader initialized")
def get_arcgis_query_params(self) -> dict:
params = {
"inSR": self.DEFAULT_PROJECTION,
"outSR": self.DEFAULT_PROJECTION,
"units": "esriSRUnit_Meter",
"geometryType": "esriGeometryPoint",
"spatialRel": "esriSpatialRelIntersects",
"outFields": "*",
# Arcgis messes up unicode if we try to request geojson.
# So we're stuck with their proprietary json.
"f": "json",
}
if self.point_radius and self.point_of_interest:
params["distance"] = self.point_radius * 1000
params["geometry"] = json.dumps(
{"x": self.point_of_interest.x, "y": self.point_of_interest.y}
)
LOGGER.debug(f"ArcGIS query params: {params}")
return params
def get_arcgis_service_url(self, arcgis_url: str, service_name: str) -> str:
if arcgis_url[-1] != "/":
arcgis_url += "/"
url = f"{arcgis_url}{service_name}/MapServer?f=json"
return url
def get_arcgis_query_url(
self, arcgis_url: str, service_name: str, layer_number: str
) -> str:
if arcgis_url[-1] != "/":
arcgis_url += "/"
url = f"{arcgis_url}{service_name}/MapServer/{layer_number}/query"
return url
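    # Illustrative result (hypothetical host, added for clarity):
    #   get_arcgis_query_url("https://example.com/arcgis/rest/services",
    #                        "WFS/MV_KulttuuriymparistoSuojellut", "3")
    #   -> "https://example.com/arcgis/rest/services/WFS/MV_KulttuuriymparistoSuojellut/MapServer/3/query"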
def get_geojson(self, arcgis_type: str, feature: dict) -> dict:
# geojsonify arcgis json
geojson = {}
geojson["properties"] = feature["attributes"]
if arcgis_type == "esriGeometryPoint":
geojson["geometry"] = {
"type": "Point",
"coordinates": [
feature["geometry"]["x"],
feature["geometry"]["y"],
],
}
# TODO: support line geometries
elif arcgis_type == "esriGeometryPolygon":
rings = feature["geometry"]["rings"]
# Oh great. Arcgis doesn't differentiate outer and inner rings.
# Would be too easy if it did.
# Let's assume the first ring is always an outer ring and go from there.
outer_rings = [[rings[0]]]
for ring in rings[1:]:
for outer_ring in outer_rings:
if Polygon(ring).within(Polygon(outer_ring[0])):
# we have an inner ring, hooray
outer_ring.append(ring)
break
else:
# we have an outer ring, yippee
outer_rings.append([ring])
geojson["geometry"] = {
"type": "MultiPolygon",
"coordinates": outer_rings,
}
return geojson
def get_features(self) -> FeatureCollection: # type: ignore[override]
data = FeatureCollection(
features=[],
crs={"type": "name", "properties": {"name": self.DEFAULT_PROJECTION}},
)
params = self.get_arcgis_query_params()
for metadata_table, services in self.layers_to_include.items():
url = self.api_url[metadata_table]
for service_name, layers in services.items():
# we have to find out the layer ids from layer names
r = requests.get(
self.get_arcgis_service_url(url, service_name), headers=self.HEADERS
)
# Some arcgis services might be down for maintenance. Don't let this
# get in the way of importing other data.
if r.status_code == 503:
LOGGER.warn(
f"ArcGIS service {url}/{service_name} is down at the "
"moment. Skipping this service."
)
continue
else:
r.raise_for_status()
LOGGER.debug(
f"Service {url}/{service_name} reached, querying layers..."
)
layer_list = r.json()["layers"]
for layer_name in layers:
layer_id = [
layer["id"]
for layer in layer_list
if layer["name"] == layer_name
][0]
LOGGER.debug(f"Querying layer {layer_name}...")
r = requests.get(
self.get_arcgis_query_url(url, service_name, layer_id),
params=params,
headers=self.HEADERS,
)
r.raise_for_status()
LOGGER.debug(f"Got results for layer {layer_name}...")
result = r.json()
layer_features = result["features"]
geometry_type = result["geometryType"]
for feature in layer_features:
feature = self.get_geojson(geometry_type, feature)
feature["properties"]["source"] = metadata_table
feature["properties"]["service"] = service_name
feature["properties"]["layer"] = layer_name
data["features"].append(feature)
return data
def clean_props(self, props: Dict[str, Any], table_name: str) -> dict:
# Get rid of empty fields, they might not go well with the database.
cleaned = {key: value for key, value in props.items() if value is not None}
# Clean values of extra characters too
for key, value in cleaned.items():
if type(value) is str:
cleaned[key] = value.rstrip(", ")
# Clean boolean and date fields
table_cls = getattr(KoosteBase.classes, table_name)
boolean_fields = [
c.name for c in table_cls.__table__.columns if type(c.type) is BOOLEAN
]
date_fields = [
c.name for c in table_cls.__table__.columns if type(c.type) is DATE
]
for key in boolean_fields:
if key in cleaned.keys():
cleaned[key] = True if cleaned[key] in {"K", "k", "true"} else False
for key in date_fields:
if key in cleaned.keys():
# dates are in posix timestamps with millisecond precision
cleaned[key] = datetime.date.fromtimestamp(cleaned[key] / 1000)
return cleaned
def get_feature(self, element: Dict[str, Any]) -> Optional[dict]: # type: ignore[override] # noqa
props = element["properties"]
source = props.pop("source")
service = props.pop("service")
layer = props.pop("layer")
table_name = self.TABLE_NAMES[source][":".join([service, layer])]
geometry = shape(element["geometry"])
if isinstance(geometry, Point):
geom = MultiPoint([geometry])
elif isinstance(geometry, MultiPoint):
geom = MultiPoint(geometry)
elif isinstance(geometry, LineString):
geom = MultiLineString([geometry])
elif isinstance(geometry, MultiLineString):
geom = MultiLineString(geometry)
elif isinstance(geometry, Polygon):
geom = MultiPolygon([geometry])
elif isinstance(geometry, MultiPolygon):
geom = MultiPolygon(geometry)
else:
# Unsupported geometry type
return None
# Do not save any invalid or empty features
if geom.is_empty:
return None
props = self.clean_props(props, table_name)
# Rename and/or combine desired fields
for origin_name, tarmo_name in self.FIELD_NAMES.items():
if origin_name in props.keys():
value = props.pop(origin_name)
if tarmo_name in props.keys():
props[tarmo_name] += f": {value}"
else:
# Seems urls are not always complete urls after all :(
if value.startswith("www."):
value = "https://" + value
# Caps may or may not be present, do not override them
if value[0].islower() and not value.startswith("http"):
value = value.capitalize()
props[tarmo_name] = value
flattened = {
**props,
"geom": geom.wkt,
"table": table_name,
"deleted": False,
}
return flattened
def handler(event: Event, _) -> Response:
"""Handler which is called when accessing the endpoint."""
return base_handler(event, ArcGisLoader)
| 40.430147
| 122
| 0.557516
| 10,436
| 0.948986
| 0
| 0
| 0
| 0
| 0
| 0
| 3,061
| 0.278349
|
62da8796a3106e941b0e8eec2b3eb3b47d77106e
| 4,123
|
py
|
Python
|
excel_handler.py
|
Jason2031/EMailResponder
|
af9be4bd9dbd38f2ba4ea934a40627774766c8ae
|
[
"MIT"
] | null | null | null |
excel_handler.py
|
Jason2031/EMailResponder
|
af9be4bd9dbd38f2ba4ea934a40627774766c8ae
|
[
"MIT"
] | null | null | null |
excel_handler.py
|
Jason2031/EMailResponder
|
af9be4bd9dbd38f2ba4ea934a40627774766c8ae
|
[
"MIT"
] | null | null | null |
import os
import yaml
import xlrd
from openpyxl import load_workbook
from util_func import securely_check_dir
class ExcelHandler:
def __init__(self, config):
self.config = config
securely_check_dir('forms')
securely_check_dir('att')
securely_check_dir('config')
self.subject = []
for item in self.config['responds'].keys():
if not item.startswith('default'):
self.subject.append(item)
self.handle_config = []
config_root = 'config'
for _, _, files in os.walk(config_root):
for file in files:
subject, _ = os.path.splitext(file)
if subject != 'top' and not subject.endswith('-old'):
with open(os.path.join(config_root, file)) as fp:
                        subject_config = yaml.safe_load(fp.read())
self.handle_config.append({'subject_name': subject, 'config': subject_config})
def handle(self):
att_root = 'att'
for subject_config in self.handle_config:
subject = subject_config['subject_name']
config = subject_config['config']
if os.path.exists(os.path.join(att_root, subject)):
for _, _, files in os.walk(os.path.join(att_root, subject)):
for f in files:
short_name, ext = os.path.splitext(f)
if not short_name.endswith('-old') and not f.startswith('.'):
workbook = load_workbook(os.path.join(att_root, subject, f))
sheet_names = config.keys()
for sheet_name in sheet_names:
from_row = config[sheet_name]['header']['row']['to'] + 1
from_column = config[sheet_name]['column']['from']
sheet = workbook[sheet_name]
content = []
tmp_work_book = xlrd.open_workbook(os.path.join(att_root, subject, f))
tmp_sheet = tmp_work_book.sheet_by_name(sheet_name)
lines = tmp_sheet.nrows
tmp_work_book.release_resources()
for i in range(from_row, lines + 1):
row = [val.value for val in sheet[i]][from_column - 1:-1]
content.append(row)
form_workbook = load_workbook(
os.path.join('forms', subject, config[sheet_name]['destination_file']))
form_sheet = form_workbook[sheet_name]
tmp_work_book = xlrd.open_workbook(
os.path.join('forms', subject, config[sheet_name]['destination_file']))
tmp_sheet = tmp_work_book.sheet_by_name(sheet_name)
lines = tmp_sheet.nrows
tmp_work_book.release_resources()
for i in range(len(content)):
for j in range(len(content[i])):
form_sheet[lines + i + 1][j].value = content[i][j]
form_workbook.save(
os.path.join('forms', subject, config[sheet_name]['destination_file']))
form_workbook.close()
workbook.close()
os.rename(os.path.join(att_root, subject, f),
os.path.join(att_root, subject, '{}{}{}'.format(short_name, '-old', ext)))
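# Illustrative shape of a per-subject YAML config, reconstructed from the keys
# accessed above (an assumption, not taken from the original repository):
#   SheetName:
#     header: {row: {to: 2}}
#     column: {from: 1}
#     destination_file: summary.xlsx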
if __name__ == '__main__':
config_file = 'config/top.yml'
if not os.path.exists(config_file):
print('No top.yml file found!')
exit(-1)
with open(config_file, encoding='utf-8') as f:
        config_file = yaml.safe_load(f.read())
excel_handler = ExcelHandler(config_file)
excel_handler.handle()
| 48.505882
| 112
| 0.494785
| 3,681
| 0.892797
| 0
| 0
| 0
| 0
| 0
| 0
| 293
| 0.071065
|
62dbe883ecb8afdfe748f21860863b240087b5b4
| 564
|
py
|
Python
|
setup.py
|
gregory-halverson/crs
|
3fc7b68b347fec29e977e150e15841b16ec38647
|
[
"MIT"
] | null | null | null |
setup.py
|
gregory-halverson/crs
|
3fc7b68b347fec29e977e150e15841b16ec38647
|
[
"MIT"
] | null | null | null |
setup.py
|
gregory-halverson/crs
|
3fc7b68b347fec29e977e150e15841b16ec38647
|
[
"MIT"
] | null | null | null |
from os.path import join
from os.path import abspath
from os.path import dirname
from distutils.core import setup
__author__ = 'Gregory Halverson'
NAME = 'crs'
EMAIL = 'gregory.halverson@gmail.com'
URL = 'http://github.com/gregory-halverson/crs'
with open(join(abspath(dirname(__file__)), NAME, 'version.txt')) as f:
__version__ = f.read()
setup(
name=NAME,
version=__version__,
description="Geographic Coordinate Reference System Encapsulation and Conversion",
author=__author__,
author_email=EMAIL,
url=URL,
packages=['crs']
)
| 24.521739
| 86
| 0.728723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 181
| 0.320922
|
62dc5a004b7115829f44a8eadc00ed4081475f1f
| 161
|
py
|
Python
|
src/libs/django/utils/request.py
|
antiline/jun2
|
00928cea1f4b8cd6634cf9a1ae6dc19c95d0e54c
|
[
"MIT"
] | null | null | null |
src/libs/django/utils/request.py
|
antiline/jun2
|
00928cea1f4b8cd6634cf9a1ae6dc19c95d0e54c
|
[
"MIT"
] | 17
|
2019-06-24T14:11:49.000Z
|
2021-06-04T22:19:59.000Z
|
src/libs/django/utils/request.py
|
tabetaku/roots
|
8a9f91b8b0e0b64a85db2898a537b12be65de753
|
[
"MIT"
] | null | null | null |
from ipware.ip import get_ip
from ipware.utils import is_private_ip
def is_private_ip_from_request(request) -> bool:
return is_private_ip(get_ip(request))
| 23
| 48
| 0.807453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62dcdfc108fcc269a77defa004067921ebd5f696
| 1,067
|
py
|
Python
|
sammba/registration/tests/test_base.py
|
salma1601/sammba-mri
|
c3c79ed806a4e5ce3524bc6053bf0c3ff1444113
|
[
"CECILL-B"
] | null | null | null |
sammba/registration/tests/test_base.py
|
salma1601/sammba-mri
|
c3c79ed806a4e5ce3524bc6053bf0c3ff1444113
|
[
"CECILL-B"
] | null | null | null |
sammba/registration/tests/test_base.py
|
salma1601/sammba-mri
|
c3c79ed806a4e5ce3524bc6053bf0c3ff1444113
|
[
"CECILL-B"
] | null | null | null |
import os
from nose import with_setup
from nose.tools import assert_true
import nibabel
from nilearn.datasets.tests import test_utils as tst
from nilearn.image import index_img
from sammba.registration import base
from sammba import testing_data
from nilearn._utils.niimg_conversions import _check_same_fov
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_warp():
anat_file = os.path.join(os.path.dirname(testing_data.__file__),
'anat.nii.gz')
func_file = os.path.join(os.path.dirname(testing_data.__file__),
'func.nii.gz')
func_file0 = os.path.join(tst.tmpdir, 'mean_func.nii.gz')
func_img0 = index_img(func_file, 0)
func_img0.to_filename(func_file0)
registered_anat_oblique_file, mat_file =\
base._warp(anat_file, func_file0, write_dir=tst.tmpdir,
caching=False, verbose=False)
assert_true(_check_same_fov(nibabel.load(registered_anat_oblique_file),
func_img0))
assert_true(os.path.isfile(mat_file))
| 38.107143
| 75
| 0.709466
| 0
| 0
| 0
| 0
| 757
| 0.709466
| 0
| 0
| 44
| 0.041237
|
62dd03d0d913944957c2612082f29f5c840f0d43
| 555
|
py
|
Python
|
crawling_image/get_image.py
|
Lee-JH-kor/Review_Project
|
5e604f2bcdceea23740759681bdc7e5d3a7670ca
|
[
"MIT"
] | null | null | null |
crawling_image/get_image.py
|
Lee-JH-kor/Review_Project
|
5e604f2bcdceea23740759681bdc7e5d3a7670ca
|
[
"MIT"
] | null | null | null |
crawling_image/get_image.py
|
Lee-JH-kor/Review_Project
|
5e604f2bcdceea23740759681bdc7e5d3a7670ca
|
[
"MIT"
] | 1
|
2020-11-11T05:02:37.000Z
|
2020-11-11T05:02:37.000Z
|
import urllib.request
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from PIL import Image
import os
def image_poster(title_address):
url = f'{title_address}'
req = urllib.request.Request(url)
res = urllib.request.urlopen(url).read()
soup = BeautifulSoup(res, 'html.parser')
soup = soup.find("div", class_="poster")
    # Get the image URL from the img tag's src attribute
imgUrl = soup.find("img")["src"]
    # urllib.request.urlretrieve downloads the file
    # img.alt is the image's alternative text (used here as the file name)
urllib.request.urlretrieve(imgUrl, soup.find("img")["alt"] + '.jpg')
plt.show()
| 23.125
| 72
| 0.673874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.291874
|
62dd4a508db411e5b7ff314613aafdeaeb5656d2
| 376
|
py
|
Python
|
muon/__init__.py
|
WeilerP/muon
|
8e0988f07ae23be4fa913bb297ef059e5ab702a0
|
[
"BSD-3-Clause"
] | null | null | null |
muon/__init__.py
|
WeilerP/muon
|
8e0988f07ae23be4fa913bb297ef059e5ab702a0
|
[
"BSD-3-Clause"
] | null | null | null |
muon/__init__.py
|
WeilerP/muon
|
8e0988f07ae23be4fa913bb297ef059e5ab702a0
|
[
"BSD-3-Clause"
] | null | null | null |
"""Multimodal omics analysis framework"""
from ._core.mudata import MuData
from ._core import preproc as pp
from ._core import tools as tl
from ._core import plot as pl
from ._core import utils
from ._core.io import *
from ._core.config import set_options
from . import atac
from . import prot
__version__ = "0.1.0"
__mudataversion__ = "0.1.0"
__anndataversion__ = "0.1.0"
| 22.117647
| 41
| 0.755319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.164894
|
62de606ad5a0ee4725f392cc0be4a4d2ca1933b9
| 2,756
|
py
|
Python
|
recipes/recipes/goma_hello_world.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
recipes/recipes/goma_hello_world.py
|
asdfghjjklllllaaa/infra
|
8f63af54e46194cd29291813f2790ff6e986804d
|
[
"BSD-3-Clause"
] | 21
|
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
recipes/recipes/goma_hello_world.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compiles trivial C++ program using Goma.
Intended to be used as a very simple litmus test of Goma health on LUCI staging
environment. Linux and OSX only.
"""
DEPS = [
'build/goma',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/step',
'recipe_engine/time',
]
HELLO_WORLD_CPP = """
#include <iostream>
int get_number();
int main() {
std::cout << "Hello, world!" << std::endl;
std::cout << "Non-static part " << get_number() << std::endl;
return 0;
}
"""
MODULE_CPP = """
int get_number() {
return %(time)d;
}
"""
def RunSteps(api):
root_dir = api.path['tmp_base']
# TODO(vadimsh): We need to somehow pull clang binaries and use them instead
# of system-provided g++. Otherwise Goma may fall back to local execution,
# since system-provided g++ may not be whitelisted in Goma.
# One static object file and one "dynamic", to test cache hit and cache miss.
source_code = {
'hello_world.cpp': HELLO_WORLD_CPP,
'module.cpp': MODULE_CPP % {'time': int(api.time.time())},
}
for name, data in sorted(source_code.items()):
api.file.write_text('write %s' % name, root_dir.join(name), data)
api.goma.ensure_goma(client_type='candidate')
api.goma.start()
gomacc = api.goma.goma_dir.join('gomacc')
out = root_dir.join('compiled_binary')
build_exit_status = None
try:
# We want goma proxy to actually hit the backends, so disable fallback to
# the local compiler.
gomacc_env = {
'GOMA_USE_LOCAL': 'false',
'GOMA_FALLBACK': 'false',
}
with api.context(env=gomacc_env):
objs = []
for name in sorted(source_code):
obj = root_dir.join(name.replace('.cpp', '.o'))
api.step(
'compile %s' % name,
[gomacc, 'g++', '-c', root_dir.join(name), '-o', obj])
objs.append(obj)
api.step('link', [gomacc, 'g++', '-o', out] + objs)
build_exit_status = 0
except api.step.StepFailure as e:
build_exit_status = e.retcode
raise e
finally:
api.goma.stop(build_exit_status=build_exit_status)
api.step('run', [out])
def GenTests(api):
yield (
api.test('linux') +
api.platform.name('linux') +
api.properties.generic(
buildername='test_builder',
mastername='test_master'))
yield (
api.test('linux_fail') +
api.platform.name('linux') +
api.properties.generic(
buildername='test_builder',
mastername='test_master') +
api.step_data('link', retcode=1))
| 25.757009
| 79
| 0.644049
| 0
| 0
| 417
| 0.151306
| 0
| 0
| 0
| 0
| 1,379
| 0.500363
|
62de74cf7251561058f563593dbf807c8c8593c6
| 16,049
|
py
|
Python
|
Nowruz_SemEval.py
|
mohammadmahdinoori/Nowruz-at-SemEval-2022-Task-7
|
d87bf033c3798ff707ba25ddffde8c46abec8bd4
|
[
"MIT"
] | 2
|
2022-03-20T02:03:53.000Z
|
2022-03-21T19:44:54.000Z
|
Nowruz_SemEval.py
|
mohammadmahdinoori/Nowruz-at-SemEval-2022-Task-7
|
d87bf033c3798ff707ba25ddffde8c46abec8bd4
|
[
"MIT"
] | null | null | null |
Nowruz_SemEval.py
|
mohammadmahdinoori/Nowruz-at-SemEval-2022-Task-7
|
d87bf033c3798ff707ba25ddffde8c46abec8bd4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Nowruz at SemEval 2022: Tackling Cloze Tests with Transformers and Ordinal Regression
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1RXkjBpzNJtc0WhhrKMjU-50rd5uSviX3
"""
import torch
import torch.nn as nn
from torch.functional import F
from datasets import Dataset
import transformers as ts
from transformers import AutoTokenizer , AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
from transformers import DataCollatorWithPadding
from transformers import create_optimizer
from transformers.file_utils import ModelOutput
from transformers.modeling_outputs import SequenceClassifierOutput
from coral_pytorch.layers import CoralLayer
from coral_pytorch.losses import coral_loss
from coral_pytorch.dataset import levels_from_labelbatch
from coral_pytorch.dataset import proba_to_label
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from scipy import stats
import sys
from data_loader import (
retrieve_instances_from_dataset,
retrieve_labels_from_dataset_for_classification,
retrieve_labels_from_dataset_for_ranking,
write_predictions_to_file,
)
"""#Preparing Data"""
def loadDataset(dataPath , labelPath=None , scoresPath=None):
dataset = pd.read_csv(dataPath, sep="\t", quoting=3)
ids , sentences , fillers = retrieve_instances_from_dataset(dataset)
#Creating dictionaries to convert datas to Huggingface Dataset
datasetDict = {
"id": ids,
"sentence": sentences,
"filler": fillers,
}
labels = None
if labelPath != None:
labels = pd.read_csv(labelPath, sep="\t", header=None, names=["Id", "Label"])
labels = retrieve_labels_from_dataset_for_classification(labels)
datasetDict["labels"] = labels
scores = None
if scoresPath != None:
scores = pd.read_csv(scoresPath, sep="\t", header=None, names=["Id", "Label"])
scores = retrieve_labels_from_dataset_for_ranking(scores)
datasetDict["scores"] = scores
    #Removing periods if the filler appears at the end of the sentence (otherwise the period would be treated as the last word piece of the filler)
    for index , _ in enumerate(fillers):
        fillers[index] = fillers[index].replace("." , "")
#Creating Huggingface Datasets from Dictionaries
dataset = Dataset.from_dict(datasetDict)
return dataset
"""#Preprocessing"""
def preprocessDataset(dataset , tokenizer):
def addToDict(dict_1 , dict_2 , columns_1=[] , columns_2=["input_ids" , "attention_mask"]):
for item_1 , item_2 in zip(columns_1 , columns_2):
dict_1[item_1] = dict_2.pop(item_2)
def mappingFunction(dataset):
outputDict = {}
cleanedSentence = dataset["sentence"].replace("\n" , " ").replace("(...)" , "").strip()
sentenceWithFiller = cleanedSentence.replace("[MASK]" , dataset["filler"].strip()).strip()
tokenized_sentence = tokenizer(sentenceWithFiller)
addToDict(outputDict , tokenized_sentence , ["input_ids" , "attention_mask"])
#Getting the index of the last word piece of the filler
if "cls_token" in tokenizer.special_tokens_map.keys():
filler_indecies = len(tokenizer(tokenizer.special_tokens_map["cls_token"] + " " + cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
elif "bos_token" in tokenizer.special_tokens_map.keys():
filler_indecies = len(tokenizer(tokenizer.special_tokens_map["bos_token"] + " " + cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
else:
filler_indecies = len(tokenizer(cleanedSentence.split("[MASK]")[0].strip() + " " + dataset["filler"].strip() , add_special_tokens=False)["input_ids"]) - 1
outputDict["filler_indecies"] = filler_indecies
return outputDict
return dataset.map(mappingFunction , batched=False)
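# Illustrative example (values are made up, added for clarity): for the sentence
# "It was a [MASK] day." with filler "sunny", mappingFunction tokenizes
# "It was a sunny day." and sets `filler_indecies` to the position of the last
# word piece of "sunny" in that tokenized sequence.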
"""#Model Definition"""
@dataclass
class CustomOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
classificationOutput: torch.FloatTensor = None
regressionOutput: torch.FloatTensor = None
class SequenceClassificationModel(nn.Module):
def __init__(self,
encoder,
dim,
use_coral=False,
use_cls=True,
supportPooledRepresentation=False,
mode="both",
num_labels=3,
num_ranks=5,
lambda_c=0.5,
lambda_r=0.5,
dropout_rate=0.2):
super().__init__()
#mode can be one of these: ["both" , "classification" , "regression"]
self.encoder = encoder
self.dim = dim
self.use_coral = use_coral
self.use_cls = use_cls
self.supportPooledRepresentation = supportPooledRepresentation
self.mode = mode
self.num_labels = num_labels
self.num_ranks = num_ranks
self.lambda_c = lambda_c
self.lambda_r = lambda_r
self.dropout_rate = dropout_rate
if self.use_cls:
self.pre_classifier = nn.Linear(self.dim*2 , self.dim , bias=True)
else:
self.pre_classifier = nn.Linear(self.dim , self.dim , bias=True)
self.dropout = nn.Dropout(p=self.dropout_rate , inplace=False)
self.regressionHead = CoralLayer(self.dim , self.num_ranks)
if use_coral:
self.classificationHead = CoralLayer(self.dim , self.num_labels)
else:
self.classificationHead = nn.Linear(self.dim , self.num_labels , bias=True)
def forward(
self,
input_ids,
attention_mask,
filler_indecies,
labels=None,
scores=None,
**args):
device = self.encoder.device
# Getting fillers representation from pre-trained transformer (encoder)
sentence_embedding = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
)
#Getting Fillers Representation
filler_tokens = sentence_embedding[0][filler_indecies[0] , filler_indecies[1]]
fillers = filler_tokens[: , 0 , :]
#Concatenating [CLS] output with Filler output if the model supports [CLS]
pooled_output = None
if self.use_cls:
if self.supportPooledRepresentation:
pooled_output = torch.concat((sentence_embedding[1] , fillers) , dim=-1)
else:
pooled_output = torch.concat((sentence_embedding[0][: , 0 , :] , fillers) , dim=-1)
else:
pooled_output = fillers
#Passing Pooled Output to another dense layer followed by activation function and dropout
pooled_output = self.pre_classifier(pooled_output)
pooled_output = nn.GELU()(pooled_output)
pooled_output = self.dropout(pooled_output)
#Passing the final output to the classificationHead and RegressionHead
classificationOutput = self.classificationHead(pooled_output)
regressionOutput = self.regressionHead(pooled_output)
totalLoss = None
classification_loss = None
regression_loss = None
#Computing classification loss
        if labels is not None and (self.mode.lower() == "both" or self.mode.lower() == "classification"):
if self.use_coral:
levels = levels_from_labelbatch(labels.view(-1) , self.num_labels).to(device)
classification_loss = coral_loss(classificationOutput.view(-1 , self.num_labels - 1) , levels.view(-1 , self.num_labels - 1))
else:
loss_fct = nn.CrossEntropyLoss()
classification_loss = loss_fct(classificationOutput.view(-1 , self.num_labels) , labels.view(-1))
#Computing regression loss
        if scores is not None and (self.mode.lower() == "both" or self.mode.lower() == "regression"):
levels = levels_from_labelbatch(scores.view(-1) , self.num_ranks).to(device)
regression_loss = coral_loss(regressionOutput.view(-1 , self.num_ranks - 1) , levels.view(-1 , self.num_ranks - 1))
if self.mode.lower() == "both" and (labels != None and scores != None):
totalLoss = (self.lambda_c * classification_loss) + (self.lambda_r * regression_loss)
elif self.mode.lower() == "classification" and labels != None:
totalLoss = classification_loss
elif self.mode.lower() == "regression" and scores != None:
totalLoss = regression_loss
outputs = torch.concat((classificationOutput , regressionOutput) , dim=-1)
finalClassificationOutput = torch.sigmoid(classificationOutput)
finalRegressionOutput = torch.sigmoid(regressionOutput)
finalClassificationOutput = proba_to_label(finalClassificationOutput.cpu().detach()).numpy()
finalRegressionOutput = torch.sum(finalRegressionOutput.cpu().detach() , dim=-1).numpy() + 1
return CustomOutput(
loss=totalLoss,
logits=outputs,
classificationOutput=finalClassificationOutput,
regressionOutput=finalRegressionOutput,
)
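# Note (added for clarity): a CORAL head for K ordered classes emits K - 1
# threshold logits. Above, the plausibility label is recovered with
# proba_to_label (the number of thresholds whose probability exceeds 0.5),
# while the rank score is 1 plus the sum of the per-threshold probabilities.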
def model_init(encoderPath=None,
dimKey=None,
customEncoder=None,
customDim=None,
mode="both",
use_coral=True,
use_cls=True,
supportPooledRepresentation=False,
freezeEmbedding=True,
num_labels=3,
num_ranks=5,
lambda_c=0.5,
lambda_r=0.5,
dropout_rate=0.2,):
encoder = ts.AutoModel.from_pretrained(encoderPath) if encoderPath != None else customEncoder
dim = encoder.config.to_dict()[dimKey] if dimKey != None else customDim
model = SequenceClassificationModel(
encoder,
dim,
use_coral=use_coral,
use_cls=use_cls,
supportPooledRepresentation=supportPooledRepresentation,
mode=mode,
num_labels=num_labels,
num_ranks=num_ranks,
lambda_c=lambda_c,
lambda_r=lambda_r,
dropout_rate=dropout_rate,
)
try:
if freezeEmbedding:
for param in model.encoder.embeddings.parameters():
param.requires_grad = False
except:
print("The embedding layer name is different in this model, try to find the name of the emebdding layer and freeze it manually")
return model
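# Illustrative call (the checkpoint name and config key below are assumptions,
# not taken from the original source):
#   model = model_init(encoderPath="roberta-base", dimKey="hidden_size")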
def makeTrainer(model,
trainDataset,
data_collator,
tokenizer,
outputsPath,
learning_rate=1.90323e-05,
scheduler="cosine",
save_steps=5000,
batch_size=8,
num_epochs=5,
weight_decay=0.00123974,
roundingType="F"):
def data_collator_fn(items , columns=[]):
data_collator_input = {
"input_ids": items[columns[0]],
"attention_mask": items[columns[1]]
}
result = data_collator(data_collator_input)
items[columns[0]] = result["input_ids"]
items[columns[1]] = result["attention_mask"]
def collate_function(items):
outputDict = {
key: [] for key in items[0].keys()
}
for item in items:
for key in item.keys():
outputDict[key].append(item[key])
data_collator_fn(outputDict , ["input_ids" , "attention_mask"])
#Removing unnecessary Items from outputDict
columns = ["sentence" , "filler" , "id"]
for item in columns:
try:
outputDict.pop(item)
except:
pass
#Adding New Columns
if "labels" in outputDict.keys():
outputDict["labels"] = torch.tensor(outputDict.pop("labels"))
if "scores" in outputDict.keys():
if roundingType == "F":
outputDict["scores"] = torch.tensor(outputDict.pop("scores") , dtype=torch.int32) - 1
elif roundingType == "R":
outputDict["scores"] = torch.tensor([round(score) for score in outputDict.pop("scores")] , dtype=torch.int32) - 1
filler_indecies = torch.tensor(outputDict.pop("filler_indecies")).view(-1 , 1)
outputDict["filler_indecies"] = (torch.arange(filler_indecies.shape[0]).view(-1 , 1) , filler_indecies)
return outputDict
training_args = TrainingArguments(
outputsPath,
learning_rate= learning_rate,
lr_scheduler_type=scheduler,
save_steps=save_steps,
per_device_train_batch_size=batch_size,
num_train_epochs=num_epochs,
weight_decay=weight_decay,
remove_unused_columns=False,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=trainDataset,
tokenizer=tokenizer,
data_collator=collate_function,
)
return trainer , collate_function
"""#Evaluating on Val Dataset"""
def evaluateModel(
model,
dataset,
collate_function,
):
model.eval()
#Passing the inputs through model
labels = []
scores = []
for item in dataset:
sample_input = collate_function([item])
outputs = model(input_ids=sample_input["input_ids"].to(model.encoder.device),
attention_mask=sample_input["attention_mask"].to(model.encoder.device),
filler_indecies=sample_input["filler_indecies"],
scores=None)
labels.append(outputs["classificationOutput"][0])
scores.append(outputs["regressionOutput"][0])
#Computing Accuracy
count = 0
correctCount = 0
for prediction , target in zip(labels , dataset["labels"]):
count += 1
correctCount += 1 if prediction == target else 0
accuracy = (correctCount / count)
#Computing Spearman
scores = np.array(scores , dtype=np.float32)
valScores = np.array(dataset["scores"] , dtype=np.float32)
spearman = stats.spearmanr(scores.reshape(-1 , 1) , valScores.reshape(-1 , 1))
return (labels , scores) , accuracy , spearman
"""#Making Predictions on Test Dataset"""
def predictOnTestDataset(
model,
dataset,
collate_function,
labelsPath=None,
scoresPath=None,
):
model.eval()
ids = []
classification_predictions = []
ranking_predictions = []
for item in dataset:
sample_input = collate_function([item])
outputs = model(input_ids=sample_input["input_ids"].to(model.encoder.device),
attention_mask=sample_input["attention_mask"].to(model.encoder.device),
filler_indecies=sample_input["filler_indecies"],
scores=None,
labels=None)
ids.append(item["id"])
classification_predictions.append(outputs["classificationOutput"][0])
ranking_predictions.append(outputs["regressionOutput"][0])
if labelsPath != None:
open(labelsPath , mode="wb")
write_predictions_to_file(labelsPath , ids , classification_predictions , "classification")
if scoresPath != None:
open(scoresPath , mode="wb")
write_predictions_to_file(scoresPath , ids , ranking_predictions , "ranking")
return ids , classification_predictions , ranking_predictions
"""#Inference"""
def inference(
model,
sentences,
fillers,
tokenizer,
collate_function
):
model.eval()
datasetDict = {
"sentence": sentences,
"filler": fillers,
}
dataset = Dataset.from_dict(datasetDict)
tokenizedDataset = preprocessDataset(dataset , tokenizer)
finalInput = collate_function(tokenizedDataset)
outputs = model(
input_ids=finalInput["input_ids"].to(model.encoder.device),
attention_mask=finalInput["attention_mask"].to(model.encoder.device),
filler_indecies=finalInput["filler_indecies"],
)
finalLabels = []
for item in outputs["classificationOutput"].reshape(-1):
if item == 0:
finalLabels.append("Implausible")
elif item == 1:
finalLabels.append("Neutral")
elif item == 2:
finalLabels.append("Plausible")
finalLabels = np.array(finalLabels)
return {
"labels": finalLabels,
"scores": outputs["regressionOutput"],
}
| 32.686354
| 210
| 0.663406
| 4,854
| 0.302449
| 0
| 0
| 223
| 0.013895
| 0
| 0
| 2,442
| 0.152159
|
62defe5f6a2a05a1164bd7391f942132d33f8a26
| 1,703
|
py
|
Python
|
fbchat/utils.py
|
Dainius14/fb-chat-bot-old
|
6bdfa07e6a423e386ed61ce67ac218d806ad38f8
|
[
"MIT"
] | 2
|
2018-04-05T14:07:16.000Z
|
2020-11-03T06:08:09.000Z
|
fbchat/utils.py
|
Dainius14/fb-chat-bot-old
|
6bdfa07e6a423e386ed61ce67ac218d806ad38f8
|
[
"MIT"
] | null | null | null |
fbchat/utils.py
|
Dainius14/fb-chat-bot-old
|
6bdfa07e6a423e386ed61ce67ac218d806ad38f8
|
[
"MIT"
] | 1
|
2018-04-05T14:17:44.000Z
|
2018-04-05T14:17:44.000Z
|
import re
import json
from time import time
from random import random
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/601.1.10 (KHTML, like Gecko) Version/8.0.5 Safari/601.1.10",
"Mozilla/5.0 (Windows NT 6.3; WOW64; ; NCT50_AAP285C84A1328) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6"
]
def now():
return int(time()*1000)
def get_json(text):
return json.loads(re.sub(r"^[^{]*", '', text, 1))
def digit_to_char(digit):
if digit < 10:
return str(digit)
return chr(ord('a') + digit - 10)
def str_base(number,base):
if number < 0:
return '-' + str_base(-number, base)
(d, m) = divmod(number, base)
if d > 0:
return str_base(d, base) + digit_to_char(m)
return digit_to_char(m)
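# Illustrative values (added for clarity): str_base(255, 16) == 'ff' and
# str_base(6, 2) == '110'.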
def generateMessageID(client_id=None):
k = now()
l = int(random() * 4294967295)
return ("<%s:%s-%s@mail.projektitan.com>" % (k, l, client_id));
def getSignatureID():
return hex(int(random() * 2147483648))
def generateOfflineThreadingID() :
ret = now()
value = int(random() * 4294967295);
string = ("0000000000000000000000" + bin(value))[-22:]
msgs = bin(ret) + string
return str(int(msgs,2))
| 36.234043
| 139
| 0.656489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 778
| 0.456841
|
62e05da86f265d8babd95a4bb39d8c5d2cf0aa4a
| 964
|
py
|
Python
|
rfhub/blueprints/api/libraries.py
|
Accruent/robotframework-hub
|
46f72d2d720e6ad1848c162e9dfd21740797a054
|
[
"Apache-2.0"
] | null | null | null |
rfhub/blueprints/api/libraries.py
|
Accruent/robotframework-hub
|
46f72d2d720e6ad1848c162e9dfd21740797a054
|
[
"Apache-2.0"
] | 90
|
2019-09-04T17:52:10.000Z
|
2021-07-01T14:01:08.000Z
|
rfhub/blueprints/api/libraries.py
|
Accruent/robotframework-hub
|
46f72d2d720e6ad1848c162e9dfd21740797a054
|
[
"Apache-2.0"
] | null | null | null |
'''
This provides the view functions for the /api/libraries endpoints
'''
import flask
from flask import current_app
class ApiEndpoint(object):
def __init__(self, blueprint):
blueprint.add_url_rule("/libraries/", view_func = self.get_libraries)
blueprint.add_url_rule("/libraries/<int:collection_id>", view_func = self.get_library)
def get_libraries(self):
kwdb = current_app.kwdb
query_pattern = flask.request.args.get('pattern', "*").strip().lower()
libraries = kwdb.get_collections(query_pattern)
return flask.jsonify(libraries=libraries)
def get_library(self, collection_id):
# if collection_id is a library _name_, redirect
print("get_library: collection_id=", collection_id)
kwdb = current_app.kwdb
collection = kwdb.get_collection(collection_id)
if collection is None:
flask.abort(404)
return flask.jsonify(collection=collection)
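# Illustrative usage (query values are made up; the docstring above states the
# blueprint serves the /api/libraries endpoints):
#   GET /api/libraries/?pattern=builtin*   -> {"libraries": [...]}
#   GET /api/libraries/42                  -> {"collection": {...}} or 404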
| 32.133333
| 94
| 0.695021
| 843
| 0.874481
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.21473
|
62e085ec76ed466edc7957012e2209ee7eb9a47a
| 131
|
py
|
Python
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 310
|
2018-11-02T10:12:33.000Z
|
2022-03-30T02:59:51.000Z
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 14
|
2018-11-08T10:09:46.000Z
|
2021-07-30T08:54:33.000Z
|
pair-ranking-cnn/utils.py
|
shinoyuki222/torch-light
|
4799805d9bcae82a9f12a574dcf9fdd838c92ee9
|
[
"MIT"
] | 152
|
2018-11-02T13:00:49.000Z
|
2022-03-28T12:45:08.000Z
|
import const
def corpora2idx(sents, ind2idx):
return [[ind2idx[w] if w in ind2idx else const.UNK for w in s] for s in sents]
| 21.833333
| 82
| 0.709924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
62e0a00977882b69ee47910edb8fb49b209ff9a7
| 1,251
|
py
|
Python
|
applications/admin/models/menu.py
|
forca-inf/forca
|
99b63c63a7aaebd6f11cb4f73ec54de54ce25986
|
[
"BSD-3-Clause"
] | 6
|
2018-01-25T01:07:55.000Z
|
2019-04-26T23:58:29.000Z
|
applications/admin/models/menu.py
|
forca-inf/forca
|
99b63c63a7aaebd6f11cb4f73ec54de54ce25986
|
[
"BSD-3-Clause"
] | null | null | null |
applications/admin/models/menu.py
|
forca-inf/forca
|
99b63c63a7aaebd6f11cb4f73ec54de54ce25986
|
[
"BSD-3-Clause"
] | 2
|
2018-02-03T02:55:56.000Z
|
2018-02-06T19:55:10.000Z
|
# ###########################################################
# ## generate menu
# ###########################################################
_a = request.application
_c = request.controller
_f = request.function
response.title = '%s %s' % (_f, '/'.join(request.args))
response.subtitle = 'admin'
response.menu = [(T('site'), _f == 'site', URL(_a,'default','site'))]
if request.args:
_t = request.args[0]
response.menu.append((T('edit'), _c == 'default' and _f == 'design',
URL(_a,'default','design',args=_t)))
response.menu.append((T('about'), _c == 'default' and _f == 'about',
URL(_a,'default','about',args=_t)))
response.menu.append((T('errors'), _c == 'default' and _f == 'errors',
URL(_a,'default','errors',args=_t)))
response.menu.append((T('versioning'),
_c == 'mercurial' and _f == 'commit',
URL(_a,'mercurial','commit',args=_t)))
if not session.authorized:
response.menu = [(T('login'), True, '')]
else:
response.menu.append((T('logout'), False,
URL(_a,'default',f='logout')))
response.menu.append((T('help'), False, URL('examples','default','index')))
| 37.909091
| 75
| 0.490807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 421
| 0.336531
|
62e0c8f94beaf1979e0b0e3755a3173a04c8a516
| 1,159
|
py
|
Python
|
lenstronomy/LightModel/Profiles/moffat.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | null | null | null |
lenstronomy/LightModel/Profiles/moffat.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | null | null | null |
lenstronomy/LightModel/Profiles/moffat.py
|
heather999/lenstronomy
|
8102fe026c1f3ba6e81d8a1f59cceb90e68430b4
|
[
"MIT"
] | null | null | null |
__author__ = 'sibirrer'
# this file contains a class to make a Moffat profile
__all__ = ['Moffat']
class Moffat(object):
"""
this class contains functions to evaluate a Moffat surface brightness profile
.. math::
I(r) = I_0 * (1 + (r/\\alpha)^2)^{-\\beta}
with :math:`I_0 = amp`.
"""
def __init__(self):
self.param_names = ['amp', 'alpha', 'beta', 'center_x', 'center_y']
self.lower_limit_default = {'amp': 0, 'alpha': 0, 'beta': 0, 'center_x': -100, 'center_y': -100}
self.upper_limit_default = {'amp': 100, 'alpha': 10, 'beta': 10, 'center_x': 100, 'center_y': 100}
def function(self, x, y, amp, alpha, beta, center_x=0, center_y=0):
"""
2D Moffat profile
:param x: x-position (angle)
:param y: y-position (angle)
:param amp: normalization
:param alpha: scale
:param beta: exponent
:param center_x: x-center
:param center_y: y-center
:return: surface brightness
"""
x_shift = x - center_x
y_shift = y - center_y
return amp * (1. + (x_shift**2+y_shift**2)/alpha**2)**(-beta)
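# Illustrative sanity check (added for clarity, not part of the original file):
# the profile equals `amp` at the center and falls off as (1 + (r/alpha)^2)^(-beta).
def _moffat_example():
    m = Moffat()
    assert m.function(0, 0, amp=1., alpha=2., beta=3.) == 1.0
    assert m.function(2, 0, amp=1., alpha=2., beta=3.) == 0.125  # (1 + 1)^(-3)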
| 28.268293
| 106
| 0.569456
| 1,055
| 0.910267
| 0
| 0
| 0
| 0
| 0
| 0
| 687
| 0.592752
|
62e0e93747dae752fc1a23adaf41a5a5edb9094b
| 1,912
|
py
|
Python
|
pypy/module/oracle/test/test_objectvar.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
pypy/module/oracle/test/test_objectvar.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
pypy/module/oracle/test/test_objectvar.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from pypy.module.oracle.test.test_connect import OracleTestBase
class AppTestObjectVar(OracleTestBase):
def test_fetch_object(self):
import datetime
cur = self.cnx.cursor()
try:
cur.execute("drop table pypy_test_objtable")
except oracle.DatabaseError:
pass
try:
cur.execute("drop type pypy_test_objtype")
except oracle.DatabaseError:
pass
try:
cur.execute("drop type pypy_test_arraytype")
except oracle.DatabaseError:
pass
cur.execute("""\
create type pypy_test_objtype as object (
numbercol number,
stringcol varchar2(60),
datecol date);
""")
cur.execute("""\
create type pypy_test_arraytype as varray(10) of number;
""")
cur.execute("""\
create table pypy_test_objtable (
objcol pypy_test_objtype,
arraycol pypy_test_arraytype)
""")
cur.execute("""\
insert into pypy_test_objtable values (
pypy_test_objtype(1, 'someText',
to_date(20070306, 'YYYYMMDD')),
pypy_test_arraytype(5, 10, null, 20))
""")
cur.execute("select objcol, arraycol from pypy_test_objtable")
objValue, arrayValue = cur.fetchone()
assert objValue.type.schema == self.cnx.username.upper()
assert objValue.type.name == "PYPY_TEST_OBJTYPE"
assert objValue.type.attributes[0].name == "NUMBERCOL"
assert isinstance(arrayValue, list)
assert arrayValue == [5, 10, None, 20]
assert objValue.NUMBERCOL == 1
assert objValue.STRINGCOL == "someText"
assert objValue.DATECOL == datetime.datetime(2007, 03, 06)
raises(AttributeError, getattr, objValue, 'OTHER')
| 37.490196
| 70
| 0.579498
| 1,846
| 0.965481
| 0
| 0
| 0
| 0
| 0
| 0
| 840
| 0.439331
|
62e1d5665a19ec6ff0058abaac2fe46b0195ec1d
| 250
|
py
|
Python
|
lecture70_practice.py
|
adwabh/python_practice
|
878aa06841ec606648eab97fe5e801f073ce0aa7
|
[
"Apache-2.0"
] | null | null | null |
lecture70_practice.py
|
adwabh/python_practice
|
878aa06841ec606648eab97fe5e801f073ce0aa7
|
[
"Apache-2.0"
] | null | null | null |
lecture70_practice.py
|
adwabh/python_practice
|
878aa06841ec606648eab97fe5e801f073ce0aa7
|
[
"Apache-2.0"
] | null | null | null |
tempratures = [10,-20, -289, 100]
def c_to_f(c):
if c<-273.15:
return ""
return c* 9/5 +32
def writeToFile(input):
with open("output.txt","a") as file:
file.write(input)
for temp in tempratures:
writeToFile(str(c_to_f(temp)))
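# Worked example of the conversion above: c_to_f(100) = 100 * 9/5 + 32 = 212.0,
# while c_to_f(-289) returns "" because -289 is below absolute zero (-273.15 C).
# A quick, optional sanity check on the console:
print(c_to_f(100))   # 212.0
print(c_to_f(-289))  # prints an empty line (physically impossible temperature)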
| 19.230769
| 40
| 0.628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.068
|
62e200f1509ce70b40b4ad9b1ff9f7adeffa7fcc
| 6,574
|
py
|
Python
|
causal_da/components/ica_torch/GCL_nonlinear_ica_train.py
|
sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer
|
05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd
|
[
"Apache-2.0"
] | null | null | null |
causal_da/components/ica_torch/GCL_nonlinear_ica_train.py
|
sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer
|
05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd
|
[
"Apache-2.0"
] | null | null | null |
causal_da/components/ica_torch/GCL_nonlinear_ica_train.py
|
sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer
|
05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from ignite.engine import Engine, Events
import torch
from .gcl_model import GeneralizedContrastiveICAModel
from .trainer_util import random_pick_wrong_target, binary_logistic_loss
from .logging_util import DummyRunLogger
# Type hinting
from typing import Callable
from torch import FloatTensor, LongTensor
BinaryCallableLoss = Callable[[FloatTensor, int], FloatTensor]
def GCL_nonlinear_ica_train(data_tensor: FloatTensor, c_src: LongTensor,
batch_size: int, max_epochs: int,
gcl_ica_model: GeneralizedContrastiveICAModel,
device: str, optimizer, epoch_callback,
final_callback, run_logger):
"""Perform generalized contrastive learning (GCL) for nonlinear independent component analysis (nonlinear ICA).
Parameters:
        data_tensor: the training data input variables (shape ``(n_sample, n_dim)``).
c_src: the auxiliary variable used as labels in the contrastive learning (shape ``(n_sample,)``).
batch_size: the batch size for training.
max_epochs: the maximum number of epochs to run the training.
gcl_ica_model: the ICA model that can be trained via GCL.
device: the device identifier (``'cpu'``: use CPU).
optimizer: the ``pytorch`` optimizer.
        epoch_callback: The callback to be called after every epoch of the training loop.
final_callback: The callback to be called at the end of the training loop.
To be called with the single argument ``None``.
run_logger: the logger to save the results.
"""
trainerbase = GCLTrainer(gcl_ica_model,
optimizer,
contrastive_coeff=1.,
balance=True,
device=device,
run_logger=run_logger)
trainer = Engine(trainerbase)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_loss(trainer):
"""Callback at the end of each epoch to record the training loss."""
trainerbase.log_training_loss(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def call_epoch_callback(trainer):
"""User-defined callback at the end of each epoch."""
epoch_callback(trainer.state.epoch)
@trainer.on(Events.COMPLETED)
def call_final_callback(_):
"""User-defined callback at the end of the training process."""
final_callback(None)
dataset = torch.utils.data.TensorDataset(data_tensor,
torch.LongTensor(c_src))
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
shuffle=True,
drop_last=True)
trainer.run(train_loader, max_epochs=max_epochs)
class GCLTrainer:
"""Trainer class conforming to the interface of the ``torch-ignite`` package."""
def __init__(self,
model,
optimizer,
contrastive_coeff: float = 1.,
contrastive_loss: BinaryCallableLoss = binary_logistic_loss,
balance: bool = True,
device: str = 'cpu',
run_logger=None):
"""Train a nonlinear ICA model by generalized contrastive learning.
Parameters:
model: the model to be trained.
optimizer: the optimizer.
contrastive_coeff: The loss can be multiplied by this coefficient to improve numerical stability.
contrastive_loss: the loss function used for contrastive training.
balance: whether to use a coefficient to stabilize the learning (the coefficient is initialized to normalize the loss to 1 in the first iteration).
            device: the device identifier to use for training.
* ``gpu_identifier``: use GPU with the identifier if available.
* ``'cpu'``: use CPU.
run_logger: the logger to save the results.
"""
self.contrastive_coeff = contrastive_coeff
self.contrastive_loss = contrastive_loss
self.balance = balance
if run_logger is None:
self.run_logger = DummyRunLogger()
else:
self.run_logger = run_logger
self.model = model
self.optimizer = optimizer
self.device = device
def __call__(self, engine, batch):
"""Perform one training iteration by back-propagation using the optimizer."""
self.model.train()
data, target = batch[0].to(self.device), batch[1].to(self.device)
self.optimizer.zero_grad()
loss = self.compute_and_backward_loss(engine, data, target)
self.optimizer.step()
return loss
def compute_and_backward_loss(self, trainer, data: FloatTensor,
target: LongTensor):
"""Compute loss and prepare the back-propagation.
Parameters:
trainer: the trainer.
data: the input data (shape ``(n_sample, n_dim)``).
target: the auxiliary variables for GCL (shape ``(n_sample,)``).
"""
pos_output = self.model.classify((data, target[:, None]),
return_hidden=False)
negative_targets = random_pick_wrong_target(target)
neg_output = self.model.classify((data, negative_targets),
return_hidden=False)
contrastive_term = self.contrastive_loss(
pos_output, True) + self.contrastive_loss(neg_output, False) #.to(torch.device('cuda'))
# For numerical stability.
if (trainer.state.epoch == 1) and (trainer.state.iteration == 1):
if self.balance:
self.scale_contrastive_term = contrastive_term.item()
else:
self.scale_contrastive_term = 1
loss = self.contrastive_coeff * contrastive_term / self.scale_contrastive_term
loss.backward()
return tuple(l.item() for l in (contrastive_term, ))
def log_training_loss(self, trainer):
"""Record the training loss metrics.
Parameters:
trainer: the trainer object.
"""
print(
f"Epoch:{trainer.state.epoch:4d}\tTrain Loss: {trainer.state.output}"
)
self.run_logger.log_metrics({'contrastive': trainer.state.output},
step=trainer.state.epoch)
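# A minimal sketch of the contrastive pairing used in compute_and_backward_loss
# above: the classifier output is pushed towards "true" for correctly matched
# (data, auxiliary-label) pairs and towards "false" for shuffled pairs.
# ``_binary_logistic_loss`` below is only a stand-in with the
# ``(output, label) -> loss`` shape of the BinaryCallableLoss alias; the real
# implementation lives in .trainer_util.
if __name__ == '__main__':
    import torch.nn.functional as F
    def _binary_logistic_loss(output, is_positive):
        # softplus(-x) = -log(sigmoid(x)); flip the sign for negative pairs
        return F.softplus(-output).mean() if is_positive else F.softplus(output).mean()
    pos_output = torch.randn(8)  # outputs for correctly paired samples
    neg_output = torch.randn(8)  # outputs for mismatched samples
    loss = _binary_logistic_loss(pos_output, True) + _binary_logistic_loss(neg_output, False)
    print(float(loss))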
| 43.826667
| 159
| 0.609066
| 3,636
| 0.553088
| 0
| 0
| 536
| 0.081533
| 0
| 0
| 2,534
| 0.385458
|
62e27fc7ce47704f27bdd2c667d663a58a6d3981
| 485
|
py
|
Python
|
tetrad_cms/cases/tasks.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
tetrad_cms/cases/tasks.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
tetrad_cms/cases/tasks.py
|
UsernameForGerman/tetraD-NK
|
e00b406ac7b2ce63b92698c887fb53bf53344454
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from requests import Session
import os
from json import dumps
from core.celery import app
@app.task(queue='cms')
def send_new_contact_to_admins(contact: dict, admins: list) -> None:
s = Session()
data = {'admins': admins, 'contact': contact}
url = settings.TELEGRAM_BOT_API_URL + 'send/contact'
try:
s.post(url, data=dumps(data), headers={'Content-Type': 'application/json'})
except BaseException as e:
print(e)
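# A minimal invocation sketch (payload values below are illustrative only; in
# the CMS the contact and the admin list come from the application):
#
#   send_new_contact_to_admins.delay(
#       contact={'name': 'Jane', 'phone': '+380000000000'},
#       admins=[123456789],
#   )
#
# ``.delay`` enqueues the task on the 'cms' queue declared in the decorator, so
# the HTTP call to TELEGRAM_BOT_API_URL happens inside a Celery worker.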
| 25.526316
| 83
| 0.694845
| 0
| 0
| 0
| 0
| 357
| 0.736082
| 0
| 0
| 68
| 0.140206
|
62e353f71bc5f0d9e24cfab6d427c04ff9186124
| 316
|
py
|
Python
|
learning/example03_for.py
|
bokunimowakaru/iot
|
e2672a9b1dc0c4f3b57995daee634edce00a8029
|
[
"MIT"
] | 6
|
2019-04-19T18:56:27.000Z
|
2022-03-07T13:08:28.000Z
|
learning/example03_for.py
|
bokunimowakaru/iot
|
e2672a9b1dc0c4f3b57995daee634edce00a8029
|
[
"MIT"
] | null | null | null |
learning/example03_for.py
|
bokunimowakaru/iot
|
e2672a9b1dc0c4f3b57995daee634edce00a8029
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
# Example 03: the for loop -- repetition, which computers excel at
from sys import argv # get this program's command-line arguments (argv)
for name in argv: # assign each argument to the variable name in turn
    print('Hello,', name + '!') # print the contents of name after the string "Hello,"
# Changing "argv" in the for statement to "argv[1:]" loops over every argument from argv[1] onward instead
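# Example run (output assumes the script is invoked as shown; argv[0] is the script path):
#
#   $ python3 example03_for.py Alice Bob
#   Hello, example03_for.py!
#   Hello, Alice!
#   Hello, Bob!
#
# With "for name in argv[1:]" only the "Alice" and "Bob" lines would be printed.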
| 28.727273
| 67
| 0.623418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 361
| 0.752083
|
62e409fd164236f3061594e924d59c0872ce51fc
| 5,929
|
py
|
Python
|
linekey.py
|
alex-west-met-office/IMBS_MO
|
76aff7d53da02aafb13a94d4afceeb0cc771b5cb
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T18:53:01.000Z
|
2021-03-05T18:53:01.000Z
|
linekey.py
|
alex-west-met-office/IMBS_MO
|
76aff7d53da02aafb13a94d4afceeb0cc771b5cb
|
[
"BSD-3-Clause"
] | null | null | null |
linekey.py
|
alex-west-met-office/IMBS_MO
|
76aff7d53da02aafb13a94d4afceeb0cc771b5cb
|
[
"BSD-3-Clause"
] | 1
|
2021-03-05T18:53:07.000Z
|
2021-03-05T18:53:07.000Z
|
''' A module for defining and producing the linekey object, which is used
to determine and store information about data format in a CRREL
ice mass balance buoy.'''
class linekey:
def __init__(self,date_index = 0):
self.date_index = date_index
self.value_index = []
self.phenomena_names = []
self.lon_flip_ew = (False,-1,-1)
self.lat_flip_ns = (False,-1,-1)
self.vertical_scale = 1.
self.fliplon = False
def add_value_index(self,phenomenon,index):
self.value_index.append(index)
self.phenomena_names.append(phenomenon)
def ns(self,index_flippee,index_flipper):
self.lat_flip_ns = (True,index_flippee,index_flipper)
def ew(self,index_flippee,index_flipper):
self.lon_flip_ew = (True,index_flippee,index_flipper)
def get_temp_linekey(data_file):
import csv
fileh = open(data_file)
rows = csv.reader(fileh)
found_key = False
found_date = False
for row in rows:
print(row)
for (i,strtest) in enumerate(row):
if ('Date' in strtest) or ('DATE' in strtest):
key = linekey(date_index = i)
found_date = True
break
if found_date:
temp_codes = {}
temp_type = ''
for (i,strtest) in enumerate(row):
result = classify_temp_header(strtest)
if result[0]==1:
if temp_type == 'subjective':
print('Unable to determine temperature type')
return None
temp_type = 'objective'
prefix = 'TO'
if result[0]==2:
if temp_type == 'objective':
print('Unable to determine temperature type')
return None
temp_type = 'subjective'
prefix = 'TS'
temp_codes[i] = classify_temp_header(strtest)
if result[0]!=0:
key.add_value_index(prefix+str(result[1]),i)
break
return key
def get_linekey(data_file,variable_list,buoy_name):
import dictionaries
import csv
fileh = open(data_file)
rows = csv.reader(fileh)
found_key = False
found_date = False
td = dictionaries.title_dic()
variable_keys_list = [td[variable_name] for variable_name in variable_list]
vertical_scale = 1.
fliplon = False
for row in rows:
if not found_key:
for (i,strtest) in enumerate(row):
if ('Date' in strtest) or ('DATE' in strtest):
key = linekey(date_index = i)
found_date = True
break
if found_date:
for (varno,variable_keys) in enumerate(variable_keys_list):
found_key = False
for string in variable_keys:
for (i,strtest) in enumerate(row):
if (string == strtest.strip()):
key.add_value_index(variable_list[varno],i)
found_key = True
i_key = i
if '(cm)' in string:
vertical_scale = 0.01
if '(m)' in string:
vertical_scale = 1.
if string=='Longitude (W)':
fliplon = True
if not found_key:
key.add_value_index(variable_list[varno],-1)
if variable_list[varno]=='latitude':
for (i,strtest) in enumerate(row):
if (strtest == 'N/S'):
key.ns(i_key,i)
if variable_list[varno]=='longitude':
for (i,strtest) in enumerate(row):
if (strtest == 'E/W'):
key.ew(i_key,i)
if True in [('units are cm') in item for item in row]:
vertical_scale = 0.01
if 'E/W' in row and 'longitude' in key.phenomena_names:
i_flipper = row.index('E/W')
i_flippee = key.value_index[key.phenomena_names.index('longitude')]
key.ew(i_flippee,i_flipper)
if 'N/S' in row and 'latitude' in key.phenomena_names:
i_flipper = row.index('N/S')
i_flippee = key.value_index[key.phenomena_names.index('latitude')]
key.ew(i_flippee,i_flipper)
if not found_date:
print('Could not find date')
fileh.close()
return None
key.vertical_scale = vertical_scale
key.fliplon = fliplon
fileh.close()
return key
def classify_temp_header(string):
import functions
if functions.is_number(string):
number = float(string)
return (1,number)
elif string[0:1]=='T' and string[-3:]=='(C)' and functions.is_number(string[1:-3]):
number = int(string[1:-3])
return (2,number)
elif string[0:1]=='T' and functions.is_number(string[1:]):
number = int(string[1:])
return (2,number)
elif len(string) >= 4:
if string[0:4]=='TEMP' and functions.is_number(string[4:]):
number = int(string[4:])
return (2,number)
elif string[0:5]=='Temp ' and functions.is_number(string[5:]):
number = int(string[5:])
return (2,number)
else:
return (0,0)
else:
return (0,0)
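# Illustrative results of classify_temp_header, assuming functions.is_number()
# accepts plain numeric strings (the header strings below are hypothetical):
#
#   classify_temp_header('12.5')   -> (1, 12.5)  plain number: "objective" temperature column
#   classify_temp_header('T5(C)')  -> (2, 5)     "T<n>(C)" style: "subjective" temperature column
#   classify_temp_header('TEMP03') -> (2, 3)     "TEMP<n>" style: "subjective" temperature column
#   classify_temp_header('Date')   -> (0, 0)     not a temperature column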
| 32.938889
| 87
| 0.49182
| 652
| 0.109968
| 0
| 0
| 0
| 0
| 0
| 0
| 499
| 0.084163
|
62e5121cc3d103f5d833e64dac522900d5c6c105
| 468
|
py
|
Python
|
2020/02/07/An Introduction to Sessions in Flask/flask_session_example/app.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 492
|
2019-06-25T12:54:31.000Z
|
2022-03-30T12:38:28.000Z
|
2020/02/07/An Introduction to Sessions in Flask/flask_session_example/app.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 23
|
2019-10-01T01:36:08.000Z
|
2022-02-10T12:46:16.000Z
|
2020/02/07/An Introduction to Sessions in Flask/flask_session_example/app.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 1,734
|
2019-06-03T06:25:13.000Z
|
2022-03-31T23:57:53.000Z
|
from flask import Flask, render_template, session, redirect, url_for
app = Flask(__name__)
app.config['SECRET_KEY'] = 'prettyprinted'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/set-background/<mode>')
def set_background(mode):
session['mode'] = mode
return redirect(url_for('index'))
@app.route('/drop-session')
def drop_session():
session.pop('mode', None)
return redirect(url_for('index'))
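# A minimal sketch of exercising the session flow with Flask's test client
# (the route names and the 'mode' key come from the views above).
if __name__ == '__main__':
    with app.test_client() as client:
        client.get('/set-background/dark')  # stores mode='dark' in the session cookie
        with client.session_transaction() as sess:
            print(sess.get('mode'))  # -> 'dark'
        client.get('/drop-session')  # pops 'mode' again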
| 26
| 69
| 0.675214
| 0
| 0
| 0
| 0
| 319
| 0.681624
| 0
| 0
| 107
| 0.228632
|
62e7dc7223d5307c35918c3ce6453c318e70e573
| 6,034
|
py
|
Python
|
python/y2019/d19/day18a.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | 1
|
2021-01-12T20:04:01.000Z
|
2021-01-12T20:04:01.000Z
|
python/y2019/d19/day18a.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
python/y2019/d19/day18a.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
import random
from collections import deque
import networkx as nx
from lib import puzzle
def draw_grid(grid):
min_y, max_y = 0, 0
min_x, max_x = 0, 0
for y, x in grid:
if y < min_y:
min_y = y
if y > max_y:
max_y = y
if x < min_x:
min_x = x
if x > max_x:
max_x = x
y_range = range(min_y, max_y + 2)
x_range = range(min_x, max_x + 2)
output = ''
for y in y_range:
for x in x_range:
if (y, x) in grid:
output += grid[(y, x)]
else:
output += '#'
output += '\n'
return output
def construct_graph(grid, position, keys):
g = nx.Graph()
l = deque([position])
visited = set()
movable = {'.', '@', *[chr(x) for x in range(ord('a'), ord('z') + 1)]}
possible_keys = []
while len(l) > 0:
n = l.popleft()
visited.add(n)
if n in grid and grid[n] in keys:
possible_keys.append(n)
for y, x in [(n[0] + 1, n[1]), (n[0] - 1, n[1]), (n[0], n[1] + 1), (n[0], n[1] - 1)]:
if (y, x) in grid and grid[(y, x)] in movable:
g.add_edge(n, (y, x))
if (y, x) not in visited:
l.append((y, x))
return g, possible_keys
def new_state_path_length(t):
return len(t[-1])
class Day18(puzzle.Puzzle):
year = '2019'
day = '18'
def get_data(self):
data = self.input_data
return data
def part1(self):
data = self.get_data()
g = nx.Graph()
keys = {}
pos_to_key = {}
doors = {}
position = (0, 0)
grid = {}
for y, row in enumerate(data.splitlines()):
for x, c in enumerate(row):
if c == '#':
continue
if c == '.':
grid[(y, x)] = '.'
continue
if c == '@':
position = (y, x)
grid[(y, x)] = c
if ord(c) in set(range(ord('a'), ord('z') + 1)):
keys[c] = (y, x)
pos_to_key[(y, x)] = c
grid[(y, x)] = c
if ord(c) in set(range(ord('A'), ord('Z') + 1)):
doors[c] = (y, x)
grid[(y, x)] = c
paths = []
state = deque([(dict(grid), position, set(), [position])])
shortest_path = 5423
b_next = True
counted = 0
discarded = 0
discarded2 = 0
reached_end = 0
reached_end2 = 0
count = 0
while len(state) > 0:
count += 1
if count % 100 == 0:
print(f'{count}, states: {len(state)}, shortest path: {shortest_path}, discarded: {discarded}, discarded2: {discarded2}, reached end: {reached_end}, reached end2: {reached_end2}')
b_next = True
if b_next == True:
current_grid, position, keys_collected, path = state.popleft()
#print(f'b_next, {len(paths)}')
else:
current_grid, position, keys_collected, path = state.pop()
#print(f'At position {position}, keys collected {keys_collected}')
#print(draw_grid(current_grid))
#print(len(keys_collected), len(keys))
if len(path) >= shortest_path:
b_next = True
discarded += 1
continue
if len(keys_collected) == len(keys):
reached_end2 += 1
if len(path) < shortest_path:
shortest_path = len(path)
print(f'new shortest path {shortest_path}, paths: {len(paths)}, discarded: {discarded}')
#b_next = True
continue
graph, possible_keys = construct_graph(current_grid, position, keys)
#print(f'possible keys: {possible_keys}, collected: {keys_collected}')
b_next = False
new_states = []
for key_pos in possible_keys:
#print(f'Adding path to {key_pos}')
path_to_key = nx.shortest_path(graph, position, key_pos)[1:]
new_path = path + path_to_key
if len(new_path) >= shortest_path:
#b_next = True
discarded += 1
continue
if (len(new_path) / (len(keys_collected) + 1)) >= (shortest_path / len(pos_to_key)):
#b_next = True
discarded2 += 1
continue
new_position = key_pos
new_keys_collected = set(keys_collected)
new_keys_collected.add(current_grid[key_pos])
#print(f'new keys collected {new_keys_collected}')
key = current_grid[key_pos]
new_grid = dict(current_grid)
new_grid[position] = '.'
if key.upper() in doors:
new_grid[doors[pos_to_key[key_pos].upper()]] = '.'
new_grid[key_pos] = '@'
new_states.append((
new_grid,
new_position,
new_keys_collected,
new_path,
))
#for new_state in sorted(new_states, key=new_state_path_length):
for new_state in random.sample(new_states, len(new_states)):
state.append(new_state)
if len(new_states) == 0:
reached_end += 1
b_next = True
print(draw_grid(grid))
print(paths)
print(len(paths))
lengths = []
for path in paths:
print(len(path) - 1)
lengths.append(len(path) - 1)
return min(lengths)
def part2(self):
return None
def main(self):
print(f'Part 1 Answer: {self.part1()}')
print(f'Part 2 Answer: {self.part2()}')
| 28.597156
| 195
| 0.465529
| 4,656
| 0.771627
| 0
| 0
| 0
| 0
| 0
| 0
| 808
| 0.133908
|
62e8443425595d1830f01ee66eb245eac34208d4
| 6,741
|
py
|
Python
|
train.py
|
TahjidEshan/PIXOR-1
|
741b3f913d32b84e550b69d6ff9b89946a524192
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
TahjidEshan/PIXOR-1
|
741b3f913d32b84e550b69d6ff9b89946a524192
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
TahjidEshan/PIXOR-1
|
741b3f913d32b84e550b69d6ff9b89946a524192
|
[
"Apache-2.0"
] | null | null | null |
import torch
import time
from loss import CustomLoss
from datagen import get_data_loader
from model import PIXOR
from utils import get_model_name, load_config, plot_bev, plot_label_map
from postprocess import non_max_suppression
def build_model(config, device, train=True):
net = PIXOR(config['use_bn']).to(device)
criterion = CustomLoss(device=device, num_classes=1)
if not train:
return net, criterion
optimizer = torch.optim.SGD(net.parameters(), lr=config['learning_rate'], momentum=config['momentum'])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['lr_decay_every'], gamma=0.1)
return net, criterion, optimizer, scheduler
def validate_batch(net, criterion, batch_size, test_data_loader, device):
net.eval()
val_loss = 0
num_samples = 0
for i, data in enumerate(test_data_loader):
input, label_map = data
input = input.to(device)
label_map = label_map.to(device)
predictions = net(input)
loss = criterion(predictions, label_map)
val_loss += float(loss)
num_samples += label_map.shape[0]
return val_loss * batch_size / num_samples
def printnorm(self, input, output):
# input is a tuple of packed inputs
    # output is a Tensor. output.data is the Tensor we are interested in
print('Inside ' + self.__class__.__name__ + ' forward')
print('')
print('input: ', type(input))
print('input[0]: ', type(input[0]))
print('output: ', type(output))
print('')
print('input size:', input[0].size())
print('output size:', output.data.size())
print('output norm:', output.data.norm())
def printgradnorm(self, grad_input, grad_output):
print('Inside ' + self.__class__.__name__ + ' backward')
print('Inside class:' + self.__class__.__name__)
print('')
print('grad_input: ', type(grad_input))
print('grad_input[0]: ', type(grad_input[0]))
print('grad_output: ', type(grad_output))
print('grad_output[0]: ', type(grad_output[0]))
print('')
print('grad_input size:', grad_input[0].size())
print('grad_output size:', grad_output[0].size())
print('grad_input norm:', grad_input[0].norm())
def train(config_name, device):
config, learning_rate, batch_size, max_epochs = load_config(config_name)
train_data_loader, test_data_loader = get_data_loader(batch_size=batch_size, use_npy=config['use_npy'], frame_range=config['frame_range'])
net, criterion, optimizer, scheduler = build_model(config, device, train=True)
if config['resume_training']:
saved_ckpt_path = get_model_name(config['old_ckpt_name'])
net.load_state_dict(torch.load(saved_ckpt_path, map_location=device))
print("Successfully loaded trained ckpt at {}".format(saved_ckpt_path))
net.train()
#net.backbone.conv1.register_forward_hook(printnorm)
#net.backbone.conv2.register_backward_hook(printgradnorm)
start_time = time.time()
for epoch in range(max_epochs):
train_loss = 0
num_samples = 0
scheduler.step()
print("Learning Rate for Epoch {} is {} ".format(epoch + 1, scheduler.get_lr()))
for i, (input, label_map) in enumerate(train_data_loader):
input = input.to(device)
label_map = label_map.to(device)
optimizer.zero_grad()
# Forward
predictions = net(input)
loss = criterion(predictions, label_map)
loss.backward()
optimizer.step()
train_loss += float(loss)
num_samples += label_map.shape[0]
train_loss = train_loss * batch_size/ num_samples
val_loss = validate_batch(net, criterion, batch_size, test_data_loader, device)
print("Epoch {}|Time {:.3f}|Training Loss: {}|Validation Loss: {}".format(
epoch + 1, time.time() - start_time, train_loss, val_loss))
if (epoch + 1) == max_epochs or (epoch + 1) % config['save_every'] == 0:
model_path = get_model_name(config['name']+'__epoch{}'.format(epoch+1))
torch.save(net.state_dict(), model_path)
print("Checkpoint saved at {}".format(model_path))
print('Finished Training')
end_time = time.time()
elapsed_time = end_time - start_time
print("Total time elapsed: {:.2f} seconds".format(elapsed_time))
def experiment(config_name, device):
config, _, _, _ = load_config(config_name)
net, criterion = build_model(config, device, train=False)
#net.load_state_dict(torch.load(get_model_name(config['name']), map_location=device))
net.set_decode(True)
loader, _ = get_data_loader(batch_size=1, use_npy=config['use_npy'], frame_range=config['frame_range'])
net.eval()
image_id = 25
threshold = config['cls_threshold']
with torch.no_grad():
input, label_map = loader.dataset[image_id]
input = input.to(device)
label_map = label_map.to(device)
label_map_unnorm, label_list = loader.dataset.get_label(image_id)
# Forward Pass
t_start = time.time()
pred = net(input.unsqueeze(0)).squeeze_(0)
print("Forward pass time", time.time() - t_start)
# Select all the bounding boxes with classification score above threshold
cls_pred = pred[..., 0]
activation = cls_pred > threshold
# Compute (x, y) of the corners of selected bounding box
num_boxes = int(activation.sum())
if num_boxes == 0:
print("No bounding box found")
return
corners = torch.zeros((num_boxes, 8))
for i in range(1, 9):
corners[:, i - 1] = torch.masked_select(pred[..., i], activation)
corners = corners.view(-1, 4, 2).numpy()
scores = torch.masked_select(pred[..., 0], activation).numpy()
# NMS
t_start = time.time()
selected_ids = non_max_suppression(corners, scores, config['nms_iou_threshold'])
corners = corners[selected_ids]
scores = scores[selected_ids]
print("Non max suppression time:", time.time() - t_start)
# Visualization
input_np = input.cpu().numpy()
plot_bev(input_np, label_list, window_name='GT')
plot_bev(input_np, corners, window_name='Prediction')
plot_label_map(cls_pred.numpy())
if __name__ == "__main__":
device = torch.device('cpu')
if torch.cuda.is_available():
device = torch.device('cuda')
print('using device', device)
name = 'config.json'
train(name, device)
#experiment(name, device)
| 37.870787
| 143
| 0.637739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,291
| 0.191515
|
62e88e0e53e902fc19cd512c4d2ebfa27cd4aa98
| 1,595
|
py
|
Python
|
Data-Wrangling-With-Pandas/code.py
|
fakhruddin950/ga-learner-dsmp-repo
|
388b13867667167514ef8a6cb314daa06e862850
|
[
"MIT"
] | null | null | null |
Data-Wrangling-With-Pandas/code.py
|
fakhruddin950/ga-learner-dsmp-repo
|
388b13867667167514ef8a6cb314daa06e862850
|
[
"MIT"
] | null | null | null |
Data-Wrangling-With-Pandas/code.py
|
fakhruddin950/ga-learner-dsmp-repo
|
388b13867667167514ef8a6cb314daa06e862850
|
[
"MIT"
] | null | null | null |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
bank=pd.read_csv(path)
categorical_var=bank.select_dtypes(include='object')
print(categorical_var)
numerical_var=bank.select_dtypes(include='number')
print(numerical_var)
# code ends here
# --------------
banks=bank.drop(['Loan_ID'],axis=1)
print(banks.isnull().sum())
bank_mode=banks.mode().iloc[0]
print(bank_mode)
banks.fillna(bank_mode,inplace=True)
print(banks.isnull().sum())
# --------------
# Code starts here
import pandas as pd
import numpy as np
avg_loan_amount=pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],
values= ['LoanAmount'],aggfunc='mean')
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
yes=(banks['Loan_Status']=='Y') & (banks['Self_Employed']=='Yes')
loan_approved_se=banks[yes].count()[0]
no=(banks['Loan_Status']=='Y') & (banks['Self_Employed']=='No')
loan_approved_nse=banks[no].count()[0]
Loan_Status_count=banks['Loan_Status'].count()
percentage_se=100*loan_approved_se/Loan_Status_count
percentage_nse=100*loan_approved_nse/Loan_Status_count
print(percentage_nse,percentage_se)
# code ends here
# --------------
# code starts here
loan_term=banks['Loan_Amount_Term'].apply(lambda x: int(x)/12)
big_loan_term=len(loan_term[loan_term>=25])
print(big_loan_term)
# code ends here
# --------------
# code starts here
loan_groupby=banks.groupby(['Loan_Status'])[['ApplicantIncome','Credit_History']]
mean_values=loan_groupby.mean()
print(mean_values)
# code ends here
| 18.333333
| 80
| 0.711599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 506
| 0.317241
|
62e9bb75214838eb014900bb542cf54ee4677ca5
| 3,039
|
py
|
Python
|
pygfunction/examples/custom_borehole.py
|
icupeiro/pygfunction
|
3688ecc45515e161cfe882fdf4a3687c49013174
|
[
"BSD-3-Clause"
] | null | null | null |
pygfunction/examples/custom_borehole.py
|
icupeiro/pygfunction
|
3688ecc45515e161cfe882fdf4a3687c49013174
|
[
"BSD-3-Clause"
] | null | null | null |
pygfunction/examples/custom_borehole.py
|
icupeiro/pygfunction
|
3688ecc45515e161cfe882fdf4a3687c49013174
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Example definition of a borehole. A top-view plot of the borehole is
created and the borehole resistance is computed.
"""
from __future__ import absolute_import, division, print_function
import pygfunction as gt
from numpy import pi
def main():
# Borehole dimensions
H = 400. # Borehole length (m)
D = 5. # Borehole buried depth (m)
r_b = 0.0875 # Borehole radius (m)
# Pipe dimensions
rp_out = 0.0133 # Pipe outer radius (m)
rp_in = 0.0108 # Pipe inner radius (m)
D_s = 0.029445 # Shank spacing (m)
epsilon = 1.0e-6 # Pipe roughness (m)
# Pipe positions
# Single U-tube [(x_in, y_in), (x_out, y_out)]
pos = [(-D_s, 0.), (D_s, 0.)]
# Define a borehole
borehole = gt.boreholes.Borehole(H, D, r_b, x=0., y=0.)
k_p = 0.4 # Pipe thermal conductivity (W/m.K)
k_s = 2.0 # Ground thermal conductivity (W/m.K)
k_g = 1.0 # Grout thermal conductivity (W/m.K)
# Fluid properties
m_flow = 0.25 # Total fluid mass flow rate per borehole (kg/s)
cp_f = 3977. # Fluid specific isobaric heat capacity (J/kg.K)
den_f = 1015. # Fluid density (kg/m3)
visc_f = 0.00203 # Fluid dynamic viscosity (kg/m.s)
k_f = 0.492 # Fluid thermal conductivity (W/m.K)
# Pipe thermal resistance
R_p = gt.pipes.conduction_thermal_resistance_circular_pipe(rp_in,
rp_out,
k_p)
# Fluid to inner pipe wall thermal resistance (Single U-tube)
h_f = gt.pipes.convective_heat_transfer_coefficient_circular_pipe(m_flow,
rp_in,
visc_f,
den_f,
k_f,
cp_f,
epsilon)
R_f = 1.0 / (h_f * 2 * pi * rp_in)
SingleUTube = gt.pipes.SingleUTube(
pos, rp_in, rp_out, borehole, k_s, k_g, R_f + R_p)
Rb = gt.pipes.borehole_thermal_resistance(SingleUTube, m_flow, cp_f)
print('Borehole thermal resistance: {0:.4f} m.K/W'.format(Rb))
# Check the geometry to make sure it is physically possible
#
    # This class method is automatically called at the instantiation of the
    # pipe object and raises an error if the pipe geometry is invalid. It is
    # manually called here for demonstration.
check = SingleUTube._check_geometry()
print('The geometry of the borehole is valid (realistic/possible): '
+ str(check))
# Create a borehole top view
fig = SingleUTube.visualize_pipes()
# Save the figure as a pdf
fig.savefig('borehole-top-view.pdf')
if __name__ == '__main__':
main()
| 37.518519
| 78
| 0.535373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,264
| 0.415926
|
62ea3738cc160ba63213f6e51f4925dc71a6ad64
| 2,409
|
py
|
Python
|
project/models.py
|
nikodrum/evaluationua
|
8c68330da1629a9d3a08fa7ed43b10f71148fd01
|
[
"MIT"
] | null | null | null |
project/models.py
|
nikodrum/evaluationua
|
8c68330da1629a9d3a08fa7ed43b10f71148fd01
|
[
"MIT"
] | null | null | null |
project/models.py
|
nikodrum/evaluationua
|
8c68330da1629a9d3a08fa7ed43b10f71148fd01
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
from random import randint
import math
from project import matplt,database
from geopy.geocoders import Nominatim
from geopy import exc
import os, shutil
categ_coef = 17960
geolocator = Nominatim()
def clean_temp_folder(folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
if os.path.isfile(file_path):
os.unlink(file_path)
def data_validation(list_data_for_valid):
for i in list_data_for_valid:
try:
i = float(i)
except ValueError:
i = 0
return list_data_for_valid
def plotting(post_plot_distr):
# collecting data for plot
metrics = ['price','room','all_area','livin_area','kitch_area','all_floors','year']
to_return_plot_data = []
for i in range(3):
num_of_metr = randint(0,len(metrics)-1)
plt_data = []
for i in range(0,len(post_plot_distr)):
try :
plt_data.append(post_plot_distr[i][metrics[num_of_metr]])
except IndexError:
break
to_return_plot_data.append([metrics[num_of_metr],plt_data])
metrics.remove(metrics[num_of_metr])
path_img_hist = matplt.plot_hist(to_return_plot_data)
path_img_scatter = matplt.plot_scatter(to_return_plot_data)
return [path_img_hist[0],path_img_scatter[0],[path_img_hist[1],path_img_scatter[1],path_img_scatter[2]]]
def calculating(street,num_build):
location = None
try:
location = geolocator.geocode("Киев, " + street +' '+ num_build )
if location != None:
lat = location.latitude
lon = location.longitude
else :
lat = 0
lon = 0
except exc.GeocoderTimedOut:
lat = 0
lon = 0
return [lat,lon]
def choose_full_info_row(list_df_na):
indicator = False
data_for_posting = None
while indicator == False:
rand_n = randint(0,len(list_df_na))
try:
if list_df_na[rand_n]['price'] != '' and list_df_na[rand_n]['street'] != ''and list_df_na[rand_n]['distr'] != ''and list_df_na[rand_n]['all_area'] != ''and list_df_na[rand_n]['all_floors'] != ''and list_df_na[rand_n]['room'] != '':
data_for_posting = list_df_na[rand_n]
indicator = True
except IndexError:
indicator = True
return data_for_posting
| 29.378049
| 243
| 0.630552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 191
| 0.079155
|
62ed041f991b95827e52f2d6f991c749ace2aa73
| 1,914
|
py
|
Python
|
python/titlecase.py
|
edewillians10/ewsc
|
bedd3fec854ac1633eefc028281b97ca6e2686df
|
[
"Apache-2.0"
] | null | null | null |
python/titlecase.py
|
edewillians10/ewsc
|
bedd3fec854ac1633eefc028281b97ca6e2686df
|
[
"Apache-2.0"
] | null | null | null |
python/titlecase.py
|
edewillians10/ewsc
|
bedd3fec854ac1633eefc028281b97ca6e2686df
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import re
import argparse
re_junk = re.compile(r'[._-]')
re_spaces = re.compile(r'\s\s+')
def print_rename(old_filename, new_filename):
print('{} -> {}'.format(old_filename, new_filename))
def print_and_rename(old_path, new_path):
print_rename(old_path, new_path)
os.rename(old_path, new_path)
def get_new_path(old_path):
""" Get the new path, titlecased and (a little bit) sanitized.
- Only operate on the basename:
+ don't touch parent directories
+ don't touch the extension
- Sanitize:
+ replace junk characters with space
+ replace multiple spaces with single space
+ trim extra spaces at start and end
:param old_path: the path to rename
:return: titlecased and a little bit sanitized new path
"""
dirpart, filepart = os.path.split(old_path)
if filepart.startswith('.'):
return old_path
base, ext = os.path.splitext(filepart)
base = re_junk.sub(' ', base)
base = re_spaces.sub(' ', base).strip()
if not base:
return old_path
return os.path.join(dirpart, base.title() + ext)
def titlecase(old_path, rename_function):
if not os.path.exists(old_path):
return
new_path = get_new_path(old_path)
if old_path == new_path:
return
rename_function(old_path, new_path)
def main():
parser = argparse.ArgumentParser(description='Rename files to "titlecased" and "sanitized"')
parser.add_argument('-n', '--dry-run', action='store_true', help='Print what would happen, don\'t rename')
parser.add_argument('paths', nargs='+')
args = parser.parse_args()
rename_function = print_rename if args.dry_run else print_and_rename
for path in args.paths:
titlecase(path, rename_function)
if __name__ == '__main__':
main()
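# Illustrative behaviour of get_new_path (the paths below are hypothetical):
#
#   get_new_path('photos/summer__trip-2020.jpg') -> 'photos/Summer Trip 2020.jpg'
#   get_new_path('notes/.hidden_file.txt')       -> 'notes/.hidden_file.txt'   (dotfiles are left alone)
#   get_new_path('already titled.TXT')           -> 'Already Titled.TXT'       (extension is untouched)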
| 26.957746
| 111
| 0.646813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 643
| 0.335946
|
62edfa74cc80bf68fdca4db96ce6ae0f223f2112
| 1,007
|
py
|
Python
|
broti/modules/stalking.py
|
pcworld/broti
|
4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1
|
[
"BSD-2-Clause"
] | null | null | null |
broti/modules/stalking.py
|
pcworld/broti
|
4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1
|
[
"BSD-2-Clause"
] | null | null | null |
broti/modules/stalking.py
|
pcworld/broti
|
4f0d1e79cb7f51d1f71ce349426cb01b8ef2b1f1
|
[
"BSD-2-Clause"
] | 1
|
2021-03-28T18:52:26.000Z
|
2021-03-28T18:52:26.000Z
|
import time
requires = ['db']
def add_joined(bot, c, e):
username, _, _ = e.source.partition('!')
cur = bot.provides['db'].cursor()
cur.execute('''INSERT INTO stalking (username, date, action, channel)
VALUES (?, ?, ?, ?)''', (username, time.time(), 'join', e.target))
bot.provides['db'].commit()
def add_left(bot, c, e):
username, _, _ = e.source.partition('!')
cur = bot.provides['db'].cursor()
cur.execute('''INSERT INTO stalking (username, date, action, channel)
VALUES (?, ?, ?, ?)''', (username, time.time(), 'leave', e.target))
bot.provides['db'].commit()
def load_module(bot):
cur = bot.provides['db'].cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS stalking (
username TEXT,
date INTEGER,
action TEXT,
channel TEXT
)''')
bot.provides['db'].commit()
bot.hook_action('userJoined', add_joined)
bot.hook_action('userLeft', add_left)
return [hash(add_joined), hash(add_left)]
| 28.771429
| 75
| 0.594836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.382324
|
62eecf5be6b8f29ec4406432dc27f44102230b56
| 7,968
|
py
|
Python
|
lib/streamlit/uploaded_file_manager.py
|
Sax-dot/sax-test-streamlit
|
05dfef0c26bbdf3467c6236921a01afafa90f435
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/uploaded_file_manager.py
|
Sax-dot/sax-test-streamlit
|
05dfef0c26bbdf3467c6236921a01afafa90f435
|
[
"Apache-2.0"
] | null | null | null |
lib/streamlit/uploaded_file_manager.py
|
Sax-dot/sax-test-streamlit
|
05dfef0c26bbdf3467c6236921a01afafa90f435
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import threading
from typing import Dict, NamedTuple, Optional, List, Tuple
from blinker import Signal
class UploadedFileRec(NamedTuple):
"""Metadata and raw bytes for an uploaded file. Immutable."""
id: str
name: str
type: str
data: bytes
class UploadedFile(io.BytesIO):
"""A mutable uploaded file.
This class extends BytesIO, which has copy-on-write semantics when
initialized with `bytes`.
"""
def __init__(self, record: UploadedFileRec):
# BytesIO's copy-on-write semantics doesn't seem to be mentioned in
# the Python docs - possibly because it's a CPython-only optimization
# and not guaranteed to be in other Python runtimes. But it's detailed
# here: https://hg.python.org/cpython/rev/79a5fbe2c78f
super(UploadedFile, self).__init__(record.data)
self.id = record.id
self.name = record.name
self.type = record.type
self.size = len(record.data)
class UploadedFileManager(object):
"""Holds files uploaded by users of the running Streamlit app,
and emits an event signal when a file is added.
"""
def __init__(self):
self._files_by_id: Dict[Tuple[str, str], List[UploadedFileRec]] = {}
self._file_counts_by_id: Dict[Tuple[str, str], int] = {}
# Prevents concurrent access to the _files_by_id dict.
# In remove_session_files(), we iterate over the dict's keys. It's
# an error to mutate a dict while iterating; this lock prevents that.
self._files_lock = threading.Lock()
self.on_files_updated = Signal(
doc="""Emitted when a file list is added to the manager or updated.
Parameters
----------
session_id : str
The session_id for the session whose files were updated.
"""
)
def _on_files_updated(self, session_id: str, widget_id: str):
files_by_widget = session_id, widget_id
if files_by_widget in self._file_counts_by_id:
expected_file_count: int = self._file_counts_by_id[files_by_widget]
actual_file_count: int = (
len(self._files_by_id[files_by_widget])
if files_by_widget in self._files_by_id
else 0
)
if expected_file_count == actual_file_count:
self.on_files_updated.send(session_id)
else:
self.on_files_updated.send(session_id)
def _add_files(
self,
session_id: str,
widget_id: str,
files: List[UploadedFileRec],
):
"""
Add a list of files to the FileManager. Does not emit any signals
"""
files_by_widget = session_id, widget_id
with self._files_lock:
file_list = self._files_by_id.get(files_by_widget, None)
if file_list:
files = file_list + files
self._files_by_id[files_by_widget] = files
def add_files(
self,
session_id: str,
widget_id: str,
files: List[UploadedFileRec],
) -> None:
"""Add a list of files to the FileManager.
The "on_file_added" Signal will be emitted after the list is added.
Parameters
----------
session_id : str
The session ID of the report that owns the files.
widget_id : str
The widget ID of the FileUploader that created the files.
files : List[UploadedFileRec]
The file records to add.
"""
self._add_files(session_id, widget_id, files)
self._on_files_updated(session_id, widget_id)
def get_files(
self, session_id: str, widget_id: str
) -> Optional[List[UploadedFileRec]]:
"""Return the file list with the given ID, or None if the ID doesn't exist.
Parameters
----------
session_id : str
The session ID of the report that owns the file.
widget_id : str
The widget ID of the FileUploader that created the file.
Returns
-------
list of UploadedFileRec or None
"""
files_by_widget = session_id, widget_id
with self._files_lock:
return self._files_by_id.get(files_by_widget, None)
def remove_file(self, session_id: str, widget_id: str, file_id: str) -> None:
"""Remove the file list with the given ID, if it exists."""
files_by_widget = session_id, widget_id
with self._files_lock:
file_list = self._files_by_id[files_by_widget]
self._files_by_id[files_by_widget] = [
file for file in file_list if file.id != file_id
]
if len(file_list) != len(self._files_by_id[files_by_widget]):
self._on_files_updated(session_id, widget_id)
def _remove_files(self, session_id: str, widget_id: str) -> None:
"""Remove the file list for the provided widget in the
provided session, if it exists.
Does not emit any signals.
"""
files_by_widget = session_id, widget_id
self.update_file_count(session_id, widget_id, 0)
with self._files_lock:
self._files_by_id.pop(files_by_widget, None)
def remove_files(self, session_id: str, widget_id: str) -> None:
"""Remove the file list for the provided widget in the
provided session, if it exists.
Parameters
----------
session_id : str
The session ID of the report that owns the file.
widget_id : str
The widget ID of the FileUploader that created the file.
"""
self._remove_files(session_id, widget_id)
self._on_files_updated(session_id, widget_id)
def remove_session_files(self, session_id: str) -> None:
"""Remove all files that belong to the given report.
Parameters
----------
session_id : str
The session ID of the report whose files we're removing.
"""
# Copy the keys into a list, because we'll be mutating the dictionary.
with self._files_lock:
all_ids = list(self._files_by_id.keys())
for files_id in all_ids:
if files_id[0] == session_id:
self.remove_files(*files_id)
def replace_files(
self,
session_id: str,
widget_id: str,
files: List[UploadedFileRec],
) -> None:
"""Removes the file list for the provided widget in the
provided session, if it exists and add the provided files
to the widget in the session
Parameters
----------
session_id : str
The session ID of the report that owns the file.
widget_id : str
The widget ID of the FileUploader that created the file.
files : List[UploadedFileRec]
The files to add.
"""
self._remove_files(session_id, widget_id)
self._add_files(session_id, widget_id, files)
self._on_files_updated(session_id, widget_id)
def update_file_count(
self,
session_id: str,
widget_id: str,
file_count: int,
) -> None:
files_by_widget = session_id, widget_id
self._file_counts_by_id[files_by_widget] = file_count
self._on_files_updated(session_id, widget_id)
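# A minimal end-to-end sketch of the manager API above (the session, widget and
# file identifiers below are illustrative only).
if __name__ == "__main__":
    mgr = UploadedFileManager()
    rec = UploadedFileRec(id="file-1", name="report.csv", type="text/csv", data=b"a,b\n1,2\n")
    mgr.update_file_count("session-1", "uploader-1", 1)
    mgr.add_files("session-1", "uploader-1", [rec])
    files = mgr.get_files("session-1", "uploader-1")
    print(UploadedFile(files[0]).read())  # -> b'a,b\n1,2\n'
    mgr.remove_session_files("session-1")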
| 34.79476
| 83
| 0.626004
| 7,263
| 0.911521
| 0
| 0
| 0
| 0
| 0
| 0
| 3,708
| 0.465361
|
62ef13900c5a6f18d29ae7b0c78da18966ff6af7
| 4,047
|
py
|
Python
|
data/transforms/build.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/build.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/build.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
"""
build transform
"""
#import torchvision.transforms as T
#from PIL import Image
#from .transforms import RandomErasing,RandomErasingCorner
from .data_preprocessing import TrainAugmentation_albu,TestAugmentation_albu,TrainAugmentation_bone,TestAugmentation_bone
import torchvision.transforms as transforms
from data.transforms.RandAugment.augmentations import RandAugment,Lighting
_IMAGENET_PCA = {
'eigval': [0.2175, 0.0188, 0.0045],
'eigvec': [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
}
def get_transform(resize, phase='train'):
if phase == 'train':
tfms = transforms.Compose([
transforms.Resize(size=(int(resize[0] / 0.875), int(resize[1] / 0.875))),
transforms.RandomCrop(resize),
transforms.RandomHorizontalFlip(0.5),
transforms.ColorJitter(brightness=0.126, saturation=0.5),
transforms.ToTensor(),
#Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Add RandAugment with N, M(hyperparameter)
#tfms.transforms.insert(1, RandAugment(2, 9))
return tfms
else:
return transforms.Compose([
transforms.Resize(size=(int(resize[0] / 0.875), int(resize[1] / 0.875))),
transforms.CenterCrop(resize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
def build_transforms(cfg, is_train=True, weak_aug = False,n_aug = 1):
if cfg.INPUT.USE_FGTFMS is True:
if is_train is True:
transform = get_transform( cfg.INPUT.SIZE_TRAIN_PRED, 'train')
else:
transform = get_transform( cfg.INPUT.SIZE_TRAIN_PRED, 'val')
return transform
if cfg.DATASETS.NAMES =='ISIC':
if is_train is True:
if weak_aug is False:
transform = TrainAugmentation_albu(sz_hw = cfg.INPUT.SIZE_TRAIN_IN, \
mean = cfg.INPUT.PIXEL_MEAN, std = cfg.INPUT.PIXEL_STD, \
crp_scale = cfg.INPUT.CRP_SCALE, crp_ratio = cfg.INPUT.CRP_RATIO, n_aug = n_aug,out_augpos = cfg.DATASETS.OUT_AUGPOS)
else:
transform = TrainAugmentation_albu(sz_hw = cfg.INPUT.SIZE_TRAIN_IN, \
mean = cfg.INPUT.PIXEL_MEAN, std = cfg.INPUT.PIXEL_STD, \
crp_scale = cfg.INPUT.CRP_SCALE_WEAK, crp_ratio = cfg.INPUT.CRP_RATIO,weak_aug = True, n_aug = n_aug)
else:
transform = TestAugmentation_albu(size = cfg.INPUT.SIZE_TRAIN_IN, mean = cfg.INPUT.PIXEL_MEAN, std = cfg.INPUT.PIXEL_STD,out_augpos = cfg.DATASETS.OUT_AUGPOS)
elif cfg.DATASETS.NAMES =='BoneXray':
#size = configs.image_size, mean = configs.image_mean, std = configs.image_std, ext_p =configs.ext_p
if is_train is True:
transform = TrainAugmentation_bone(sz_in_hw = cfg.INPUT.SIZE_TRAIN_IN, sz_out_hw = cfg.INPUT.SIZE_TRAIN_PRED, \
mean = cfg.INPUT.PIXEL_MEAN, std = cfg.INPUT.PIXEL_STD, \
minmax_h = cfg.INPUT.MINMAX_H, w2h_ratio = cfg.INPUT.W2H_RATIO)
else:
transform = TestAugmentation_bone(sz_in_hw = cfg.INPUT.SIZE_TRAIN_IN,sz_out_hw = cfg.INPUT.SIZE_TRAIN_PRED, mean = cfg.INPUT.PIXEL_MEAN, std = cfg.INPUT.PIXEL_STD)
else:
raise ValueError('unknown transform for dataset {cfg.DATASETS.NAMES}')
# local att
#train_transform_lc = TrainAugmentation_albu(sz_in_hw = configs.sz_in_hw_lc, sz_out_hw = configs.sz_out_hw_lc, mean = configs.image_mean, std = configs.image_std,
# minmax_h= configs.minmax_h_lc,w2h_ratio = configs.w2h_ratio_lc)
return transform
| 40.069307
| 178
| 0.617742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 803
| 0.198419
|
62ef92a0927f04b3c8692fbdb4474ca4db193b08
| 2,039
|
py
|
Python
|
compiler_oj/testcase.py
|
XunGong99/compiler-offline-judge
|
89d03133d34bd06e6fe7bb4cbb016ac9fe9f78d5
|
[
"MIT"
] | 19
|
2018-05-01T09:15:18.000Z
|
2021-12-22T08:27:52.000Z
|
compiler_oj/testcase.py
|
XunGong99/compiler-offline-judge
|
89d03133d34bd06e6fe7bb4cbb016ac9fe9f78d5
|
[
"MIT"
] | 1
|
2018-05-01T13:59:58.000Z
|
2018-05-01T14:49:37.000Z
|
compiler_oj/testcase.py
|
XunGong99/compiler-offline-judge
|
89d03133d34bd06e6fe7bb4cbb016ac9fe9f78d5
|
[
"MIT"
] | 10
|
2018-05-28T02:31:29.000Z
|
2020-01-30T06:11:22.000Z
|
import os
class TestCase:
def __init__(self, raw, filename="unknown", t=1.0):
self.__raw = raw
self.filename = filename
self.src = self.__read_source()
self.comment = self.__find_block("comment")
self.input = self.__find_block("input")
self.output = self.__format_output(self.__find_block("output"))
self.assertion = self.__find_block("assert")
self.timeout = self.__find_block("timeout")
if self.timeout != "":
self.timeout = float(self.timeout)
else:
self.timeout = 10.0
self.timeout /= t # Continue for t seconds 续一秒!!
self.exitcode = self.__find_block("exitcode")
self.exitcode = int(self.exitcode) if self.exitcode != "" else None
self.phase = self.__find_block("phase")
@staticmethod
def __format_output(raw):
return '\n'.join(list(map(lambda x: x.strip(), raw.split('\n'))))
def __read_source(self):
end = self.__raw.find("/*!! metadata:")
return self.__raw[0:end]
def __find_block(self, name):
title = '=== ' + name + ' ==='
beg = self.__raw.find(title)
if beg == -1:
return ''
beg += len(title)
end = self.__raw.find("===", beg)
if end == -1:
end = self.__raw.find("!!*", beg)
if end == -1:
return ""
return self.__raw[beg:end].strip()
def read_testcases(dirs, t):
testcases = []
for dir in dirs:
names = os.listdir(dir)
print(dir + " : " + str(len(names)))
for name in names:
__, extension = os.path.splitext(name)
if extension != ".txt" and extension != ".mx":
# print(extension)
continue
with open(os.path.join(dir, name)) as f:
raw = f.read()
testcases.append(TestCase(raw, name, t))
print("testcases at all: " + str(len(testcases)))
# print([i.phase for i in testcases])
return testcases
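# Sketch of the metadata layout that TestCase expects at the end of a test
# source file (the block names come from __init__ above; the source code and
# values are illustrative):
#
#   int main() { ... }
#   /*!! metadata:
#   === comment ===
#   simple arithmetic
#   === input ===
#   1 2
#   === output ===
#   3
#   === exitcode ===
#   0
#   === timeout ===
#   2.0
#   !!*/
#
# Everything before "/*!! metadata:" becomes TestCase.src; each "=== name ==="
# block is extracted by __find_block().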
| 31.369231
| 75
| 0.545365
| 1,424
| 0.696333
| 0
| 0
| 117
| 0.057213
| 0
| 0
| 248
| 0.121271
|
62efb5daea165045f78966066a5dddd62fe07ac8
| 10,137
|
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_l3_interfaces.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_l3_interfaces.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/cisco/nxos/plugins/modules/nxos_l3_interfaces.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for nxos_l3_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: nxos_l3_interfaces
short_description: L3 interfaces resource module
description: This module manages Layer-3 interfaces attributes of NX-OS Interfaces.
version_added: 1.0.0
author: Trishna Guha (@trishnaguha)
notes:
- Tested against NXOS 7.3.(0)D1(1) on VIRL
options:
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the NX-OS device
by executing the command B(show running-config | section '^interface').
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
config:
description: A dictionary of Layer-3 interface options
type: list
elements: dict
suboptions:
name:
description:
- Full name of L3 interface, i.e. Ethernet1/1.
type: str
required: true
dot1q:
description:
- Configures IEEE 802.1Q VLAN encapsulation on a subinterface.
type: int
ipv4:
description:
- IPv4 address and attributes of the L3 interface.
type: list
elements: dict
suboptions:
address:
description:
- IPV4 address of the L3 interface.
type: str
tag:
description:
- URIB route tag value for local/direct routes.
type: int
secondary:
description:
- A boolean attribute to manage addition of secondary IP address.
type: bool
default: false
ipv6:
description:
- IPv6 address and attributes of the L3 interface.
type: list
elements: dict
suboptions:
address:
description:
- IPV6 address of the L3 interface.
type: str
tag:
description:
- URIB route tag value for local/direct routes.
type: int
redirects:
description:
- Enables/disables ip redirects
type: bool
unreachables:
description:
        - Enables/disables ip unreachables
type: bool
evpn_multisite_tracking:
description:
- VxLAN evpn multisite Interface tracking. Supported only on selected model.
type: str
version_added: 1.1.0
choices:
- fabric-tracking
- dci-tracking
state:
description:
- The state of the configuration after module completion.
- The state I(overridden) would override the IP address configuration
of all interfaces on the device with the provided configuration in
      the task. Use caution with this state as you may lose access to the
device.
type: str
choices:
- merged
- replaced
- overridden
- deleted
- gathered
- rendered
- parsed
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
#
# interface Ethernet1/6
- name: Merge provided configuration with device configuration.
cisco.nxos.nxos_l3_interfaces:
config:
- name: Ethernet1/6
ipv4:
- address: 192.168.1.1/24
tag: 5
- address: 10.1.1.1/24
secondary: true
tag: 10
ipv6:
- address: fd5d:12c9:2201:2::1/64
tag: 6
- name: Ethernet1/7.42
dot1q: 42
redirects: false
unreachables: false
state: merged
# After state:
# ------------
#
# interface Ethernet1/6
# ip address 192.168.22.1/24 tag 5
# ip address 10.1.1.1/24 secondary tag 10
# interface Ethernet1/6
# ipv6 address fd5d:12c9:2201:2::1/64 tag 6
# interface Ethernet1/7.42
# encapsulation dot1q 42
# no ip redirects
# no ip unreachables
# Using replaced
# Before state:
# -------------
#
# interface Ethernet1/6
# ip address 192.168.22.1/24
# ipv6 address "fd5d:12c9:2201:1::1/64"
- name: Replace device configuration of specified L3 interfaces with provided configuration.
cisco.nxos.nxos_l3_interfaces:
config:
- name: Ethernet1/6
ipv4:
- address: 192.168.22.3/24
state: replaced
# After state:
# ------------
#
# interface Ethernet1/6
# ip address 192.168.22.3/24
# Using overridden
# Before state:
# -------------
#
# interface Ethernet1/2
# ip address 192.168.22.1/24
# interface Ethernet1/6
# ipv6 address "fd5d:12c9:2201:1::1/64"
- name: Override device configuration of all L3 interfaces on device with provided
configuration.
cisco.nxos.nxos_l3_interfaces:
config:
- name: Ethernet1/2
ipv4: 192.168.22.3/4
state: overridden
# After state:
# ------------
#
# interface Ethernet1/2
# ipv4 address 192.168.22.3/24
# interface Ethernet1/6
# Using deleted
# Before state:
# -------------
#
# interface Ethernet1/6
# ip address 192.168.22.1/24
# interface Ethernet1/2
# ipv6 address "fd5d:12c9:2201:1::1/64"
- name: Delete L3 attributes of given interfaces (This won't delete the interface
itself).
cisco.nxos.nxos_l3_interfaces:
config:
- name: Ethernet1/6
- name: Ethernet1/2
state: deleted
# After state:
# ------------
#
# interface Ethernet1/6
# interface Ethernet1/2
# Using rendered
- name: Use rendered state to convert task input to device specific commands
cisco.nxos.nxos_l3_interfaces:
config:
- name: Ethernet1/800
ipv4:
- address: 192.168.1.100/24
tag: 5
- address: 10.1.1.1/24
secondary: true
tag: 10
- name: Ethernet1/800
ipv6:
- address: fd5d:12c9:2201:2::1/64
tag: 6
state: rendered
# Task Output (redacted)
# -----------------------
# rendered:
# - "interface Ethernet1/800"
# - "ip address 192.168.1.100/24 tag 5"
# - "ip address 10.1.1.1/24 secondary tag 10"
# - "interface Ethernet1/800"
# - "ipv6 address fd5d:12c9:2201:2::1/64 tag 6"
# Using parsed
# parsed.cfg
# ------------
# interface Ethernet1/800
# ip address 192.168.1.100/24 tag 5
# ip address 10.1.1.1/24 secondary tag 10
# no ip redirects
# interface Ethernet1/801
# ipv6 address fd5d:12c9:2201:2::1/64 tag 6
# ip unreachables
# interface mgmt0
# ip address dhcp
# vrf member management
- name: Use parsed state to convert externally supplied config to structured format
cisco.nxos.nxos_l3_interfaces:
running_config: "{{ lookup('file', 'parsed.cfg') }}"
state: parsed
# Task output (redacted)
# -----------------------
# parsed:
# - name: Ethernet1/800
# ipv4:
# - address: 192.168.1.100/24
# tag: 5
# - address: 10.1.1.1/24
# secondary: True
# tag: 10
# redirects: False
# - name: Ethernet1/801
# ipv6:
# - address: fd5d:12c9:2201:2::1/64
# tag: 6
# unreachables: True
# Using gathered
# Existing device config state
# -------------------------------
# interface Ethernet1/1
# ip address 192.0.2.100/24
# interface Ethernet1/2
# no ip redirects
# ip address 203.0.113.10/24
# ip unreachables
# ipv6 address 2001:db8::1/32
- name: Gather l3_interfaces facts from the device using nxos_l3_interfaces
cisco.nxos.nxos_l3_interfaces:
state: gathered
# Task output (redacted)
# -----------------------
# gathered:
# - name: Ethernet1/1
# ipv4:
# - address: 192.0.2.100/24
# - name: Ethernet1/2
# ipv4:
# - address: 203.0.113.10/24
# ipv6:
# - address: 2001:db8::1/32
# redirects: False
# unreachables: True
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface Ethernet1/2', 'ip address 192.168.0.1/2']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.argspec.l3_interfaces.l3_interfaces import (
L3_interfacesArgs,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.config.l3_interfaces.l3_interfaces import (
L3_interfaces,
)
def main():
"""
Main entry point for module execution
    :returns: the result from module invocation
"""
required_if = [
("state", "merged", ("config",)),
("state", "replaced", ("config",)),
("state", "overridden", ("config",)),
("state", "rendered", ("config",)),
("state", "parsed", ("running_config",)),
]
mutually_exclusive = [("config", "running_config")]
module = AnsibleModule(
argument_spec=L3_interfacesArgs.argument_spec,
required_if=required_if,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
)
result = L3_interfaces(module).execute_module()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 24.96798 | 114 | 0.625037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,163 | 0.903916 |
62f07f01c417635fb15b3f7c35ca5a2a958e3a07 | 2,426 | py | Python | tests/ethereumetl/job/test_extract_geth_traces_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | ["MIT"] | null | null | null | tests/ethereumetl/job/test_extract_geth_traces_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | ["MIT"] | null | null | null | tests/ethereumetl/job/test_extract_geth_traces_job.py | XWorldGames/bsc-etl | c4a1ba72381340994ec376e6de860cde6637becc | ["MIT"] | null | null | null |
# MIT License
#
# Copyright (c) 2018 Evgeniy Filatov, evgeniyfilatov@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import pytest
import tests.resources
from bscetl.jobs.exporters.traces_item_exporter import traces_item_exporter
from bscetl.jobs.extract_geth_traces_job import ExtractGethTracesJob
from tests.helpers import compare_lines_ignore_order, read_file
RESOURCE_GROUP = 'test_extract_geth_traces_job'
def read_resource(resource_group, file_name):
return tests.resources.read_resource([RESOURCE_GROUP, resource_group], file_name)
@pytest.mark.parametrize('resource_group', [
'block_without_transactions',
'block_with_create',
'block_with_suicide',
'block_with_subtraces',
'block_with_error',
])
def test_extract_traces_job(tmpdir, resource_group):
output_file = str(tmpdir.join('actual_traces.csv'))
geth_traces_content = read_resource(resource_group, 'geth_traces.json')
traces_iterable = (json.loads(line) for line in geth_traces_content.splitlines())
job = ExtractGethTracesJob(
traces_iterable=traces_iterable,
batch_size=2,
item_exporter=traces_item_exporter(output_file),
max_workers=5
)
job.run()
print('=====================')
print(read_file(output_file))
compare_lines_ignore_order(
read_resource(resource_group, 'expected_traces.csv'), read_file(output_file)
)
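# For context, a minimal standalone sketch of driving the same job outside
# pytest; the imports and constructor arguments mirror the test above, while
# 'geth_traces.json' and 'traces.csv' are placeholder paths.
if __name__ == '__main__':
    with open('geth_traces.json') as geth_traces_file:
        standalone_traces = (json.loads(line) for line in geth_traces_file)
        standalone_job = ExtractGethTracesJob(
            traces_iterable=standalone_traces,
            batch_size=2,
            item_exporter=traces_item_exporter('traces.csv'),
            max_workers=5,
        )
        standalone_job.run()  # writes the extracted traces to traces.csv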
| 37.90625 | 85 | 0.766694 | 0 | 0 | 0 | 0 | 843 | 0.347486 | 0 | 0 | 1,349 | 0.556059 |
62f07ff59fb064975f519a1d53028c1dfda5c299 | 4,582 | py | Python | typed_python/compiler/type_wrappers/ref_to_wrapper.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 105 | 2019-12-02T01:44:46.000Z | 2022-03-28T20:27:38.000Z | typed_python/compiler/type_wrappers/ref_to_wrapper.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 173 | 2019-10-08T19:37:06.000Z | 2022-01-24T18:43:42.000Z | typed_python/compiler/type_wrappers/ref_to_wrapper.py | APrioriInvestments/typed_python | a3191e5d30333eba156c2a910abc78f7813dcaa3 | ["Apache-2.0"] | 1 | 2020-01-23T00:06:42.000Z | 2020-01-23T00:06:42.000Z |
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.compiler.type_wrappers.wrapper import Wrapper
from typed_python.compiler.typed_expression import TypedExpression
from typed_python._types import refTo
import typed_python.compiler.native_ast as native_ast
import typed_python.compiler
typeWrapper = lambda t: typed_python.compiler.python_object_representation.typedPythonTypeToTypeWrapper(t)
class RefToObjectWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(refTo)
def getNativeLayoutType(self):
return native_ast.Type.Void()
@Wrapper.unwrapOneOfAndValue
def convert_call(self, context, expr, args, kwargs):
if len(args) != 1 or kwargs:
return super().convert_call(context, expr, args, kwargs)
return args[0].expr_type.convert_refTo(context, args[0])
class RefToWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self, t):
super().__init__(t)
self.layoutType = typeWrapper(t.ElementType).getNativeLayoutType().pointer()
def underlyingTypeWrapper(self):
return typeWrapper(self.typeRepresentation.ElementType)
def getNativeLayoutType(self):
return self.layoutType
def convert_assign(self, context, target, toStore):
assert target.isReference
context.pushEffect(
target.expr.store(toStore.nonref_expr)
)
def convert_copy_initialize(self, context, target, toStore):
assert target.isReference
context.pushEffect(
target.expr.store(toStore.nonref_expr)
)
def deref(self, instance):
return TypedExpression(
instance.context,
instance.nonref_expr,
typeWrapper(self.typeRepresentation.ElementType),
True
)
def convert_destroy(self, context, instance):
pass
def _can_convert_to_type(self, targetType, conversionLevel):
return self.underlyingTypeWrapper._can_convert_to_type(targetType, conversionLevel)
def convert_to_type_with_target(self, context, instance, targetVal, conversionLevel, mayThrowOnFailure=False):
return self.deref(instance).convert_to_type_with_target(targetVal, conversionLevel)
def convert_bin_op(self, context, left, op, right, inplace):
return self.deref(left).convert_bin_op(op, right, inplace)
def convert_unary_op(self, context, left, op):
return self.deref(left).convert_unary_op(op)
def convert_attribute(self, context, instance, attr):
return self.deref(instance).convert_attribute(attr)
def convert_getitem(self, context, instance, key):
return self.deref(instance).convert_getitem(key)
def convert_setitem(self, context, instance, key, val):
return self.deref(instance).convert_setitem(key, val)
def convert_method_call(self, context, instance, methodname, args, kwargs):
return self.deref(instance).convert_method_call(methodname, args, kwargs)
def convert_set_attribute(self, context, instance, attribute, value):
return self.deref(instance).convert_set_attribute(attribute, value)
def convert_hash(self, context, expr):
return self.deref(expr).convert_hash()
def convert_call(self, context, expr, args, kwargs):
self.deref(expr).convert_call(args, kwargs)
def convert_len(self, context, expr):
self.deref(expr).convert_len()
def convert_abs(self, context, expr):
self.deref(expr).convert_abs()
def convert_repr(self, context, expr):
self.deref(expr).convert_repr()
def convert_builtin(self, f, context, expr, a1=None):
self.deref(expr).convert_builtin(a1)
def convert_comparison(self, context, l, op, r):
self.deref(l).convert_comparison(op, r)
def convert_bin_op_reverse(self, context, r, op, l, inplace):
self.deref(r).convert_bin_op_reverse(op, l, inplace)
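# Illustrative analogue (plain Python, not part of the typed_python API): the
# wrappers above compile a "reference" by delegating every operation to the
# dereferenced element; this tiny runtime proxy mirrors that delegation.
class _RefSketch:
    def __init__(self, get):
        self._get = get              # callable producing the referenced value

    def deref(self):
        return self._get()

    def __add__(self, other):        # mirrors convert_bin_op -> deref(...).convert_bin_op
        return self.deref() + other

    def __getattr__(self, name):     # mirrors convert_attribute
        return getattr(self.deref(), name)

# e.g. xs = [10, 20, 30]; _RefSketch(lambda: xs[0]) + 1 == 11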
| 34.451128 | 114 | 0.712134 | 3,605 | 0.786774 | 0 | 0 | 257 | 0.056089 | 0 | 0 | 596 | 0.130074 |
62f131f2fd644c186231aef33c85b6720ddcf3fc | 587 | py | Python | securesite/payroll/admin.py | simokauranen/payroll_api_localhost | 76cb4dede290afa1204236fb7b097eaeee61eb21 | ["MIT"] | null | null | null | securesite/payroll/admin.py | simokauranen/payroll_api_localhost | 76cb4dede290afa1204236fb7b097eaeee61eb21 | ["MIT"] | null | null | null | securesite/payroll/admin.py | simokauranen/payroll_api_localhost | 76cb4dede290afa1204236fb7b097eaeee61eb21 | ["MIT"] | null | null | null |
"""Module to add Employee fields to the User admin interface."""
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Employee
class EmployeeInline(admin.StackedInline):
model = Employee
can_delete = False
max_num = 1
verbose_name_plural = 'employee'
class UserAdmin(BaseUserAdmin):
# Add the ssn, salary and last_updated fields to User admin view
inlines = (EmployeeInline,)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
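# A minimal sketch of the Employee model the inline above assumes (normally
# defined in payroll/models.py). The field names ssn, salary and last_updated
# come from the comment in UserAdmin; the real model may differ.
from django.contrib.auth.models import User as AuthUser
from django.db import models

class Employee(models.Model):
    user = models.OneToOneField(AuthUser, on_delete=models.CASCADE)
    ssn = models.CharField(max_length=11)
    salary = models.DecimalField(max_digits=10, decimal_places=2)
    last_updated = models.DateTimeField(auto_now=True)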
| 23.48 | 68 | 0.758092 | 271 | 0.46167 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.235094 |
62f23976c671843e25e3faff67dcc2d0f2fe7178 | 1,332 | py | Python | qamplus/voice.py | qamplus/qamplus-pythonsdk | 5669e621018ffd6605354b672b446c3ad631665d | ["MIT"] | null | null | null | qamplus/voice.py | qamplus/qamplus-pythonsdk | 5669e621018ffd6605354b672b446c3ad631665d | ["MIT"] | null | null | null | qamplus/voice.py | qamplus/qamplus-pythonsdk | 5669e621018ffd6605354b672b446c3ad631665d | ["MIT"] | null | null | null |
class VoiceClient(object):
def __init__(self, base_obj):
self.base_obj = base_obj
self.api_resource = "/voice/v1/{}"
def create(self,
direction,
to,
caller_id,
execution_logic,
reference_logic='',
country_iso2='us',
technology='pstn',
status_callback_uri=''):
api_resource = self.api_resource.format(direction)
return self.base_obj.post(api_resource=api_resource, direction=direction, to=to,
caller_id=caller_id, execution_logic=execution_logic, reference_logic=reference_logic,
country_iso2=country_iso2, technology=technology, status_callback_uri=status_callback_uri)
def update(self, reference_id, execution_logic):
api_resource = self.api_resource.format(reference_id)
return self.base_obj.put(api_resource=api_resource,
execution_logic=execution_logic)
def delete(self, reference_id):
api_resource = self.api_resource.format(reference_id)
return self.base_obj.delete(api_resource=api_resource)
def get_status(self, reference_id):
api_resource = self.api_resource.format(reference_id)
return self.base_obj.get(api_resource=api_resource)
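# Hypothetical usage sketch with a stub transport. The real base_obj, the
# 'outbound' direction value, the phone numbers and the response shapes below
# are assumptions, not part of this file.
class _StubTransport:
    def post(self, api_resource, **kwargs):
        return {'reference_id': 'ref-123'}      # assumed response shape

    def get(self, api_resource, **kwargs):
        return {'status': 'in-progress'}        # assumed response shape

    def put(self, api_resource, **kwargs):
        return {}

    def delete(self, api_resource, **kwargs):
        return {}


if __name__ == '__main__':
    client = VoiceClient(_StubTransport())
    call = client.create(
        direction='outbound',                   # assumed value
        to='+15551234567',
        caller_id='+15557654321',
        execution_logic='https://example.com/ivr-logic',
    )
    status = client.get_status(call['reference_id'])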
| 32.487805 | 102 | 0.652402 | 1,326 | 0.995495 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.021021 |
62f447bcd9eda650bb251abec9e284d7ebf171db | 1,865 | py | Python | tests/test_mpauth.py | chuter/wechat-requests | 23591f8e04e795a1727e6a8029602cfb2dde90f1 | ["MIT"] | 3 | 2019-06-17T10:54:03.000Z | 2021-01-29T08:25:01.000Z | tests/test_mpauth.py | chuter/wechat-requests | 23591f8e04e795a1727e6a8029602cfb2dde90f1 | ["MIT"] | 2 | 2020-03-24T15:46:37.000Z | 2020-03-30T20:26:19.000Z | tests/test_mpauth.py | chuter/wechat-requests | 23591f8e04e795a1727e6a8029602cfb2dde90f1 | ["MIT"] | null | null | null |
# -*- encoding: utf-8
import pytest
from wechat.result import build_from_response
from wechat.auth import MpOuthApi, get_mp_access_token
@pytest.mark.usefixtures('response_builder')
class TestAuth:
def test_mp_auth_params(self, mocker, mp_appid, mp_secret):
patched_request_execute = mocker.MagicMock(
return_value=build_from_response(
self.response(text=u'{"access_token": "fake token"}')
)
)
mocker.patch.object(
MpOuthApi,
'_execute_request',
patched_request_execute
)
get_mp_access_token(mp_appid, mp_secret)
patched_request_execute.assert_called_once_with(
'GET',
'https://api.weixin.qq.com/cgi-bin/token',
params_dict={
"grant_type": "client_credential",
"appid": mp_appid,
"secret": mp_secret
},
allow_redirects=False,
timeout=mocker.ANY
)
def test_mp_auth_retry(self, mocker, mp_appid, mp_secret):
patched_request_execute = mocker.MagicMock(
return_value=build_from_response(
self.response(text=u'{"errcode": -1}')
)
)
mocker.patch.object(
MpOuthApi,
'_execute_request',
patched_request_execute
)
mocker.spy(MpOuthApi, '_execute_request')
result = get_mp_access_token(mp_appid, mp_secret)
assert result.errcode == -1
assert patched_request_execute.call_count == 2
def test_auth_appid_secret_immutable(self, mp_appid, mp_secret):
outh = MpOuthApi(mp_appid, mp_secret)
with pytest.raises(AttributeError):
outh._appid = 'new appid'
with pytest.raises(AttributeError):
outh._secret = 'new secret'
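# Hypothetical direct usage (outside the mocks above): the appid and secret
# values are placeholders, and the errcode / access_token attributes are
# inferred from the mocked JSON responses used in the tests.
#
# from wechat.auth import get_mp_access_token
#
# result = get_mp_access_token('wx1234567890abcdef', 'my-app-secret')
# print(result.errcode, result.access_token)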
| 29.603175 | 69 | 0.604826 | 1,677 | 0.899196 | 0 | 0 | 1,722 | 0.923324 | 0 | 0 | 259 | 0.138874 |
62f5cef50adba84125aceb4b7bcd641b085ef856 | 76,410 | py | Python | python/pb/pomerium/pb/users_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | ["Apache-2.0"] | null | null | null | python/pb/pomerium/pb/users_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | ["Apache-2.0"] | null | null | null | python/pb/pomerium/pb/users_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users.proto',
package='pomerium.dashboard',
syntax='proto3',
serialized_options=b'Z+github.com/pomerium/pomerium-console/pkg/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0busers.proto\x12\x12pomerium.dashboard\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xd3\x01\n\rRecoveryToken\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12.\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bmodified_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nexpires_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\npublic_key\x18\x06 \x01(\t\"%\n\tGroupInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"\xf3\x01\n\x08UserInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12\x0e\n\x06groups\x18\x04 \x03(\t\x12I\n\x0fnamespace_roles\x18\x05 \x03(\x0b\x32\x30.pomerium.dashboard.UserInfo.NamespaceRolesEntry\x12\x13\n\x0bpicture_url\x18\x06 \x01(\t\x12\x17\n\x0fis_impersonated\x18\x07 \x01(\x08\x1a\x35\n\x13NamespaceRolesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"6\n\x12GetUserInfoRequest\x12\x14\n\x07user_id\x18\x01 \x01(\tH\x00\x88\x01\x01\x42\n\n\x08_user_id\"F\n\x13GetUserInfoResponse\x12/\n\tuser_info\x18\x01 \x01(\x0b\x32\x1c.pomerium.dashboard.UserInfo\"B\n\x12QueryGroupsRequest\x12\r\n\x05query\x18\x01 \x01(\t\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\r\n\x05limit\x18\x03 \x01(\x03\"Y\n\x13QueryGroupsResponse\x12-\n\x06groups\x18\x01 \x03(\x0b\x32\x1d.pomerium.dashboard.GroupInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x03\"A\n\x11QueryUsersRequest\x12\r\n\x05query\x18\x01 \x01(\t\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\r\n\x05limit\x18\x03 \x01(\x03\"V\n\x12QueryUsersResponse\x12+\n\x05users\x18\x01 \x03(\x0b\x32\x1c.pomerium.dashboard.UserInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x03\"\xc0\x01\n\x16PomeriumServiceAccount\x12\n\n\x02id\x18\x01 \x01(\t\x12\x19\n\x0cnamespace_id\x18\x08 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x07user_id\x18\x02 \x01(\t\x12.\n\nexpires_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12-\n\tissued_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x0f\n\r_namespace_id\"g\n AddPomeriumServiceAccountRequest\x12\x43\n\x0fservice_account\x18\x01 \x01(\x0b\x32*.pomerium.dashboard.PomeriumServiceAccount\"u\n!AddPomeriumServiceAccountResponse\x12\x43\n\x0fservice_account\x18\x01 \x01(\x0b\x32*.pomerium.dashboard.PomeriumServiceAccount\x12\x0b\n\x03JWT\x18\x02 \x01(\t\"1\n#DeletePomeriumServiceAccountRequest\x12\n\n\x02id\x18\x01 \x01(\t\"&\n$DeletePomeriumServiceAccountResponse\".\n GetPomeriumServiceAccountRequest\x12\n\n\x02id\x18\x01 \x01(\t\"h\n!GetPomeriumServiceAccountResponse\x12\x43\n\x0fservice_account\x18\x01 \x01(\x0b\x32*.pomerium.dashboard.PomeriumServiceAccount\"7\n\"ListPomeriumServiceAccountsRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\"k\n#ListPomeriumServiceAccountsResponse\x12\x44\n\x10service_accounts\x18\x01 \x03(\x0b\x32*.pomerium.dashboard.PomeriumServiceAccount\"\x80\x04\n\x0fPomeriumSession\x12\n\n\x02id\x18\x01 \x01(\t\x12\x36\n\x04user\x18\x02 \x01(\x0b\x32(.pomerium.dashboard.PomeriumSession.User\x12\x39\n\x06groups\x18\x03 \x03(\x0b\x32).pomerium.dashboard.PomeriumSession.Group\x12\x0e\n\x06issuer\x18\x04 \x01(\t\x12-\n\tissued_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nexpires_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x10\n\x08\x61udience\x18\x07 \x03(\t\x12?\n\x06\x63laims\x18\x08 \x03(\x0b\x32/.pomerium.dashboard.PomeriumSession.ClaimsEntry\x1a\x30\n\x05Group\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 
\x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x1a/\n\x04User\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x1aI\n\x0b\x43laimsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.ListValue:\x02\x38\x01\"*\n\x1c\x44\x65letePomeriumSessionRequest\x12\n\n\x02id\x18\x01 \x01(\t\"\x1f\n\x1d\x44\x65letePomeriumSessionResponse\"\'\n\x19GetPomeriumSessionRequest\x12\n\n\x02id\x18\x01 \x01(\t\"R\n\x1aGetPomeriumSessionResponse\x12\x34\n\x07session\x18\x01 \x01(\x0b\x32#.pomerium.dashboard.PomeriumSession\"\xbf\x01\n\x1bListPomeriumSessionsRequest\x12\x12\n\x05query\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06offset\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x03 \x01(\x03H\x02\x88\x01\x01\x12\x15\n\x08order_by\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07user_id\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x08\n\x06_queryB\t\n\x07_offsetB\x08\n\x06_limitB\x0b\n\t_order_byB\n\n\x08_user_id\"j\n\x1cListPomeriumSessionsResponse\x12\x35\n\x08sessions\x18\x01 \x03(\x0b\x32#.pomerium.dashboard.PomeriumSession\x12\x13\n\x0btotal_count\x18\x02 \x01(\x03\"(\n\x12ImpersonateRequest\x12\x12\n\nsession_id\x18\x01 \x01(\t\"\x15\n\x13ImpersonateResponse2\xaa\x02\n\x0bUserService\x12^\n\x0bGetUserInfo\x12&.pomerium.dashboard.GetUserInfoRequest\x1a\'.pomerium.dashboard.GetUserInfoResponse\x12^\n\x0bQueryGroups\x12&.pomerium.dashboard.QueryGroupsRequest\x1a\'.pomerium.dashboard.QueryGroupsResponse\x12[\n\nQueryUsers\x12%.pomerium.dashboard.QueryUsersRequest\x1a&.pomerium.dashboard.QueryUsersResponse2\xda\x04\n\x1dPomeriumServiceAccountService\x12\x88\x01\n\x19\x41\x64\x64PomeriumServiceAccount\x12\x34.pomerium.dashboard.AddPomeriumServiceAccountRequest\x1a\x35.pomerium.dashboard.AddPomeriumServiceAccountResponse\x12\x91\x01\n\x1c\x44\x65letePomeriumServiceAccount\x12\x37.pomerium.dashboard.DeletePomeriumServiceAccountRequest\x1a\x38.pomerium.dashboard.DeletePomeriumServiceAccountResponse\x12\x88\x01\n\x19GetPomeriumServiceAccount\x12\x34.pomerium.dashboard.GetPomeriumServiceAccountRequest\x1a\x35.pomerium.dashboard.GetPomeriumServiceAccountResponse\x12\x8e\x01\n\x1bListPomeriumServiceAccounts\x12\x36.pomerium.dashboard.ListPomeriumServiceAccountsRequest\x1a\x37.pomerium.dashboard.ListPomeriumServiceAccountsResponse2\xe6\x03\n\x16PomeriumSessionService\x12|\n\x15\x44\x65letePomeriumSession\x12\x30.pomerium.dashboard.DeletePomeriumSessionRequest\x1a\x31.pomerium.dashboard.DeletePomeriumSessionResponse\x12s\n\x12GetPomeriumSession\x12-.pomerium.dashboard.GetPomeriumSessionRequest\x1a..pomerium.dashboard.GetPomeriumSessionResponse\x12^\n\x0bImpersonate\x12&.pomerium.dashboard.ImpersonateRequest\x1a\'.pomerium.dashboard.ImpersonateResponse\x12y\n\x14ListPomeriumSessions\x12/.pomerium.dashboard.ListPomeriumSessionsRequest\x1a\x30.pomerium.dashboard.ListPomeriumSessionsResponseB-Z+github.com/pomerium/pomerium-console/pkg/pbb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_RECOVERYTOKEN = _descriptor.Descriptor(
name='RecoveryToken',
full_name='pomerium.dashboard.RecoveryToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.RecoveryToken.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='pomerium.dashboard.RecoveryToken.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_at', full_name='pomerium.dashboard.RecoveryToken.created_at', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='modified_at', full_name='pomerium.dashboard.RecoveryToken.modified_at', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expires_at', full_name='pomerium.dashboard.RecoveryToken.expires_at', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key', full_name='pomerium.dashboard.RecoveryToken.public_key', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=99,
serialized_end=310,
)
_GROUPINFO = _descriptor.Descriptor(
name='GroupInfo',
full_name='pomerium.dashboard.GroupInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.GroupInfo.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='pomerium.dashboard.GroupInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=312,
serialized_end=349,
)
_USERINFO_NAMESPACEROLESENTRY = _descriptor.Descriptor(
name='NamespaceRolesEntry',
full_name='pomerium.dashboard.UserInfo.NamespaceRolesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pomerium.dashboard.UserInfo.NamespaceRolesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pomerium.dashboard.UserInfo.NamespaceRolesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=542,
serialized_end=595,
)
_USERINFO = _descriptor.Descriptor(
name='UserInfo',
full_name='pomerium.dashboard.UserInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.UserInfo.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='pomerium.dashboard.UserInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='pomerium.dashboard.UserInfo.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='groups', full_name='pomerium.dashboard.UserInfo.groups', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace_roles', full_name='pomerium.dashboard.UserInfo.namespace_roles', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='picture_url', full_name='pomerium.dashboard.UserInfo.picture_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_impersonated', full_name='pomerium.dashboard.UserInfo.is_impersonated', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_USERINFO_NAMESPACEROLESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=352,
serialized_end=595,
)
_GETUSERINFOREQUEST = _descriptor.Descriptor(
name='GetUserInfoRequest',
full_name='pomerium.dashboard.GetUserInfoRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='user_id', full_name='pomerium.dashboard.GetUserInfoRequest.user_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_user_id', full_name='pomerium.dashboard.GetUserInfoRequest._user_id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=597,
serialized_end=651,
)
_GETUSERINFORESPONSE = _descriptor.Descriptor(
name='GetUserInfoResponse',
full_name='pomerium.dashboard.GetUserInfoResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='user_info', full_name='pomerium.dashboard.GetUserInfoResponse.user_info', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=653,
serialized_end=723,
)
_QUERYGROUPSREQUEST = _descriptor.Descriptor(
name='QueryGroupsRequest',
full_name='pomerium.dashboard.QueryGroupsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='pomerium.dashboard.QueryGroupsRequest.query', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pomerium.dashboard.QueryGroupsRequest.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pomerium.dashboard.QueryGroupsRequest.limit', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=725,
serialized_end=791,
)
_QUERYGROUPSRESPONSE = _descriptor.Descriptor(
name='QueryGroupsResponse',
full_name='pomerium.dashboard.QueryGroupsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='groups', full_name='pomerium.dashboard.QueryGroupsResponse.groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='pomerium.dashboard.QueryGroupsResponse.total_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=793,
serialized_end=882,
)
_QUERYUSERSREQUEST = _descriptor.Descriptor(
name='QueryUsersRequest',
full_name='pomerium.dashboard.QueryUsersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='pomerium.dashboard.QueryUsersRequest.query', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pomerium.dashboard.QueryUsersRequest.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pomerium.dashboard.QueryUsersRequest.limit', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=884,
serialized_end=949,
)
_QUERYUSERSRESPONSE = _descriptor.Descriptor(
name='QueryUsersResponse',
full_name='pomerium.dashboard.QueryUsersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='users', full_name='pomerium.dashboard.QueryUsersResponse.users', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='pomerium.dashboard.QueryUsersResponse.total_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=951,
serialized_end=1037,
)
_POMERIUMSERVICEACCOUNT = _descriptor.Descriptor(
name='PomeriumServiceAccount',
full_name='pomerium.dashboard.PomeriumServiceAccount',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.PomeriumServiceAccount.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace_id', full_name='pomerium.dashboard.PomeriumServiceAccount.namespace_id', index=1,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_id', full_name='pomerium.dashboard.PomeriumServiceAccount.user_id', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expires_at', full_name='pomerium.dashboard.PomeriumServiceAccount.expires_at', index=3,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='issued_at', full_name='pomerium.dashboard.PomeriumServiceAccount.issued_at', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_namespace_id', full_name='pomerium.dashboard.PomeriumServiceAccount._namespace_id',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1040,
serialized_end=1232,
)
_ADDPOMERIUMSERVICEACCOUNTREQUEST = _descriptor.Descriptor(
name='AddPomeriumServiceAccountRequest',
full_name='pomerium.dashboard.AddPomeriumServiceAccountRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='service_account', full_name='pomerium.dashboard.AddPomeriumServiceAccountRequest.service_account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1234,
serialized_end=1337,
)
_ADDPOMERIUMSERVICEACCOUNTRESPONSE = _descriptor.Descriptor(
name='AddPomeriumServiceAccountResponse',
full_name='pomerium.dashboard.AddPomeriumServiceAccountResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='service_account', full_name='pomerium.dashboard.AddPomeriumServiceAccountResponse.service_account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='JWT', full_name='pomerium.dashboard.AddPomeriumServiceAccountResponse.JWT', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1339,
serialized_end=1456,
)
_DELETEPOMERIUMSERVICEACCOUNTREQUEST = _descriptor.Descriptor(
name='DeletePomeriumServiceAccountRequest',
full_name='pomerium.dashboard.DeletePomeriumServiceAccountRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.DeletePomeriumServiceAccountRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1458,
serialized_end=1507,
)
_DELETEPOMERIUMSERVICEACCOUNTRESPONSE = _descriptor.Descriptor(
name='DeletePomeriumServiceAccountResponse',
full_name='pomerium.dashboard.DeletePomeriumServiceAccountResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1509,
serialized_end=1547,
)
_GETPOMERIUMSERVICEACCOUNTREQUEST = _descriptor.Descriptor(
name='GetPomeriumServiceAccountRequest',
full_name='pomerium.dashboard.GetPomeriumServiceAccountRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.GetPomeriumServiceAccountRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1549,
serialized_end=1595,
)
_GETPOMERIUMSERVICEACCOUNTRESPONSE = _descriptor.Descriptor(
name='GetPomeriumServiceAccountResponse',
full_name='pomerium.dashboard.GetPomeriumServiceAccountResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='service_account', full_name='pomerium.dashboard.GetPomeriumServiceAccountResponse.service_account', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1597,
serialized_end=1701,
)
_LISTPOMERIUMSERVICEACCOUNTSREQUEST = _descriptor.Descriptor(
name='ListPomeriumServiceAccountsRequest',
full_name='pomerium.dashboard.ListPomeriumServiceAccountsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='pomerium.dashboard.ListPomeriumServiceAccountsRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1703,
serialized_end=1758,
)
_LISTPOMERIUMSERVICEACCOUNTSRESPONSE = _descriptor.Descriptor(
name='ListPomeriumServiceAccountsResponse',
full_name='pomerium.dashboard.ListPomeriumServiceAccountsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='service_accounts', full_name='pomerium.dashboard.ListPomeriumServiceAccountsResponse.service_accounts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1760,
serialized_end=1867,
)
_POMERIUMSESSION_GROUP = _descriptor.Descriptor(
name='Group',
full_name='pomerium.dashboard.PomeriumSession.Group',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.PomeriumSession.Group.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='pomerium.dashboard.PomeriumSession.Group.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='pomerium.dashboard.PomeriumSession.Group.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2210,
serialized_end=2258,
)
_POMERIUMSESSION_USER = _descriptor.Descriptor(
name='User',
full_name='pomerium.dashboard.PomeriumSession.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.PomeriumSession.User.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='pomerium.dashboard.PomeriumSession.User.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='pomerium.dashboard.PomeriumSession.User.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2260,
serialized_end=2307,
)
_POMERIUMSESSION_CLAIMSENTRY = _descriptor.Descriptor(
name='ClaimsEntry',
full_name='pomerium.dashboard.PomeriumSession.ClaimsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pomerium.dashboard.PomeriumSession.ClaimsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pomerium.dashboard.PomeriumSession.ClaimsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2309,
serialized_end=2382,
)
_POMERIUMSESSION = _descriptor.Descriptor(
name='PomeriumSession',
full_name='pomerium.dashboard.PomeriumSession',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.PomeriumSession.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user', full_name='pomerium.dashboard.PomeriumSession.user', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='groups', full_name='pomerium.dashboard.PomeriumSession.groups', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='issuer', full_name='pomerium.dashboard.PomeriumSession.issuer', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='issued_at', full_name='pomerium.dashboard.PomeriumSession.issued_at', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expires_at', full_name='pomerium.dashboard.PomeriumSession.expires_at', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='audience', full_name='pomerium.dashboard.PomeriumSession.audience', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claims', full_name='pomerium.dashboard.PomeriumSession.claims', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_POMERIUMSESSION_GROUP, _POMERIUMSESSION_USER, _POMERIUMSESSION_CLAIMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1870,
serialized_end=2382,
)
_DELETEPOMERIUMSESSIONREQUEST = _descriptor.Descriptor(
name='DeletePomeriumSessionRequest',
full_name='pomerium.dashboard.DeletePomeriumSessionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.DeletePomeriumSessionRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2384,
serialized_end=2426,
)
_DELETEPOMERIUMSESSIONRESPONSE = _descriptor.Descriptor(
name='DeletePomeriumSessionResponse',
full_name='pomerium.dashboard.DeletePomeriumSessionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2428,
serialized_end=2459,
)
_GETPOMERIUMSESSIONREQUEST = _descriptor.Descriptor(
name='GetPomeriumSessionRequest',
full_name='pomerium.dashboard.GetPomeriumSessionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pomerium.dashboard.GetPomeriumSessionRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2461,
serialized_end=2500,
)
_GETPOMERIUMSESSIONRESPONSE = _descriptor.Descriptor(
name='GetPomeriumSessionResponse',
full_name='pomerium.dashboard.GetPomeriumSessionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='session', full_name='pomerium.dashboard.GetPomeriumSessionResponse.session', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2502,
serialized_end=2584,
)
_LISTPOMERIUMSESSIONSREQUEST = _descriptor.Descriptor(
name='ListPomeriumSessionsRequest',
full_name='pomerium.dashboard.ListPomeriumSessionsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='pomerium.dashboard.ListPomeriumSessionsRequest.query', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pomerium.dashboard.ListPomeriumSessionsRequest.offset', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pomerium.dashboard.ListPomeriumSessionsRequest.limit', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_by', full_name='pomerium.dashboard.ListPomeriumSessionsRequest.order_by', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_id', full_name='pomerium.dashboard.ListPomeriumSessionsRequest.user_id', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_query', full_name='pomerium.dashboard.ListPomeriumSessionsRequest._query',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_offset', full_name='pomerium.dashboard.ListPomeriumSessionsRequest._offset',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_limit', full_name='pomerium.dashboard.ListPomeriumSessionsRequest._limit',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_order_by', full_name='pomerium.dashboard.ListPomeriumSessionsRequest._order_by',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_user_id', full_name='pomerium.dashboard.ListPomeriumSessionsRequest._user_id',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=2587,
serialized_end=2778,
)
_LISTPOMERIUMSESSIONSRESPONSE = _descriptor.Descriptor(
name='ListPomeriumSessionsResponse',
full_name='pomerium.dashboard.ListPomeriumSessionsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='sessions', full_name='pomerium.dashboard.ListPomeriumSessionsResponse.sessions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='pomerium.dashboard.ListPomeriumSessionsResponse.total_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2780,
serialized_end=2886,
)
_IMPERSONATEREQUEST = _descriptor.Descriptor(
name='ImpersonateRequest',
full_name='pomerium.dashboard.ImpersonateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='session_id', full_name='pomerium.dashboard.ImpersonateRequest.session_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2888,
serialized_end=2928,
)
_IMPERSONATERESPONSE = _descriptor.Descriptor(
name='ImpersonateResponse',
full_name='pomerium.dashboard.ImpersonateResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2930,
serialized_end=2951,
)
_RECOVERYTOKEN.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RECOVERYTOKEN.fields_by_name['modified_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RECOVERYTOKEN.fields_by_name['expires_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_USERINFO_NAMESPACEROLESENTRY.containing_type = _USERINFO
_USERINFO.fields_by_name['namespace_roles'].message_type = _USERINFO_NAMESPACEROLESENTRY
_GETUSERINFOREQUEST.oneofs_by_name['_user_id'].fields.append(
_GETUSERINFOREQUEST.fields_by_name['user_id'])
_GETUSERINFOREQUEST.fields_by_name['user_id'].containing_oneof = _GETUSERINFOREQUEST.oneofs_by_name['_user_id']
_GETUSERINFORESPONSE.fields_by_name['user_info'].message_type = _USERINFO
_QUERYGROUPSRESPONSE.fields_by_name['groups'].message_type = _GROUPINFO
_QUERYUSERSRESPONSE.fields_by_name['users'].message_type = _USERINFO
_POMERIUMSERVICEACCOUNT.fields_by_name['expires_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_POMERIUMSERVICEACCOUNT.fields_by_name['issued_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_POMERIUMSERVICEACCOUNT.oneofs_by_name['_namespace_id'].fields.append(
_POMERIUMSERVICEACCOUNT.fields_by_name['namespace_id'])
_POMERIUMSERVICEACCOUNT.fields_by_name['namespace_id'].containing_oneof = _POMERIUMSERVICEACCOUNT.oneofs_by_name['_namespace_id']
_ADDPOMERIUMSERVICEACCOUNTREQUEST.fields_by_name['service_account'].message_type = _POMERIUMSERVICEACCOUNT
_ADDPOMERIUMSERVICEACCOUNTRESPONSE.fields_by_name['service_account'].message_type = _POMERIUMSERVICEACCOUNT
_GETPOMERIUMSERVICEACCOUNTRESPONSE.fields_by_name['service_account'].message_type = _POMERIUMSERVICEACCOUNT
_LISTPOMERIUMSERVICEACCOUNTSRESPONSE.fields_by_name['service_accounts'].message_type = _POMERIUMSERVICEACCOUNT
_POMERIUMSESSION_GROUP.containing_type = _POMERIUMSESSION
_POMERIUMSESSION_USER.containing_type = _POMERIUMSESSION
_POMERIUMSESSION_CLAIMSENTRY.fields_by_name['value'].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE
_POMERIUMSESSION_CLAIMSENTRY.containing_type = _POMERIUMSESSION
_POMERIUMSESSION.fields_by_name['user'].message_type = _POMERIUMSESSION_USER
_POMERIUMSESSION.fields_by_name['groups'].message_type = _POMERIUMSESSION_GROUP
_POMERIUMSESSION.fields_by_name['issued_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_POMERIUMSESSION.fields_by_name['expires_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_POMERIUMSESSION.fields_by_name['claims'].message_type = _POMERIUMSESSION_CLAIMSENTRY
_GETPOMERIUMSESSIONRESPONSE.fields_by_name['session'].message_type = _POMERIUMSESSION
_LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_query'].fields.append(
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['query'])
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['query'].containing_oneof = _LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_query']
_LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_offset'].fields.append(
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['offset'])
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['offset'].containing_oneof = _LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_offset']
_LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_limit'].fields.append(
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['limit'])
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['limit'].containing_oneof = _LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_limit']
_LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_order_by'].fields.append(
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['order_by'])
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['order_by'].containing_oneof = _LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_order_by']
_LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_user_id'].fields.append(
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['user_id'])
_LISTPOMERIUMSESSIONSREQUEST.fields_by_name['user_id'].containing_oneof = _LISTPOMERIUMSESSIONSREQUEST.oneofs_by_name['_user_id']
_LISTPOMERIUMSESSIONSRESPONSE.fields_by_name['sessions'].message_type = _POMERIUMSESSION
DESCRIPTOR.message_types_by_name['RecoveryToken'] = _RECOVERYTOKEN
DESCRIPTOR.message_types_by_name['GroupInfo'] = _GROUPINFO
DESCRIPTOR.message_types_by_name['UserInfo'] = _USERINFO
DESCRIPTOR.message_types_by_name['GetUserInfoRequest'] = _GETUSERINFOREQUEST
DESCRIPTOR.message_types_by_name['GetUserInfoResponse'] = _GETUSERINFORESPONSE
DESCRIPTOR.message_types_by_name['QueryGroupsRequest'] = _QUERYGROUPSREQUEST
DESCRIPTOR.message_types_by_name['QueryGroupsResponse'] = _QUERYGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['QueryUsersRequest'] = _QUERYUSERSREQUEST
DESCRIPTOR.message_types_by_name['QueryUsersResponse'] = _QUERYUSERSRESPONSE
DESCRIPTOR.message_types_by_name['PomeriumServiceAccount'] = _POMERIUMSERVICEACCOUNT
DESCRIPTOR.message_types_by_name['AddPomeriumServiceAccountRequest'] = _ADDPOMERIUMSERVICEACCOUNTREQUEST
DESCRIPTOR.message_types_by_name['AddPomeriumServiceAccountResponse'] = _ADDPOMERIUMSERVICEACCOUNTRESPONSE
DESCRIPTOR.message_types_by_name['DeletePomeriumServiceAccountRequest'] = _DELETEPOMERIUMSERVICEACCOUNTREQUEST
DESCRIPTOR.message_types_by_name['DeletePomeriumServiceAccountResponse'] = _DELETEPOMERIUMSERVICEACCOUNTRESPONSE
DESCRIPTOR.message_types_by_name['GetPomeriumServiceAccountRequest'] = _GETPOMERIUMSERVICEACCOUNTREQUEST
DESCRIPTOR.message_types_by_name['GetPomeriumServiceAccountResponse'] = _GETPOMERIUMSERVICEACCOUNTRESPONSE
DESCRIPTOR.message_types_by_name['ListPomeriumServiceAccountsRequest'] = _LISTPOMERIUMSERVICEACCOUNTSREQUEST
DESCRIPTOR.message_types_by_name['ListPomeriumServiceAccountsResponse'] = _LISTPOMERIUMSERVICEACCOUNTSRESPONSE
DESCRIPTOR.message_types_by_name['PomeriumSession'] = _POMERIUMSESSION
DESCRIPTOR.message_types_by_name['DeletePomeriumSessionRequest'] = _DELETEPOMERIUMSESSIONREQUEST
DESCRIPTOR.message_types_by_name['DeletePomeriumSessionResponse'] = _DELETEPOMERIUMSESSIONRESPONSE
DESCRIPTOR.message_types_by_name['GetPomeriumSessionRequest'] = _GETPOMERIUMSESSIONREQUEST
DESCRIPTOR.message_types_by_name['GetPomeriumSessionResponse'] = _GETPOMERIUMSESSIONRESPONSE
DESCRIPTOR.message_types_by_name['ListPomeriumSessionsRequest'] = _LISTPOMERIUMSESSIONSREQUEST
DESCRIPTOR.message_types_by_name['ListPomeriumSessionsResponse'] = _LISTPOMERIUMSESSIONSRESPONSE
DESCRIPTOR.message_types_by_name['ImpersonateRequest'] = _IMPERSONATEREQUEST
DESCRIPTOR.message_types_by_name['ImpersonateResponse'] = _IMPERSONATERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RecoveryToken = _reflection.GeneratedProtocolMessageType('RecoveryToken', (_message.Message,), {
'DESCRIPTOR' : _RECOVERYTOKEN,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.RecoveryToken)
})
_sym_db.RegisterMessage(RecoveryToken)
GroupInfo = _reflection.GeneratedProtocolMessageType('GroupInfo', (_message.Message,), {
'DESCRIPTOR' : _GROUPINFO,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GroupInfo)
})
_sym_db.RegisterMessage(GroupInfo)
UserInfo = _reflection.GeneratedProtocolMessageType('UserInfo', (_message.Message,), {
'NamespaceRolesEntry' : _reflection.GeneratedProtocolMessageType('NamespaceRolesEntry', (_message.Message,), {
'DESCRIPTOR' : _USERINFO_NAMESPACEROLESENTRY,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.UserInfo.NamespaceRolesEntry)
})
,
'DESCRIPTOR' : _USERINFO,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.UserInfo)
})
_sym_db.RegisterMessage(UserInfo)
_sym_db.RegisterMessage(UserInfo.NamespaceRolesEntry)
GetUserInfoRequest = _reflection.GeneratedProtocolMessageType('GetUserInfoRequest', (_message.Message,), {
'DESCRIPTOR' : _GETUSERINFOREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetUserInfoRequest)
})
_sym_db.RegisterMessage(GetUserInfoRequest)
GetUserInfoResponse = _reflection.GeneratedProtocolMessageType('GetUserInfoResponse', (_message.Message,), {
'DESCRIPTOR' : _GETUSERINFORESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetUserInfoResponse)
})
_sym_db.RegisterMessage(GetUserInfoResponse)
QueryGroupsRequest = _reflection.GeneratedProtocolMessageType('QueryGroupsRequest', (_message.Message,), {
'DESCRIPTOR' : _QUERYGROUPSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.QueryGroupsRequest)
})
_sym_db.RegisterMessage(QueryGroupsRequest)
QueryGroupsResponse = _reflection.GeneratedProtocolMessageType('QueryGroupsResponse', (_message.Message,), {
'DESCRIPTOR' : _QUERYGROUPSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.QueryGroupsResponse)
})
_sym_db.RegisterMessage(QueryGroupsResponse)
QueryUsersRequest = _reflection.GeneratedProtocolMessageType('QueryUsersRequest', (_message.Message,), {
'DESCRIPTOR' : _QUERYUSERSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.QueryUsersRequest)
})
_sym_db.RegisterMessage(QueryUsersRequest)
QueryUsersResponse = _reflection.GeneratedProtocolMessageType('QueryUsersResponse', (_message.Message,), {
'DESCRIPTOR' : _QUERYUSERSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.QueryUsersResponse)
})
_sym_db.RegisterMessage(QueryUsersResponse)
PomeriumServiceAccount = _reflection.GeneratedProtocolMessageType('PomeriumServiceAccount', (_message.Message,), {
'DESCRIPTOR' : _POMERIUMSERVICEACCOUNT,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.PomeriumServiceAccount)
})
_sym_db.RegisterMessage(PomeriumServiceAccount)
AddPomeriumServiceAccountRequest = _reflection.GeneratedProtocolMessageType('AddPomeriumServiceAccountRequest', (_message.Message,), {
'DESCRIPTOR' : _ADDPOMERIUMSERVICEACCOUNTREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.AddPomeriumServiceAccountRequest)
})
_sym_db.RegisterMessage(AddPomeriumServiceAccountRequest)
AddPomeriumServiceAccountResponse = _reflection.GeneratedProtocolMessageType('AddPomeriumServiceAccountResponse', (_message.Message,), {
'DESCRIPTOR' : _ADDPOMERIUMSERVICEACCOUNTRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.AddPomeriumServiceAccountResponse)
})
_sym_db.RegisterMessage(AddPomeriumServiceAccountResponse)
DeletePomeriumServiceAccountRequest = _reflection.GeneratedProtocolMessageType('DeletePomeriumServiceAccountRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEPOMERIUMSERVICEACCOUNTREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.DeletePomeriumServiceAccountRequest)
})
_sym_db.RegisterMessage(DeletePomeriumServiceAccountRequest)
DeletePomeriumServiceAccountResponse = _reflection.GeneratedProtocolMessageType('DeletePomeriumServiceAccountResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEPOMERIUMSERVICEACCOUNTRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.DeletePomeriumServiceAccountResponse)
})
_sym_db.RegisterMessage(DeletePomeriumServiceAccountResponse)
GetPomeriumServiceAccountRequest = _reflection.GeneratedProtocolMessageType('GetPomeriumServiceAccountRequest', (_message.Message,), {
'DESCRIPTOR' : _GETPOMERIUMSERVICEACCOUNTREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetPomeriumServiceAccountRequest)
})
_sym_db.RegisterMessage(GetPomeriumServiceAccountRequest)
GetPomeriumServiceAccountResponse = _reflection.GeneratedProtocolMessageType('GetPomeriumServiceAccountResponse', (_message.Message,), {
'DESCRIPTOR' : _GETPOMERIUMSERVICEACCOUNTRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetPomeriumServiceAccountResponse)
})
_sym_db.RegisterMessage(GetPomeriumServiceAccountResponse)
ListPomeriumServiceAccountsRequest = _reflection.GeneratedProtocolMessageType('ListPomeriumServiceAccountsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTPOMERIUMSERVICEACCOUNTSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ListPomeriumServiceAccountsRequest)
})
_sym_db.RegisterMessage(ListPomeriumServiceAccountsRequest)
ListPomeriumServiceAccountsResponse = _reflection.GeneratedProtocolMessageType('ListPomeriumServiceAccountsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTPOMERIUMSERVICEACCOUNTSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ListPomeriumServiceAccountsResponse)
})
_sym_db.RegisterMessage(ListPomeriumServiceAccountsResponse)
PomeriumSession = _reflection.GeneratedProtocolMessageType('PomeriumSession', (_message.Message,), {
'Group' : _reflection.GeneratedProtocolMessageType('Group', (_message.Message,), {
'DESCRIPTOR' : _POMERIUMSESSION_GROUP,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.PomeriumSession.Group)
})
,
'User' : _reflection.GeneratedProtocolMessageType('User', (_message.Message,), {
'DESCRIPTOR' : _POMERIUMSESSION_USER,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.PomeriumSession.User)
})
,
'ClaimsEntry' : _reflection.GeneratedProtocolMessageType('ClaimsEntry', (_message.Message,), {
'DESCRIPTOR' : _POMERIUMSESSION_CLAIMSENTRY,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.PomeriumSession.ClaimsEntry)
})
,
'DESCRIPTOR' : _POMERIUMSESSION,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.PomeriumSession)
})
_sym_db.RegisterMessage(PomeriumSession)
_sym_db.RegisterMessage(PomeriumSession.Group)
_sym_db.RegisterMessage(PomeriumSession.User)
_sym_db.RegisterMessage(PomeriumSession.ClaimsEntry)
DeletePomeriumSessionRequest = _reflection.GeneratedProtocolMessageType('DeletePomeriumSessionRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEPOMERIUMSESSIONREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.DeletePomeriumSessionRequest)
})
_sym_db.RegisterMessage(DeletePomeriumSessionRequest)
DeletePomeriumSessionResponse = _reflection.GeneratedProtocolMessageType('DeletePomeriumSessionResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETEPOMERIUMSESSIONRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.DeletePomeriumSessionResponse)
})
_sym_db.RegisterMessage(DeletePomeriumSessionResponse)
GetPomeriumSessionRequest = _reflection.GeneratedProtocolMessageType('GetPomeriumSessionRequest', (_message.Message,), {
'DESCRIPTOR' : _GETPOMERIUMSESSIONREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetPomeriumSessionRequest)
})
_sym_db.RegisterMessage(GetPomeriumSessionRequest)
GetPomeriumSessionResponse = _reflection.GeneratedProtocolMessageType('GetPomeriumSessionResponse', (_message.Message,), {
'DESCRIPTOR' : _GETPOMERIUMSESSIONRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.GetPomeriumSessionResponse)
})
_sym_db.RegisterMessage(GetPomeriumSessionResponse)
ListPomeriumSessionsRequest = _reflection.GeneratedProtocolMessageType('ListPomeriumSessionsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTPOMERIUMSESSIONSREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ListPomeriumSessionsRequest)
})
_sym_db.RegisterMessage(ListPomeriumSessionsRequest)
ListPomeriumSessionsResponse = _reflection.GeneratedProtocolMessageType('ListPomeriumSessionsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTPOMERIUMSESSIONSRESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ListPomeriumSessionsResponse)
})
_sym_db.RegisterMessage(ListPomeriumSessionsResponse)
ImpersonateRequest = _reflection.GeneratedProtocolMessageType('ImpersonateRequest', (_message.Message,), {
'DESCRIPTOR' : _IMPERSONATEREQUEST,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ImpersonateRequest)
})
_sym_db.RegisterMessage(ImpersonateRequest)
ImpersonateResponse = _reflection.GeneratedProtocolMessageType('ImpersonateResponse', (_message.Message,), {
'DESCRIPTOR' : _IMPERSONATERESPONSE,
'__module__' : 'users_pb2'
# @@protoc_insertion_point(class_scope:pomerium.dashboard.ImpersonateResponse)
})
_sym_db.RegisterMessage(ImpersonateResponse)
DESCRIPTOR._options = None
_USERINFO_NAMESPACEROLESENTRY._options = None
_POMERIUMSESSION_CLAIMSENTRY._options = None
_USERSERVICE = _descriptor.ServiceDescriptor(
name='UserService',
full_name='pomerium.dashboard.UserService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2954,
serialized_end=3252,
methods=[
_descriptor.MethodDescriptor(
name='GetUserInfo',
full_name='pomerium.dashboard.UserService.GetUserInfo',
index=0,
containing_service=None,
input_type=_GETUSERINFOREQUEST,
output_type=_GETUSERINFORESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='QueryGroups',
full_name='pomerium.dashboard.UserService.QueryGroups',
index=1,
containing_service=None,
input_type=_QUERYGROUPSREQUEST,
output_type=_QUERYGROUPSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='QueryUsers',
full_name='pomerium.dashboard.UserService.QueryUsers',
index=2,
containing_service=None,
input_type=_QUERYUSERSREQUEST,
output_type=_QUERYUSERSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_USERSERVICE)
DESCRIPTOR.services_by_name['UserService'] = _USERSERVICE
_POMERIUMSERVICEACCOUNTSERVICE = _descriptor.ServiceDescriptor(
name='PomeriumServiceAccountService',
full_name='pomerium.dashboard.PomeriumServiceAccountService',
file=DESCRIPTOR,
index=1,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3255,
serialized_end=3857,
methods=[
_descriptor.MethodDescriptor(
name='AddPomeriumServiceAccount',
full_name='pomerium.dashboard.PomeriumServiceAccountService.AddPomeriumServiceAccount',
index=0,
containing_service=None,
input_type=_ADDPOMERIUMSERVICEACCOUNTREQUEST,
output_type=_ADDPOMERIUMSERVICEACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeletePomeriumServiceAccount',
full_name='pomerium.dashboard.PomeriumServiceAccountService.DeletePomeriumServiceAccount',
index=1,
containing_service=None,
input_type=_DELETEPOMERIUMSERVICEACCOUNTREQUEST,
output_type=_DELETEPOMERIUMSERVICEACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetPomeriumServiceAccount',
full_name='pomerium.dashboard.PomeriumServiceAccountService.GetPomeriumServiceAccount',
index=2,
containing_service=None,
input_type=_GETPOMERIUMSERVICEACCOUNTREQUEST,
output_type=_GETPOMERIUMSERVICEACCOUNTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListPomeriumServiceAccounts',
full_name='pomerium.dashboard.PomeriumServiceAccountService.ListPomeriumServiceAccounts',
index=3,
containing_service=None,
input_type=_LISTPOMERIUMSERVICEACCOUNTSREQUEST,
output_type=_LISTPOMERIUMSERVICEACCOUNTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_POMERIUMSERVICEACCOUNTSERVICE)
DESCRIPTOR.services_by_name['PomeriumServiceAccountService'] = _POMERIUMSERVICEACCOUNTSERVICE
_POMERIUMSESSIONSERVICE = _descriptor.ServiceDescriptor(
name='PomeriumSessionService',
full_name='pomerium.dashboard.PomeriumSessionService',
file=DESCRIPTOR,
index=2,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3860,
serialized_end=4346,
methods=[
_descriptor.MethodDescriptor(
name='DeletePomeriumSession',
full_name='pomerium.dashboard.PomeriumSessionService.DeletePomeriumSession',
index=0,
containing_service=None,
input_type=_DELETEPOMERIUMSESSIONREQUEST,
output_type=_DELETEPOMERIUMSESSIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetPomeriumSession',
full_name='pomerium.dashboard.PomeriumSessionService.GetPomeriumSession',
index=1,
containing_service=None,
input_type=_GETPOMERIUMSESSIONREQUEST,
output_type=_GETPOMERIUMSESSIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Impersonate',
full_name='pomerium.dashboard.PomeriumSessionService.Impersonate',
index=2,
containing_service=None,
input_type=_IMPERSONATEREQUEST,
output_type=_IMPERSONATERESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListPomeriumSessions',
full_name='pomerium.dashboard.PomeriumSessionService.ListPomeriumSessions',
index=3,
containing_service=None,
input_type=_LISTPOMERIUMSESSIONSREQUEST,
output_type=_LISTPOMERIUMSESSIONSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_POMERIUMSESSIONSERVICE)
DESCRIPTOR.services_by_name['PomeriumSessionService'] = _POMERIUMSESSIONSERVICE
# @@protoc_insertion_point(module_scope)
| 42.975253
| 6,495
| 0.778471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20,832
| 0.272634
|
62f736f5442b0883d19887a8204d15682986aa90
| 6,974
|
py
|
Python
|
src/python/pants/backend/codegen/thrift/apache/rules.py
|
betaboon/pants
|
05ec375c8bfcaa0396c673847bb139326883cc08
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/codegen/thrift/apache/rules.py
|
betaboon/pants
|
05ec375c8bfcaa0396c673847bb139326883cc08
|
[
"Apache-2.0"
] | 1
|
2022-02-22T18:15:03.000Z
|
2022-02-22T18:15:03.000Z
|
src/python/pants/backend/codegen/thrift/apache/rules.py
|
ryanking/pants
|
e45b00d2eb467b599966bca262405a5d74d27bdd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from dataclasses import dataclass
from pants.backend.codegen.thrift.apache.subsystem import ApacheThriftSubsystem
from pants.backend.codegen.thrift.target_types import ThriftSourceField
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.environment import Environment, EnvironmentRequest
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import (
BinaryNotFoundError,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
Process,
ProcessCacheScope,
ProcessResult,
)
from pants.engine.rules import collect_rules, rule
from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
from pants.source.source_root import SourceRootsRequest, SourceRootsResult
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class GenerateThriftSourcesRequest:
thrift_source_field: ThriftSourceField
lang_id: str
lang_options: tuple[str, ...]
lang_name: str
@dataclass(frozen=True)
class GeneratedThriftSources:
snapshot: Snapshot
@dataclass(frozen=True)
class ApacheThriftSetup:
path: str
@rule
async def generate_apache_thrift_sources(
request: GenerateThriftSourcesRequest,
thrift: ApacheThriftSetup,
) -> GeneratedThriftSources:
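    """Run the thrift compiler over the request's sources and return the generated files as a snapshot."""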
output_dir = "_generated_files"
transitive_targets, empty_output_dir_digest = await MultiGet(
Get(TransitiveTargets, TransitiveTargetsRequest([request.thrift_source_field.address])),
Get(Digest, CreateDigest([Directory(output_dir)])),
)
transitive_sources, target_sources = await MultiGet(
Get(
SourceFiles,
SourceFilesRequest(
tgt[ThriftSourceField]
for tgt in transitive_targets.closure
if tgt.has_field(ThriftSourceField)
),
),
Get(SourceFiles, SourceFilesRequest([request.thrift_source_field])),
)
sources_roots = await Get(
SourceRootsResult,
SourceRootsRequest,
SourceRootsRequest.for_files(transitive_sources.snapshot.files),
)
deduped_source_root_paths = sorted({sr.path for sr in sources_roots.path_to_root.values()})
input_digest = await Get(
Digest,
MergeDigests(
[
transitive_sources.snapshot.digest,
target_sources.snapshot.digest,
empty_output_dir_digest,
]
),
)
options_str = ""
if request.lang_options:
options_str = f":{','.join(opt for opt in request.lang_options)}"
maybe_include_paths = []
for path in deduped_source_root_paths:
maybe_include_paths.extend(["-I", path])
args = [
thrift.path,
"-out",
output_dir,
*maybe_include_paths,
"--gen",
f"{request.lang_id}{options_str}",
*target_sources.snapshot.files,
]
result = await Get(
ProcessResult,
Process(
args,
input_digest=input_digest,
output_directories=(output_dir,),
description=f"Generating {request.lang_name} sources from {request.thrift_source_field.address}.",
level=LogLevel.DEBUG,
),
)
output_snapshot = await Get(Snapshot, RemovePrefix(result.output_digest, output_dir))
return GeneratedThriftSources(output_snapshot)
@rule
async def setup_thrift_tool(apache_thrift: ApacheThriftSubsystem) -> ApacheThriftSetup:
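    """Locate a `thrift` binary whose version matches `[apache-thrift].expected_version`, or raise BinaryNotFoundError."""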
env = await Get(Environment, EnvironmentRequest(["PATH"]))
search_paths = apache_thrift.thrift_search_paths(env)
all_thrift_binary_paths = await Get(
BinaryPaths,
BinaryPathRequest(
search_path=search_paths,
binary_name="thrift",
test=BinaryPathTest(["-version"]),
),
)
if not all_thrift_binary_paths.paths:
raise BinaryNotFoundError(
"Cannot find any `thrift` binaries using the option "
f"`[apache-thrift].thrift_search_paths`: {list(search_paths)}\n\n"
"To fix, please install Apache Thrift (https://thrift.apache.org/) with the version "
f"{apache_thrift.expected_version} (set by `[apache-thrift].expected_version`) and ensure "
"that it is discoverable via `[apache-thrift].thrift_search_paths`."
)
version_results = await MultiGet(
Get(
ProcessResult,
Process(
(binary_path.path, "-version"),
description=f"Determine Apache Thrift version for {binary_path.path}",
level=LogLevel.DEBUG,
cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
),
)
for binary_path in all_thrift_binary_paths.paths
)
invalid_versions = []
for binary_path, version_result in zip(all_thrift_binary_paths.paths, version_results):
try:
_raw_version = version_result.stdout.decode("utf-8").split()[2]
            _version_components = _raw_version.split(".")  # e.g. ["1", "17"] or ["1", "17", "1"]
version = f"{_version_components[0]}.{_version_components[1]}"
except IndexError:
raise AssertionError(
f"Failed to parse `thrift -version` output for {binary_path}. Please open a bug at "
f"https://github.com/pantsbuild/pants/issues/new/choose with the below data:"
f"\n\n"
f"{version_result}"
)
if version == apache_thrift.expected_version:
return ApacheThriftSetup(binary_path.path)
logger.debug(
f"The Thrift binary at {binary_path.path} has version {version}, but this "
f"project is using {apache_thrift.expected_version} "
"(set by `[apache-thrift].expected_version`). Ignoring."
)
invalid_versions.append((binary_path.path, version))
invalid_versions_str = bullet_list(
f"{path}: {version}" for path, version in sorted(invalid_versions)
)
raise BinaryNotFoundError(
"Cannot find a `thrift` binary with the expected version of "
f"{apache_thrift.expected_version} (set by `[apache-thrift].expected_version`).\n\n"
f"Found these `thrift` binaries, but they had different versions:\n\n"
f"{invalid_versions_str}\n\n"
"To fix, please install the expected version (https://thrift.apache.org/) and ensure "
"that it is discoverable via the option `[apache-thrift].thrift_search_paths`, or change "
"`[apache-thrift].expected_version`."
)
def rules():
return collect_rules()
| 35.045226
| 110
| 0.671638
| 238
| 0.034127
| 0
| 0
| 5,743
| 0.823487
| 5,421
| 0.777316
| 1,727
| 0.247634
|
62faa58bb2c555bc41f725bdab4a4f8e48cef3ac
| 1,794
|
py
|
Python
|
site_asylum/apps/delirium/migrations/0001_initial.py
|
uruz/asylum.su
|
7d7a46006fb14160b3360751b6cce1a5f960f9d0
|
[
"MIT"
] | null | null | null |
site_asylum/apps/delirium/migrations/0001_initial.py
|
uruz/asylum.su
|
7d7a46006fb14160b3360751b6cce1a5f960f9d0
|
[
"MIT"
] | null | null | null |
site_asylum/apps/delirium/migrations/0001_initial.py
|
uruz/asylum.su
|
7d7a46006fb14160b3360751b6cce1a5f960f9d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='DeliriumUser',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('username', models.CharField(max_length=255, verbose_name='Имя пользователя', default='')),
('avatar', models.CharField(max_length=255, verbose_name='Аватара', default='')),
],
options={
'verbose_name_plural': 'Пользователи Delirium',
'verbose_name': 'Пользователь Delirium',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),
('topic', models.CharField(max_length=255, verbose_name='Топик', default='')),
('posted_at', models.DateTimeField(verbose_name='Время')),
('is_registered', models.BooleanField(verbose_name='Зарегистрирован', default=False)),
('username', models.CharField(max_length=255, verbose_name='Имя пользователя (в посте)', default='')),
('text', models.TextField(verbose_name='Пост', default='')),
('user', models.ForeignKey(blank=True, to='delirium.DeliriumUser', related_name='posts', null=True)),
],
options={
'verbose_name_plural': 'Посты',
'verbose_name': 'Пост',
},
bases=(models.Model,),
),
]
| 40.772727
| 118
| 0.567447
| 1,790
| 0.942601
| 0
| 0
| 0
| 0
| 0
| 0
| 498
| 0.262243
|
62fd1da94147ad45face770be507f1eb73d0d1b2
| 404
|
py
|
Python
|
test.py
|
quantumporium/shopping_cart_project
|
eae13f76fce82715ddbad5aebb73035b0e1ba258
|
[
"MIT"
] | null | null | null |
test.py
|
quantumporium/shopping_cart_project
|
eae13f76fce82715ddbad5aebb73035b0e1ba258
|
[
"MIT"
] | null | null | null |
test.py
|
quantumporium/shopping_cart_project
|
eae13f76fce82715ddbad5aebb73035b0e1ba258
|
[
"MIT"
] | null | null | null |
# good structure for a pytest test
from app import shopping_cart
def check_if_checkout_give_the_right_value():
    '''
    Arrange-act-assert check that shopping_cart.checkout returns the expected totals.
    '''
    arrange_array = [15, 7, 10]  # arrange
    shopping_cart_array = shopping_cart.checkout(arrange_array)  # act
    assert shopping_cart_array == (31.99, 2.8, 34.79), "checks that the checkout function from shopping_cart works correctly."  # assert
check_if_checkout_give_the_right_value()
| 36.727273
| 120
| 0.747525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.309406
|
62fd39c0aafef0a38c14c50d32b531ce3872cae4
| 17,658
|
py
|
Python
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/unit/utils/test_win_system.py
|
markgras/salt
|
d66cd3c935533c63870b83228b978ce43e0ef70d
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
import os
import salt.utils.platform
from tests.support.mock import patch
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_system as win_system
except Exception as exc: # pylint: disable=broad-except
win_system = exc
class WinSystemImportTestCase(TestCase):
"""
Simply importing should not raise an error, especially on Linux
"""
def test_import(self):
if isinstance(win_system, Exception):
raise Exception(
"Importing win_system caused traceback: {}".format(win_system)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
class WinSystemTestCase(TestCase):
"""
Test cases for salt.utils.win_system
"""
def test_get_computer_name(self):
"""
Should return the computer name
"""
with patch("win32api.GetComputerNameEx", return_value="FAKENAME"):
self.assertEqual(win_system.get_computer_name(), "FAKENAME")
def test_get_computer_name_fail(self):
"""
If it fails, it returns False
"""
with patch("win32api.GetComputerNameEx", return_value=None):
self.assertFalse(win_system.get_computer_name())
def test_get_pending_computer_name(self):
"""
Will return the pending computer name if one is pending
"""
expected = "PendingName"
patch_value = {"vdata": expected}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
result = win_system.get_pending_computer_name()
self.assertEqual(expected, result)
def test_get_pending_computer_name_none(self):
"""
Will return the None if the pending computer is the current name
"""
patch_value = {"vdata": os.environ.get("COMPUTERNAME")}
with patch("salt.utils.win_reg.read_value", return_value=patch_value):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_computer_name_false(self):
"""
Will return False if there is no pending computer name
"""
with patch("salt.utils.win_reg.read_value", return_value=False):
self.assertIsNone(win_system.get_pending_computer_name())
def test_get_pending_component_servicing(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_1(self):
"""
If the RebootPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_2(self):
"""
If the RebootInProgress key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_component_servicing_true_3(self):
"""
If the PackagesPending key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, False, True]):
self.assertTrue(win_system.get_pending_component_servicing())
def test_get_pending_domain_join(self):
"""
If none of the keys exist, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False):
self.assertFalse(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_1(self):
"""
If the AvoidSpnSet key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_domain_join_true_2(self):
"""
If the JoinDomain key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_domain_join())
def test_get_pending_file_rename_false_1(self):
"""
If none of the value names exist, should return False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_false_2(self):
"""
If one of the value names exists but is not set, should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_file_rename())
def test_get_pending_file_rename_true_1(self):
"""
If one of the value names exists and is set, should return True
"""
patched_return = {"success": True, "vdata": "some value"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertTrue(win_system.get_pending_file_rename())
def test_get_pending_servermanager_false_1(self):
"""
If the CurrentRebootAttempts value name does not exist, should return
False
"""
patched_return = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_false_2(self):
"""
If the CurrentRebootAttempts value name exists but is not an integer,
should return False
"""
patched_return = {"success": True, "vdata": "(value not set)"}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertFalse(win_system.get_pending_servermanager())
def test_get_pending_servermanager_true(self):
"""
If the CurrentRebootAttempts value name exists and is an integer,
should return True
"""
patched_return = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_return):
            self.assertTrue(win_system.get_pending_servermanager())
def test_get_pending_dvd_reboot(self):
"""
If the DVDRebootSignal value name does not exist, should return False
"""
with patch("salt.utils.win_reg.value_exists", return_value=False):
self.assertFalse(win_system.get_pending_dvd_reboot())
def test_get_pending_dvd_reboot_true(self):
"""
If the DVDRebootSignal value name exists, should return True
"""
with patch("salt.utils.win_reg.value_exists", return_value=True):
self.assertTrue(win_system.get_pending_dvd_reboot())
def test_get_pending_update(self):
"""
If none of the keys exist and there are not subkeys, should return False
"""
with patch("salt.utils.win_reg.key_exists", return_value=False), patch(
"salt.utils.win_reg.list_keys", return_value=[]
):
self.assertFalse(win_system.get_pending_update())
def test_get_pending_update_true_1(self):
"""
If the RebootRequired key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[True]):
self.assertTrue(win_system.get_pending_update())
def test_get_pending_update_true_2(self):
"""
If the PostRebootReporting key exists, should return True
"""
with patch("salt.utils.win_reg.key_exists", side_effect=[False, True]):
self.assertTrue(win_system.get_pending_update())
def test_get_reboot_required_witnessed_false_1(self):
"""
The ``Reboot Required`` value name does not exist, should return False
"""
patched_data = {"vdata": None}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_false_2(self):
"""
The ``Reboot required`` value name is set to 0, should return False
"""
patched_data = {"vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_reboot_required_witnessed())
def test_get_reboot_required_witnessed_true(self):
"""
The ``Reboot required`` value name is set to 1, should return True
"""
patched_data = {"vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_reboot_required_witnessed())
def test_set_reboot_required_witnessed(self):
"""
The call to ``set_value`` should return True and should be called with
the specified parameters
"""
with patch("salt.utils.win_reg.set_value", return_value=True) as sv:
self.assertTrue(win_system.set_reboot_required_witnessed())
sv.assert_called_once_with(
hive="HKLM",
key=win_system.MINION_VOLATILE_KEY,
volatile=True,
vname=win_system.REBOOT_REQUIRED_NAME,
vdata=1,
vtype="REG_DWORD",
)
def test_get_pending_update_exe_volatile_false_1(self):
"""
If UpdateExeVolatile value name is 0, should return False
"""
patched_data = {"success": True, "vdata": 0}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_false_2(self):
"""
If UpdateExeVolatile value name is not present, should return False
"""
patched_data = {"success": False}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertFalse(win_system.get_pending_update_exe_volatile())
def test_get_pending_update_exe_volatile_true_1(self):
"""
If UpdateExeVolatile value name is not 0, should return True
"""
patched_data = {"success": True, "vdata": 1}
with patch("salt.utils.win_reg.read_value", return_value=patched_data):
self.assertTrue(win_system.get_pending_update_exe_volatile())
def test_get_pending_reboot(self):
"""
If all functions return Falsy data, should return False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
self.assertFalse(win_system.get_pending_reboot())
def test_get_pending_reboot_true_1(self):
"""
If any boolean returning functions return True, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_true_2(self):
"""
If a computer name is returned, should return True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
):
self.assertTrue(win_system.get_pending_reboot())
def test_get_pending_reboot_details(self):
"""
All items False should return a dictionary with all items False
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=False
), patch("salt.utils.win_update.needs_reboot", return_value=False), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=False
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=False
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=False
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=False
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=False
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=False
), patch(
"salt.utils.win_system.get_pending_computer_name", return_value=None
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=False
):
expected = {
"Pending Component Servicing": False,
"Pending Computer Rename": False,
"Pending DVD Reboot": False,
"Pending File Rename": False,
"Pending Join Domain": False,
"Pending ServerManager": False,
"Pending Update": False,
"Pending Windows Update": False,
"Reboot Required Witnessed": False,
"Volatile Update Exe": False,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
def test_get_pending_reboot_details_true(self):
"""
All items True should return a dictionary with all items True
"""
with patch(
"salt.utils.win_system.get_pending_update", return_value=True
), patch("salt.utils.win_update.needs_reboot", return_value=True), patch(
"salt.utils.win_system.get_pending_update_exe_volatile", return_value=True
), patch(
"salt.utils.win_system.get_pending_file_rename", return_value=True
), patch(
"salt.utils.win_system.get_pending_servermanager", return_value=True
), patch(
"salt.utils.win_system.get_pending_component_servicing", return_value=True
), patch(
"salt.utils.win_system.get_pending_dvd_reboot", return_value=True
), patch(
"salt.utils.win_system.get_reboot_required_witnessed", return_value=True
), patch(
"salt.utils.win_system.get_pending_computer_name",
return_value="pending name",
), patch(
"salt.utils.win_system.get_pending_domain_join", return_value=True
):
expected = {
"Pending Component Servicing": True,
"Pending Computer Rename": True,
"Pending DVD Reboot": True,
"Pending File Rename": True,
"Pending Join Domain": True,
"Pending ServerManager": True,
"Pending Update": True,
"Pending Windows Update": True,
"Reboot Required Witnessed": True,
"Volatile Update Exe": True,
}
result = win_system.get_pending_reboot_details()
self.assertDictEqual(expected, result)
| 41.942993
| 87
| 0.64979
| 17,320
| 0.980859
| 0
| 0
| 17,078
| 0.967154
| 0
| 0
| 7,260
| 0.411145
|
62fddb54eb15614ae62c4ea42765a975a997094e
| 2,738
|
py
|
Python
|
modules/pgu/gui/list.py
|
bullseyestudio/guns-game
|
3104c44e43ea7f000f6b9e756d622f98110d0a21
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
modules/pgu/gui/list.py
|
bullseyestudio/guns-game
|
3104c44e43ea7f000f6b9e756d622f98110d0a21
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2018-11-21T04:50:57.000Z
|
2018-11-21T04:50:57.000Z
|
modules/pgu/gui/list.py
|
bullseyestudio/guns-game
|
3104c44e43ea7f000f6b9e756d622f98110d0a21
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
"""
import pygame  # pygame.Rect is used in ListItem.paint
from .const import *
from . import container, widget
class List(container.Container):
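    """Container widget that stacks ListItem rows vertically below an optional title."""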
def __init__(self, title=None, length=None, show_empty=True, size=20, **params):
params.setdefault('cls','list')
container.Container.__init__(self, **params)
self.title = title
self.font = self.style.font
w,h = self.font.size("e"*size)
self.size = size
self.style.width = w + ( self.style.padding * 2) + 8
self.itmheight = h
self.itmwidth = w
self.vpos = 0
self.cls = params['cls']
self.padding = self.style.padding
self.offset = 0
        if self.title is not None:
tw,th = self.font.size(self.title)
self.offset += th + 10
def add(self,name):
itm = ListItem(value=name,cls='{0}.item'.format(self.cls),size=self.size)
container.Container.add(self,itm,self.padding,self.padding+self.offset)
container.Container.repaint(self)
self.offset += self.itmheight
def remove(self,name):
for itm in self.widgets:
if itm.get_text() == name:
container.Container.remove(self,itm)
container.Container.repaint(self)
self.offset -= self.itmheight
def paint(self,s):
        if self.title is not None:
# Painting the title...
tw,th = self.font.size(self.title)
s.blit(self.font.render(self.title, 1, self.style.color),((self.style.width/2) - (tw/2),0))
container.Container.paint(self,s)
# def paint(self,s):
# container.Container.paint(self,s)
# r = pygame.Rect(0,offset,self.rect.w,self.rect.h)
#
# cs = 2 #NOTE: should be in a style
#
# w,h = self.font.size(item)
# x = w-self.vpos
# if x < 0: self.vpos -= -x
# if x+cs > s.get_width(): self.vpos += x+cs-s.get_width()
# s.blit(self.font.render(item, 1, self.style.color),(-self.vpos + self.padding,offset))
# count += 1
# offset += self.height+self.padding
class ListItem(widget.Widget):
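    """Single text row used by List; renders its value with the widget's font."""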
def __init__(self, value='', **params):
cls = None
if 'cls' in params:
cls = params['cls']
else:
cls = 'list'
params.setdefault('cls','{0}.item'.format(cls))
widget.Widget.__init__(self, **params)
params.setdefault('size',20)
self.size = params['size']
self.value = value
self.pos = len(str(value))
self.vpos = 0
self.font = self.style.font
w,h = self.font.size("e"*self.size)
if not self.style.height: self.style.height = h
if not self.style.width: self.style.width = w
def set_text(self, text):
self.value = text
def get_text(self):
return self.value
def paint(self,s):
r = pygame.Rect(0,0,self.rect.w,self.rect.h)
cs = 2 #NOTE: should be in a style
w,h = self.font.size(self.value[0:self.pos])
x = w-self.vpos
if x < 0: self.vpos -= -x
if x+cs > s.get_width(): self.vpos += x+cs-s.get_width()
s.blit(self.font.render(self.value, 1, self.style.color),(-self.vpos,0))
| 25.588785
| 94
| 0.657049
| 2,230
| 0.814463
| 0
| 0
| 0
| 0
| 0
| 0
| 563
| 0.205625
|
62fddef8dc0414788a7119927b0e02d28c5a35e7
| 359
|
py
|
Python
|
prog_1.py
|
swatakit/python-dash-quick-starter
|
6009cef072579fc5f1755c6bc047aeae5a6d9c75
|
[
"MIT"
] | 1
|
2020-10-21T21:05:50.000Z
|
2020-10-21T21:05:50.000Z
|
prog_1.py
|
swatakit/python-dash-quick-starter
|
6009cef072579fc5f1755c6bc047aeae5a6d9c75
|
[
"MIT"
] | null | null | null |
prog_1.py
|
swatakit/python-dash-quick-starter
|
6009cef072579fc5f1755c6bc047aeae5a6d9c75
|
[
"MIT"
] | null | null | null |
#########################################
# The Simplest form of dash application
#
# ref: https://dash.plotly.com/introduction
import dash
import dash_html_components as html
app = dash.Dash(__name__)
# Layout compose
app.layout = html.Div([
html.H1('Hello, this is a Dash Application'),
])
if __name__ == "__main__":
app.run_server(debug=False)
| 19.944444
| 49
| 0.637883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.51532
|
62ff5663f9b64ba48b98538457a5c9793b4cf0c7
| 1,409
|
py
|
Python
|
ros/src/twist_controller/pid.py
|
redherring2141/CarND-Capstone
|
df230f902836923dbbc55065c3d4f12531c05cda
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/pid.py
|
redherring2141/CarND-Capstone
|
df230f902836923dbbc55065c3d4f12531c05cda
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/pid.py
|
redherring2141/CarND-Capstone
|
df230f902836923dbbc55065c3d4f12531c05cda
|
[
"MIT"
] | null | null | null |
import rospy
MIN_NUM = float('-inf')
MAX_NUM = float('inf')
class PID(object):
def __init__(self, kp, ki, kd, max_input=MAX_NUM, mn=MIN_NUM, mx=MAX_NUM):
self.kp = kp
self.ki = ki
self.kd = kd
self.min = mn
self.max = mx
# Approximation - added
self.max_abs_u = (abs(self.kp) + abs(self.ki) + abs(self.kd)) * abs(max_input)
# Controller state initialization
self.t = None
self.error = 0.0
self.integral = 0.0
def reset(self):
self.t = None
#def step(self, error, sample_time):
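    # Compute the control output for the current target/measurement pair.
    # The first call only records the timestamp and resets the integral, returning 0.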
def step(self, value_target, value_curr, sample_time):
        if self.t is None:
self.t = sample_time
self.integral = 0.0
self.error = value_target - value_curr
return 0.0
delta_t = sample_time - self.t
# Calculate error, integral, derivative
error = value_target - value_curr
integral = max(MIN_NUM, min(MAX_NUM, self.integral + error*delta_t))
derivative = (error - self.error) / delta_t
# Calculate PID control
control = max(self.min, min(self.max, (self.kp * error + self.ki * integral + self.kd * derivative)))
#rospy.logwarn("[pid.py] control = %f", control)
self.t = sample_time
self.error = error
self.integral = integral
return control
| 27.096154
| 109
| 0.570617
| 1,346
| 0.955287
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.151171
|
1a018ecb1b4832d82200c28fb3048b3345de111f
| 33
|
py
|
Python
|
gmocoin/__init__.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | null | null | null |
gmocoin/__init__.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | null | null | null |
gmocoin/__init__.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | 1
|
2021-07-17T16:56:03.000Z
|
2021-07-17T16:56:03.000Z
|
#!python3
__version__ = '0.0.12'
| 11
| 22
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.515152
|
1a01eca2e35dff0208fce46a45cc7fc79230edce
| 4,017
|
py
|
Python
|
climbing_ratings/tests/test_bradley_terry.py
|
scottwedge/climbing_ratings
|
5a36df62681487de5d5d041e379853be21611dcb
|
[
"Apache-2.0"
] | null | null | null |
climbing_ratings/tests/test_bradley_terry.py
|
scottwedge/climbing_ratings
|
5a36df62681487de5d5d041e379853be21611dcb
|
[
"Apache-2.0"
] | null | null | null |
climbing_ratings/tests/test_bradley_terry.py
|
scottwedge/climbing_ratings
|
5a36df62681487de5d5d041e379853be21611dcb
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the bradley_terry module"""
# Copyright 2019 Dean Scarff
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from ..bradley_terry import get_bt_summation_terms, get_bt_derivatives, sum
from .assertions import assert_close
class TestBradleyTerryFunctions(unittest.TestCase):
"""Tests for functions in the bradley_terry module"""
def setUp(self):
np.seterr(all="raise")
self.assert_close = assert_close.__get__(self, self.__class__)
def test_get_bt_summation_terms(self):
"""Test get_bt_summation_terms()"""
gamma = np.array([1.0, 2.0])
adversary_gamma = np.array([1.0, 2.0])
d1, d2 = get_bt_summation_terms(gamma, adversary_gamma)
self.assert_close([0.5, 0.5], d1, "d1")
self.assert_close([0.25, 0.25], d2, "d2")
def test_sum(self):
"""Test sum()"""
x = np.array([1.0, 2.0, 4.0, 8.0])
self.assertEqual(15.0, sum(x, 0, 4))
self.assertEqual(0.0, sum(x, 0, 0))
self.assertEqual(6.0, sum(x, 1, 3))
self.assertEqual(7.0, sum(x, 0, 3))
    def test_sum_error_compensation(self):
"""Test sum() error compensation"""
x = np.full([10], 0.1)
self.assertEqual(1.0, sum(x, 0, 10))
x = np.array([1e100, -1.0, -1e100, 1.0])
self.assertEqual(0.0, sum(x, 0, 4))
x = np.array([1e100, 1.0, -1e100, 1.0])
self.assertEqual(2.0, sum(x, 0, 4))
def test_get_bt_derivatives_single_win(self):
"""Test get_bt_derivatives() with a single win"""
slices = [(0, 1)]
wins = np.array([1.0])
gamma = np.array([1.0])
adversary_gamma = np.array([1.0])
d1, d2 = get_bt_derivatives(slices, wins, gamma, adversary_gamma)
self.assert_close([0.5], d1, "d1")
self.assert_close([-0.25], d2, "d2")
def test_get_bt_derivatives_single_loss(self):
"""Test get_bt_derivatives() with a single loss"""
slices = [(0, 1)]
wins = np.array([0.0])
gamma = np.array([1.0])
adversary_gamma = np.array([1.0])
d1, d2 = get_bt_derivatives(slices, wins, gamma, adversary_gamma)
self.assert_close([-0.5], d1, "d1")
self.assert_close([-0.25], d2, "d2")
def test_get_bt_derivatives_four_losses(self):
"""Test get_bt_derivatives() with four losses"""
slices = [(0, 4)]
wins = np.array([0.0])
gamma = np.array([4.0, 4.0, 4.0, 4.0])
adversary_gamma = np.array([1.0, 1.0, 1.0, 1.0])
d1, d2 = get_bt_derivatives(slices, wins, gamma, adversary_gamma)
self.assert_close([-3.2], d1, "d1")
self.assert_close([-0.64], d2, "d2")
def test_get_bt_derivatives_no_ascents(self):
"""Test get_bt_derivatives() with no ascents"""
slices = [(0, 0)]
wins = np.array([])
gamma = np.array([])
adversary_gamma = np.array([])
d1, d2 = get_bt_derivatives(slices, wins, gamma, adversary_gamma)
self.assert_close([0.0], d1, "d1")
self.assert_close([0.0], d2, "d2")
def test_get_bt_derivatives(self):
"""Test get_bt_derivatives() with multiple slices"""
slices = [(0, 1), (1, 4)]
wins = np.array([1.0, 2.0])
gamma = np.array([6.0, 4.0, 4.0, 4.0])
adversary_gamma = np.array([6.0, 4.0, 12.0, 12.0])
d1, d2 = get_bt_derivatives(slices, wins, gamma, adversary_gamma)
self.assert_close([0.5, 1.0], d1, "d1")
self.assert_close([-0.25, -0.625], d2, "d2")
| 38.625
| 75
| 0.608663
| 3,248
| 0.808564
| 0
| 0
| 0
| 0
| 0
| 0
| 1,042
| 0.259398
|
1a021e021146cbc33766d3c7997455d63709bb09
| 167
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/asyncInitMethod.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/asyncInitMethod.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/asyncInitMethod.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class A:
<error descr="function \"__init__\" cannot be async">async</error> def __init__(self):
self.foo = '2'
self.bar = '3'
a = A()
print(a.foo)
| 23.857143
| 90
| 0.580838
| 145
| 0.868263
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.269461
|
1a02584666d045aa7be3465e13485d41b2de443c
| 1,967
|
py
|
Python
|
log_analysis_tool.py
|
buildthatapp/udacity_fsnd_log_analysis_tool
|
fc340b697fa255ac67a969f06a4d192dc7e8b3ae
|
[
"MIT"
] | null | null | null |
log_analysis_tool.py
|
buildthatapp/udacity_fsnd_log_analysis_tool
|
fc340b697fa255ac67a969f06a4d192dc7e8b3ae
|
[
"MIT"
] | null | null | null |
log_analysis_tool.py
|
buildthatapp/udacity_fsnd_log_analysis_tool
|
fc340b697fa255ac67a969f06a4d192dc7e8b3ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Log Analysis Project for Full Stack Nanodegree by Udacity"""
import psycopg2
DBNAME = "news"
def three_most_popular_articles():
"""Queries and displays the top three most viewed articles."""
conn = psycopg2.connect(database=DBNAME)
cur = conn.cursor()
    query = 'SELECT * FROM VIEW_top_three_articles'  # assumes a view with this name exists in the "news" database
    cur.execute(query)
    results = cur.fetchall()
cur.close()
conn.close()
print()
print('Three most popular articles of all time')
print('=======================================')
for result in results:
print('"{title}" - {count} views'
.format(title=result[0], count=result[1]))
print()
return
def most_popular_authors():
"""Queries and displays the Authors with the most views."""
conn = psycopg2.connect(database=DBNAME)
cur = conn.cursor()
    query = 'SELECT * FROM VIEW_most_popular_authors'  # assumes a view with this name exists in the "news" database
    cur.execute(query)
    results = cur.fetchall()
cur.close()
conn.close()
print()
print('Three most popular authors')
print('=======================================')
for result in results:
print('"{author}" - {count} views'
.format(author=result[0], count=result[1]))
print()
return
def days_with_high_errors():
"""Queries and displays the days when errors were above 1%."""
conn = psycopg2.connect(database=DBNAME)
cur = conn.cursor()
    query = 'SELECT * FROM VIEW_days_with_over_one_percent_errors'  # assumes a view with this name exists in the "news" database
    cur.execute(query)
    results = cur.fetchall()
cur.close()
conn.close()
print()
print('Days with over 1% errors')
print('=======================================')
for result in results:
print('"{day}" - {error_rate} errors'
.format(day=result[0], error_rate=result[1]))
print()
return
def main():
three_most_popular_articles()
most_popular_authors()
days_with_high_errors()
if __name__ == '__main__':
main()
| 19.67
| 66
| 0.589731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 680
| 0.345704
|
1a03179c783f6a71443f0dfefb1dcdf8bf7a653b
| 40
|
py
|
Python
|
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
samplePythonfiles/cc.py
|
fazilsha/python-automation
|
80ce94642a94276d3b970ae390a5d1464ad2f2b8
|
[
"MIT"
] | null | null | null |
print("File dd.py sucessfully executed")
| 40
| 40
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.825
|
1a04f2dc5f9e5f9be7e2402e0878155eb33a689e
| 5,868
|
py
|
Python
|
src/models.py
|
thowilh/geomars
|
18d8dd1f2bb15fe0a67d3e59aa76f2e3df4ac7c1
|
[
"MIT"
] | 2
|
2022-02-20T18:23:25.000Z
|
2022-02-26T19:15:33.000Z
|
src/models.py
|
thowilh/geomars
|
18d8dd1f2bb15fe0a67d3e59aa76f2e3df4ac7c1
|
[
"MIT"
] | null | null | null |
src/models.py
|
thowilh/geomars
|
18d8dd1f2bb15fe0a67d3e59aa76f2e3df4ac7c1
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import pytorch_lightning as pl
from torchvision.models import (
alexnet,
vgg16_bn,
resnet18,
resnet34,
resnet50,
densenet121,
densenet161,
)
from torch.nn import functional as F
from pytorch_lightning.metrics.functional import accuracy, precision_recall
class MarsModel(pl.LightningModule):
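    # Wraps a torchvision backbone selected by hyper_param["model"]; the final
    # classifier layer is replaced to output hyper_param["num_classes"] logits,
    # and the backbone can optionally be frozen for transfer learning.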
def __init__(self, hyper_param):
super().__init__()
self.momentum = hyper_param["momentum"]
self.optimizer = hyper_param["optimizer"]
self.lr = hyper_param["learning_rate"]
self.num_classes = hyper_param["num_classes"]
if hyper_param["model"] == "resnet18":
"""
Resnet18
"""
self.net = resnet18(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.fc.in_features
self.net.fc = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "resnet34":
"""
Resnet34
"""
self.net = resnet34(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.fc.in_features
self.net.fc = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "resnet50":
"""
Resnet50
"""
self.net = resnet50(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.fc.in_features
self.net.fc = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "alexnet":
"""
Alexnet
"""
self.net = alexnet(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.classifier[6].in_features
self.net.classifier[6] = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "vgg16":
"""
VGG16_bn
"""
self.net = vgg16_bn(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.classifier[6].in_features
self.net.classifier[6] = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "densenet121":
"""
Densenet-121
"""
self.net = densenet121(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.classifier.in_features
self.net.classifier = nn.Linear(num_ftrs, hyper_param["num_classes"])
elif hyper_param["model"] == "densenet161":
"""
Densenet-161
"""
self.net = densenet161(pretrained=hyper_param["pretrained"])
if hyper_param["transfer_learning"] is True:
self.set_parameter_requires_grad(self.net)
num_ftrs = self.net.classifier.in_features
self.net.classifier = nn.Linear(num_ftrs, hyper_param["num_classes"])
else:
print("Invalid model name, exiting...")
exit()
def forward(self, x):
return self.net(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
acc = accuracy(torch.argmax(y_hat, dim=1), y, num_classes=self.num_classes)
prec, recall = precision_recall(
F.softmax(y_hat, dim=1), y, num_classes=self.num_classes, reduction="none"
)
return {
"val_loss": loss,
"val_acc": acc,
"val_prec": prec,
"val_recall": recall,
}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_acc = torch.stack([x["val_acc"] for x in outputs]).mean()
return {
"val_loss": avg_loss,
"progress_bar": {"val_loss": avg_loss, "val_acc": avg_acc},
}
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
return {"test_loss": loss}
def test_epoch_end(self, outputs):
avg_loss = torch.stack([x["test_loss"] for x in outputs]).mean()
logs = {"test_loss": avg_loss}
return {"test_loss": avg_loss, "log": logs}
def configure_optimizers(self):
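        # Only parameters still marked trainable are handed to the optimizer,
        # which matters when transfer_learning froze the backbone.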
params_to_update = []
print("Params to learn:")
for name, param in self.net.named_parameters():
if param.requires_grad is True:
params_to_update.append(param)
print("\t", name)
if self.optimizer == "adam":
optimizer = torch.optim.Adam(params_to_update, lr=self.lr)
elif self.optimizer == "sgd":
optimizer = torch.optim.SGD(
params_to_update, lr=self.lr, momentum=self.momentum
)
else:
print("Invalid optimizer, exiting...")
exit()
return optimizer
    def set_parameter_requires_grad(self, model):
        # Freeze all parameters of the pretrained backbone so only the new head is trained.
        for param in model.parameters():
            param.requires_grad = False
| 35.349398
| 86
| 0.581118
| 5,548
| 0.945467
| 0
| 0
| 0
| 0
| 0
| 0
| 1,025
| 0.174676
|
1a05c837044c86fc7d751b18c934f19ce77168a2
| 12,132
|
py
|
Python
|
examples/challenges/shell-plugin/ctfd/CTFd/plugins/shell-plugin/shell.py
|
ameserole/Akeso
|
868f280e88f44e65e44fbe2f6c43e6b7c92fbcab
|
[
"MIT"
] | 19
|
2018-02-26T00:19:17.000Z
|
2019-12-18T04:26:45.000Z
|
examples/challenges/shell-plugin/ctfd/CTFd/plugins/shell-plugin/shell.py
|
ameserole/Akeso
|
868f280e88f44e65e44fbe2f6c43e6b7c92fbcab
|
[
"MIT"
] | 11
|
2018-05-07T15:11:30.000Z
|
2018-11-13T16:40:41.000Z
|
examples/challenges/shell-plugin/ctfd/CTFd/plugins/shell-plugin/shell.py
|
ameserole/Akeso
|
868f280e88f44e65e44fbe2f6c43e6b7c92fbcab
|
[
"MIT"
] | 1
|
2018-08-28T15:50:09.000Z
|
2018-08-28T15:50:09.000Z
|
import logging
import os
import re
import time
import urllib
from threading import Thread
import xmlrpclib
from Queue import Queue
from flask import current_app as app, render_template, request, redirect, abort, jsonify, json as json_mod, url_for, session, Blueprint
from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
from passlib.hash import bcrypt_sha256
from CTFd.utils import sha512, is_safe_url, authed, can_send_mail, sendmail, can_register, get_config, verify_email, validate_url
from CTFd.models import db, Teams, Pages
import CTFd.auth
import CTFd.views
def create_user_thread(q):
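    # Worker loop: consumes (username, password, action) tuples from a queue and
    # forwards them to the local XML-RPC shell service; not invoked by load() below.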
while True:
user_pair = q.get(block=True)
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
if user_pair[2] == "create":
shell.add_user(user_pair[0], user_pair[1])
elif user_pair[2] == "change":
shell.change_user(user_pair[0], user_pair[1])
def load(app):
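    # Plugin entry point: registers the /shell page and replaces CTFd's register,
    # reset_password and profile views so shell accounts (managed over XML-RPC on
    # localhost:8000) stay in sync with team credentials.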
shell = Blueprint('shell', __name__, template_folder='shell-templates')
app.register_blueprint(shell, url_prefix='/shell')
page = Pages('shell',""" """ )
auth = Blueprint('auth', __name__)
shellexists = Pages.query.filter_by(route='shell').first()
if not shellexists:
db.session.add(page)
db.session.commit()
@app.route('/shell', methods=['GET'])
def shell_view():
if not authed():
return redirect(url_for('auth.login', next=request.path))
return render_template('shell.html',root=request.script_root)
@app.route('/register', methods=['POST', 'GET'])
def register():
if not can_register():
return redirect(url_for('auth.login'))
if request.method == 'POST':
errors = []
name = request.form['name']
email = request.form['email']
password = request.form['password']
name_len = len(name) < 2
names = Teams.query.add_columns('name', 'id').filter_by(name=name).first()
emails = Teams.query.add_columns('email', 'id').filter_by(email=email).first()
pass_short = len(password) == 0
pass_long = len(password) > 32
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", request.form['email'])
if not valid_email:
errors.append("That email doesn't look right")
if names:
errors.append('That team name is already taken')
if emails:
errors.append('That email has already been used')
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if name_len:
errors.append('Pick a longer team name')
if len(errors) > 0:
return render_template('register.html', errors=errors, name=request.form['name'], email=request.form['email'], password=request.form['password'])
else:
with app.app_context():
team = Teams(name, email.lower(), password)
db.session.add(team)
db.session.commit()
db.session.flush()
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.add_user(name, password)
session['username'] = team.name
session['id'] = team.id
session['admin'] = team.admin
session['nonce'] = sha512(os.urandom(10))
if can_send_mail() and get_config('verify_emails'): # Confirming users is enabled and we can send email.
db.session.close()
logger = logging.getLogger('regs')
logger.warn("[{0}] {1} registered (UNCONFIRMED) with {2}".format(time.strftime("%m/%d/%Y %X"),
request.form['name'].encode('utf-8'),
request.form['email'].encode('utf-8')))
return redirect(url_for('auth.confirm_user'))
else: # Don't care about confirming users
if can_send_mail(): # We want to notify the user that they have registered.
sendmail(request.form['email'], "You've successfully registered for {}".format(get_config('ctf_name')))
db.session.close()
logger = logging.getLogger('regs')
logger.warn("[{0}] {1} registered with {2}".format(time.strftime("%m/%d/%Y %X"), request.form['name'].encode('utf-8'), request.form['email'].encode('utf-8')))
return redirect(url_for('challenges.challenges_view'))
else:
return render_template('register.html')
def reset_password(data=None):
if data is not None and request.method == "GET":
return render_template('reset_password.html', mode='set')
if data is not None and request.method == "POST":
try:
s = TimedSerializer(app.config['SECRET_KEY'])
name = s.loads(urllib.unquote_plus(data.decode('base64')), max_age=1800)
except BadTimeSignature:
return render_template('reset_password.html', errors=['Your link has expired'])
except:
return render_template('reset_password.html', errors=['Your link appears broken, please try again.'])
team = Teams.query.filter_by(name=name).first_or_404()
password = request.form['password'].strip()
name = team.name
pass_short = len(password) == 0
pass_long = len(password) > 32
#http://stackoverflow.com/questions/19605150/regex-for-password-must-be-contain-at-least-8-characters-least-1-number-and-bot
errors = []
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if len(errors) > 0:
return render_template('reset_password.html', errors=errors)
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.change_user(name, password)
team.password = bcrypt_sha256.encrypt(password)
db.session.commit()
db.session.close()
return redirect(url_for('auth.login'))
if request.method == 'POST':
email = request.form['email'].strip()
team = Teams.query.filter_by(email=email).first()
if not team:
return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
s = TimedSerializer(app.config['SECRET_KEY'])
token = s.dumps(team.name)
text = """
Did you initiate a password reset?
{0}/{1}
""".format(url_for('auth.reset_password', _external=True), urllib.quote_plus(token.encode('base64')))
sendmail(email, text)
return render_template('reset_password.html', errors=['If that account exists you will receive an email, please check your inbox'])
return render_template('reset_password.html')
def profile():
if authed():
if request.method == "POST":
errors = []
name = request.form.get('name')
email = request.form.get('email')
website = request.form.get('website')
affiliation = request.form.get('affiliation')
country = request.form.get('country')
user = Teams.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
names = Teams.query.filter_by(name=name).first()
name_len = len(request.form['name']) < 2
emails = Teams.query.filter_by(email=email).first()
valid_email = re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
password = request.form['password'].strip()
pass_short = len(password) == 0
pass_long = len(password) > 32
if ('password' in request.form.keys() and not len(request.form['password']) == 0) and \
(not bcrypt_sha256.verify(request.form.get('confirm').strip(), user.password)):
errors.append("Your old password doesn't match what we have.")
if not valid_email:
errors.append("That email doesn't look right")
if not get_config('prevent_name_change') and names and name != session['username']:
errors.append('That team name is already taken')
if emails and emails.id != session['id']:
errors.append('That email has already been used')
if not get_config('prevent_name_change') and name_len:
errors.append('Pick a longer team name')
if website.strip() and not validate_url(website):
errors.append("That doesn't look like a valid URL")
if pass_short:
errors.append('Pick a longer password')
if pass_long:
errors.append('Pick a shorter password')
if len(errors) > 0:
return render_template('profile.html', name=name, email=email, website=website,
affiliation=affiliation, country=country, errors=errors)
else:
team = Teams.query.filter_by(id=session['id']).first()
if not get_config('prevent_name_change'):
team.name = name
if team.email != email.lower():
team.email = email.lower()
if get_config('verify_emails'):
team.verified = False
session['username'] = team.name
if 'password' in request.form.keys() and not len(request.form['password']) == 0:
team.password = bcrypt_sha256.encrypt(request.form.get('password'))
password = request.form['password'].strip()
team.website = website
team.affiliation = affiliation
team.country = country
name = team.name
if password:
shell = xmlrpclib.ServerProxy('http://localhost:8000',allow_none=True)
shell.change_user(name, password)
db.session.commit()
db.session.close()
return redirect(url_for('views.profile'))
else:
user = Teams.query.filter_by(id=session['id']).first()
name = user.name
email = user.email
website = user.website
affiliation = user.affiliation
country = user.country
prevent_name_change = get_config('prevent_name_change')
confirm_email = get_config('verify_emails') and not user.verified
return render_template('profile.html', name=name, email=email, website=website, affiliation=affiliation,
country=country, prevent_name_change=prevent_name_change, confirm_email=confirm_email)
else:
return redirect(url_for('auth.login'))
app.view_functions['auth.reset_password'] = reset_password
app.view_functions['auth.register'] = register
app.view_functions['views.profile'] = profile
| 45.609023
| 170
| 0.548714
| 0
| 0
| 0
| 0
| 3,628
| 0.299044
| 0
| 0
| 2,473
| 0.203841
|
1a08162c99a6ecd9599da9bfcff9c2e8807aa344
| 972
|
py
|
Python
|
386-Lexicographical-Numbers/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
386-Lexicographical-Numbers/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
386-Lexicographical-Numbers/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
class Solution(object):
def _dfs(self,num,res,n):
if num>n:
return
res.append(num)
num=num*10
if num<=n:
for i in xrange(10):
self._dfs(num+i,res,n)
    def solveOn(self,n):
res=[]
cur=1
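        # Walk the numbers in lexicographic (pre-order) order without recursion:
        # prefer appending a 0 (cur*10); otherwise increment; otherwise back out
        # of trailing 9s / exhausted branches and increment the parent.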
for i in xrange(1,n+1):
res.append(cur)
if cur*10<=n:
cur=cur*10
            # if the number does not end in 9, just add 1
            # (when it ends in 9, e.g. 19, the next lexicographic number is 2, not 20)
elif cur%10!=9 and cur+1<=n:
cur+=1
else:
# get the 199--2 499--5
while (cur/10)%10==9:
cur/=10
cur=cur/10+1
return res
def lexicalOrder(self, n):
"""
:type n: int
:rtype: List[int]
"""
        return self.solveOn(n)
res=[]
for i in xrange(1,10):
self._dfs(i,res,n)
return res
| 26.27027
| 50
| 0.407407
| 972
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.161523
|
1a083cf15885b049f7b7a3ad09fa1e14dd77a3b3
| 8,470
|
py
|
Python
|
discord-status.py
|
byemc/discord-rhythmbox-plugin
|
46e855bd27b2bfe9d7a202135bcf228aff402fa8
|
[
"MIT"
] | 1
|
2021-11-23T05:37:25.000Z
|
2021-11-23T05:37:25.000Z
|
discord-status.py
|
byemc/discord-rhythmbox-plugin
|
46e855bd27b2bfe9d7a202135bcf228aff402fa8
|
[
"MIT"
] | null | null | null |
discord-status.py
|
byemc/discord-rhythmbox-plugin
|
46e855bd27b2bfe9d7a202135bcf228aff402fa8
|
[
"MIT"
] | null | null | null |
import gi
import time
import os
import json
gi.require_version('Notify', '0.7')
gi.require_version('Gtk', '3.0')
from gi.repository import Notify, Gtk
from gi.repository import Gio, GLib, GObject, Peas
from gi.repository import RB
from pypresence import Presence
from status_prefs import discord_status_prefs
class discord_status_dev(GObject.Object, Peas.Activatable):
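    # Rhythmbox plugin that mirrors playback state to Discord Rich Presence via
    # pypresence. The connection attempt runs at class-definition time so a
    # missing Discord client can be reported (and optionally retried) up front.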
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.json")
with open(path) as file:
settings = json.load(file)
show_notifs = settings["show_notifs"]
time_style = settings["time_style"]
try:
Notify.init("Rhythmbox")
except:
print("Failed to init Notify. Is the notificaion service running?")
is_streaming = False
RPC = Presence("589905203533185064")
connected = False
gave_up = False
try:
RPC.connect()
try:
if show_notifs:
Notify.Notification.new("Rhythmbox Discord Status Plugin", "Connected to Discord").show()
Notify.uninit()
except:
print("Failed to init Notify. Is the notificaion service running?")
connected = True
except ConnectionRefusedError:
try:
if show_notifs:
Notify.Notification.new("Rhythmbox Discord Status Plugin", "Failed to connect to discord: ConnectionRefused. Is discord open?").show()
Notify.uninit()
except:
print("Failed to init Notify. Is the notificaion service running?")
if show_notifs:
while not connected and not gave_up:
dialog = Gtk.Dialog(title = "Discord Rhythmbox Status Plugin",
parent = None,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK)
)
hbox = Gtk.HBox()
label = Gtk.Label("\nFailed to connect to the discord client. Make sure that discord is open. Retry?\n")
hbox.pack_start(label, True, True, 0)
dialog.vbox.pack_start(hbox, True, True, 0)
dialog.vbox.show_all()
response = dialog.run()
if (response == Gtk.ResponseType.OK):
try:
RPC.connect()
connected = True
except ConnectionRefusedError:
print('Failed to retry connection to discord')
elif (response == Gtk.ResponseType.CANCEL):
gave_up = True
dialog.destroy()
else:
pass
dialog.destroy()
__gtype_name__ = 'DiscordStatusPlugin'
object = GObject.property(type=GObject.Object)
start_date = None
playing_date = None
is_playing = False
def __init__ (self):
GObject.Object.__init__ (self)
def do_activate(self):
shell = self.object
sp = shell.props.shell_player
self.psc_id = sp.connect ('playing-song-changed',
self.playing_entry_changed)
self.pc_id = sp.connect ('playing-changed',
self.playing_changed)
self.ec_id = sp.connect ('elapsed-changed',
self.elapsed_changed)
self.pspc_id = sp.connect ('playing-song-property-changed',
self.playing_song_property_changed)
self.RPC.update(state="Playback Stopped", details="Rhythmbox Status Plugin", large_image="rhythmbox", small_image="stop", small_text="Stopped")
def do_deactivate(self):
shell = self.object
sp = shell.props.shell_player
sp.disconnect (self.psc_id)
sp.disconnect (self.pc_id)
sp.disconnect (self.ec_id)
sp.disconnect (self.pspc_id)
self.RPC.clear(pid=os.getpid())
self.RPC.close()
def get_info(self, sp):
album = None
title = None
artist = None
duration = None
if not sp.get_playing_entry().get_string(RB.RhythmDBPropType.ALBUM):
album = 'Unknown'
else:
album = sp.get_playing_entry().get_string(RB.RhythmDBPropType.ALBUM)
if not sp.get_playing_entry().get_string(RB.RhythmDBPropType.TITLE):
title = 'Unknown'
else:
title = sp.get_playing_entry().get_string(RB.RhythmDBPropType.TITLE)
if not sp.get_playing_entry().get_string(RB.RhythmDBPropType.ARTIST):
artist = 'Unknown'
else:
artist = sp.get_playing_entry().get_string(RB.RhythmDBPropType.ARTIST)
if not sp.get_playing_entry().get_ulong(RB.RhythmDBPropType.DURATION):
duration = 0
else:
duration = sp.get_playing_entry().get_ulong(RB.RhythmDBPropType.DURATION)
if len(album) < 2:
album = "%s" %(album)
return [album, title, artist, duration]
def playing_song_property_changed(self, sp, uri, property, old, newvalue):
print("playing_song_property_changed: %s %s %s %s" %(uri, property, old, newvalue))
info = self.get_info(sp)
if property == "rb:stream-song-title":
self.is_streaming = True
self.update_streaming_rpc(info, newvalue)
def update_streaming_rpc(self, info, d):
self.RPC.update(state=info[1][0:127], details=d[0:127], large_image="rhythmbox", small_image="play", small_text="Streaming", start=int(time.time()))
def playing_entry_changed(self, sp, entry):
if sp.get_playing_entry():
self.start_date = int(time.time())
self.playing_date = self.start_date
info = self.get_info(sp)
album = info[0]
title = info[1]
artist = info[2]
duration = info[3]
if duration == 0 and not self.is_streaming:
self.update_streaming_rpc(info, "Unknown - Unknown")
elif duration == 0 and self.is_streaming:
self.update_streaming_rpc(info, "Unknown - Unknown")
return
else:
self.is_streaming = False
details="%s - %s" %(title, artist)
self.is_playing = True
start_time = int(time.time())
pos = sp.get_playing_time().time
end_time = (start_time + duration - pos) if self.time_style == 1 else None
self.RPC.update(state=album[0:127], details=details[0:127], large_image="rhythmbox", small_image="play", small_text="Playing", start=start_time, end=end_time)
def playing_changed(self, sp, playing):
album = None
title = None
artist = None
if sp.get_playing_entry():
info = self.get_info(sp)
album = info[0]
title = info[1]
artist = info[2]
duration = info[3]
if duration == 0 and not self.is_streaming:
self.update_streaming_rpc(info, "Unknown - Unknown")
elif duration == 0:
return
else:
self.is_streaming = False
details="%s - %s" %(title, artist)
start_time = int(time.time())
pos = sp.get_playing_time().time
end_time = (start_time + duration - pos) if self.time_style == 1 else None
if playing:
self.is_playing = True
self.RPC.update(state=album[0:127], details=details[0:127], large_image="rhythmbox", small_image="play", small_text="Playing", start=start_time, end=end_time)
elif not playing and not sp.get_playing_entry():
self.is_playing = False
self.RPC.update(state="Playback Stopped", details="Rhythmbox Status Plugin", large_image="rhythmbox", small_image="stop", small_text="Stopped")
else:
self.is_playing = False
self.RPC.update(state=album[0:127], details=details[0:127], large_image="rhythmbox", small_image="pause", small_text="Paused")
def elapsed_changed(self, sp, elapsed):
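        # When timestamps are enabled (time_style != 0), re-publish the presence
        # whenever the elapsed position drifts from the wall-clock estimate
        # (i.e. after a seek).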
if not self.playing_date or not self.is_playing or self.time_style == 0:
return
else:
self.playing_date += 1
if self.playing_date - elapsed == self.start_date:
return
else:
if sp.get_playing_entry() and self.is_playing and not elapsed == 0:
self.playing_date = self.start_date + elapsed
info = self.get_info(sp)
album = info[0]
title = info[1]
artist = info[2]
duration = info[3]
if duration == 0 and not self.is_streaming:
self.update_streaming_rpc(info, "Unknown - Unknown")
elif duration == 0:
return
else:
self.is_streaming = False
details="%s - %s" %(title, artist)
start_time = int(time.time())
pos = sp.get_playing_time().time
end_time = (start_time + duration - pos) if self.time_style == 1 else None
self.RPC.update(state=album[0:127], details=details[0:127], large_image="rhythmbox", small_image="play", small_text="Playing", start=start_time, end=end_time)
| 34.430894
| 166
| 0.641086
| 8,160
| 0.963173
| 0
| 0
| 0
| 0
| 0
| 0
| 1,167
| 0.137748
|