Dataset schema (39 columns; "nullable" marks fields that may be null):

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64, nullable | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64, nullable | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64, nullable | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
---
hexsha: 02c3032f7fcf222c4dcbaeab9a4990232ced4e2b · size: 3,948 · ext: py · lang: Python
max_stars: auxilium/__init__.py · sonntagsgesicht/auxilium @ f4e34089d6230e4a2957cf8c2f462210c6c714f0 · licenses: ["Apache-2.0"] · count: null · events: null – null
max_issues: auxilium/__init__.py · sonntagsgesicht/auxilium @ f4e34089d6230e4a2957cf8c2f462210c6c714f0 · licenses: ["Apache-2.0"] · count: null · events: null – null
max_forks: auxilium/__init__.py · sonntagsgesicht/auxilium @ f4e34089d6230e4a2957cf8c2f462210c6c714f0 · licenses: ["Apache-2.0"] · count: 1 · events: 2020-03-12T22:51:27.000Z – 2020-03-12T22:51:27.000Z
content:
# -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.2.8, copyright Friday, 14 January 2022
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from logging import log, basicConfig, getLogger, NullHandler
from os import getcwd, name as os_name
from os.path import basename, split, join
from pathlib import Path
from re import findall
from sys import exit, executable
from configparser import ConfigParser
from .add_arguments import add_parser
from .methods.root import do
from .tools.const import CONFIG_PATH, VERBOSITY_LEVELS, ICONS
getLogger(__name__).addHandler(NullHandler())
__doc__ = 'Python project for an automated test and deploy toolkit.'
__version__ = '0.2.8'
__dev_status__ = '4 - Beta'
__date__ = 'Saturday, 15 January 2022'
__author__ = 'sonntagsgesicht'
__email__ = __author__ + '@icloud.com'
__url__ = 'https://github.com/' + __author__ + '/' + __name__
__license__ = 'Apache License 2.0'
__dependencies__ = 'pip', 'dulwich', 'regtest', 'flake8', 'bandit', \
'coverage', 'twine', 'sphinx', 'sphinx-rtd-theme', \
'sphinx-math-dollar', 'karma-sphinx-theme', \
'sphinx-pytype-substitution'
__dependency_links__ = ()
__data__ = ('data/pkg.zip',)
__scripts__ = ('auxilium=auxilium:auxilium',)
__theme__ = 'karma-sphinx-theme'
''' todo
auxilium create --clone url
auxilium build --archive as zip -r Derivate.zip Derivate -x "*/.*"
'black' python code linter incl. correction
'isort . --profile black' sorts imports
'darglint' rst doc linter
'poetry' run safety check dependency management
'Cookiecutter' project templates
'pipenv'
'pyscaffold' project generator for bootstrapping high quality Python packages
'''
def auxilium(args=None):
# init config and argument parser
config = ConfigParser(allow_no_value=True)
config.read(Path.home().joinpath(CONFIG_PATH))
config.read(join(getcwd(), CONFIG_PATH))
# set icons set
if not config.getboolean('DEFAULT', 'icons', fallback=os_name == 'posix'):
ICONS.clear()
ICONS.update({'error': '!!', 'warn': '!'})
# parse arguments for cli (if not given as function arguments)
parser = add_parser(config)
if args is None:
kwargs = vars(parser.parse_args())
sys_exit = exit
else:
        # find text identifier " or ' (whichever comes first)
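        # e.g. the string "build -v 'my arg'" splits into ('build', '-v', 'my arg')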
if args.find("'") < 0 <= args.find('"') or \
0 <= args.find('"') < args.find("'"):
args = findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', args)
args = tuple(a.replace('"', '') for a in args)
else:
args = findall(r"(?:[^\s,']|'(?:\\.|[^'])*')+", args)
args = tuple(a.replace("'", "") for a in args)
kwargs = vars(parser.parse_args(a for a in args if a))
kwargs['exit_status'] = -1
sys_exit = int
# add pkg and path to kwargs
kwargs['cwd'] = kwargs.get('cwd', getcwd())
kwargs['path'] = kwargs.get('path', getcwd())
kwargs['pkg'] = kwargs.get('name', basename(getcwd()))
# check version
if kwargs.get('version', False):
print('%s %s from %s (%s)' % (
__name__, __version__, split(__file__)[0], executable))
sys_exit()
# print help in case of no command
if kwargs.get('command', False) is None and \
not any((kwargs.get('demo', False), kwargs.get('version', False))):
parser.print_help()
sys_exit()
# init logging
item = min(kwargs.get('verbosity', False), len(VERBOSITY_LEVELS) - 1)
verbosity, formatter = VERBOSITY_LEVELS[item]
basicConfig(level=verbosity, format=formatter)
log(1, ICONS['inspect'] + '(parsed) arguments:')
for item in kwargs.items():
log(1, ICONS[''] + " %-12s : %r" % item)
# call command/method
return do(**kwargs)
stats: avg_line_length 33.457627 · max_line_length 79 · alphanum_fraction 0.636272 · classes 0 (score 0) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 1,619 (score 0.410081)
---
hexsha: 02c4f7ff3bc4a17aba2744ac9fa57ef882474bfa · size: 7,875 · ext: py · lang: Python
max_stars: importtime_output_wrapper.py · Victor333Huesca/importtime-output-wrapper @ 15941ffe30a93a2d5ec1832e16df160caa1d51e4 · licenses: ["MIT"] · count: 1 · events: 2021-02-10T13:15:47.000Z – 2021-02-10T13:15:47.000Z
max_issues: importtime_output_wrapper.py · dominikwalk/importtime_output_wrapper @ 67c94371cd92ea66f4dbdd8840cf6120db4160c0 · licenses: ["MIT"] · count: 1 · events: 2021-09-01T19:25:33.000Z – 2021-09-01T19:25:33.000Z
max_forks: importtime_output_wrapper.py · dominikwalk/importtime_output_wrapper @ 67c94371cd92ea66f4dbdd8840cf6120db4160c0 · licenses: ["MIT"] · count: null · events: null – null
content:
import re
import subprocess
import shutil
import sys
import json
import argparse
from typing import List, NamedTuple, Optional, Sequence
PATTERN_IMPORT_TIME = re.compile(r"^import time:\s+(\d+) \|\s+(\d+) \|(\s+.*)")
class InvalidInput(Exception):
pass
class Import(dict):
def __init__(self, name: str, t_self: int, t_cumu: int, depth: int, childs: List):
super().__init__()
self.__dict__ = self
self.name = name
self.depth = depth
self.t_self_us = t_self
self.t_cumulative_us = t_cumu
self.nested_imports = childs
def get_import_time(module: str) -> str:
"""
Call the importtime function as subprocess, pass all selected modules
and return the stderr output.
"""
try:
ret = subprocess.run(
(sys.executable, "-Ximporttime", "-c", f"import {module}"),
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
encoding="UTF-8",
)
except subprocess.CalledProcessError:
raise InvalidInput(f'Invalid input: Could not import module "{module}"')
return ret.stderr
def parse_import_time(s: str) -> List[Import]:
"""
    Recursively parse the importtime stderr output into a uniform tree structure.
"""
root = Import("root", 0, 0, 0, [])
import_stack = [root]
for line in reversed(s.splitlines()):
m = PATTERN_IMPORT_TIME.match(line)
if m:
t_self = int(m[1])
t_cumu = int(m[2])
name = str(m[3])
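            # importtime indents each nested import by two spaces; recover the tree depth from the leading whitespace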
depth = int((len(name) - len(name.lstrip()) - 1) / 2) + 1
new_imp = Import(
name=name.strip(), t_self=t_self, t_cumu=t_cumu, depth=depth, childs=[]
)
for _ in range(len(import_stack) - depth):
import_stack.pop()
import_stack[-1].nested_imports.insert(0, new_imp)
import_stack.append(new_imp)
if root.nested_imports == []:
raise InvalidInput("Invalid input: could not parse any imports")
return [root]
def prune_import_depth(
imports: List[Import], depth: Optional[int] = None
) -> List[Import]:
"""
Prune the unified tree structure to the desired depth level.
"""
def prune_children(childs: List[Import], depth: int):
if childs == []:
return
if depth == 0:
childs.clear()
for imp in childs:
prune_children(imp.nested_imports, depth - 1)
if depth is not None:
prune_children(imports, depth + 1)
return imports
def sort_imports(imports: List[Import], sort_by="self") -> List[Import]:
"""
Sort the unified tree structure according to the desired time key.
"""
def sort_children(childs: List[Import]) -> None:
if childs == []:
return
else:
if sort_by == "self":
childs.sort(key=lambda x: x.t_self_us, reverse=True)
elif sort_by == "cumulative":
childs.sort(key=lambda x: x.t_cumulative_us, reverse=True)
for imp in childs:
sort_children(imp.nested_imports)
sort_children(imports)
return imports
def import_tree_to_json_str(imports: List[Import]) -> str:
    """
    Serialize the imported-modules tree to a JSON string.
    """
exclude_root = imports[0]["nested_imports"]
return json.dumps(exclude_root, indent=2)
def import_tree_to_waterfall(imports: List[Import], time_key="self", width=79) -> str:
    """
    Render the imported-modules tree as a waterfall diagram.
    """
output_str = ""
waterfall_output = []
max_time = 0
max_name_len = 0
imp = NamedTuple("imp", [("name", str), ("space", int), ("time", int)])
def create_name_str(childs: List[Import]) -> None:
nonlocal max_time
nonlocal max_name_len
nonlocal waterfall_output
nonlocal time_key
if childs == []:
return
else:
for child in childs:
time = {"self": child.t_self_us, "cumulative": child.t_cumulative_us}[
time_key
]
waterfall_output.append(
imp(name=child.name, space=child.depth - 1, time=time)
)
if time > max_time:
max_time = time
if (len(child.name) + child.depth) > max_name_len:
max_name_len = len(child.name) + child.depth
create_name_str(child.nested_imports)
return
create_name_str(imports[0]["nested_imports"])
header = "module name" + " " * ((max_name_len + 1) - len("module name")) + " "
header += " import time (us)" + "\n" + "-" * width + "\n"
output_str += header
for node in waterfall_output:
name = node.space * "." + str(node.name)
offset = ((max_name_len - len(name)) + 3) * " "
time_str = str(node.time)
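        # scale the bar to the remaining line width, proportional to this node's share of the slowest import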
water = "=" * int(
(node.time / max_time)
* (width - len(offset) - len(time_str) - len(name) - 2)
)
line_str = f"{name}{offset}{water}({time_str})\n"
output_str += line_str
min_width = round(1 / (node.time / max_time) + len(time_str) + len(name) + 2)
if width < min_width:
warning_msg = f"WARNING: The waterfall diagram may not be displayed correctly if the set width is too small!"
output_str += warning_msg
return output_str
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser(
description="""
This script calls the python3 -X importtime implementation with a given module
and parses the stderr output into a json format, which can then be used to
search or display the given information. It can also display the data as a
waterfall diagram in the terminal.
"""
)
parser.add_argument("module", help="the module to import")
parser.add_argument(
"--format",
nargs="?",
default="json",
choices=["json", "waterfall"],
help="output format",
)
parser.add_argument(
"--sort",
nargs="?",
choices=["self", "cumulative"],
help="sort imported modules by import-time",
)
parser.add_argument(
"--time",
nargs="?",
choices=["self", "cumulative"],
help="time to use in waterfall format (default self)",
)
parser.add_argument(
"--width",
nargs="?",
type=int,
help="width of entries in waterfall format (default to "
"environement variable COLUMNS or terminal's width)",
)
parser.add_argument(
"--depth",
nargs="?",
type=int,
help="limit depth of output format (default unlimited)",
)
args = parser.parse_args(argv)
if args.time and args.format != "waterfall":
parser.error(
"--time requires format to be set to waterfall (--format waterfall)"
)
if args.width and args.format != "waterfall":
parser.error(
"--length requires format to be set to waterfall (--format waterfall)"
)
raw_output = get_import_time(module=str(args.module))
all_imports = parse_import_time(raw_output)
pruned_imports = prune_import_depth(all_imports, args.depth)
if args.sort:
output_imports = sort_imports(imports=pruned_imports, sort_by=args.sort)
else:
output_imports = pruned_imports
if args.format == "json":
print(import_tree_to_json_str(output_imports))
elif args.format == "waterfall":
width = args.width or shutil.get_terminal_size().columns
time = args.time or "self"
print(import_tree_to_waterfall(output_imports, time_key=time, width=width))
return 0
if __name__ == "__main__":
exit(main())
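A minimal usage sketch for the module above (assuming it is importable as `importtime_output_wrapper`; the target module `json` is only an example):

```python
from importtime_output_wrapper import main

# Profile the imports of the standard-library json module and print the
# result as a waterfall diagram, sorted by cumulative import time and
# pruned to two levels of nesting.
main(["json", "--format", "waterfall", "--sort", "cumulative", "--depth", "2"])
```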
stats: avg_line_length 30.405405 · max_line_length 117 · alphanum_fraction 0.590349 · classes 360 (score 0.045714) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 1,907 (score 0.242159)
---
hexsha: 02c5550343d841d31714a9ed5ade721bffe3bee2 · size: 6,631 · ext: py · lang: Python
max_stars: test/unit/test_params.py · davvil/sockeye @ 188db761d314a913b88a5ff44395abb77797e5b9 · licenses: ["Apache-2.0"] · count: 1,117 · events: 2017-06-12T15:11:12.000Z – 2022-03-23T00:53:51.000Z
max_issues: test/unit/test_params.py · davvil/sockeye @ 188db761d314a913b88a5ff44395abb77797e5b9 · licenses: ["Apache-2.0"] · count: 553 · events: 2017-06-14T09:24:10.000Z – 2022-03-31T20:17:23.000Z
max_forks: test/unit/test_params.py · davvil/sockeye @ 188db761d314a913b88a5ff44395abb77797e5b9 · licenses: ["Apache-2.0"] · count: 369 · events: 2017-06-12T15:22:34.000Z – 2022-03-30T19:32:27.000Z
content:
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import glob
import os.path
import tempfile
import mxnet as mx
import pytest
import sockeye.encoder
import sockeye.model
import sockeye.training
import sockeye.constants as C
def test_cleanup_param_files():
with tempfile.TemporaryDirectory() as tmp_dir:
for n in itertools.chain(range(1, 20, 2), range(21, 41)):
# Create empty files
open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 17, False, 8, "perplexity", "best")
expectedSurviving = set([os.path.join(tmp_dir, C.PARAMS_NAME % n)
for n in [17, 36, 37, 38, 39, 40]])
# 17 must survive because it is the best one
assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expectedSurviving
def test_cleanup_param_files_keep_first():
with tempfile.TemporaryDirectory() as tmp_dir:
for n in itertools.chain(range(0, 20, 2), range(21, 41)):
# Create empty files
open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 16, True, 8, "perplexity", "best")
expectedSurviving = set([os.path.join(tmp_dir, C.PARAMS_NAME % n)
for n in [0, 16, 36, 37, 38, 39, 40]])
# 16 must survive because it is the best one
# 0 should also survive because we set keep_first to True
assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expectedSurviving
def mock_model():
config_embed = sockeye.encoder.EmbeddingConfig(vocab_size=20, num_embed=4, dropout=0.0)
config_encoder = sockeye.encoder.EncoderConfig(model_size=4, attention_heads=1, feed_forward_num_hidden=4,
act_type='relu', num_layers=1, dropout_attention=0.0,
dropout_act=0.0, dropout_prepost=0.0,
positional_embedding_type='fixed', preprocess_sequence='none',
postprocess_sequence='none', max_seq_len_source=30,
max_seq_len_target=30)
config = sockeye.model.ModelConfig(config_data=None, vocab_source_size=20, vocab_target_size=20,
config_embed_source=config_embed, config_embed_target=config_embed,
config_encoder=config_encoder, config_decoder=config_encoder)
model = sockeye.model.SockeyeModel(config=config)
return model
def test_set_parameters():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(0))
model.set_parameters({'source_target_embed_weight': p})
assert mx.test_utils.same(model.params['source_target_embed_weight'].data(), p.data())
def test_set_parameters_allow_missing():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
model.set_parameters({}, allow_missing=True)
assert 'source_target_embed_weight' in model.params
with pytest.raises(AssertionError) as e:
model.set_parameters({}, allow_missing=False)
assert str(e.value) == "Parameter 'source_target_embed_weight' is missing in new_params dictionary. " \
"Set allow_missing=True to ignore missing parameters."
def test_set_parameters_ignore_extra():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(0))
q = mx.gluon.Parameter('q', shape=(1, 1))
q.initialize(init='xavier', ctx=mx.cpu(0))
params = {'source_target_embed_weight': p, 'q': q}
model.set_parameters(params, ignore_extra=True)
assert 'source_target_embed_weight' in model.params
assert 'q' not in model.params
with pytest.raises(ValueError) as e:
model.set_parameters(params, ignore_extra=False)
assert str(e.value) == "Parameter 'q' in new_params dictionary is not preset in ParameterDict. " \
"Set ignore_extra=True to ignore."
def test_set_parameters_context():
model = mock_model()
model.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(2))
model.set_parameters({'source_target_embed_weight': p})
for i in range(2):
assert mx.test_utils.same(model.params['source_target_embed_weight'].data(mx.cpu(i)), p.data(mx.cpu(2)))
def test_set_parameters_shape():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(10, 10))
p.initialize(init='xavier', ctx=mx.cpu(0))
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' has shape '(20, 4)' in the model but shape " \
"'(10, 10)' in the new_params dictionary."
def test_set_parameters_uninitialized():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' is not initialized in new_params dictionary."
p.initialize(init='xavier', ctx=mx.cpu(0))
model = mock_model()
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' must be initialized before it can be reset using " \
"set_parameters."
stats: avg_line_length 47.028369 · max_line_length 119 · alphanum_fraction 0.663248 · classes 0 (score 0) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 1,899 (score 0.286382)
---
hexsha: 02c5bed1bce16f210d130ab65cdc1e0b0a119b39 · size: 8,194 · ext: py · lang: Python
max_stars: code/utils/data_preparation.py · cltl/positive-interpretations @ 27640be255e8072b3333851f605c13c80e5d7ed3 · licenses: ["Apache-2.0"] · count: null · events: null – null
max_issues: code/utils/data_preparation.py · cltl/positive-interpretations @ 27640be255e8072b3333851f605c13c80e5d7ed3 · licenses: ["Apache-2.0"] · count: null · events: null – null
max_forks: code/utils/data_preparation.py · cltl/positive-interpretations @ 27640be255e8072b3333851f605c13c80e5d7ed3 · licenses: ["Apache-2.0"] · count: null · events: null – null
content:
import csv
import collections
import pandas as pd
from random import shuffle
from tqdm import tqdm
def get_all_tokens_conll(conll_file):
"""
Reads a CoNLL-2011 file and returns all tokens with their annotations in a dataframe including the original
sentence identifiers from OntoNotes
"""
all_tokens = list()
most_semroles = 0
with open(conll_file, "r") as infile:
for line in infile:
# Get sentence identifiers: distinguish between sentence count per file and per file part
# (some files are divided into multiple parts numbered as 000, 001, 002, ... etc.)
if line.startswith("#begin document"):
sent_id_part = 0
part_id = line.split("; part ")[1].rstrip("\n")
if part_id == "000":
sent_id_file = 0
else:
sent_id_file += 1
elif line.startswith("#end document"):
sent_id_file -= 1 # prevent counting too much (empty line followed by end document)
elif line == "\n":
sent_id_part += 1
sent_id_file += 1
else:
columns = line.split()
dict_token = {"file_id": columns[0],
"part_id": int(columns[1]),
"sent_id_part": int(sent_id_part),
"sent_id_file": int(sent_id_file),
"token_id": columns[2],
"word_form": columns[3],
"POS": columns[4],
"parse": columns[5],
"pred_lemma": columns[6],
"pred_frameset": columns[7],
"word_sense": columns[8],
"speaker": columns[9],
"NE": columns[10],
"coref": columns[-1].rstrip("\n")
}
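                # columns 11 through the second-to-last hold one semantic-role (APRED) column per predicate; the last column is coreference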
semroles = {f"APRED{i}": role for i, role in enumerate(columns[11:-1], 1)}
dict_token.update(semroles)
all_tokens.append(dict_token)
if len(semroles) > most_semroles:
most_semroles = len(semroles)
cols = list(dict_token.keys())
df_tokens = pd.DataFrame(all_tokens, columns=cols)
return df_tokens
def find_original_sent_ids(df_instances, df_conll):
"""
Takes the file_id, part_id and sent_id indicating a specific sentence in the CoNLL-2011 data (where file is split
into smaller parts and sent_id restarts for each part) and finds the corresponding 'original' sentence identifier
"""
print("Finding original sentence identifiers")
for index, row in tqdm(df_instances.iterrows(), total=len(df_instances)):
# For each instance in the set, find the corresponding sent_id_file in the annotations of CoNLL-2011
file_id = row["file_id"]
part_id = row["part_id"]
sent_id_part = row["sent_id_part"]
matching_rows = df_conll.loc[(df_conll["file_id"] == file_id) & (df_conll["part_id"] == part_id) &
(df_conll["sent_id_part"] == sent_id_part)]
sent_id_file = matching_rows.iloc[0]["sent_id_file"]
        df_instances.at[index, "sent_id_file"] = sent_id_file  # DataFrame.set_value() was removed in pandas 1.0
return df_instances
def get_role_features_from_annotations(role_annotations):
"""Splits the verb and role information (in original annotations file) to separate values"""
head, role = role_annotations.split(")] ")
head_pos, head_wf = head.lstrip("[(").split()
span, tokens = role.split(maxsplit=1)
span, label = span.rstrip(":").split(":")
role_features = (head_wf, head_pos, span, label, tokens)
return role_features
def rewrite_verb_and_role_features(df):
"""Rewrites the verb and role information in the original annotations file to separate columns"""
instances = df.to_dict("records")
for index, inst in enumerate(instances):
# Get verb features
verb = inst["verb"]
verb_features = get_role_features_from_annotations(verb)
verb_wf, verb_pos, verb_span, verb_label, verb_tokens = verb_features
# Get role features
role = inst["role"]
role_features = get_role_features_from_annotations(role)
role_head_wf, role_head_pos, role_span, role_label, role_tokens = role_features
new_dict = {"verb_wf": verb_wf,
"verb_pos": verb_pos,
"verb_span": verb_span,
"verb_label": verb_label,
"verb_tokens": verb_tokens,
"role_head_wf": role_head_wf,
"role_head_pos": role_head_pos,
"role_span": role_span,
"role_label": role_label,
"role_tokens": role_tokens,
"role_tokens": role_tokens}
inst.update(new_dict)
del inst["verb"]
del inst["role"]
instances[index] = inst
columns = list(instances[0].keys())
df = pd.DataFrame(instances, columns=columns)
return df
def transform_labels_three(row):
"""Takes original score (label) and converts to tertiary classes"""
label = int(row['label'])
if label <= 1:
return 0
if 1 < label <= 3:
return 1
if label >= 4:
return 2
def transform_labels_two(row):
"""Takes original score (label) and converts to binary classes"""
label = int(row['label'])
if label <= 2:
return 0
else:
return 1
def categorize_scores(df):
"""Takes original score (label) and converts to tertiary/binary classes"""
df["class_tertiary"] = df.apply(lambda row: transform_labels_three(row),axis=1)
df["class_binary"] = df.apply(lambda row: transform_labels_two(row),axis=1)
return df
def split_train_test(df_instances, test_ratio=0.2, to_shuffle=True):
"""Splits the instances into train and test sets. Each negation is either assigned to the train or test set."""
instances = df_instances.to_dict("records")
neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
if to_shuffle:
shuffle(neg_ids)
test_size = int(len(neg_ids) * test_ratio)
test_ids = neg_ids[0:test_size]
test_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) in test_ids]
train_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) not in test_ids]
columns = list(train_instances[0].keys())
df_train = pd.DataFrame(train_instances, columns=columns)
df_test = pd.DataFrame(test_instances, columns=columns)
return df_train, df_test
def k_fold(df_instances, k=10):
"""Divides all the samples in k groups of samples. Each negation is either assigned to the train or test set."""
    instances = df_instances.to_dict("records")
neg_ids = list({(inst["file_id"], inst["sent_id_file"], inst["verb_span"]) for inst in instances})
kf = list()
test_size = int(len(neg_ids) / k)
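    # note: the len(neg_ids) % k leftover negations never enter a test fold; they stay in every train fold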
start = 0
for n in range(0, k):
test_ids = neg_ids[start:start+test_size]
test_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) in test_ids]
train_instances = [inst for inst in instances if (inst["file_id"],
inst["sent_id_file"],
inst["verb_span"]) not in test_ids]
train_test = (pd.DataFrame(train_instances), pd.DataFrame(test_instances))
kf.append(train_test)
start += test_size
return kf
stats: avg_line_length 45.522222 · max_line_length 117 · alphanum_fraction 0.569929 · classes 0 (score 0) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 2,182 (score 0.266292)
---
hexsha: 02c6de12abd9a20df4664b77466fae5d81958b59 · size: 800 · ext: py · lang: Python
max_stars: {{ cookiecutter.project_name }}/setup.py · wlongxiang/cookiecutter-data-science @ bbae41e22ac7db74430f3c6b457c2ff7f52537e1 · licenses: ["MIT"] · count: null · events: null – null
max_issues: {{ cookiecutter.project_name }}/setup.py · wlongxiang/cookiecutter-data-science @ bbae41e22ac7db74430f3c6b457c2ff7f52537e1 · licenses: ["MIT"] · count: null · events: null – null
max_forks: {{ cookiecutter.project_name }}/setup.py · wlongxiang/cookiecutter-data-science @ bbae41e22ac7db74430f3c6b457c2ff7f52537e1 · licenses: ["MIT"] · count: null · events: null – null
content:
from setuptools import find_packages, setup
import re
VERSIONFILE = "{{ cookiecutter.project_name }}/__init__.py"
with open(VERSIONFILE, "rt") as versionfile:
    verstrline = versionfile.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(version_re, verstrline, re.M)
if mo:
ver_str = mo.group(1)
else:
raise ValueError("Unable to find version string in %s." % (VERSIONFILE,))
# add prod requires to setup so that pip can install dependencies for you
with open("requirements_prod.txt") as f:
required_pkgs = f.read().splitlines()
setup(
name='{{ cookiecutter.project_name }}',
packages=find_packages(),
version=ver_str,
description='{{ cookiecutter.description }}',
author='{{ cookiecutter.author_name }}',
install_requires=required_pkgs
)
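The version regex above assumes the generated package's `__init__.py` declares its version on a single line; a minimal sketch of what it matches (the version number is illustrative):

```python
# {{ cookiecutter.project_name }}/__init__.py
__version__ = "0.1.0"
```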
stats: avg_line_length 30.769231 · max_line_length 77 · alphanum_fraction 0.6975 · classes 0 (score 0) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 317 (score 0.39625)
---
hexsha: 02c7a6f45d41d451a3a0c3c43389880ddc7a1852 · size: 8,759 · ext: py · lang: Python
max_stars: square/api/cards_api.py · codertjay/square-python-sdk @ 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 · licenses: ["Apache-2.0"] · count: 1 · events: 2022-02-28T13:18:30.000Z – 2022-02-28T13:18:30.000Z
max_issues: square/api/cards_api.py · codertjay/square-python-sdk @ 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 · licenses: ["Apache-2.0"] · count: null · events: null – null
max_forks: square/api/cards_api.py · codertjay/square-python-sdk @ 1f5f34bc792e31991db0fb2756d92c717f2dcfa4 · licenses: ["Apache-2.0"] · count: null · events: null – null
content:
# -*- coding: utf-8 -*-
from square.api_helper import APIHelper
from square.http.api_response import ApiResponse
from square.api.base_api import BaseApi
class CardsApi(BaseApi):
"""A Controller to access Endpoints in the square API."""
def __init__(self, config, auth_managers, call_back=None):
super(CardsApi, self).__init__(config, auth_managers, call_back)
def list_cards(self,
cursor=None,
customer_id=None,
include_disabled=False,
reference_id=None,
sort_order=None):
"""Does a GET request to /v2/cards.
Retrieves a list of cards owned by the account making the request.
A max of 25 cards will be returned.
Args:
cursor (string, optional): A pagination cursor returned by a
previous call to this endpoint. Provide this to retrieve the
next set of results for your original query. See
[Pagination](https://developer.squareup.com/docs/basics/api101/
pagination) for more information.
customer_id (string, optional): Limit results to cards associated
with the customer supplied. By default, all cards owned by the
merchant are returned.
include_disabled (bool, optional): Includes disabled cards. By
default, all enabled cards owned by the merchant are
returned.
reference_id (string, optional): Limit results to cards associated
with the reference_id supplied.
sort_order (SortOrder, optional): Sorts the returned list by when
the card was created with the specified order. This field
defaults to ASC.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/cards'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'cursor': cursor,
'customer_id': customer_id,
'include_disabled': include_disabled,
'reference_id': reference_id,
'sort_order': sort_order
}
_query_builder = APIHelper.append_url_with_query_parameters(
_query_builder,
_query_parameters
)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
# Apply authentication scheme on request
self.apply_auth_schemes(_request, 'global')
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def create_card(self,
body):
"""Does a POST request to /v2/cards.
Adds a card on file to an existing merchant.
Args:
body (CreateCardRequest): An object containing the fields to POST
for the request. See the corresponding object definition for
field details.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/cards'
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'Content-Type': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
# Apply authentication scheme on request
self.apply_auth_schemes(_request, 'global')
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def retrieve_card(self,
card_id):
"""Does a GET request to /v2/cards/{card_id}.
Retrieves details for a specific Card.
Args:
card_id (string): Unique ID for the desired Card.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/cards/{card_id}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'card_id': {'value': card_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.get(_query_url, headers=_headers)
# Apply authentication scheme on request
self.apply_auth_schemes(_request, 'global')
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
def disable_card(self,
card_id):
"""Does a POST request to /v2/cards/{card_id}/disable.
Disables the card, preventing any further updates or charges.
Disabling an already disabled card is allowed but has no effect.
Args:
card_id (string): Unique ID for the desired Card.
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_url_path = '/v2/cards/{card_id}/disable'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'card_id': {'value': card_id, 'encode': True}
})
_query_builder = self.config.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.config.http_client.post(_query_url, headers=_headers)
# Apply authentication scheme on request
self.apply_auth_schemes(_request, 'global')
_response = self.execute_request(_request)
decoded = APIHelper.json_deserialize(_response.text)
if type(decoded) is dict:
_errors = decoded.get('errors')
else:
_errors = None
_result = ApiResponse(_response, body=decoded, errors=_errors)
return _result
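For context, a minimal usage sketch following the Square Python SDK's documented client pattern (the access token is a placeholder):

```python
from square.client import Client

# The SDK exposes CardsApi as client.cards.
client = Client(access_token="YOUR_SANDBOX_ACCESS_TOKEN", environment="sandbox")

result = client.cards.list_cards(include_disabled=True)
if result.is_success():
    print(result.body)
elif result.is_error():
    print(result.errors)
```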
stats: avg_line_length 36.194215 · max_line_length 120 · alphanum_fraction 0.612513 · classes 8,602 (score 0.982076) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 4,652 (score 0.531111)
---
hexsha: 02c8b5302247d3f0de4a0fcfd8043adc64146600 · size: 1,564 · ext: py · lang: Python
max_stars: setup.py · nilp0inter/threadedprocess @ 0120d6e795782c9f527397490846cd214d9196e1 · licenses: ["PSF-2.0"] · count: 9 · events: 2018-03-21T22:19:10.000Z – 2021-06-08T12:10:15.000Z
max_issues: setup.py · nilp0inter/threadedprocess @ 0120d6e795782c9f527397490846cd214d9196e1 · licenses: ["PSF-2.0"] · count: 3 · events: 2019-09-18T19:57:28.000Z – 2020-07-17T08:06:54.000Z
max_forks: setup.py · nilp0inter/threadedprocess @ 0120d6e795782c9f527397490846cd214d9196e1 · licenses: ["PSF-2.0"] · count: 4 · events: 2018-03-24T23:10:38.000Z – 2020-06-18T02:26:24.000Z
content:
import os
from setuptools import setup
try:
import concurrent.futures
except ImportError:
CONCURRENT_FUTURES_PRESENT = False
else:
CONCURRENT_FUTURES_PRESENT = True
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="threadedprocess",
version="0.0.5",
author="Roberto Abdelkader Martinez Perez",
author_email="robertomartinezp@gmail.com",
description=(
"A `ThreadedProcessPoolExecutor` is formed by a modified "
"`ProcessPoolExecutor` that generates processes that use a "
"`ThreadPoolExecutor` instance to run the given tasks."),
license="BSD",
keywords="concurrent futures executor process thread",
url="https://github.com/nilp0inter/threadedprocess",
py_modules=['threadedprocess'],
long_description=read('README.rst'),
install_requires=[] if CONCURRENT_FUTURES_PRESENT else ["futures"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License",
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
stats: avg_line_length 34 · max_line_length 71 · alphanum_fraction 0.658568 · classes 0 (score 0) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 887 (score 0.567136)
---
hexsha: 02c90b77315d19cadcdffd4cbada1b9dd920626e · size: 2,592 · ext: py · lang: Python
max_stars: coremltools/converters/mil/mil/passes/const_elimination.py · VadimLevin/coremltools @ 66c17b0fa040a0d8088d33590ab5c355478a9e5c · licenses: ["BSD-3-Clause"] · count: 3 · events: 2018-10-02T17:23:01.000Z – 2020-08-15T04:47:07.000Z
max_issues: coremltools/converters/mil/mil/passes/const_elimination.py · holzschu/coremltools @ 5ece9069a1487d5083f00f56afe07832d88e3dfa · licenses: ["BSD-3-Clause"] · count: null · events: null – null
max_forks: coremltools/converters/mil/mil/passes/const_elimination.py · holzschu/coremltools @ 5ece9069a1487d5083f00f56afe07832d88e3dfa · licenses: ["BSD-3-Clause"] · count: 1 · events: 2021-05-07T15:38:20.000Z – 2021-05-07T15:38:20.000Z
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
def get_const_mode(val):
# Heuristics to determine if a val should be file value or immediate
# value.
if isinstance(val, (str, bool, int)):
return "immediate_value"
if isinstance(val, (np.generic, np.ndarray)):
if val.size > 10:
return "file_value"
return "immediate_value"
raise ValueError("val {} not recognized.".format(val))
def const_elimination_block(block):
# shallow copy hides changes on f.operations during the loop
for op in list(block.operations):
if op.op_type == "const":
continue
for b in op.blocks:
const_elimination_block(b)
all_outputs_are_const = True
for i, o in enumerate(op.outputs):
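            # o.val is populated only when the output's value could be fully inferred at compile time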
if o.val is not None:
with block:
res = mb.const(
val=o.val,
mode=get_const_mode(o.val),
before_op=op,
# same var name, but different python
# instance does not violate SSA property.
name=o.name,
)
op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=op, old_var=o, new_var=res
)
# rename the const output
o.set_name(o.name+'_ignored')
else:
all_outputs_are_const = False
if all_outputs_are_const:
op.remove_from_block()
@register_pass(namespace="common")
def const_elimination(prog):
"""
prog: Program
# Replace non-const ops that have const Var
# outputs replaced with const op. Example:
#
# Given:
# %2, %3 = non_const_op(...) # %2 is const, %3 isn't const
# %4 = other_op(%2, %3)
#
# Result:
# _, %3 = non_const_op(...) # _ is the ignored output
# %2_const = const(mode=m) # %2_const name is for illustration only
# %4 = other_op(%2_const, %3)
#
# where m is 'file_value' / 'immediate_value' depending on heuristics
# in get_const_mode.
"""
for f_name, f in prog.functions.items():
const_elimination_block(f)
stats: avg_line_length 32 · max_line_length 83 · alphanum_fraction 0.58179 · classes 0 (score 0) · generators 0 (score 0) · decorators 685 (score 0.264275) · async_functions 0 (score 0) · documentation 1,108 (score 0.427469)
---
hexsha: 02c92d6241ebe854f6a64f06a949f8d5440cd141 · size: 13,749 · ext: py · lang: Python
max_stars: matching/retrieval.py · Macielyoung/sentence_representation_matching @ aa33147eb870a805f69dbc54c2177b11a94cf814 · licenses: ["Apache-2.0"] · count: 22 · events: 2022-01-24T10:08:39.000Z – 2022-03-31T10:47:05.000Z
max_issues: matching/retrieval.py · Macielyoung/sentence_representation_matching @ aa33147eb870a805f69dbc54c2177b11a94cf814 · licenses: ["Apache-2.0"] · count: 3 · events: 2022-03-06T11:52:25.000Z – 2022-03-15T06:32:17.000Z
max_forks: matching/retrieval.py · Macielyoung/sentence_representation_matching @ aa33147eb870a805f69dbc54c2177b11a94cf814 · licenses: ["Apache-2.0"] · count: 5 · events: 2022-02-28T09:13:04.000Z – 2022-03-22T12:50:09.000Z
content:
from simcse import SimCSE
from esimcse import ESimCSE
from promptbert import PromptBERT
from sbert import SBERT
from cosent import CoSent
from config import Params
from log import logger
import torch
from transformers import AutoTokenizer
class SimCSERetrieval(object):
def __init__(self, pretrained_model_path, simcse_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SimCSE(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(simcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class ESimCSERetrieval(object):
def __init__(self, pretrained_model_path, esimcse_path, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = ESimCSE(Params.pretrained_model, dropout)
self.checkpoint = torch.load(esimcse_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class PromptBertRetrieval(object):
def __init__(self, pretrained_model_path, promptbert_path, dropout):
super().__init__()
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
special_token_dict = {'additional_special_tokens': ['[X]']}
self.tokenizer.add_special_tokens(special_token_dict)
mask_id = self.tokenizer.convert_tokens_to_ids(Params.mask_token)
model = PromptBERT(pretrained_model_path, dropout, mask_id)
model.encoder.resize_token_embeddings(len(self.tokenizer))
checkpoint = torch.load(promptbert_path, map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
self.checkpoint = checkpoint
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_mask_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_mask_embedding = self.model.calculate_mask_embedding(prompt_encodings['input_ids'].to(device),
prompt_encodings['attention_mask'].to(device),
prompt_encodings['token_type_ids'].to(device))
return sentence_mask_embedding
def calculate_sentence_embedding(self, sentence):
device = "cpu"
prompt_sentence = Params.prompt_templates[0].replace("[X]", sentence)
sentence_num = len(self.tokenizer.tokenize(sentence))
template_sentence = Params.prompt_templates[0].replace("[X]", "[X]"*sentence_num)
prompt_encodings = self.tokenizer(prompt_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
template_encodings = self.tokenizer(template_sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(prompt_input_ids=prompt_encodings['input_ids'].to(device),
prompt_attention_mask=prompt_encodings['attention_mask'].to(device),
prompt_token_type_ids=prompt_encodings['token_type_ids'].to(device),
template_input_ids=template_encodings['input_ids'].to(device),
template_attention_mask=template_encodings['attention_mask'].to(device),
template_token_type_ids=template_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
# sentence1_embedding = self.calculate_sentence_mask_embedding(sentence1)
# sentence2_embedding = self.calculate_sentence_mask_embedding(sentence2)
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class SBERTRetrieval(object):
def __init__(self, pretrained_model_path, sbert_path, pool_type, dropout):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = SBERT(Params.pretrained_model, pool_type, dropout)
self.checkpoint = torch.load(sbert_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['train_loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
class CoSentRetrieval(object):
def __init__(self, pretrained_model_path, cosent_path):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_path)
model = CoSent(Params.pretrained_model, Params.cosent_pool_type, Params.cosent_dropout)
self.checkpoint = torch.load(cosent_path, map_location='cpu')
model.load_state_dict(self.checkpoint['model_state_dict'])
model.eval()
self.model = model
def print_checkpoint_info(self):
loss = self.checkpoint['loss']
epoch = self.checkpoint['epoch']
model_info = {'loss': loss, 'epoch': epoch}
return model_info
def calculate_sentence_embedding(self, sentence):
device = "cpu"
input_encodings = self.tokenizer(sentence,
padding=True,
truncation=True,
max_length=Params.max_length,
return_tensors='pt')
sentence_embedding = self.model(input_encodings['input_ids'].to(device),
input_encodings['attention_mask'].to(device),
input_encodings['token_type_ids'].to(device))
return sentence_embedding
def calculate_sentence_similarity(self, sentence1, sentence2):
sentence1 = sentence1.strip()
sentence2 = sentence2.strip()
sentence1_embedding = self.calculate_sentence_embedding(sentence1)
sentence2_embedding = self.calculate_sentence_embedding(sentence2)
similarity = torch.cosine_similarity(sentence1_embedding, sentence2_embedding, dim=-1)
similarity = float(similarity.item())
return similarity
simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
logger.info("started simcse model successfully!")
esimcse_repeat_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_repeat_model, Params.esimcse_repeat_dropout)
logger.info("started esimcse repeat model successfully!")
esimcse_same_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_same_model, Params.esimcse_same_dropout)
logger.info("started esimcse same model successfully!")
esimcse_multi_retrieval = ESimCSERetrieval(Params.pretrained_model, Params.esimcse_multi_model, Params.esimcse_multi_dropout)
logger.info("started esimcse multi model successfully!")
promptbert_retrieval = PromptBertRetrieval(Params.pretrained_model, Params.promptbert_model, Params.promptbert_dropout)
logger.info("started promptbert model successfully!")
sbert_retrieval = SBERTRetrieval(Params.pretrained_model, Params.sbert_model, Params.sbert_pool_type, Params.sbert_dropout)
logger.info("started sbert model successfully!")
cosent_retrieval = CoSentRetrieval(Params.pretrained_model, Params.cosent_model)
logger.info("started cosent model successfully!")
if __name__ == "__main__":
# model_path = "models/esimcse_0.32_0.15_160.pth"
# model_path = "models/esimcse_multi_0.15_64.pth"
# model_path = "models/esimcse_0.15_64.pth"
# simcse_retrieval = SimCSERetrieval(Params.pretrained_model, Params.simcse_model, Params.pool_type, Params.simcse_dropout)
# model_info = simcse_retrieval.print_checkpoint_info()
# print(model_info)
model_info = sbert_retrieval.print_checkpoint_info()
print(model_info)
while True:
print("input your sentence1:")
sentence1 = input()
print("input your sentence2:")
sentence2 = input()
sbert_sentence_similarity = sbert_retrieval.calculate_sentence_similarity(sentence1, sentence2)
# promptbert_sentence_similarity = prom.calculate_sentence_similarity(sentence1, sentence2)
# print("simcse sim: {}, promptbert sim: {}".format(simcse_sentence_similarity, promptbert_sentence_similarity))
print("sbert similarity: {}".format(sbert_sentence_similarity))
stats: avg_line_length 48.928826 · max_line_length 128 · alphanum_fraction 0.630664 · classes 11,228 (score 0.816641) · generators 0 (score 0) · decorators 0 (score 0) · async_functions 0 (score 0) · documentation 1,689 (score 0.122845)
---
hexsha: 02c969f151e36baef658a4ae669ed82de7db3bc7 · size: 15,885 · ext: py · lang: Python
max_stars: Backend/src/awattprice/notifications.py · a8/AWattPrice @ 008df74b66f4790276f847eecb4e05536d66b518 · licenses: ["BSD-3-Clause"] · count: null · events: null – null
max_issues: Backend/src/awattprice/notifications.py · a8/AWattPrice @ 008df74b66f4790276f847eecb4e05536d66b518 · licenses: ["BSD-3-Clause"] · count: 1 · events: 2021-05-31T06:07:21.000Z – 2021-05-31T06:07:21.000Z
max_forks: Backend/src/awattprice/notifications.py · a8/AWattPrice @ 008df74b66f4790276f847eecb4e05536d66b518 · licenses: ["BSD-3-Clause"] · count: null · events: null – null
content:
# -*- coding: utf-8 -*-
"""
Check which users apply to receive certain notifications.
Send notifications via APNs to those users.
"""
import asyncio
import json
from datetime import datetime
from math import floor
from pathlib import Path
from typing import List, Optional, Tuple
import arrow # type: ignore
import httpx
import jwt
from box import Box # type: ignore
from configupdater import ConfigUpdater # type: ignore
from dateutil.tz import tzstr
from fastapi import status
from loguru import logger as log
from tenacity import retry, stop_after_attempt, stop_after_delay, wait_exponential # type: ignore
from awattprice import poll
from awattprice.defaults import CURRENT_VAT, Region
from awattprice.token_manager import APNsTokenManager
from awattprice.types import APNSToken
from awattprice.utils import before_log
class DetailedPriceData:
def __init__(self, data: Box, region_identifier: int):
self.data = data
self.region_identifier = region_identifier
def get_user_prices(
self, below_value: int, region_identifier: int, vat_selection: int
) -> Tuple[List, Optional[int]]:
"""Returns a list of prices which drop below or on a certain value. Also returns a
integer which represents the lowest price point in the returned list.
The marketprices of the price points in the returned list have VAT added if the user selected it (if vat_selection is 1).
"""
below_price_data = []
lowest_index = None
current_index = 0
for price_point in self.data.prices:
timezone = tzstr("CET-1CEST,M3.5.0/2,M10.5.0/3").tzname(datetime.fromtimestamp(price_point.start_timestamp))
now_timezone = arrow.utcnow().to(timezone)
midnight = now_timezone.replace(hour=0, minute=0, second=0, microsecond=0)
tomorrow_boundary_start = midnight.shift(days=+1)
tomorrow_boundary_end = midnight.shift(days=+2)
marketprice_with_vat = price_point.marketprice
if region_identifier == 0 and vat_selection == 1:
marketprice_with_vat = round(price_point.marketprice * CURRENT_VAT, 2)
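            # keep only price points that fall entirely within tomorrow's local-time day window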
if (
price_point.start_timestamp >= tomorrow_boundary_start.timestamp
and price_point.end_timestamp <= tomorrow_boundary_end.timestamp
):
if marketprice_with_vat <= below_value:
below_price_data.append(
Box(
{
"start_timestamp": price_point.start_timestamp,
"marketprice": marketprice_with_vat,
} # Don't store end timestamp because a price point is always 1 hour long
)
)
if lowest_index is None:
lowest_index = current_index
else:
if marketprice_with_vat < below_price_data[lowest_index].marketprice:
lowest_index = current_index
current_index += 1
return below_price_data, lowest_index
class PriceDropsBelow:
# Use localization keys which are resolved on the client side
title_loc_key = "general.priceGuard"
body_loc_key_sing = "notifications.price_drops_below.body.sing" # Price drops below value only once
body_loc_key_mult = "notifications.price_drops_below.body.mult" # Price drops below value multiple times
collapse_id = "collapse.priceDropsBelowNotification.3DK203W0"
def get_body_loc_key(self, count: int) -> str:
if count == 1:
return self.body_loc_key_sing
else:
return self.body_loc_key_mult
class Notifications:
_is_initialized = False
def __init__(self, config: ConfigUpdater) -> None:
self.below_notification = PriceDropsBelow()
self.encryption_algorithm = "ES256"
try:
dev_team_id_path = Path(config.notifications.dev_team_id).expanduser()
self.dev_team_id = open(dev_team_id_path.as_posix(), "r").readlines()[0].replace("\n", "")
encryption_key_id_path = Path(config.notifications.apns_encryption_key_id).expanduser()
self.encryption_key_id = open(encryption_key_id_path.as_posix(), "r").readlines()[0].replace("\n", "")
encryption_key_path = Path(config.notifications.apns_encryption_key).expanduser()
self.encryption_key = open(encryption_key_path.as_posix(), "r").read()
self.url_path = "/3/device/{}"
except Exception as e:
log.warning(
f"Couldn't read or find file(s) containing required information to send notifications "
f"with APNs. Notifications won't be checked and won't be sent by the backend: {e}."
)
return
if config.notifications.use_sandbox:
log.debug("Using sandbox APNs server.")
self.apns_server_url = "https://api.sandbox.push.apple.com"
self.bundle_id = "me.space8.AWattPrice.dev"
else:
log.debug("Using production APNs server.")
self.apns_server_url = "https://api.push.apple.com"
self.bundle_id = "me.space8.AWattPrice"
self.apns_server_port = 443
self._is_initialized = True
@property
def is_initialized(self):
"""Return True if __init__ was successful."""
return self._is_initialized
async def handle_apns_response(db_manager, token, response, status_code, config):
# For reference of returned response and status codes see:
# https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/handling_notification_responses_from_apns
if status_code == status.HTTP_200_OK:
return
if status_code in [status.HTTP_400_BAD_REQUEST, status.HTTP_410_GONE]:
remove_token = False
if status_code == status.HTTP_410_GONE and response["reason"] == "Unregistered":
remove_token = True
if status_code == status.HTTP_400_BAD_REQUEST and response["reason"] in [
"BadDeviceToken",
"DeviceTokenNotForTopic",
]:
remove_token = True
if remove_token is True:
token_config = APNSToken(
token=token, region_identifier=0, vat_selection=0, config={}
) # Populate with token and some placeholder values
token_manager = APNsTokenManager(token_config, db_manager)
if not config.general.debug_mode:
token_manager.remove_entry()
log.debug(f"Removed invalid APNs token from database: {response}.")
@retry(
before=before_log(log, "debug"),
stop=(stop_after_delay(60) | stop_after_attempt(8)),
wait=wait_exponential(multiplier=1, min=4, max=10),
reraise=True,
)
async def price_drops_below_notification(
db_manager,
notification_defaults,
config,
price_data,
token,
below_value,
region_identifier,
vat_selection,
):
below_price_data, lowest_index = price_data.get_user_prices(below_value, region_identifier, vat_selection)
if below_price_data and lowest_index is not None:
lowest_point = below_price_data[lowest_index]
log.debug('Sending "Price Drops Below" notification to a user.')
# Get the current timezone (either CET or CEST)
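        # "CET-1CEST,M3.5.0/2,M10.5.0/3" is a POSIX TZ rule: UTC+1 (CET) with DST (CEST)
        # from the last Sunday of March at 02:00 to the last Sunday of October at 03:00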
timezone = tzstr("CET-1CEST,M3.5.0/2,M10.5.0/3").tzname(datetime.fromtimestamp(lowest_point.start_timestamp))
lowest_price_start = arrow.get(lowest_point.start_timestamp).to(timezone)
# Full cents, for example 4
lowest_price_floored = floor(lowest_point.marketprice)
# Decimal places of cent, for example 39
lowest_price_decimal = round((lowest_point.marketprice - lowest_price_floored) * 100)
# Together 4,39
formatted_lowest_price = f"{lowest_price_floored},{lowest_price_decimal}"
below_value_floored = floor(below_value)
below_value_decimal = round((below_value - below_value_floored) * 100)
formatted_below_value = f"{below_value_floored},{below_value_decimal}"
encryption_algorithm = notification_defaults.encryption_algorithm
# Set token data
# For reference see: https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/establishing_a_token-based_connection_to_apns
token_body = {
"iss": notification_defaults.dev_team_id,
"iat": arrow.utcnow().timestamp,
}
token_headers = {
"alg": notification_defaults.encryption_algorithm,
"kid": notification_defaults.encryption_key_id,
}
token_data_encoded = jwt.encode( # JWT is required by APNs for token based authentication
token_body,
notification_defaults.encryption_key,
algorithm=encryption_algorithm,
headers=token_headers,
)
# Set notification payload
# For reference see: https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/generating_a_remote_notification#2943365
notification_payload = {
"aps": {
"alert": {
"title-loc-key": notification_defaults.below_notification.title_loc_key,
"loc-key": notification_defaults.below_notification.get_body_loc_key(len(below_price_data)),
"loc-args": [
len(below_price_data),
formatted_below_value,
lowest_price_start.format("H"),
formatted_lowest_price,
],
},
"badge": 0,
"sound": "default",
"content-available": 0,
}
}
# Set request headers
# For reference see: https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/sending_notification_requests_to_apns
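        # apns-priority 5 lets APNs deliver the notification at a power-conserving moment;
        # the expiration is set relative to the start of the cheapest price point,
        # presumably so stale notifications are no longer delivered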
request_headers = {
"authorization": f"bearer {token_data_encoded}",
"apns-push-type": "alert",
"apns-topic": notification_defaults.bundle_id,
"apns-expiration": f"{lowest_price_start.timestamp - 3600}",
"apns-priority": "5",
"apns-collapse-id": notification_defaults.below_notification.collapse_id,
}
url = f"{notification_defaults.apns_server_url}:{notification_defaults.apns_server_port}{notification_defaults.url_path.format(token)}"
status_code = None
response = None
async with httpx.AsyncClient(http2=True) as client:
try:
response = await client.post(url, headers=request_headers, data=json.dumps(notification_payload))
except httpx.ConnectTimeout:
log.warning(f"Connect attempt to {url} timed out.")
raise
except httpx.ReadTimeout:
log.warning(f"Read from {url} timed out.")
raise
except Exception as e:
log.warning(f"Unrecognized exception at POST request to {url}: {e}.")
raise
else:
status_code = response.status_code
if response.content.decode("utf-8") == "":
data = {}
else:
try:
data = response.json()
except json.JSONDecodeError as e:
log.warning(f"Couldn't decode response from APNs servers: {e}")
raise
except Exception as e:
log.warning(f"Unknown error while decoding response from APNs servers: {e}")
raise
if response is not None and status_code is not None:
await handle_apns_response(db_manager, token, data, status_code, config)
async def check_and_send(config, data, data_region, db_manager):
    # Check which users qualify for certain notifications and send them to those users.
log.info("Checking and sending notifications.")
notification_defaults = Notifications(config)
if not notification_defaults.is_initialized:
return
all_data_to_check = {}
    checked_regions_no_notifications = []  # Regions already checked for which no notifications need to be sent
await db_manager.acquire_lock()
cursor = db_manager.db.cursor()
items = cursor.execute("SELECT * FROM token_storage;").fetchall()
cursor.close()
items = [dict(x) for x in items]
notification_queue = asyncio.Queue()
for notifi_config in items:
try:
configuration = json.loads(notifi_config["configuration"])["config"]
except Exception:
log.warning(
"Internally passed notification configuration of a user couldn't be read "
"while checking if the user should receive notifications."
)
continue
        # Check with the following if statement whether the user
        # wants to receive any notifications at all
if configuration["price_below_value_notification"]["active"] is True:
region_identifier = notifi_config["region_identifier"]
region = Region(region_identifier)
if region_identifier not in all_data_to_check:
                # Runs if the user is in a region other than the ones already
                # fetched for notification updates, so the aWATTar API is
                # polled for that region.
if region.value in checked_regions_no_notifications:
continue
if region == data_region:
region_check_notification = True
region_data = data
else:
region_data, region_check_notification = await poll.get_data(config=config, region=region)
if region_check_notification:
log.debug(f"Need to check and send notifications for data region {region.name}.")
all_data_to_check[region.value] = DetailedPriceData(Box(region_data), region.value)
else:
log.debug(f"Don't need to check and send notifications for data region {region.name}.")
checked_regions_no_notifications.append(region.value)
continue
token = notifi_config["token"]
vat_selection = notifi_config["vat_selection"]
if configuration["price_below_value_notification"]["active"] is True:
            # The user opted in to price-below-value notifications, so enqueue the check
below_value = configuration["price_below_value_notification"]["below_value"]
await notification_queue.put(
(
price_drops_below_notification,
db_manager,
notification_defaults,
config,
all_data_to_check[region.value],
token,
below_value,
region_identifier,
vat_selection,
)
)
tasks = []
    while not notification_queue.empty():
        task = await notification_queue.get()
        tasks.append(
            asyncio.create_task(
                task[0](*task[1:])
            )
        )
await asyncio.gather(*tasks)
await db_manager.release_lock()
log.info("All notifications checked (and sent) and all connections closed.")
| 41.25974
| 174
| 0.629902
| 4,683
| 0.294806
| 0
| 0
| 5,441
| 0.342524
| 10,171
| 0.64029
| 4,423
| 0.278439
|
02ca7b014cd9960cd4ff5fbac17c8225edc804e1
| 821
|
py
|
Python
|
examples/charts/horizon.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 1
|
2015-07-17T13:57:01.000Z
|
2015-07-17T13:57:01.000Z
|
examples/charts/horizon.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | null | null | null |
examples/charts/horizon.py
|
timelyportfolio/bokeh
|
a976a85535cf137c6238ce9e90b41ab14ae8ce22
|
[
"BSD-3-Clause"
] | 1
|
2021-08-01T08:38:53.000Z
|
2021-08-01T08:38:53.000Z
|
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Horizon, output_file, show
# read in some stock data from the Yahoo Finance API
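# NOTE: the ichart.yahoo.com endpoint has since been retired; these URLs are
# kept for illustration and will no longer return data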
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
output_file("horizon.html")
hp = Horizon(
xyvalues, index='Date',
title="horizon plot using stock inputs",
width=800, height=300
)
show(hp)
| 24.147059
| 77
| 0.662607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.459196
|
02cb62a1399f0fec841542f9ed5b1a63b1a1c4d2
| 4,023
|
py
|
Python
|
backend.py
|
CameronStollery/road-trip-planner
|
440207ccb9273123695c04ec3027b7822413bf2c
|
[
"MIT"
] | 1
|
2020-08-18T13:21:00.000Z
|
2020-08-18T13:21:00.000Z
|
backend.py
|
CameronStollery/road-trip-planner
|
440207ccb9273123695c04ec3027b7822413bf2c
|
[
"MIT"
] | 1
|
2020-08-11T10:24:49.000Z
|
2020-08-11T14:49:22.000Z
|
backend.py
|
CameronStollery/road-trip-planner
|
440207ccb9273123695c04ec3027b7822413bf2c
|
[
"MIT"
] | null | null | null |
# from __future__ import print_function
import pymzn
import time
from pprint import pprint
from collections import OrderedDict
import openrouteservice
from openrouteservice.geocode import pelias_search
from openrouteservice.distance_matrix import distance_matrix
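# NOTE: an openrouteservice API key must be supplied here for the requests below to succeed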
client = openrouteservice.Client(key='')
# routes = client.directions(coords)
# print(routes)
def geocode(address):
    return pelias_search(client, address, size=1)
def matrix(coordinates):
# query = {'locations': coordinates, 'metrics': ['duration']}
return distance_matrix(client, coordinates)
# TODO add error classes for distance matrix errors etc
def test_matrix():
    locations = [[8.34234, 48.23424], [8.34423, 48.26424], [8.34523, 48.24424], [8.41423, 48.21424]]
    return distance_matrix(client, locations, profile='driving-car', metrics=['duration'])
def compute_results(form_input):
details = OrderedDict() # A fixed order of the entities is needed for distance matrix calls
for item in form_input:
[field_type, entity] = item.split('-')
value = form_input[item]
if entity not in details:
details[entity] = {}
details[entity][field_type] = value
pprint(details)
for entity in details:
# Get missing coordinates and well-formatted address using openrouteservice API
if details[entity]['coords'] == '':
loc = geocode(details[entity]['addr'])
details[entity]['coords'] = loc['features'][0]['geometry']['coordinates']
            details[entity]['addr'] = loc['features'][0]['properties']['label']
# Otherwise, convert coordinates from string into list of floats and put lng before lat for ORS compatibility
else:
            details[entity]['coords'] = [float(c) for c in details[entity]['coords'][1:-1].split(', ')][::-1]
print('FILLED IN MISSING COORDS')
pprint(details)
coordinates_list = []
for entity_value in details.values():
coordinates_list.append(entity_value['coords'])
durations = matrix(coordinates_list)
print('DURATIONS:')
pprint(durations)
for i, entity_value in enumerate(details.values()):
entity_value['matrix-durations'] = durations['durations'][i]
print('Updated details:')
pprint(details)
# MiniZinc test code
try:
solns = pymzn.minizinc('minizinc-test.mzn', 'minizinc-test.dzn', data={'capacity': 20})
pprint(solns)
    except Exception as e:
        print('MiniZinc run failed:', e)
# details.append(solns)
return details
if __name__ == '__main__':
"""
This just contains testing code. Delete before deploying to production environment. Code in this file shoudl only
be accessed through the compute_results function.
"""
loc_details = geocode('5 Bolinda Pl')
print(loc_details['features'][0]['geometry']['coordinates'])
print(loc_details['features'][0]['properties']['label'])
# compute_results(test_input)
# pprint(test_matrix())
# people = []
# # Prompt user to enter all names and addresses
# personId = 1
# name = ""
# while name != "DONE":
# name = input("Enter the name of person " + str(personId) + " or type \"DONE\" when you have entered everyone.")
# if name != "DONE":
# address = input("Enter their address: ")
# loc = geocode(address)
# # pprint(loc)
# people.append({'id': personId, 'address': address, 'coords': loc['features'][0]['geometry']['coordinates']})
# personId += 1
# if people == []:
# print("You haven't entered any addresses.")
# else:
# coordinates = []
# for person in people:
# coordinates.append(person['coords'])
# # print(coordinates)
# distances = matrix(coordinates)
# # distances = testMatrix()
# pprint(distances)
| 34.384615
| 123
| 0.637584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,953
| 0.485459
|
02cbfb02bf42141ca374969c292f1c21a9ad8577
| 597
|
py
|
Python
|
examples/python/django/load-generator.py
|
ScriptBox99/pyroscope
|
fbf5bd297caf6a987f9fb6ffd0240ed804eaf9b4
|
[
"Apache-2.0"
] | 5,751
|
2021-01-01T18:58:15.000Z
|
2022-03-31T19:19:39.000Z
|
examples/python/django/load-generator.py
|
ScriptBox99/pyroscope
|
fbf5bd297caf6a987f9fb6ffd0240ed804eaf9b4
|
[
"Apache-2.0"
] | 913
|
2021-01-05T07:46:12.000Z
|
2022-03-31T20:04:39.000Z
|
examples/python/django/load-generator.py
|
admariner/pyroscope
|
e13afb40348914ae29b813881bfad0ca3b89f250
|
[
"Apache-2.0"
] | 329
|
2021-01-11T06:25:55.000Z
|
2022-03-29T08:19:33.000Z
|
import random
import requests
import time
HOSTS = [
'us-east-1',
'us-west-1',
'eu-west-1',
]
VEHICLES = [
'bike',
'scooter',
'car',
]
if __name__ == "__main__":
print(f"starting load generator")
time.sleep(15)
print('done sleeping')
while True:
        host = random.choice(HOSTS)
        vehicle = random.choice(VEHICLES)
print(f"requesting {vehicle} from {host}")
resp = requests.get(f'http://web:8000/{vehicle}')
print(f"received {resp}")
time.sleep(random.uniform(0.2, 0.4))
| 21.321429
| 64
| 0.582915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.309883
|
02cc889fddc76c8e78693be834e7b5343b8c87f5
| 3,148
|
py
|
Python
|
python/Multi-Service/content_moderator_cs.py
|
kyichii/cognitive-services-quickstart-code
|
a48549dd6b1fbb795fbe3cc5286c888306b6eb79
|
[
"MIT"
] | 2
|
2020-12-06T18:05:30.000Z
|
2020-12-09T17:01:21.000Z
|
python/Multi-Service/content_moderator_cs.py
|
diberry/cognitive-services-quickstart-code
|
53972838ff64937e099c6886ff4a3c019b2ef346
|
[
"MIT"
] | null | null | null |
python/Multi-Service/content_moderator_cs.py
|
diberry/cognitive-services-quickstart-code
|
53972838ff64937e099c6886ff4a3c019b2ef346
|
[
"MIT"
] | null | null | null |
import os
from pprint import pprint
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.contentmoderator import ContentModeratorClient
from azure.cognitiveservices.vision.contentmoderator.models import ( Evaluate, OCR, FoundFaces )
'''
This quickstart uses Content Moderator to moderate a list of images.
Uses the general Cognitive Services key/endpoint. It's used when you want to
combine many Cognitive Services with just one authentication key/endpoint.
Services are not combined here, but could be potentially.
Install the Content Moderator SDK from a command prompt or IDE terminal:
pip install --upgrade azure-cognitiveservices-vision-contentmoderator
The Content Moderator SDK:
https://docs.microsoft.com/en-us/python/api/azure-cognitiveservices-vision-contentmoderator/?view=azure-python
'''
# Add your Cognitive Services subscription key and endpoint to your environment variables.
subscription_key = os.environ['COGNITIVE_SERVICES_SUBSCRIPTION_KEY']
endpoint = os.environ['COGNITIVE_SERVICES_ENDPOINT']
# List of URL images used to moderate.
IMAGE_LIST = [
"https://moderatorsampleimages.blob.core.windows.net/samples/sample2.jpg",
"https://moderatorsampleimages.blob.core.windows.net/samples/sample5.png"
]
'''
AUTHENTICATE
Create a Content Moderator client.
'''
client = ContentModeratorClient(
endpoint=endpoint,
credentials=CognitiveServicesCredentials(subscription_key)
)
'''
CONTENT MODERATOR
This quickstart moderates an image, then text and faces within the image.
'''
print('IMAGE MODERATION')
print()
# Image moderation, using image at [0]
print("Evaluate the image '{}' for adult and racy content:".format(os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.evaluate_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[0])
assert isinstance(mod_image, Evaluate)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating text in an image, using image at [0]
print("\nDetect, extract, and moderate text for image {}:".format(
os.path.basename(IMAGE_LIST[0])))
mod_image = client.image_moderation.ocr_url_input(language="eng", content_type="application/json",
data_representation="URL", value=IMAGE_LIST[0], cache_image=True)
assert isinstance(mod_image, OCR)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
# Moderating faces in an image, using image at [1]
print("\nDetect faces and moderate for image {}:".format(
os.path.basename(IMAGE_LIST[1])))
mod_image = client.image_moderation.find_faces_url_input(content_type="application/json", cache_image=True,
data_representation="URL", value=IMAGE_LIST[1])
assert isinstance(mod_image, FoundFaces)
# Format for printing
mod_results = list(mod_image.as_dict().items())
for result in mod_results:
print(result)
print()
| 39.35
| 115
| 0.750318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,510
| 0.47967
|
02cee01f37525203bf54b532eb16ee060a8b571e
| 1,498
|
py
|
Python
|
usda_nutrition/admin.py
|
danielnaab/django-usda-nutrition
|
ba05bf741844a1858ad4bc2474e0640cba42994c
|
[
"BSD-3-Clause"
] | 11
|
2017-01-18T19:57:11.000Z
|
2021-06-19T08:03:21.000Z
|
usda_nutrition/admin.py
|
danielnaab/django-usda-nutrition
|
ba05bf741844a1858ad4bc2474e0640cba42994c
|
[
"BSD-3-Clause"
] | 3
|
2017-09-24T01:09:42.000Z
|
2021-08-11T02:44:55.000Z
|
usda_nutrition/admin.py
|
danielnaab/django-usda-nutrition
|
ba05bf741844a1858ad4bc2474e0640cba42994c
|
[
"BSD-3-Clause"
] | 8
|
2016-09-20T17:46:39.000Z
|
2020-04-24T16:20:44.000Z
|
from django.contrib import admin
from . import models
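# Mixin that marks every local and many-to-many field as read-only,
# making the admin pages effectively view-only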
class ReadOnlyAdminMixin:
def get_readonly_fields(self, request, obj=None):
return list(set(
[field.name for field in self.opts.local_fields] +
[field.name for field in self.opts.local_many_to_many]
))
class ReadOnlyAdmin(ReadOnlyAdminMixin, admin.ModelAdmin):
pass
class DerivationCodeAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FoodDescriptionAdmin(ReadOnlyAdmin):
list_display = ('ndb_no', 'food_group', 'short_desc')
class FoodGroupAdmin(ReadOnlyAdmin):
list_display = ('code', 'description')
class FootnoteAdmin(ReadOnlyAdmin):
list_display = ('pk', 'footnote_no', 'food_description', 'footnote_type')
class NutrientDefinitionAdmin(ReadOnlyAdmin):
list_display = ('nutrient_number', 'tagname', 'nutrient_description')
class SourceCodeAdmin(ReadOnlyAdmin):
list_display = ('source_code', 'description')
class WeightAdmin(ReadOnlyAdmin):
list_display = ('food_description', 'amount', 'measure_description')
admin.site.register(models.DerivationCode, DerivationCodeAdmin)
admin.site.register(models.FoodDescription, FoodDescriptionAdmin)
admin.site.register(models.FoodGroup, FoodGroupAdmin)
admin.site.register(models.Footnote, FootnoteAdmin)
admin.site.register(models.NutrientDefinition, NutrientDefinitionAdmin)
admin.site.register(models.SourceCode, SourceCodeAdmin)
admin.site.register(models.Weight, WeightAdmin)
| 28.264151
| 77
| 0.763017
| 1,002
| 0.668892
| 0
| 0
| 0
| 0
| 0
| 0
| 241
| 0.160881
|
02cf4f4a30c1e8ca50d5538321deac3fbf7ca247
| 1,779
|
py
|
Python
|
FisherExactTest/FisherExactTest.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
FisherExactTest/FisherExactTest.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
FisherExactTest/FisherExactTest.py
|
Ae-Mc/Fisher
|
166e3ac68e304ed7418393d6a7717dd6f7032c15
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
def Binominal(n: int, k: int) -> int:
if k > n:
return 0
result = 1
if k > n - k:
k = n - k
i = 1
while i <= k:
result *= n
result //= i
n -= 1
i += 1
return result
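# Hypergeometric probability of the observed 2x2 table:
# P = C(a+b, a) * C(c+d, c) / C(a+b+c+d, a+c)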
def pvalue(a: int, b: int,
c: int, d: int) -> Decimal:
return (Decimal(Binominal(a + b, a)
* Binominal(c + d, c))
/ Decimal(Binominal(a + b + c + d, a + c)))
def FisherLeftSide(a: int, b: int,
c: int, d: int,
baseP: Decimal) -> float:
p = 0.0
curP = float(baseP)
    while a > 0 and d > 0:
curP *= a * d
a -= 1
b += 1
c += 1
d -= 1
curP /= b * c
if curP <= baseP:
p += curP
return p
def FisherRightSide(a: int, b: int,
c: int, d: int,
baseP: Decimal) -> float:
    p = 0.0
    curP = float(baseP)
    while b > 0 and c > 0:
curP *= b * c
a += 1
b -= 1
c -= 1
d += 1
curP /= a * d
if curP <= baseP:
p += curP
return p
def FisherExact(a: int, b: int,
c: int, d: int) -> Decimal:
"""Calculate two-tailed Fisher's exact test for 2x2 continguency table
Args:
a: column 1 row 1
b: column 2 row 1
c: column 1 row 2
c: column 2 row 2
Returns:
Result of two-tailed Fisher's exact test stored in Decimal class
"""
if a == b == c == d:
return Decimal(1)
p = t = pvalue(a, b, c, d)
leftTail = Decimal(FisherLeftSide(a, b, c, d, t))
p += leftTail
rightTail = Decimal(FisherRightSide(a, b, c, d, t))
p += rightTail
return p
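# Example usage (illustrative values, not from the original source):
# FisherExact(8, 2, 1, 5) returns the two-tailed p-value for the
# contingency table [[8, 2], [1, 5]] as a Decimal.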
| 21.962963
| 74
| 0.441821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 280
| 0.157392
|
02cfb28047d612da27deb76d6967a4e3c09ca214
| 1,528
|
py
|
Python
|
Signature Detection and Analysis/signprocessing.py
|
andrevks/Document-Forgery-Detection
|
77dcde3867732a55cd0f4604627d7bf67a5e79a5
|
[
"MIT"
] | null | null | null |
Signature Detection and Analysis/signprocessing.py
|
andrevks/Document-Forgery-Detection
|
77dcde3867732a55cd0f4604627d7bf67a5e79a5
|
[
"MIT"
] | null | null | null |
Signature Detection and Analysis/signprocessing.py
|
andrevks/Document-Forgery-Detection
|
77dcde3867732a55cd0f4604627d7bf67a5e79a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 31 04:01:49 2018
@author: abhilasha
"""
#Team: GuardiansOfGalaxy
from PIL import Image, ImageEnhance
def enhance_signature(img):
bw = ImageEnhance.Color(img).enhance(0.0)
bright = ImageEnhance.Brightness(bw).enhance(2.2)
contrast = ImageEnhance.Contrast(bright).enhance(2.0)
sign = contrast.convert("RGBA")
datas = sign.getdata()
newData = []
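    # Treat near-white pixels (all RGB channels above 200) as background
    # and make them fully transparent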
for item in datas:
if item[0] > 200 and item[1] > 200 and item[2] > 200:
newData.append((255, 255, 255, 0))
else:
newData.append(item)
sign.putdata(newData)
sign.save("signature_alpha.png", "PNG")
def get_boxed_signature():
img = Image.open("signature_alpha.png")
img = img.convert("RGBA")
pixdata = img.load()
start_pixel = [img.size[0], img.size[1]]
end_pixel = [0,0]
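    # Scan for dark (ink) pixels to find the signature's bounding box:
    # start_pixel tracks the minimum x/y, end_pixel the maximum x/y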
for y in range(img.size[1]):
for x in range(img.size[0]):
if pixdata[x, y][0] < 200 and pixdata[x, y][1] < 200 and pixdata[x, y][2] < 200:
if x < start_pixel[0]:
start_pixel[0] = x
if y < start_pixel[1]:
start_pixel[1] = y
if x > end_pixel[0]:
end_pixel[0] = x
if y > end_pixel[1]:
end_pixel[1] = y
crop_box = (start_pixel[0]-20, start_pixel[1]-20, end_pixel[0]+20, end_pixel[1]+20)
signature = img.crop(crop_box)
signature.save('Pil.png')
if __name__ == "__main__":
filename = 'data\\Signature.jpg'
img = Image.open(filename)
enhance_signature(img)
get_boxed_signature()
| 25.466667
| 86
| 0.623037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 231
| 0.151178
|
02d171270f0a756f0c3935d386245a162e597707
| 23,802
|
py
|
Python
|
crowdsource/views.py
|
Code-and-Response/ISAC-SIMO-Repo-2
|
5b0cb0099e128dbacfdc53bf686ef2b069a51bc6
|
[
"PostgreSQL",
"Apache-2.0"
] | 5
|
2021-08-16T16:32:41.000Z
|
2022-02-22T03:47:49.000Z
|
crowdsource/views.py
|
Code-and-Response/ISAC-SIMO-Repo-2
|
5b0cb0099e128dbacfdc53bf686ef2b069a51bc6
|
[
"PostgreSQL",
"Apache-2.0"
] | 7
|
2021-04-12T14:48:48.000Z
|
2022-02-14T08:30:57.000Z
|
crowdsource/views.py
|
Code-and-Response/ISAC-SIMO-Repo-2
|
5b0cb0099e128dbacfdc53bf686ef2b069a51bc6
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2021-04-28T14:28:34.000Z
|
2021-04-28T14:28:34.000Z
|
from main.customdecorators import check_honeypot_conditional
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.http.response import HttpResponseRedirect, JsonResponse
from rest_framework.decorators import action
from api.models import ObjectType
from django.core.cache import cache
from crowdsource.forms import CrowdsourceForm, ImageShareForm
from crowdsource.helpers import delete_object, get_object, get_object_list, move_object, upload_object
from crowdsource.models import Crowdsource, ImageShare
from django.shortcuts import get_object_or_404, redirect, render
from main.authorization import login_url, is_admin_or_project_admin, is_admin
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core import serializers
from django.conf import settings
from django.http import HttpResponse
from django.contrib import messages
from django.core.paginator import Paginator
from django.db.models import Q
from rest_framework import generics, mixins, viewsets
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from .serializers import CrowdsourceSerializer, ImageShareSerializer
from rest_framework.response import Response
import uuid
import json
from django.utils import timezone
from datetime import timedelta
# View All Crowdsource Images + Update/Create
@check_honeypot_conditional
def crowdsource_images(request):
dash = request.user and not request.user.is_anonymous
if dash:
dash = "master/base.html"
else:
dash = "master/blank.html"
if request.method == "GET":
crowdsource_images = []
query = request.GET.get('q', '')
if request.user and not request.user.is_anonymous:
if (is_admin(request.user)):
crowdsource_images = Crowdsource.objects.order_by(
'-created_at').filter(Q(object_type__icontains=query) |
Q(image_type__icontains=query) |
Q(username__icontains=query)).distinct().all()
else:
crowdsource_images = Crowdsource.objects.filter(
created_by=request.user).order_by('-created_at').filter(Q(object_type__icontains=query) |
Q(image_type__icontains=query) |
Q(
username__icontains=query)).distinct().all()
paginator = Paginator(crowdsource_images, 50) # Show 50
page_number = request.GET.get('page', '1')
crowdsources = paginator.get_page(page_number)
else:
crowdsources = False
form = CrowdsourceForm(request=request)
return render(request, 'crowdsource_images.html',
{'crowdsources': crowdsources, 'form': form, 'query': query, 'dash': dash})
elif request.method == "POST":
if request.POST.get('id', False) and request.POST.get('id') != "0":
# EDIT
try:
if (is_admin(request.user)):
crowdsource_image = Crowdsource.objects.filter(
id=request.POST.get('id')).get()
elif request.user.is_authenticated:
crowdsource_image = Crowdsource.objects.filter(
created_by=request.user).filter(id=request.POST.get('id')).get()
else:
# Check if Session has that image
crowdsource_images = request.session.get('crowdsource_images', [])
crowdsource_image = False
for img in crowdsource_images:
if str(img.get('id')) == request.POST.get('id'):
crowdsource_image = Crowdsource.objects.filter(id=request.POST.get('id')).get()
if not crowdsource_image:
messages.error(request, "Invalid Crowdsource Image attempted to edit.")
return redirect("crowdsource")
form = CrowdsourceForm(
request.POST or None, instance=crowdsource_image, request=request)
old_object_key = crowdsource_image.bucket_key()
crowdsource_images = request.session.get('crowdsource_images', [])
if form.is_valid():
instance = form.save(commit=False)
if not instance.username:
if not request.user.is_authenticated:
instance.username = "Anonymous User - " + uuid.uuid4().hex[:6].upper()
instance.save()
# Updating the Session
for idx, val in enumerate(crowdsource_images):
if str(val.get("id")) == str(instance.id):
crowdsource_images[idx] = {'id': instance.id, 'file': instance.file.url,
'object_type': instance.object_type,
'image_type': instance.image_type, 'username': instance.username}
request.session['crowdsource_images'] = crowdsource_images
if request.user.is_authenticated and old_object_key != instance.bucket_key():
move_object(instance.bucket_key(), old_object_key)
messages.success(
request, "Crowdsource Image Updated Successfully")
return redirect("crowdsource")
            except Crowdsource.DoesNotExist:
pass
else:
# TODO: FOR NOW LIMIT 5 TOTAL UPLOADS BY SAME USER ( SAME BELOW )
if request.user.is_authenticated and not is_admin(request.user):
if (Crowdsource.objects.filter(created_by=request.user).count() >= 5):
messages.error(request, "Currently you can only upload 5 images. More will be enabled later.")
return redirect("crowdsource")
elif not request.user.is_authenticated:
if (len(request.session.get('crowdsource_images', [])) >= 5):
messages.error(request, "Currently you can only upload 5 images. More will be enabled later.")
return redirect("crowdsource")
# Create
total = 0
crowdsource_images = request.session.get('crowdsource_images', [])
for _file in request.FILES.getlist('file'):
request.FILES['file'] = _file
# If direct_upload is chosen by Admin. Upload directly to IBM BUCKET
if request.POST.get('direct_upload') and is_admin(request.user):
                    if isinstance(_file, InMemoryUploadedFile):
image_file_path = _file.open()
else:
image_file_path = open(_file.temporary_file_path(), 'rb')
ext = image_file_path.__str__().split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4().hex, ext)
key = request.POST.get('object_type', 'error') + '/' + filename
upload_object(key, image_file_path, opened=True)
total += 1
print(key + " Uploaded as DIRECT-UPLOAD to IBM COS Bucket")
else:
form = CrowdsourceForm(
request.POST or None, request.FILES or None, request=request)
if form.is_valid():
instance = form.save(commit=False)
if request.user.is_authenticated:
instance.created_by = request.user
else:
instance.username = "Anonymous User - " + uuid.uuid4().hex[:6].upper()
instance.save()
crowdsource_images.append(
{'id': instance.id, 'file': instance.file.url, 'object_type': instance.object_type,
'image_type': instance.image_type, 'username': instance.username})
request.session['crowdsource_images'] = crowdsource_images
if request.user.is_authenticated and settings.PRODUCTION:
upload_object(instance.bucket_key(), instance.filepath())
total += 1
if not request.user.is_authenticated or not is_admin(request.user):
if total >= 5:
                        break  # TODO: FOR NOW LIMIT 5 TOTAL UPLOADS BY SAME USER
messages.success(request, "Yay, " + str(total) + " New Image(s) Contributed to Crowdsource.")
return redirect("crowdsource")
messages.error(request, "Invalid Request")
return redirect("crowdsource")
@login_required(login_url=login_url)
def crowdsource_images_delete(request, id):
if request.method == "POST":
try:
if request.user.is_admin:
crowdsource_image = Crowdsource.objects.filter(id=id).get()
else:
crowdsource_image = Crowdsource.objects.filter(created_by=request.user).filter(id=id).get()
if crowdsource_image:
if not request.user.is_admin:
delete_object(crowdsource_image.bucket_key())
elif request.GET.get('bucket', '') != 'no':
delete_object(crowdsource_image.bucket_key())
crowdsource_image.file.delete()
crowdsource_image.delete()
messages.success(
request, 'Crowdsource Image Deleted Successfully!')
return redirect("crowdsource")
        except Crowdsource.DoesNotExist:
pass
messages.error(request, "Invalid Request")
return redirect("crowdsource")
@login_required(login_url=login_url)
def image_share_download(request, id):
    # Allow the user to download the image list for the object type they chose
if request.method == "POST":
if request.user.is_admin:
image_share = ImageShare.objects.filter(id=id).get()
else:
image_share = ImageShare.objects.filter(user=request.user).filter(id=id).get()
if image_share and image_share.object_type:
if image_share.status == "accepted": # If request was accepted
                if (timezone.now() - image_share.created_at).days < 30:  # Created less than 30 days ago: allow the download
                    images = get_object_list(image_share.object_type)  # Download the image list of the chosen object type
if images:
dump = json.dumps(images)
response = HttpResponse(dump, content_type='application/json')
response['Content-Disposition'] = 'attachment; filename='+image_share.object_type+'.json' # file name as object type
return response
else:
messages.error(request, "Unable to Download Image List at the moment. Might be empty.")
return HttpResponseRedirect(request.META.get('HTTP_REFERER','/'))
else:
messages.error(request, "Image Share Request has expired (older then 30 days). Please send another request.")
else:
messages.error(request, "Image Share Request has not been accepted.")
else:
messages.error(request, "Invalid Request")
return redirect("images_share")
#######
# API #
#######
class ResponseInfo(object):
def __init__(self, user=None, **args):
self.response = {
"data": args.get('data', []),
"page": args.get('message', '1'),
"object_types": args.get('object_types', [])
}
# Crowdsource Image API
class CrowdsourceView(viewsets.ModelViewSet):
queryset = Crowdsource.objects.all()
serializer_class = CrowdsourceSerializer
permission_classes = [AllowAny]
def __init__(self, **kwargs):
self.response_format = ResponseInfo().response
super(CrowdsourceView, self).__init__(**kwargs)
def get_queryset(self):
if self.action == 'list':
page = abs(int(self.request.GET.get('page', 1)))
offset = 100 * (page - 1)
limit = 100
offsetPlusLimit = offset + limit
query = self.request.GET.get('q', '')
if self.request.user.is_authenticated:
# ALL FOR ADMIN
if self.request.user.is_admin:
ids = Crowdsource.objects.order_by('-created_at').filter(Q(object_type__icontains=query) |
Q(image_type__icontains=query) |
Q(username__icontains=query)).values_list(
'pk', flat=True)[offset:offsetPlusLimit] # Latest 100
return Crowdsource.objects.filter(pk__in=list(ids)).order_by('-created_at')
# OWN FOR OTHER
else:
ids = Crowdsource.objects.order_by('-created_at').filter(created_by=self.request.user).filter(
Q(object_type__icontains=query) |
Q(image_type__icontains=query) |
Q(username__icontains=query)).values_list('pk', flat=True)[offset:offsetPlusLimit] # Latest 100
return Crowdsource.objects.filter(pk__in=list(ids)).order_by('-created_at')
else:
return []
else:
if self.request.user.is_authenticated:
# ALL FOR ADMIN
if self.request.user.is_admin:
return Crowdsource.objects.order_by('-created_at')
# OWN FOR OTHER
else:
return Crowdsource.objects.filter(created_by=self.request.user).order_by('-created_at')
else:
return []
def list(self, request, *args, **kwargs):
response_data = super(CrowdsourceView, self).list(request, *args, **kwargs)
self.response_format["data"] = response_data.data
page = str(abs(int(self.request.GET.get('page', 1))))
self.response_format["page"] = page
OBJECT_TYPE = cache.get('all_object_type_choices_json', [])
if not OBJECT_TYPE:
OBJECT_TYPE = [
{"value": "other", "title": "Other"}
]
all_object_types = ObjectType.objects.order_by('name').values_list('name', flat=True).distinct()
for o in all_object_types:
OBJECT_TYPE.append({"value": o, "title": o.title()})
cache.set('all_object_type_choices_json', OBJECT_TYPE, 3600)
self.response_format["object_types"] = OBJECT_TYPE
return Response(self.response_format)
def destroy(self, request, *args, **kwargs):
crowdsource_image = self.get_object()
delete_object(crowdsource_image.bucket_key())
crowdsource_image.file.delete()
return super().destroy(request, *args, **kwargs)
# PRUNE old ImageShare requests (check and remove old > 60 days requests)
def prune_old_image_share():
if not cache.get('prune_image_share'):
ImageShare.objects.filter(created_at__lte=timezone.now()-timedelta(days=60)).delete()
cache.set("prune_image_share", True, 86400) # Prune every 24 hours
# Image Share Views
@check_honeypot_conditional
@login_required(login_url=login_url)
def images_share(request):
    prune_old_image_share()
if request.method == "GET":
images_share = []
query = request.GET.get('q', '')
if request.user and not request.user.is_anonymous:
if (is_admin(request.user)):
images_share = ImageShare.objects.order_by(
'-created_at').filter(Q(object_type__icontains=query) |
Q(remarks__icontains=query) |
Q(status__icontains=query)).distinct().all()
else:
images_share = ImageShare.objects.filter(
user=request.user).order_by('-created_at').filter(Q(object_type__icontains=query) |
Q(remarks__icontains=query) |
Q(status__icontains=query)).distinct().all()
paginator = Paginator(images_share, 50) # Show 50
page_number = request.GET.get('page', '1')
images = paginator.get_page(page_number)
else:
images = False
form = ImageShareForm(request=request)
return render(request, 'images_share.html',
{'images_share': images, 'form': form, 'query': query})
elif request.method == "POST":
if request.POST.get('id', False) and request.POST.get('id') != "0":
# EDIT
try:
if (is_admin(request.user)):
share_image = ImageShare.objects.filter(id=request.POST.get('id')).get()
else:
share_image = ImageShare.objects.filter(user=request.user).filter(id=request.POST.get('id')).get()
if not share_image.status == "pending":
messages.error(request, "Image Request has already been " + share_image.status.title() + ". It cannot be edited now.")
return redirect("images_share")
form = ImageShareForm(
request.POST or None, instance=share_image, request=request)
if form.is_valid():
form.save()
messages.success(
request, "Image Request Updated Successfully")
return redirect("images_share")
except ImageShare.DoesNotExist:
pass
else:
# CREATE
form = ImageShareForm(request.POST or None, request=request)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
messages.success(
request, "Image Request Created Successfully")
return redirect("images_share")
messages.error(request, "Invalid Request")
return redirect("images_share")
def images_share_delete(request, id):
if request.method == "POST":
try:
if request.user.is_admin:
image = ImageShare.objects.filter(id=id).get()
else:
image = ImageShare.objects.filter(user=request.user).filter(id=id).get()
if image:
image.delete()
messages.success(
request, 'Image Request Deleted Successfully!')
return redirect("images_share")
except ImageShare.DoesNotExist:
pass
messages.error(request, "Invalid Request")
return redirect("images_share")
# Image Share API
class ImageShareView(viewsets.ModelViewSet):
queryset = ImageShare.objects.all()
serializer_class = ImageShareSerializer
permission_classes = [IsAuthenticated]
def __init__(self, **kwargs):
self.response_format = ResponseInfo().response
super(ImageShareView, self).__init__(**kwargs)
def get_queryset(self):
if self.action == 'list':
page = abs(int(self.request.GET.get('page', 1)))
offset = 100 * (page - 1)
limit = 100
offsetPlusLimit = offset + limit
query = self.request.GET.get('q', '')
if self.request.user.is_authenticated:
# ALL FOR ADMIN
if self.request.user.is_admin:
ids = ImageShare.objects.order_by(
'-created_at').filter(Q(object_type__icontains=query) |
Q(remarks__icontains=query) |
Q(status__icontains=query)).distinct().values_list(
'pk', flat=True)[offset:offsetPlusLimit]
return ImageShare.objects.filter(pk__in=list(ids)).order_by('-created_at')
# OWN FOR OTHER
else:
ids = ImageShare.objects.filter(
user=self.request.user).order_by('-created_at').filter(Q(object_type__icontains=query) |
Q(remarks__icontains=query) |
Q(status__icontains=query)).distinct().values_list('pk', flat=True)[offset:offsetPlusLimit]
return ImageShare.objects.filter(pk__in=list(ids)).order_by('-created_at')
else:
return []
else:
if self.request.user.is_authenticated:
# ALL FOR ADMIN
if self.request.user.is_admin:
return ImageShare.objects.order_by('-created_at')
# OWN FOR OTHER
else:
return ImageShare.objects.filter(user=self.request.user).order_by('-created_at')
else:
return []
def list(self, request, *args, **kwargs):
response_data = super(ImageShareView, self).list(request, *args, **kwargs)
self.response_format["data"] = response_data.data
page = str(abs(int(self.request.GET.get('page', 1))))
self.response_format["page"] = page
OBJECT_TYPE = cache.get('all_object_type_choices_json', [])
if not OBJECT_TYPE:
OBJECT_TYPE = [
{"value": "other", "title": "Other"}
]
all_object_types = ObjectType.objects.order_by('name').values_list('name', flat=True).distinct()
for o in all_object_types:
OBJECT_TYPE.append({"value": o, "title": o.title()})
cache.set('all_object_type_choices_json', OBJECT_TYPE, 3600)
self.response_format["object_types"] = OBJECT_TYPE
return Response(self.response_format)
@action(detail=True, methods=['POST'])
def download(self, request, pk=None, *args, **kwargs):
if request.user.is_admin:
image_share = ImageShare.objects.filter(id=pk).get()
else:
image_share = ImageShare.objects.filter(user=request.user).filter(id=pk).get()
if image_share and image_share.object_type:
if image_share.status == "accepted": # If request was accepted
                if (timezone.now() - image_share.created_at).days < 30:  # Created less than 30 days ago: allow the download
                    images = get_object_list(image_share.object_type)  # Download the image list of the chosen object type
if images:
dump = json.dumps(images)
return HttpResponse(dump, content_type='application/json')
else:
return JsonResponse({'message': 'Unable to Download Image List at the moment. Might be empty.'}, status=404)
else:
                    return JsonResponse({'message': 'Image Share Request has expired (older than 30 days). Please send another request.'}, status=404)
else:
return JsonResponse({'message': 'Image Share Request has not been accepted.'}, status=404)
return JsonResponse({'message': 'Invalid Request'}, status=404)
| 48.279919
| 150
| 0.56991
| 8,025
| 0.337157
| 0
| 0
| 14,763
| 0.620242
| 0
| 0
| 3,622
| 0.152172
|
02d235dc4031cc79fd9ab325030c238874738554
| 2,232
|
py
|
Python
|
epochCiCdApi/ita/viewsOperations.py
|
matsumoto-epoch/epoch
|
c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5
|
[
"Apache-2.0"
] | null | null | null |
epochCiCdApi/ita/viewsOperations.py
|
matsumoto-epoch/epoch
|
c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5
|
[
"Apache-2.0"
] | null | null | null |
epochCiCdApi/ita/viewsOperations.py
|
matsumoto-epoch/epoch
|
c4b1982e68aa8cb108e6ae9b1c0de489d40d4db5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi  # Import the CGI module
import cgitb
import sys
import requests
import json
import subprocess
import traceback
import os
import base64
import io
import logging
from django.shortcuts import render
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
ita_host = os.environ['EPOCH_ITA_HOST']
ita_port = os.environ['EPOCH_ITA_PORT']
ita_user = os.environ['EPOCH_ITA_USER']
ita_pass = os.environ['EPOCH_ITA_PASSWORD']
# Menu ID
ite_menu_operation = '2100000304'
ita_restapi_endpoint='http://' + ita_host + ':' + ita_port + '/default/menu/07_rest_api_ver1.php'
logger = logging.getLogger('apilog')
@require_http_methods(['GET'])
@csrf_exempt
def index(request):
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
logger.debug("CALL " + __name__ + ":{}".format(request.method))
if request.method == 'GET':
return get(request)
else:
return ""
@csrf_exempt
def get(request):
    # Build the HTTP headers
filter_headers = {
'host': ita_host + ':' + ita_port,
'Content-Type': 'application/json',
'Authorization': base64.b64encode((ita_user + ':' + ita_pass).encode()),
'X-Command': 'FILTER',
}
    #
    # Fetch the operation list
    #
opelist_resp = requests.post(ita_restapi_endpoint + '?no=' + ite_menu_operation, headers=filter_headers)
opelist_json = json.loads(opelist_resp.text)
logger.debug('---- Operation ----')
logger.debug(opelist_resp.text)
return JsonResponse(opelist_json, status=200)
| 28.987013
| 108
| 0.72043
| 0
| 0
| 0
| 0
| 914
| 0.398431
| 0
| 0
| 1,033
| 0.450305
|
02d4e6b3a3eee626ac3250b843b87270720d699e
| 56
|
py
|
Python
|
tests/test_init.py
|
keisuke-umezawa/chutil
|
df60440983c38a6dbbe4710019bcec5e83331904
|
[
"MIT"
] | 1
|
2019-02-16T06:20:50.000Z
|
2019-02-16T06:20:50.000Z
|
tests/test_init.py
|
keisuke-umezawa/chutil
|
df60440983c38a6dbbe4710019bcec5e83331904
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
keisuke-umezawa/chutil
|
df60440983c38a6dbbe4710019bcec5e83331904
|
[
"MIT"
] | null | null | null |
import chutil as module
def test_versions():
pass
| 9.333333
| 23
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02d56efb28c0baac4d608dce2e0ed1e45b667e10
| 932
|
py
|
Python
|
src/service/uri_generator.py
|
HalbardHobby/git-LFS-for-Lambda
|
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
|
[
"MIT"
] | null | null | null |
src/service/uri_generator.py
|
HalbardHobby/git-LFS-for-Lambda
|
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
|
[
"MIT"
] | null | null | null |
src/service/uri_generator.py
|
HalbardHobby/git-LFS-for-Lambda
|
d19ba6fc4605d5dc2dba52acb4236c68787f8bde
|
[
"MIT"
] | null | null | null |
"""Generates pre-signed uri's for blob handling."""
from boto3 import client
import os
s3_client = client('s3')
def create_uri(repo_name, resource_oid, upload=False, expires_in=300):
"""Create a download uri for the given oid and repo."""
action = 'get_object'
if upload:
action = 'put_object'
params = {'Bucket': os.environ['LFS_S3_BUCKET_NAME'],
'Key': repo_name + '/' + resource_oid}
return s3_client.generate_presigned_url(action, Params=params,
ExpiresIn=expires_in)
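# Example (hypothetical repo and oid): create_uri('my-repo', 'abc123', upload=True)
# returns a presigned PUT URL that expires after five minutes.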
def file_exists(repo_name, resource_oid):
"""Check if the file exists within the bucket."""
key = repo_name + '/' + resource_oid
response = s3_client.list_objects_v2(
Bucket=os.environ['LFS_S3_BUCKET_NAME'], Prefix=key)
for obj in response.get('Contents', []):
if obj['Key'] == key:
return True
return False
| 31.066667
| 71
| 0.626609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 257
| 0.275751
|
02d7c80b9c168487db13fab6edd36bd30ed15c3d
| 4,919
|
py
|
Python
|
rnn/chatbot/chatbot.py
|
llichengtong/yx4
|
17de7a6257a9f0c38e12089b2d1947927ec54c90
|
[
"Apache-2.0"
] | 128
|
2017-03-04T08:53:44.000Z
|
2020-06-05T11:19:16.000Z
|
rnn/chatbot/chatbot.py
|
github-jinwei/TensorFlowBook
|
17de7a6257a9f0c38e12089b2d1947927ec54c90
|
[
"Apache-2.0"
] | null | null | null |
rnn/chatbot/chatbot.py
|
github-jinwei/TensorFlowBook
|
17de7a6257a9f0c38e12089b2d1947927ec54c90
|
[
"Apache-2.0"
] | 120
|
2017-02-07T09:41:25.000Z
|
2022-03-17T00:57:59.000Z
|
# coding=utf8
import logging
import os
import random
import re
import numpy as np
import tensorflow as tf
from seq2seq_conversation_model import seq2seq_model
from seq2seq_conversation_model import data_utils
from seq2seq_conversation_model import tokenizer
from seq2seq_conversation_model.seq2seq_conversation_model import FLAGS, _buckets
from settings import SEQ2SEQ_MODEL_DIR
_LOGGER = logging.getLogger('track')
UNK_TOKEN_REPLACEMENT = [
'?',
'我不知道你在说什么',
'什么鬼。。。',
'宝宝不知道你在说什么呐。。。',
]
ENGLISHWORD_PATTERN = re.compile(r'[a-zA-Z0-9]')
def is_unichar_englishnum(char):
return ENGLISHWORD_PATTERN.match(char)
def trim(s):
"""
1. delete every space between chinese words
2. suppress extra spaces
:param s: some python string
:return: the trimmed string
"""
if not (isinstance(s, unicode) or isinstance(s, str)):
return s
unistr = s.decode('utf8') if type(s) != unicode else s
unistr = unistr.strip()
if not unistr:
return ''
trimmed_str = []
if unistr[0] != ' ':
trimmed_str.append(unistr[0])
for ind in xrange(1, len(unistr) - 1):
prev_char = unistr[ind - 1] if len(trimmed_str) == 0 else trimmed_str[-1]
cur_char = unistr[ind]
maybe_trim = cur_char == ' '
next_char = unistr[ind + 1]
if not maybe_trim:
trimmed_str.append(cur_char)
else:
if is_unichar_englishnum(prev_char) and is_unichar_englishnum(next_char):
trimmed_str.append(cur_char)
else:
continue
if unistr[-1] != ' ':
trimmed_str.append(unistr[-1])
return ''.join(trimmed_str)
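# Illustrative behaviour (example not from the original source):
# trim(u'你 好 abc  def') -> u'你好abc def'
# spaces survive only between ASCII alphanumeric words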
class Chatbot():
"""
answer an enquiry using trained seq2seq model
"""
def __init__(self, model_dir):
# Create model and load parameters.
self.session = tf.InteractiveSession()
self.model = self.create_model(self.session, model_dir, True)
self.model.batch_size = 1
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, "vocab%d" % FLAGS.vocab_size)
self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)
def create_model(self, session, model_dir, forward_only):
"""Create conversation model and initialize or load parameters in session."""
model = seq2seq_model.Seq2SeqModel(
FLAGS.vocab_size, FLAGS.vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
use_lstm=FLAGS.use_lstm,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
_LOGGER.info("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
_LOGGER.info("Read model parameter succeed!")
else:
raise ValueError(
"Failed to find legal model checkpoint files in %s" % model_dir)
return model
def generate_answer(self, enquiry):
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(enquiry, self.vocab, tokenizer.fmm_tokenizer)
if len(token_ids) == 0:
_LOGGER.error('lens of token ids of sentence %s is 0' % enquiry)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = self.model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = self.model.step(self.session, encoder_inputs,
decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if tokenizer.EOS_ID in outputs:
outputs = outputs[:outputs.index(tokenizer.EOS_ID)]
# Print out response sentence corresponding to outputs.
answer = " ".join([self.rev_vocab[output] for output in outputs])
if tokenizer._UNK in answer:
answer = random.choice(UNK_TOKEN_REPLACEMENT)
answer = trim(answer)
return answer
def close(self):
self.session.close()
if __name__ == "__main__":
m = Chatbot(SEQ2SEQ_MODEL_DIR + '/train/')
response = m.generate_answer(u'我知道你不知道我知道你不知道我说的是什么意思')
print response
| 36.708955
| 98
| 0.645456
| 3,085
| 0.614175
| 0
| 0
| 0
| 0
| 0
| 0
| 1,109
| 0.220784
|
02d7c976dba252653f990cef7776c119996e55c4
| 5,986
|
py
|
Python
|
chip8_pygame_integration/config_test.py
|
Artoooooor/chip8
|
d5132348f3081aeb9af19814d8251084ae723379
|
[
"MIT"
] | null | null | null |
chip8_pygame_integration/config_test.py
|
Artoooooor/chip8
|
d5132348f3081aeb9af19814d8251084ae723379
|
[
"MIT"
] | null | null | null |
chip8_pygame_integration/config_test.py
|
Artoooooor/chip8
|
d5132348f3081aeb9af19814d8251084ae723379
|
[
"MIT"
] | null | null | null |
import unittest
import pygame
from chip8_pygame_integration.config import get_config, KeyBind, to_text
DEFAULT = [KeyBind(pygame.K_o, pygame.KMOD_CTRL, 'some_command')]
class ConfigLoadTest(unittest.TestCase):
def setUp(self):
self.default = None
def test_empty_pattern_returns_empty_array(self):
self.assertEqual([], get_config((), []))
def test_single_command_pattern_parses_single_key(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
def test_two_command_pattern_parses_2_keys(self):
self.when_pattern_is((('comm1', 'comm2',),))
self.when_lines_are(['A D'])
self.expect_config([
KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_d, pygame.KMOD_NONE, 'comm2')])
def test_2_lines_pattern_parses_2_lines(self):
self.when_pattern_is((('comm1',), ('comm2',)))
self.when_lines_are(['A', 'D'])
self.expect_config([
KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_d, pygame.KMOD_NONE, 'comm2')])
def test_too_little_elements_in_line_return_default(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_lines_are(['A'])
self.when_default_is(DEFAULT)
self.expect_config(DEFAULT)
def test_ctrl_is_parsed_as_KMOD_CTRL(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['ctrl+A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_CTRL, 'comm1')])
def test_two_modifiers_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['ctrl+lshift+A'])
kmods = pygame.KMOD_CTRL | pygame.KMOD_LSHIFT
self.expect_config([KeyBind(pygame.K_a, kmods, 'comm1')])
def test_lowercase_keys_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['a'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
def test_lowercase_special_keys_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['space'])
self.expect_config(
[KeyBind(pygame.K_SPACE, pygame.KMOD_NONE, 'comm1')])
def test_uppercase_modifiers_are_parsed(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['LCTRL+A'])
self.expect_config([KeyBind(pygame.K_a, pygame.KMOD_LCTRL, 'comm1')])
def test_invalid_key_results_in_default(self):
self.when_pattern_is((('comm1',),))
self.when_lines_are(['F42'])
self.when_default_is(DEFAULT)
self.expect_config(DEFAULT)
def when_pattern_is(self, pattern):
self.pattern = pattern
def when_lines_are(self, lines):
self.lines = lines
def when_default_is(self, default):
self.default = default
def expect_config(self, config):
result = get_config(self.pattern, self.lines, self.default)
self.assertEqual(config, result)
class ConfigSaveTest(unittest.TestCase):
def test_empty_pattern_generates_empty_file(self):
self.assertEqual([], to_text((), []))
def test_one_command_generates_1_line(self):
self.when_pattern_is((('comm1',),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['a'])
def test_two_commands_generate_line_with_2_elements(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm1'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm2')])
self.expect_generated_text(['a b'])
def test_commands_are_generated_in_order_of_pattern(self):
self.when_pattern_is((('comm1', 'comm2'),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm2'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['b a'])
def test_two_lines_generate_2_lines_(self):
self.when_pattern_is((('comm1',), ('comm2',),))
self.when_config_is([KeyBind(pygame.K_a, pygame.KMOD_NONE, 'comm2'),
KeyBind(pygame.K_b, pygame.KMOD_NONE, 'comm1')])
self.expect_generated_text(['b', 'a'])
def test_KMOD_CTRL_generates_output(self):
self.expect_3_mod_versions_handled('ctrl')
def test_KMOD_SHIFT_generates_output(self):
self.expect_3_mod_versions_handled('shift')
def test_KMOD_ALT_generates_output(self):
self.expect_3_mod_versions_handled('alt')
def test_KMOD_META_generates_output(self):
self.expect_3_mod_versions_handled('meta')
def test_KMOD_CAPS_generates_output(self):
self.expect_mod_handled('caps')
def test_KMOD_NUM_generates_output(self):
self.expect_mod_handled('num')
def test_KMOD_MODE_generates_output(self):
self.expect_mod_handled('mode')
def expect_3_mod_versions_handled(self, baseModName):
self.expect_mod_handled(baseModName)
self.expect_mod_handled('l' + baseModName)
self.expect_mod_handled('r' + baseModName)
def expect_mod_handled(self, modName):
self.when_pattern_is((('comm1',),))
fieldName = 'KMOD_' + modName.upper()
mod = getattr(pygame, fieldName)
self.when_config_is([KeyBind(pygame.K_a, mod, 'comm1')])
expected = '{}+a'.format(modName)
self.expect_generated_text([expected])
def when_pattern_is(self, pattern):
self.pattern = pattern
def when_config_is(self, config):
self.config = config
def expect_generated_text(self, text):
text = self.add_newlines(text)
self.assertEqual(text, to_text(self.pattern, self.config))
def add_newlines(self, lines):
return [l + '\n' for l in lines]
if __name__ == '__main__':
unittest.main()
| 36.278788
| 77
| 0.664885
| 5,760
| 0.962245
| 0
| 0
| 0
| 0
| 0
| 0
| 444
| 0.074173
|
02d8c4f8a25b42b9035c973df73101d47ff6f388
| 1,934
|
py
|
Python
|
Trees/Binary Trees/Preorder_Binary_Tree.py
|
jarvis-1805/DSAwithPYTHON
|
872073d1b8d0001ea8b1a54b5e327dd0c1c406f2
|
[
"Apache-2.0"
] | 1
|
2021-03-21T18:54:34.000Z
|
2021-03-21T18:54:34.000Z
|
Trees/Binary Trees/Preorder_Binary_Tree.py
|
jarvis-1805/DSAwithPYTHON
|
872073d1b8d0001ea8b1a54b5e327dd0c1c406f2
|
[
"Apache-2.0"
] | null | null | null |
Trees/Binary Trees/Preorder_Binary_Tree.py
|
jarvis-1805/DSAwithPYTHON
|
872073d1b8d0001ea8b1a54b5e327dd0c1c406f2
|
[
"Apache-2.0"
] | null | null | null |
'''
Preorder Binary Tree
For a given Binary Tree of integers, print the pre-order traversal.
Input Format:
The first and the only line of input will contain the nodes data, all separated by a single space. Since -1 is used as an indication whether the left or right node data exist for root, it will not be a part of the node data.
Output Format:
The only line of output prints the pre-order traversal of the given binary tree.
Constraints:
1 <= N <= 10^6
Where N is the total number of nodes in the binary tree.
Time Limit: 1 sec
Sample Input 1:
5 6 10 2 3 -1 -1 -1 -1 -1 9 -1 -1
Sample Output 1:
5 6 2 3 9 10
Sample Input 2:
1 2 3 4 5 6 7 -1 -1 -1 -1 -1 -1 -1 -1
Sample Output 2:
1 2 4 5 3 6 7
'''
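# Worked example (traced from Sample 1): the level-order input
# "5 6 10 2 3 -1 -1 -1 -1 -1 9 -1 -1" builds root 5 with children 6 and 10,
# node 6 with children 2 and 3, and node 3 with right child 9; preorder
# (root, left, right) therefore prints 5 6 2 3 9 10.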
from sys import stdin, setrecursionlimit
import queue
setrecursionlimit(10 ** 6)
#Following the structure used for Binary Tree
class BinaryTreeNode:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def preOrder(root):
#Your code goes here
if root is None:
return
print(root.data, end=' ')
preOrder(root.left)
preOrder(root.right)
#Taking level-order input using fast I/O method
def takeInput():
levelOrder = list(map(int, stdin.readline().strip().split(" ")))
start = 0
length = len(levelOrder)
root = BinaryTreeNode(levelOrder[start])
start += 1
q = queue.Queue()
q.put(root)
while not q.empty():
currentNode = q.get()
leftChild = levelOrder[start]
start += 1
if leftChild != -1:
leftNode = BinaryTreeNode(leftChild)
            currentNode.left = leftNode
q.put(leftNode)
rightChild = levelOrder[start]
start += 1
if rightChild != -1:
rightNode = BinaryTreeNode(rightChild)
            currentNode.right = rightNode
q.put(rightNode)
return root
# Main
root = takeInput()
preOrder(root)
| 21.977273
| 224
| 0.643226
| 127
| 0.065667
| 0
| 0
| 0
| 0
| 0
| 0
| 834
| 0.431231
|
02da14e11f8f22cf912a874caefb9a62ca916f39
| 1,693
|
py
|
Python
|
test/pages/base_pages.py
|
gordonnguyen/10fastfingers-auto-type
|
624a6667b67743904791929a36d12b0f12f50e05
|
[
"MIT"
] | null | null | null |
test/pages/base_pages.py
|
gordonnguyen/10fastfingers-auto-type
|
624a6667b67743904791929a36d12b0f12f50e05
|
[
"MIT"
] | 1
|
2021-03-16T13:31:33.000Z
|
2021-03-16T13:31:33.000Z
|
test/pages/base_pages.py
|
gordonnguyen/10fastfingers-auto-type
|
624a6667b67743904791929a36d12b0f12f50e05
|
[
"MIT"
] | null | null | null |
'''
Superclasses for Page objects.
Serve as base templates for automating
page functions with Selenium.
'''
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from utils.best_buy.locators import Locators
from utils.best_buy import urls
class BasePage():
# Initialize browser
def __init__(self, driver):
self.driver = driver
# Wait for element to be clickable and click
def click(self, by_locator):
WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(by_locator)).click()
# Select element in webpage and fill text
def enter_text(self, by_locator, text):
WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(by_locator)).send_keys(text)
# Check if the current url is correct to use for this object
def is_correct_url(self, target_url):
if self.driver.current_url == target_url:
return True
else:
return False
class SignInPage(BasePage):
    def __init__(self, driver, email, password):
        super().__init__(driver)
self.url = urls.sign_in
self.email = email
self.password = password
def sign_in(self, driver):
# Fill email and password fields
self.enter_text(Locators.SignInPage.email_fld, self.email)
self.enter_text(Locators.SignInPage.password_fld, self.password)
        # Submit / sign-in button. Note: `BB_signin_selector` is not defined in
        # this class; it is assumed to be provided elsewhere (e.g. as a CSS class
        # name attribute for the sign-in button).
        driver.find_element_by_class_name(self.BB_signin_selector).click()
WebDriverWait(driver, 10).until(EC.url_changes(self.driver.current_url))
#WebDriverWait(driver, 10).until(EC.url_changes(current_signin_url))
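# Minimal usage sketch (hypothetical driver and credentials, assuming the
# corrected constructor above that accepts a driver):
# driver = webdriver.Chrome()
# page = SignInPage(driver, 'user@example.com', 'secret')
# page.sign_in(driver)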
| 34.55102
| 106
| 0.705848
| 1,371
| 0.809805
| 0
| 0
| 0
| 0
| 0
| 0
| 408
| 0.240992
|
02db29d58f9fcbf982055980d5e6b51e86d8c020
| 2,419
|
py
|
Python
|
Form-Filler.py
|
Zaidtech/AUTOMATION-SCRIPTS
|
88c83e1edca02b0b86f3de4981a5f27f398b4441
|
[
"MIT"
] | 4
|
2020-11-04T13:25:48.000Z
|
2022-03-29T01:21:49.000Z
|
Form-Filler.py
|
Zaidtech/AUTOMATION-SCRIPTS
|
88c83e1edca02b0b86f3de4981a5f27f398b4441
|
[
"MIT"
] | null | null | null |
Form-Filler.py
|
Zaidtech/AUTOMATION-SCRIPTS
|
88c83e1edca02b0b86f3de4981a5f27f398b4441
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This script has been tested on various custom Google forms and other forms,
with a few alterations.
Google forms which do include the input type "token" attribute are found
to be safer than those which don't.
Any form contains various fields:
1. input text fields
2. radio
3. checkboxes
4. textareas
5. Uploads --- important; still a work in progress.
"""
import re
import requests
from urllib.request import urlopen
from bs4 import BeautifulSoup
params = {}
url = input("Enter the website url")
page = urlopen(url)
bs_obj = BeautifulSoup(page, 'html.parser')
# bs_obj.prettify() --> it's effects on the tags buried deep in the divs
requests.session()
input_tags = bs_obj.find_all('input')
# print(input_tags)
form_action = bs_obj.find('form') # some pages have multiple form tags ...
text_tags = bs_obj.find_all('textarea')
for text in text_tags:
try:
print(text['name'])
text['name'] = "Running around and fill this form"
except:
print('Key Error')
# if form_action.attrs['action'] == "" or None:
# print("Form action not specifies")
# else:
# print(form_action)
url = form_action.attrs['action']
print(f"Post request is send in here: {url}")
# there might be some custom fields which need to be looked up and inspected manually, as they slip past the scraper
# like params['entry.377191685'] = 'Faculty'
# params['tos'] = 'true'
# vary accordingly, as an attack is just not that easy. ;-)
for tag in input_tags:
try:
print(tag.attrs['aria-label'])
except:
pass
try:
        if tag.attrs['value'] in ("", None):
tag.attrs['value'] = input(f"Enter the value of {tag.attrs['name']}")
params[tag.attrs['name']] = tag.attrs['value']
# except:
# value= input(f"Enter the value of {tag.attrs['name']}")
# params[tag.attrs['name']] = value
else:
params[tag.attrs['name']] = tag.attrs['value'].strip('\n')
except:
pass
print(params)
# getting the dicts as printed here... which is to be submitted
while True:
requests.session()
r = requests.post(url, data=params)
print(r.status_code)
# 200 OK ---> submitted
# 400 BAD REQUEST ERROR --> input data corrupt or server incompatible
# 401 UNAUTHORIZED ACCESS --> validation failed (need to deal with tokens and the cookies)
| 27.804598
| 107
| 0.653162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,506
| 0.622571
|
02dbc26cd1fbc18374360e0a5ad4732c9bed896a
| 1,306
|
py
|
Python
|
lektor_root_relative_path.py
|
a2csuga/lektor-root-relative-path
|
5c200bdae50a78a2a295a3c0bb5440004b7fa72a
|
[
"MIT"
] | 2
|
2018-10-20T10:45:25.000Z
|
2019-08-12T08:53:11.000Z
|
lektor_root_relative_path.py
|
a2csuga/lektor-root-relative-path
|
5c200bdae50a78a2a295a3c0bb5440004b7fa72a
|
[
"MIT"
] | 2
|
2018-09-21T14:35:33.000Z
|
2018-10-15T21:43:08.000Z
|
lektor_root_relative_path.py
|
a2csuga/lektor-root-relative-path
|
5c200bdae50a78a2a295a3c0bb5440004b7fa72a
|
[
"MIT"
] | 1
|
2017-11-30T12:58:08.000Z
|
2017-11-30T12:58:08.000Z
|
# -*- coding: utf-8 -*-
try:
# py3
from urllib.parse import urljoin, quote
except ImportError:
# py2
from urlparse import urljoin
from urllib import quote
from lektor.pluginsystem import Plugin
from furl import furl
class RootRelativePathPlugin(Plugin):
name = u'root-relative-path'
description = u'Returns root relative path list as tuple like \
[(toppage_url, toppage_name), ...(parent_url, parent_name), (url, name)]'
def on_setup_env(self, **extra):
navi_top_page_name = self.get_config().get('navi_top_page_name') or 'Top Page'
def root_relative_path_list(current_url):
url = '/'
name = navi_top_page_name
path_list = [(url, name)]
            # furl('/blog').path.segments returns ['blog'],
            # but furl('/').path.segments returns ['']
            # instead of []. So return the value here before entering the loop.
if current_url == '/':
return path_list
for i in furl(current_url).path.segments:
url = quote(urljoin(url, '%s' % i))
name = i
path_list.append((url, name))
url = url + '/'
return path_list
self.env.jinja_env.filters['root_relative_path_list'] = root_relative_path_list
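# Illustrative filter result (traced from the code above):
# root_relative_path_list('/blog/post') returns
# [('/', 'Top Page'), ('/blog', 'blog'), ('/blog/post', 'post')]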
| 31.853659
| 87
| 0.598009
| 1,065
| 0.815467
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.297856
|
02dbf3b5b09c9427c60b05103927121e020bab72
| 1,375
|
py
|
Python
|
controllers/main.py
|
dduarte-odoogap/odoo_jenkins
|
69bfcf088f75426c0e4b961a60b5c15a65b37979
|
[
"BSD-2-Clause"
] | 5
|
2018-10-26T19:52:45.000Z
|
2021-11-04T03:59:22.000Z
|
controllers/main.py
|
dduarte-odoogap/odoo_jenkins
|
69bfcf088f75426c0e4b961a60b5c15a65b37979
|
[
"BSD-2-Clause"
] | null | null | null |
controllers/main.py
|
dduarte-odoogap/odoo_jenkins
|
69bfcf088f75426c0e4b961a60b5c15a65b37979
|
[
"BSD-2-Clause"
] | 6
|
2017-11-10T07:15:40.000Z
|
2021-02-24T10:55:15.000Z
|
# -*- coding: utf-8 -*-
from odoo import http
from odoo.http import request
import jenkins
class JenkinsController(http.Controller):
@http.route('/web/jenkins/jobs', type='json', auth='user')
def jenkins_get_jobs(self, **kw):
params = request.env['ir.config_parameter']
jenkins_url = params.sudo().get_param('jenkins_ci.url', default='')
jenkins_user = params.sudo().get_param('jenkins_ci.user', default='')
jenkins_password = params.sudo().get_param('jenkins_ci.password', default='')
server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
res = []
jobs = server.get_jobs()
for job in jobs:
jid = {
"color": job['color'],
"name": job['name'],
"healthReport": server.get_job_info(job['name'])['healthReport']
}
res.append(jid)
return {
'jobs': res
}
@http.route('/web/jenkins/build', type='json', auth='user')
def jenkins_build_job(self, job, **kw):
        params = request.env['ir.config_parameter']
        jenkins_url = params.sudo().get_param('jenkins_ci.url', default='')
        jenkins_user = params.sudo().get_param('jenkins_ci.user', default='')
        jenkins_password = params.sudo().get_param('jenkins_ci.password', default='')
server = jenkins.Jenkins(jenkins_url, username=jenkins_user, password=jenkins_password)
res = server.build_job(job)
return {'result': res}
| 33.536585
| 95
| 0.611636
| 1,278
| 0.929455
| 0
| 0
| 1,225
| 0.890909
| 0
| 0
| 241
| 0.175273
|
02dc9cb7558321b5cc7729d952a58e8fc90917a1
| 996
|
py
|
Python
|
attendance.py
|
mykbgwl/Students-Attendance
|
bd9aef8cd12edff7fc47326fdeca6131eef575a6
|
[
"Apache-2.0"
] | null | null | null |
attendance.py
|
mykbgwl/Students-Attendance
|
bd9aef8cd12edff7fc47326fdeca6131eef575a6
|
[
"Apache-2.0"
] | 1
|
2021-05-11T08:23:13.000Z
|
2021-05-11T08:23:13.000Z
|
attendance.py
|
mykbgwl/Students-Attendance
|
bd9aef8cd12edff7fc47326fdeca6131eef575a6
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import pyzbar.pyzbar as pyzbar
import sys
import time
import pybase64
# Starting the webcam
capt = cv2.VideoCapture(0)
names = []
# Creating Attendees file
fob = open('attendees.txt', 'a+')
def enterData(z):
if z in names:
pass
else:
names.append(z)
        fob.write(str(z) + '\n')
return names
print('Reading code...')
# Function of Data present or not
def checkData(data):
    # pyzbar returns bytes; decode so comparisons against stored names work
    data = data.decode('utf-8') if isinstance(data, bytes) else str(data)
if data in names:
print('Already present')
else:
print('\n'+str(len(names)+1)+'\n'+'Attendance Marked')
enterData(data)
while True:
_, frame = capt.read()
decodedObject = pyzbar.decode(frame)
for obj in decodedObject:
checkData(obj.data)
time.sleep(1)
cv2.imshow('Frame', frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
cv2.destroyAllWindows()
break
fob.close()
| 17.473684
| 63
| 0.567269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.178715
|
02dcb415d51e0799783c6d81ac253913ae5ccdc0
| 19,562
|
py
|
Python
|
net.py
|
rishabnayak/SegAN
|
6f9415a079d8417ecebec3279338423286decf1c
|
[
"MIT"
] | null | null | null |
net.py
|
rishabnayak/SegAN
|
6f9415a079d8417ecebec3279338423286decf1c
|
[
"MIT"
] | null | null | null |
net.py
|
rishabnayak/SegAN
|
6f9415a079d8417ecebec3279338423286decf1c
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
from numpy.random import normal
from math import sqrt
import argparse
channel_dim = 3
ndf = 64
class GlobalConvBlock(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super(GlobalConvBlock, self).__init__()
pad0 = (kernel_size[0] - 1) // 2
pad1 = (kernel_size[1] - 1) // 2
self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[0], 1),
padding=(pad0, 0))
self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1, kernel_size[1]),
padding=(0, pad1))
self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1, kernel_size[1]),
padding=(0, pad1))
self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size[0], 1),
padding=(pad0, 0))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
x_l = self.conv_l1(x)
x_l = self.conv_l2(x_l)
x_r = self.conv_r1(x)
x_r = self.conv_r2(x_r)
# combine two paths
x = x_l + x_r
return x
class ResidualBlock(nn.Module):
def __init__(self, indim):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(indim, indim*2, kernel_size=1, bias=False)
self.norm1 = nn.BatchNorm2d(indim*2)
self.relu1 = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(
indim*2, indim*2, kernel_size=3, padding=1, bias=False)
self.norm2 = nn.BatchNorm2d(indim*2)
self.relu2 = nn.LeakyReLU(0.2, inplace=True)
self.conv3 = nn.Conv2d(indim*2, indim, kernel_size=1, bias=False)
self.norm3 = nn.BatchNorm2d(indim)
self.relu3 = nn.LeakyReLU(0.2, inplace=True)
# parameter initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
residual = self.conv1(x)
residual = self.relu1(residual)
residual = self.conv2(residual)
residual = self.relu2(residual)
residual = self.conv3(residual)
residual = self.relu3(residual)
out = x + residual
return out
class ResidualBlock_D(nn.Module):
def __init__(self, indim):
super(ResidualBlock_D, self).__init__()
self.conv1 = nn.Conv2d(indim, indim*2, kernel_size=1, bias=False)
self.norm1 = nn.BatchNorm2d(indim*2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(
indim*2, indim*2, kernel_size=3, padding=1, bias=False)
self.norm2 = nn.BatchNorm2d(indim*2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(indim*2, indim, kernel_size=1, bias=False)
self.norm3 = nn.BatchNorm2d(indim)
self.relu3 = nn.ReLU(inplace=True)
# parameter initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def forward(self, x):
residual = self.conv1(x)
residual = self.relu1(residual)
residual = self.conv2(residual)
residual = self.relu2(residual)
residual = self.conv3(residual)
residual = self.relu3(residual)
out = x + residual
return out
class NetS(nn.Module):
def __init__(self, ngpu):
super(NetS, self).__init__()
self.ngpu = ngpu
self.convblock1 = nn.Sequential(
# input is (channel_dim) x 128 x 128
nn.Conv2d(channel_dim, ndf, 7, 2, 3, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 64 x 64
)
self.convblock1_1 = ResidualBlock(ndf)
self.convblock2 = nn.Sequential(
# state size. (ndf) x 64 x 64
nn.Conv2d(ndf, ndf * 2, 5, 2, 2, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 32 x 32
)
self.convblock2_1 = ResidualBlock(ndf*2)
self.convblock3 = nn.Sequential(
# state size. (ndf*2) x 32 x 32
nn.Conv2d(ndf * 2, ndf * 4, 5, 2, 2, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 16 x 16
)
self.convblock3_1 = ResidualBlock(ndf*4)
self.convblock4 = nn.Sequential(
# state size. (ndf*4) x 16 x 16
nn.Conv2d(ndf * 4, ndf * 8, 5, 2, 2, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 8 x 8
)
self.convblock4_1 = ResidualBlock(ndf*8)
self.convblock5 = nn.Sequential(
# state size. (ndf*8) x 8 x 8
nn.Conv2d(ndf * 8, ndf * 8, 5, 2, 2, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
)
self.convblock5_1 = ResidualBlock(ndf*8)
self.convblock6 = nn.Sequential(
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*16) x 2 x 2
)
self.convblock6_1 = nn.Sequential(
# state size. (ndf*16) x 2 x 2
nn.Conv2d(ndf * 16, ndf * 16, kernel_size=1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*16) x 2 x 2
)
self.convblock7 = nn.Sequential(
# state size. (ndf*16) x 2 x 2
nn.Conv2d(ndf * 16, ndf * 32, 3, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*32) x 1 x 1
)
# self.convblock7_1 = ResidualBlock(ndf*32)
self.convblock8 = nn.Sequential(
# state size. (ndf*32) x 1 x 1
nn.Conv2d(ndf * 32, ndf * 8, kernel_size=1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 1 x 1
)
self.deconvblock1 = nn.Sequential(
# state size. (ngf*8) x 1 x 1
nn.ConvTranspose2d(ndf * 8, ndf * 32, kernel_size=1, bias=False),
nn.BatchNorm2d(ndf * 32),
nn.ReLU(True),
# state size. (ngf*32) x 1 x 1
)
self.deconvblock2 = nn.Sequential(
# state size. (cat: ngf*32) x 1 x 1
nn.Conv2d(ndf * 64, ndf * 16, 3, 1, 1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 2 x 2
)
self.deconvblock2_1 = nn.Sequential(
# state size. (ndf*16) x 2 x 2
nn.Conv2d(ndf * 16, ndf * 16, kernel_size=1, bias=False),
nn.BatchNorm2d(ndf * 16),
nn.ReLU(inplace=True),
# state size. (ndf*16) x 2 x 2
)
self.deconvblock3 = nn.Sequential(
# state size. (cat: ngf*16) x 2 x 2
nn.Conv2d(ndf * 16 * 2, ndf * 8, 3, 1, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
)
self.deconvblock3_1 = ResidualBlock_D(ndf*8)
self.deconvblock4 = nn.Sequential(
# state size. (ngf*8) x 4 x 4
GlobalConvBlock(ndf*8*2, ndf*8, (7, 7)),
# nn.ConvTranspose2d(ndf * 8 * 2, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 8 x 8
)
self.deconvblock4_1 = ResidualBlock_D(ndf*8)
self.deconvblock5 = nn.Sequential(
# state size. (ngf*8) x 8 x 8
GlobalConvBlock(ndf*8*2, ndf*4, (7, 7)),
# nn.ConvTranspose2d(ndf * 8 * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 16 x 16
)
self.deconvblock5_1 = ResidualBlock_D(ndf*4)
self.deconvblock6 = nn.Sequential(
# state size. (ngf*4) x 16 x 16
GlobalConvBlock(ndf*4*2, ndf*2, (9, 9)),
# nn.ConvTranspose2d(ndf * 4 * 2, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 32 x 32
)
self.deconvblock6_1 = ResidualBlock_D(ndf*2)
self.deconvblock7 = nn.Sequential(
# state size. (ngf*2) x 32 x 32
GlobalConvBlock(ndf*2*2, ndf, (9, 9)),
# nn.ConvTranspose2d(ndf * 2 * 2, ndf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf),
nn.ReLU(True),
# state size. (ngf) x 64 x 64
)
self.deconvblock7_1 = ResidualBlock_D(ndf)
self.deconvblock8 = nn.Sequential(
# state size. (ngf) x 64 x 64
GlobalConvBlock(ndf*2, ndf, (11, 11)),
# nn.ConvTranspose2d( ndf * 2, ndf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf),
nn.ReLU(True),
# state size. (ngf) x 128 x 128
)
self.deconvblock8_1 = ResidualBlock_D(ndf)
self.deconvblock9 = nn.Sequential(
# state size. (ngf) x 128 x 128
nn.Conv2d(ndf, 1, 5, 1, 2, bias=False),
# state size. (channel_dim) x 128 x 128
# nn.Sigmoid()
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input):
# for now it only supports one GPU
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu == 1:
encoder1 = self.convblock1(input)
encoder1 = self.convblock1_1(encoder1)
encoder2 = self.convblock2(encoder1)
encoder2 = self.convblock2_1(encoder2)
encoder3 = self.convblock3(encoder2)
encoder3 = self.convblock3_1(encoder3)
encoder4 = self.convblock4(encoder3)
encoder4 = self.convblock4_1(encoder4)
encoder5 = self.convblock5(encoder4)
encoder5 = self.convblock5_1(encoder5)
encoder6 = self.convblock6(encoder5)
encoder6 = self.convblock6_1(encoder6) + encoder6
encoder7 = self.convblock7(encoder6)
encoder8 = self.convblock8(encoder7)
decoder1 = self.deconvblock1(encoder8)
decoder1 = torch.cat([encoder7, decoder1], 1)
decoder1 = F.upsample(decoder1, size=encoder6.size()[
2:], mode='bilinear')
decoder2 = self.deconvblock2(decoder1)
decoder2 = self.deconvblock2_1(decoder2) + decoder2
# concatenate along depth dimension
decoder2 = torch.cat([encoder6, decoder2], 1)
decoder2 = F.upsample(decoder2, size=encoder5.size()[
2:], mode='bilinear')
decoder3 = self.deconvblock3(decoder2)
decoder3 = self.deconvblock3_1(decoder3)
decoder3 = torch.cat([encoder5, decoder3], 1)
decoder3 = F.upsample(decoder3, size=encoder4.size()[
2:], mode='bilinear')
decoder4 = self.deconvblock4(decoder3)
decoder4 = self.deconvblock4_1(decoder4)
decoder4 = torch.cat([encoder4, decoder4], 1)
decoder4 = F.upsample(decoder4, size=encoder3.size()[
2:], mode='bilinear')
decoder5 = self.deconvblock5(decoder4)
decoder5 = self.deconvblock5_1(decoder5)
decoder5 = torch.cat([encoder3, decoder5], 1)
decoder5 = F.upsample(decoder5, size=encoder2.size()[
2:], mode='bilinear')
decoder6 = self.deconvblock6(decoder5)
decoder6 = self.deconvblock6_1(decoder6)
decoder6 = torch.cat([encoder2, decoder6], 1)
decoder6 = F.upsample(decoder6, size=encoder1.size()[
2:], mode='bilinear')
decoder7 = self.deconvblock7(decoder6)
decoder7 = self.deconvblock7_1(decoder7)
decoder7 = torch.cat([encoder1, decoder7], 1)
decoder7 = F.upsample(decoder7, size=input.size()[
2:], mode='bilinear')
decoder8 = self.deconvblock8(decoder7)
decoder8 = self.deconvblock8_1(decoder8)
decoder9 = self.deconvblock9(decoder8)
else:
print('For now we only support one GPU')
return decoder9
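# Minimal shape check (illustrative; assumes a CUDA device, since forward()
# only runs on torch.cuda.FloatTensor inputs):
# netS = NetS(ngpu=1).cuda()
# x = torch.randn(1, channel_dim, 128, 128).cuda()
# print(netS(x).size())  # expected: torch.Size([1, 1, 128, 128])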
class NetC(nn.Module):
def __init__(self, ngpu):
super(NetC, self).__init__()
self.ngpu = ngpu
self.convblock1 = nn.Sequential(
# input is (channel_dim) x 128 x 128
nn.Conv2d(channel_dim, ndf, 7, 2, 3, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf) x 64 x 64
)
self.convblock1_1 = nn.Sequential(
# state size. (ndf) x 64 x 64
GlobalConvBlock(ndf, ndf * 2, (13, 13)),
# nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*2) x 64 x 64
)
self.convblock2 = nn.Sequential(
# state size. (ndf * 2) x 64 x 64
nn.Conv2d(ndf * 1, ndf * 2, 5, 2, 2, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*2) x 32 x 32
)
self.convblock2_1 = nn.Sequential(
# input is (ndf*2) x 32 x 32
GlobalConvBlock(ndf * 2, ndf * 4, (11, 11)),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*4) x 32 x 32
)
self.convblock3 = nn.Sequential(
# state size. (ndf * 4) x 32 x 32
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*4) x 16 x 16
)
self.convblock3_1 = nn.Sequential(
# input is (ndf*4) x 16 x 16
GlobalConvBlock(ndf * 4, ndf * 8, (9, 9)),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf * 8) x 16 x 16
)
self.convblock4 = nn.Sequential(
# state size. (ndf*4) x 16 x 16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*8) x 8 x 8
)
self.convblock4_1 = nn.Sequential(
# input is (ndf*8) x 8 x 8
GlobalConvBlock(ndf * 8, ndf * 16, (7, 7)),
nn.BatchNorm2d(ndf * 16),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*16) x 8 x 8
)
self.convblock5 = nn.Sequential(
# state size. (ndf*8) x 8 x 8
nn.Conv2d(ndf * 8, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*16) x 4 x 4
)
self.convblock5_1 = nn.Sequential(
# input is (ndf*16) x 4 x 4
nn.Conv2d(ndf * 16, ndf * 32, 3, 1, 1, bias=False),
nn.BatchNorm2d(ndf * 32),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*32) x 4 x 4
)
self.convblock6 = nn.Sequential(
# state size. (ndf*32) x 4 x 4
nn.Conv2d(ndf * 8, ndf * 8, 3, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# nn.Dropout2d(p=0.2),
# state size. (ndf*32) x 2 x 2
)
# self._initialize_weights()
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.zero_()
def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu == 1:
batchsize = input.size()[0]
out1 = self.convblock1(input)
# out1 = self.convblock1_1(out1)
out2 = self.convblock2(out1)
# out2 = self.convblock2_1(out2)
out3 = self.convblock3(out2)
# out3 = self.convblock3_1(out3)
out4 = self.convblock4(out3)
# out4 = self.convblock4_1(out4)
out5 = self.convblock5(out4)
# out5 = self.convblock5_1(out5)
out6 = self.convblock6(out5)
# out6 = self.convblock6_1(out6) + out6
output = torch.cat((input.view(batchsize, -1), 1*out1.view(batchsize, -1),
2*out2.view(batchsize, -1), 2 *
out3.view(batchsize, -1),
2*out4.view(batchsize, -1), 2 *
out5.view(batchsize, -1),
4*out6.view(batchsize, -1)), 1)
else:
print('For now we only support one GPU')
return output
| 41.183158
| 86
| 0.517227
| 19,384
| 0.990901
| 0
| 0
| 0
| 0
| 0
| 0
| 2,988
| 0.152745
|
02dd42303eb7feb71bc2c94bd3f296e7b8ac9419
| 2,155
|
py
|
Python
|
karmagrambot/__init__.py
|
caiopo/karmagrambot
|
00935d0de228e516047bc3848344290be2cfcc0f
|
[
"MIT"
] | null | null | null |
karmagrambot/__init__.py
|
caiopo/karmagrambot
|
00935d0de228e516047bc3848344290be2cfcc0f
|
[
"MIT"
] | null | null | null |
karmagrambot/__init__.py
|
caiopo/karmagrambot
|
00935d0de228e516047bc3848344290be2cfcc0f
|
[
"MIT"
] | null | null | null |
import dataset
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from .config import TOKEN, DB_URI
from .commands import HANDLERS
logging.basicConfig()
def save_message(message, db):
replied = None
if message.reply_to_message is not None:
replied = message.reply_to_message.message_id
length = None
if message.text is not None:
length = len(message.text)
elif message.caption is not None:
length = len(message.caption)
vote = None
if message.text == '+':
vote = '+'
elif message.text == '-':
vote = '-'
new_row = {
'timestamp': message.date,
'message_id': message.message_id,
'chat_id': message.chat_id,
'user_id': message.from_user.id,
'replied': replied,
'length': length,
'vote': vote,
}
db['messages'].upsert(new_row, keys=['message_id', 'chat_id'])
def save_user(user, db):
table = db['users']
new_row = {
'user_id': user.id,
'first_name': user.first_name,
'last_name': user.last_name,
'username': user.username,
}
if table.find_one(user_id=user.id) is None:
new_row['tracked'] = True
table.insert(new_row)
else:
table.update(new_row, keys=['user_id'])
def save(bot, update):
db = dataset.connect(DB_URI)
save_message(update.message, db)
save_user(update.message.from_user, db)
def track(user_id, value):
db = dataset.connect(DB_URI)
table = db['users']
new_row = {
'user_id': user_id,
'tracked': value,
}
table.upsert(new_row, keys=['user_id'])
def opt_in(bot, update):
track(update.message.from_user.id, True)
def opt_out(bot, update):
track(update.message.from_user.id, False)
def run():
updater = Updater(TOKEN)
handlers = HANDLERS + [
CommandHandler('opt_in', opt_in),
CommandHandler('opt_out', opt_out),
MessageHandler(Filters.all, save), # must be last
]
for h in handlers:
updater.dispatcher.add_handler(h)
updater.start_polling()
updater.idle()
| 21.767677
| 73
| 0.620418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.110905
|
02dff69165c131d9f3101aa1c12186dc1957dfcb
| 647
|
py
|
Python
|
app/services/articles.py
|
StanislavRud/api-realword-app-test
|
9a49f299b02cec26d237f3bc4b363c8b93520b7b
|
[
"MIT"
] | 1,875
|
2019-03-27T14:26:20.000Z
|
2022-03-31T14:52:50.000Z
|
app/services/articles.py
|
StanislavRud/api-realword-app-test
|
9a49f299b02cec26d237f3bc4b363c8b93520b7b
|
[
"MIT"
] | 232
|
2019-04-11T11:05:48.000Z
|
2022-03-05T10:23:50.000Z
|
app/services/articles.py
|
StanislavRud/api-realword-app-test
|
9a49f299b02cec26d237f3bc4b363c8b93520b7b
|
[
"MIT"
] | 433
|
2019-04-11T01:48:59.000Z
|
2022-03-31T10:33:42.000Z
|
from slugify import slugify
from app.db.errors import EntityDoesNotExist
from app.db.repositories.articles import ArticlesRepository
from app.models.domain.articles import Article
from app.models.domain.users import User
async def check_article_exists(articles_repo: ArticlesRepository, slug: str) -> bool:
try:
await articles_repo.get_article_by_slug(slug=slug)
except EntityDoesNotExist:
return False
return True
def get_slug_for_article(title: str) -> str:
return slugify(title)
def check_user_can_modify_article(article: Article, user: User) -> bool:
return article.author.username == user.username
| 26.958333
| 85
| 0.775889
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.343122
| 0
| 0
|
02e1c3bcb817bb01646a68ea5b10f1ece433f8ce
| 1,240
|
py
|
Python
|
tests/test_decoder.py
|
carlosmouracorreia/python_mp3_decoder
|
79ea61f3ceedc07a173a216538f4acbdf1c4c6c3
|
[
"MIT"
] | 23
|
2016-06-22T14:18:28.000Z
|
2020-11-23T12:39:01.000Z
|
tests/test_decoder.py
|
carlosmouracorreia/python_mp3_decoder
|
79ea61f3ceedc07a173a216538f4acbdf1c4c6c3
|
[
"MIT"
] | 3
|
2019-12-10T01:07:41.000Z
|
2021-03-29T14:40:29.000Z
|
tests/test_decoder.py
|
carlosmouracorreia/python_mp3_decoder
|
79ea61f3ceedc07a173a216538f4acbdf1c4c6c3
|
[
"MIT"
] | 1
|
2019-10-29T11:55:08.000Z
|
2019-10-29T11:55:08.000Z
|
from pymp3decoder import Decoder
import contextlib
import os
import math
import pyaudio
CHUNK_SIZE = 4096
def take_chunk(content):
""" Split a buffer of data into chunks """
num_blocks = int(math.ceil(1.0*len(content)/CHUNK_SIZE))
for start in range(num_blocks):
yield content[CHUNK_SIZE*start:CHUNK_SIZE*(start+1)]
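# For example (illustrative): with CHUNK_SIZE = 4096, a 10000-byte buffer
# yields three chunks of 4096, 4096 and 1808 bytes.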
class TestPlayer:
@contextlib.contextmanager
def start(self):
try:
p = pyaudio.PyAudio()
self.decoder = Decoder(CHUNK_SIZE*20)
self.stream = p.open(format=p.get_format_from_width(2),
channels=2,
rate=44100,
output=True)
yield self.stream
finally:
self.stream.stop_stream()
self.stream.close()
p.terminate()
def test_file(self):
""" Open a file and decode it """
abs_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test.mp3")
with open(abs_location, "rb") as in_file, self.start():
content = in_file.read()
for chunk in self.decoder.decode_iter(take_chunk(content)):
self.stream.write(chunk)
| 24.313725
| 91
| 0.575806
| 895
| 0.721774
| 695
| 0.560484
| 495
| 0.399194
| 0
| 0
| 89
| 0.071774
|
02e1eaa7b6938abd4a858ad69dcce256aad518c8
| 2,232
|
py
|
Python
|
bot.py
|
Euphorichuman/StarConstellationBot-TelgramBot-
|
557dad7ce1d1a96a96b4ed65b796f20a6944e3b7
|
[
"Apache-2.0"
] | null | null | null |
bot.py
|
Euphorichuman/StarConstellationBot-TelgramBot-
|
557dad7ce1d1a96a96b4ed65b796f20a6944e3b7
|
[
"Apache-2.0"
] | null | null | null |
bot.py
|
Euphorichuman/StarConstellationBot-TelgramBot-
|
557dad7ce1d1a96a96b4ed65b796f20a6944e3b7
|
[
"Apache-2.0"
] | null | null | null |
import telegram.ext
import messsages as msg
import functions as f
import matplotlib.pyplot as plt
import traceback
import os
import os.path
from os import path
def start(update, context):
nombre = update.message.chat.first_name
mensaje = "Bienvenido {}, para conocer lo que puedo hacer utiliza el comando /Help.".format(nombre)
update.message.reply_text(mensaje)
def help(update, context):
update.message.reply_text(msg.helpMessage, parse_mode=telegram.ParseMode.HTML)
# Function to send the figure with all the stars
def allStars(update, context):
chat_id = update.message.chat.id
figure = f.stars()
figure.draw()
figure.savefig("./files/stars.png")
context.bot.send_photo(chat_id, open("./files/stars.png",'rb'))
os.remove("./files/stars.png")
# Function to send the figure with all the stars and one constellation
def allStars1Constellation(update, context):
chat_id = update.message.chat.id
messageUser = update.message.text
constellation = messageUser.split(" ")
try:
f.searchFile("./files/constellations/", constellation[1])
figure = f.allStars1Constellation(constellation[1], f.stars(), "#fdff6e")
figure.savefig("./files/1Constellation.png")
context.bot.send_photo(chat_id, open("./files/1Constellation.png",'rb'))
os.remove("./files/1Constellation.png")
except:
update.message.reply_text(msg.errorMessage, parse_mode=telegram.ParseMode.HTML)
# Function to send the figure with all the stars and all the constellations
def allStarsAllConstellations(update, context):
chat_id = update.message.chat.id
figure = f.starsAndContellations()
figure.draw()
figure.savefig("./files/StarsAndConstellations.png")
context.bot.send_photo(chat_id, open("./files/StarsAndConstellations.png",'rb'))
os.remove("./files/StarsAndConstellations.png")
# Function to send a list of the available constellations
def constellations(update, context):
update.message.reply_text(msg.constellationsMessage)
# Function to send information about the bot
def about(update, context):
update.message.reply_text(msg.infoMessage, parse_mode=telegram.ParseMode.HTML)
| 37.2
| 103
| 0.744624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 714
| 0.319035
|
02e39765ed97295a34732ab36aceb4ca2cfebe3b
| 2,706
|
py
|
Python
|
tests/unit/states/test_slack.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
tests/unit/states/test_slack.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | 1
|
2015-10-05T22:03:10.000Z
|
2015-10-05T22:03:10.000Z
|
tests/unit/states/test_slack.py
|
amaclean199/salt
|
8aaac011b4616e3c9e74a1daafb4a2146a5a430f
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.states.slack as slack
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SlackTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.slack
'''
def setup_loader_modules(self):
return {slack: {}}
# 'post_message' function tests: 1
def test_post_message(self):
'''
Test to send a message to a Slack channel.
'''
name = 'slack-message'
channel = '#general'
from_name = 'SuperAdmin'
message = 'This state was executed successfully.'
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
with patch.dict(slack.__opts__, {'test': True}):
comt = ('The following message is to be sent to Slack: {0}'
.format(message))
ret.update({'comment': comt})
self.assertDictEqual(slack.post_message(name, channel, from_name,
message), ret)
with patch.dict(slack.__opts__, {'test': False}):
comt = ('Slack channel is missing: None')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(slack.post_message(name, None, from_name,
message), ret)
comt = ('Slack from name is missing: None')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(slack.post_message(name, channel, None,
message), ret)
comt = ('Slack message is missing: None')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(slack.post_message(name, channel, from_name,
None), ret)
mock = MagicMock(return_value=True)
with patch.dict(slack.__salt__, {'slack.post_message': mock}):
comt = ('Sent message: slack-message')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(slack.post_message(name, channel,
from_name, message),
ret)
| 35.605263
| 77
| 0.543607
| 2,202
| 0.813747
| 0
| 0
| 2,235
| 0.825942
| 0
| 0
| 703
| 0.259793
|
02e3d3385e104cc569c1458b36ecf6ad0a158a11
| 613
|
py
|
Python
|
lintcode/499.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:49.000Z
|
2021-01-08T06:57:49.000Z
|
lintcode/499.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | null | null | null |
lintcode/499.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:52.000Z
|
2021-01-08T06:57:52.000Z
|
"""
499. Word Count (Map Reduce)
https://www.lintcode.com/problem/word-count-map-reduce/description
"""
class WordCount:
# @param {str} line a text, for example "Bye Bye see you next"
def mapper(self, _, line):
# Write your code here
# Please use 'yield key, value'
word_lists = line.split(" ")
for word in word_lists:
yield word, 1
# @param key is from mapper
# @param values is a set of value with the same key
def reducer(self, key, values):
# Write your code here
# Please use 'yield key, value'
yield key, sum(values)
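# Illustrative run (outside the grading harness):
# wc = WordCount()
# list(wc.mapper(None, "Bye Bye see you next"))
# -> [('Bye', 1), ('Bye', 1), ('see', 1), ('you', 1), ('next', 1)]
# list(wc.reducer('Bye', [1, 1]))
# -> [('Bye', 2)]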
| 29.190476
| 66
| 0.611746
| 508
| 0.828711
| 325
| 0.530179
| 0
| 0
| 0
| 0
| 352
| 0.574225
|
02e41ad97ede483bee9810e5ea1fa7bf1e5f726c
| 1,205
|
py
|
Python
|
dataframe/statistic.py
|
kuangtu/pandas_exec
|
659dec5eef488bec11daec33333ff8366a0d1a91
|
[
"MIT"
] | null | null | null |
dataframe/statistic.py
|
kuangtu/pandas_exec
|
659dec5eef488bec11daec33333ff8366a0d1a91
|
[
"MIT"
] | null | null | null |
dataframe/statistic.py
|
kuangtu/pandas_exec
|
659dec5eef488bec11daec33333ff8366a0d1a91
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
def countnum():
dates = pd.date_range(start="2019-01-01", end="2019-05-31", freq='M')
# print(dates)
# print(dates[0])
# print(type(dates[0]))
col1 = [i for i in range(1, len(dates) + 1)]
# print(col1)
col2 = [i + 1 for i in range(1, len(dates) + 1)]
df = pd.DataFrame({'col1': col1, 'col2': col2}, index=dates)
# print(df)
dict_ic = {}
dict_ic['date'] = df
df_ic = pd.concat(dict_ic.values(), keys=dict_ic.keys())
# print (df_ic)
    # statistics based on a list comprehension
mean = df_ic.groupby(level=0).apply(lambda frame: len(
[i for i in frame['col1'].values if i > 2]) / len(frame['col1']))
print(mean)
def statfunc():
perf_dict = {"code": ['000001', '000002', '000003'],
"close": [100, 91.1, 5.4],
"vol": [1000, 200, 3000]}
df = pd.DataFrame(perf_dict)
    # positions (row labels) of the min and max values
print(df['close'].idxmin())
min_close = df.iloc[df['close'].idxmin(),:]
print(min_close)
max_close = df.iloc[df['close'].idxmax(), :]
print(max_close)
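    # With the data above, min_close is the row for code '000003' (close 5.4)
    # and max_close the row for code '000001' (close 100).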
result = df.any(axis=0)
print(result)
if __name__ == '__main__':
# countnum()
statfunc()
| 25.104167
| 73
| 0.557676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.248986
|
02e6d57771f13dca80d99555989b7627a7fef655
| 6,513
|
py
|
Python
|
asv/results.py
|
pitrou/asv
|
d6efa34f1308a212bc3c2f386f2f6584bbb5398f
|
[
"BSD-3-Clause"
] | null | null | null |
asv/results.py
|
pitrou/asv
|
d6efa34f1308a212bc3c2f386f2f6584bbb5398f
|
[
"BSD-3-Clause"
] | null | null | null |
asv/results.py
|
pitrou/asv
|
d6efa34f1308a212bc3c2f386f2f6584bbb5398f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import base64
import os
import zlib
from .environment import get_environment
from . import util
def iter_results_paths(results):
"""
Iterate over all of the result file paths.
"""
skip_files = set([
'machine.json', 'benchmarks.json'
])
for root, dirs, files in os.walk(results):
for filename in files:
if filename not in skip_files and filename.endswith('.json'):
yield (root, filename)
def iter_results(results):
"""
Iterate over all of the result files.
"""
for (root, filename) in iter_results_paths(results):
yield Results.load(os.path.join(root, filename))
def iter_results_for_machine(results, machine_name):
"""
Iterate over all of the result files for a particular machine.
"""
return iter_results(os.path.join(results, machine_name))
def iter_results_for_machine_and_hash(results, machine_name, commit):
"""
Iterate over all of the result files with a given hash for a
particular machine.
"""
for (root, filename) in iter_results_paths(
os.path.join(results, machine_name)):
results_commit = filename.split('-')[0]
max_len = max(len(commit), len(results_commit))
if results_commit[:max_len] == commit[:max_len]:
yield Results.load(os.path.join(root, filename))
def iter_existing_hashes(results):
"""
Iterate over all of the result commit hashes and dates. Each
element yielded is the pair (hash, date).
May return duplicates. Use `get_existing_hashes` if that matters.
"""
for result in iter_results(results):
yield result.commit_hash, result.date
def get_existing_hashes(results):
"""
Get all of the commit hashes that have already been tested.
Each element yielded is the pair (hash, date).
"""
hashes = list(set(iter_existing_hashes(results)))
return hashes
def find_latest_result_hash(machine, root):
"""
Find the latest result for the given machine.
"""
root = os.path.join(root, machine)
latest_date = 0
latest_hash = ''
for commit_hash, date in iter_existing_hashes(root):
if date > latest_date:
latest_date = date
latest_hash = commit_hash
return latest_hash
def get_filename(machine, commit_hash, env):
"""
Get the result filename for a given machine, commit_hash and
environment.
"""
return os.path.join(
machine,
"{0}-{1}.json".format(
commit_hash[:8],
env.name))
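# For example (illustrative values): with machine 'mymachine', commit hash
# 'abcdef1234567890' and an environment named 'py27-numpy', this yields
# 'mymachine/abcdef12-py27-numpy.json'.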
class Results(object):
"""
Manage a set of benchmark results for a single machine and commit
hash.
"""
api_version = 1
def __init__(self, params, env, commit_hash, date):
"""
Parameters
----------
params : dict
Parameters describing the environment in which the
benchmarks were run.
env : Environment object
Environment in which the benchmarks were run.
commit_hash : str
The commit hash for the benchmark run.
date : int
Javascript timestamp for when the commit was merged into
the repository.
"""
self._params = params
self._env = env
self._commit_hash = commit_hash
self._date = date
self._results = {}
self._profiles = {}
self._python = env.python
self._filename = get_filename(
params['machine'], self._commit_hash, env)
@property
def commit_hash(self):
return self._commit_hash
@property
def date(self):
return self._date
@property
def params(self):
return self._params
@property
def results(self):
return self._results
@property
def env(self):
return self._env
def add_time(self, benchmark_name, time):
"""
Add benchmark times.
Parameters
----------
benchmark_name : str
Name of benchmark
time : number
Numeric result
"""
self._results[benchmark_name] = time
def add_profile(self, benchmark_name, profile):
"""
Add benchmark profile data.
Parameters
----------
benchmark_name : str
Name of benchmark
profile : bytes
`cProfile` data
"""
self._profiles[benchmark_name] = base64.b64encode(
zlib.compress(profile))
def get_profile(self, benchmark_name):
"""
Get the profile data for the given benchmark name.
"""
return zlib.decompress(
base64.b64decode(self._profiles[benchmark_name]))
def has_profile(self, benchmark_name):
"""
Does the given benchmark data have profiling information?
"""
return benchmark_name in self._profiles
def save(self, result_dir):
"""
Save the results to disk.
Parameters
----------
result_dir : str
Path to root of results tree.
"""
path = os.path.join(result_dir, self._filename)
util.write_json(path, {
'results': self._results,
'params': self._params,
'requirements': self._env.requirements,
'commit_hash': self._commit_hash,
'date': self._date,
'python': self._python,
'profiles': self._profiles
}, self.api_version)
@classmethod
def load(cls, path):
"""
Load results from disk.
Parameters
----------
path : str
Path to results file.
"""
d = util.load_json(path, cls.api_version)
obj = cls(
d['params'],
get_environment('', d['python'], d['requirements']),
d['commit_hash'],
d['date'])
obj._results = d['results']
if 'profiles' in d:
obj._profiles = d['profiles']
obj._filename = os.path.join(*path.split(os.path.sep)[-2:])
return obj
def rm(self, result_dir):
path = os.path.join(result_dir, self._filename)
os.remove(path)
@classmethod
def update(cls, path):
util.update_json(cls, path, cls.api_version)
| 25.641732
| 73
| 0.590511
| 3,748
| 0.575464
| 1,371
| 0.210502
| 980
| 0.150468
| 0
| 0
| 2,457
| 0.377246
|
02e9695d836ae2a21a14a0f80cc396334b03974f
| 1,188
|
py
|
Python
|
core/secretfinder/utils.py
|
MakoSec/pacu
|
f06f110e6c181f34b89b803e7c2024563acc9fbc
|
[
"BSD-3-Clause"
] | 26
|
2021-03-29T13:39:28.000Z
|
2022-03-21T10:57:58.000Z
|
core/secretfinder/utils.py
|
MakoSec/pacu
|
f06f110e6c181f34b89b803e7c2024563acc9fbc
|
[
"BSD-3-Clause"
] | 1
|
2021-06-02T02:39:40.000Z
|
2021-06-02T02:39:40.000Z
|
core/secretfinder/utils.py
|
MakoSec/pacu
|
f06f110e6c181f34b89b803e7c2024563acc9fbc
|
[
"BSD-3-Clause"
] | 8
|
2021-02-23T12:17:04.000Z
|
2022-02-25T13:28:14.000Z
|
import math
import json
import re
import os
class Color:
GREEN = '\033[92m'
BLUE = '\033[94m'
RED = '\033[91m'
# noinspection SpellCheckingInspection
ENDC = '\033[0m'
@staticmethod
def print(color, text):
print('{}{}{}'.format(color, text, Color.ENDC))
def shannon_entropy(data):
if not data:
return 0
entropy = 0
for character_i in range(256):
px = data.count(chr(character_i)) / len(data)
if px > 0:
entropy += - px * math.log(px, 2)
return entropy
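# Quick sanity check (illustrative): shannon_entropy('aaaa') == 0.0, since a
# single repeated character carries no information, while shannon_entropy('ab')
# == 1.0 (two equally likely characters -> one bit per character).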
def regex_checker(userdata):
results = {}
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # use a context manager so the file is closed even if parsing fails;
    # the original try/except only re-raised and could skip f.close()
    with open(os.path.join(__location__, 'regexs.json')) as f:
        regexs = json.loads(f.read())
    for key in regexs:
        regex = re.compile(regexs[key])
        result = regex.findall(userdata)
        if result:
            results[key] = result
    return results
def contains_secret(data, THRESHOLD=3.5):
return shannon_entropy(data) > THRESHOLD
| 21.214286
| 58
| 0.570707
| 245
| 0.206229
| 0
| 0
| 97
| 0.08165
| 0
| 0
| 98
| 0.082492
|
02e997ec752171db83c0a7598b23b28d81788b83
| 2,342
|
py
|
Python
|
validation/step_03_-_predict_state/step_03_-_plot_results.py
|
martin0004/drone_perception_system
|
ac76a002179bd1a7219f3c76747bd50aba0a0aea
|
[
"MIT"
] | 1
|
2021-08-25T08:16:27.000Z
|
2021-08-25T08:16:27.000Z
|
validation/step_03_-_predict_state/step_03_-_plot_results.py
|
martin0004/drone_perception_system
|
ac76a002179bd1a7219f3c76747bd50aba0a0aea
|
[
"MIT"
] | null | null | null |
validation/step_03_-_predict_state/step_03_-_plot_results.py
|
martin0004/drone_perception_system
|
ac76a002179bd1a7219f3c76747bd50aba0a0aea
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from typing import Tuple
def clean_df_headers(df: pd.DataFrame) -> pd.DataFrame:
"""Remove leading and trailing spaces in DataFrame headers."""
headers = pd.Series(df.columns)
new_headers = [header.strip() for header in headers]
new_headers = pd.Series(new_headers)
df.columns = new_headers
return df
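# For example (hypothetical headers): columns [' time', 'quad.pos.x '] are
# renamed to ['time', 'quad.pos.x'].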
def configure_ax(ax: plt.axes,
                 df: pd.DataFrame = None,
                 xlabel: str = None,
                 ylabel: str = None,
                 ylim: Tuple[int, int] = None,
                 title: str = None,
                 legend: bool = False
                 ) -> plt.axes:
"""Configure Matplotlib axe."""
if df is not None:
x = df.index
for h in df.columns:
y = df[h]
ax.plot(x, y,label=h)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if ylim is not None:
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
    if legend:
        ax.legend()
return ax
if __name__ == "__main__":
# Load sensor data
df_data = pd.read_csv("step_03_-_scenario_08_-_after_tuning.txt")
# Remove leading and trailing spaces in df headers
df_data = clean_df_headers(df_data)
# Set "time" column as DataFrame index
df_data = df_data.set_index("time")
# Plot results
fig = plt.figure()
fig.suptitle("True & Predicted States \n (Global Frame)")
# X-Position and X-Speed
ax = plt.subplot(3,1,1)
df = df_data[["quad.pos.x", "quad.est.x", "quad.vel.x", "quad.est.vx"]]
ax = configure_ax(ax, df = df, ylabel = "X-Positions [m] \n X-Velocities [m/s]", title = "After Tuning", legend = True)
# Y-Position and Y-Speed
ax = plt.subplot(3,1,2)
df = df_data[["quad.pos.y", "quad.est.y", "quad.vel.y", "quad.est.vy"]]
ax = configure_ax(ax, df = df, ylabel = "Y-Positions [m] \n Y-Velocities [m/s]", legend = True)
# Z-Position and Z-Speed
ax = plt.subplot(3,1,3)
df = df_data[["quad.pos.z", "quad.est.z", "quad.vel.z", "quad.est.vz"]]
ax = configure_ax(ax, df = df, xlabel = "Time [s]", ylabel = "Z-Positions [m] \n Z-Velocities [m/s]", legend = True)
plt.show()
| 27.232558
| 123
| 0.585824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 674
| 0.287788
|
02eb83c13dc0114b6ab1c905f8a724d75ccb3d34
| 8,036
|
py
|
Python
|
lvsr/dependency/datasets.py
|
mzapotoczny/dependency-parser
|
e37f94e23cb61d6658774f5f9843219df331eb74
|
[
"MIT"
] | 3
|
2017-06-07T06:41:18.000Z
|
2019-10-26T13:08:23.000Z
|
lvsr/dependency/datasets.py
|
mzapotoczny/dependency-parser
|
e37f94e23cb61d6658774f5f9843219df331eb74
|
[
"MIT"
] | null | null | null |
lvsr/dependency/datasets.py
|
mzapotoczny/dependency-parser
|
e37f94e23cb61d6658774f5f9843219df331eb74
|
[
"MIT"
] | 1
|
2020-11-26T17:40:18.000Z
|
2020-11-26T17:40:18.000Z
|
'''
Created on Mar 20, 2016
'''
import numpy
import numbers
from fuel.datasets.hdf5 import H5PYDataset
from fuel.utils import Subset
class VMap(object):
def __init__(self, vmap_dset):
dtype = vmap_dset.dtype
dtype_unicode = [('key', 'U%d' % (dtype[0].itemsize/4,)),
('val', dtype[1])]
vmap_dset = numpy.array(vmap_dset).view(dtype=dtype_unicode)
self.tok2num = dict(vmap_dset)
self.num2tok = {num: char for char, num in self.tok2num.iteritems()}
class VMapIdentity(VMap):
def __init__(self, dset):
dset = numpy.array(dset)
self.tok2num = {}
for sample in dset:
for num in sample:
                self.tok2num[num] = num
self.num2tok = self.tok2num
#self.num2tok = {num: char for char, num in self.tok2num.iteritems()}
class H5PYTextDataset(H5PYDataset):
_clean_sources = ['deps', 'features_per_word']
def __init__(self, sources_map, **kwargs):
self.sources_map = sources_map
super(H5PYTextDataset, self).__init__(**kwargs)
self.open()
self.value_maps = {}
for source in self._file_handle:
if '_value_map' not in source and source not in self._clean_sources:
continue
if source not in self._clean_sources:
source = source[:-len('_value_map')]
try:
vmap_dset = self._file_handle[
self._file_handle[source].attrs['value_map']]
self.value_maps[source] = VMap(vmap_dset)
except KeyError:
self.value_maps[source] = VMapIdentity(self._file_handle[source])
self.char2num = self.value_maps[self.sources_map['labels']].tok2num
self.num2char = self.value_maps[self.sources_map['labels']].num2tok
self.num_characters = len(self.num2char)
self.eos_label = 0
self.bos_label = 0
def _parse_dataset_info(self):
self._out_of_memory_open()
handle = self._file_handle
available_splits = self.get_all_splits(handle)
which_sets = self.which_sets
provides_sources = None
for split in which_sets:
if split not in available_splits:
raise ValueError(
"'{}' split is not provided by this ".format(split) +
"dataset. Available splits are " +
"{}.".format(available_splits))
split_provides_sources = set(
self.get_provided_sources(handle, split))
if provides_sources:
provides_sources &= split_provides_sources
else:
provides_sources = split_provides_sources
if 'additionals' in provides_sources:
sources = map(lambda x: x[:-len('_value_map')],
filter(lambda x: '_value_map' in x, handle))
self.additional_sources = sorted(list(set(sources) - provides_sources))
provides_sources |= set(sources)
else:
self.additional_sources = []
self.provides_sources = tuple(sorted(provides_sources))
self.vlen_sources = self.get_vlen_sources(handle)
self.default_axis_labels = self.get_axis_labels(handle)
self._out_of_memory_close()
@staticmethod
def get_subsets(h5file, splits, sources):
split_sources = set([r['source'] for r in h5file.attrs['split']])
if 'additionals' in split_sources:
sources = [s if s in split_sources else 'additionals' for s in sources]
return H5PYDataset.get_subsets(h5file, splits, sources)
def _in_memory_get_data(self, state=None, request=None):
        raise NotImplementedError()
def _out_of_memory_get_data(self, state=None, request=None):
if not isinstance(request, (numbers.Integral, slice, list)):
raise ValueError()
data = []
shapes = []
handle = self._file_handle
integral_request = isinstance(request, numbers.Integral)
if self.additional_sources != []:
additional_name = [s for s in self.additional_sources if s in self.sources]
if len(additional_name) > 0:
additional_name = additional_name[0]
asubset = self.subsets[self.sources.index(additional_name)]
additionals_shapes = asubset.index_within_subset(
handle['additionals'].dims[0]['shapes'], request,
sort_indices=self.sort_indices)
additionals_data = asubset.index_within_subset(
handle['additionals'], request,
sort_indices=self.sort_indices)
for source_name, subset in zip(self.sources, self.subsets):
# Process the data request within the context of the data source
# subset
if source_name in self.additional_sources:
source_index = self.additional_sources.index(source_name)
if integral_request:
data.append(
additionals_data[source_index::additionals_shapes[1]])
shapes.append(additionals_shapes[:1])
else:
data.append(
numpy.array([x[source_index::additionals_shapes[i][1]]
for i,x in enumerate(additionals_data)])
)
shapes.append(additionals_shapes[:,:1])
else:
data.append(
subset.index_within_subset(
handle[source_name], request,
sort_indices=self.sort_indices))
# If this source has variable length, get the shapes as well
if source_name in self.vlen_sources:
shapes.append(
subset.index_within_subset(
handle[source_name].dims[0]['shapes'], request,
sort_indices=self.sort_indices))
else:
shapes.append(None)
#from IPython import embed; embed()
return data, shapes
def num_features(self, feature_name):
if feature_name in self._clean_sources and 'per_word' in feature_name:
return self._file_handle.attrs[feature_name+'_length']
else:
return len(self.value_maps[feature_name].tok2num)
def decode(self, labels, keep_eos=False):
return map(lambda x: self.num2char[x], labels[1:-1])
def pretty_print(self, labels, example):
labels = self.decode(labels)
return labels
def get_value_map_name(self, source):
if source == '':
return source
if source in self.value_maps:
return source
else:
return self.get_value_map_name(source[:source.rindex('_')])
def print_text(self, words):
key = words.keys()[0]
vkey = self.get_value_map_name(key)
value = words[key]
sentence = []
vmap = self.value_maps[vkey].num2tok
try:
if value.ndim == 2:
max_char = self.value_maps[vkey].tok2num['UNK']
for i in xrange(value.shape[0]):
word = []
for vi,v in enumerate(value[i]):
word += [vmap[v]]
if vi > 0 and v > max_char:
break
sentence += [''.join(word[1:-1])]
except:
return "UNKNOWN TEXT"
return ' '.join(sentence[1:-1])
def validate_solution(self, inp, candidate_out):
num_words = len(inp.values()[0])
deps = candidate_out[0]
pointers = candidate_out[1]
if deps.shape[0] != pointers.shape[0] or \
deps.shape[0] != num_words:
return False
return True
| 40.585859
| 87
| 0.56558
| 7,888
| 0.981583
| 0
| 0
| 325
| 0.040443
| 0
| 0
| 600
| 0.074664
|
02ebdfddbb50d875cc9962bf326ad8e9c362cfea
| 1,444
|
py
|
Python
|
setup.py
|
LandRegistry/govuk-frontend-wtf
|
3ac1501dd220ad8f4cff0137f2d87e973c9e1243
|
[
"MIT"
] | 10
|
2021-02-02T11:38:42.000Z
|
2022-01-21T15:10:23.000Z
|
setup.py
|
LandRegistry/govuk-frontend-wtf
|
3ac1501dd220ad8f4cff0137f2d87e973c9e1243
|
[
"MIT"
] | 23
|
2021-04-26T09:19:22.000Z
|
2022-03-31T15:13:31.000Z
|
setup.py
|
LandRegistry/govuk-frontend-wtf
|
3ac1501dd220ad8f4cff0137f2d87e973c9e1243
|
[
"MIT"
] | 6
|
2021-02-04T11:09:51.000Z
|
2021-06-01T08:39:02.000Z
|
import glob
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
templates = []
directories = glob.glob("govuk_frontend_wtf/templates/*.html")
for directory in directories:
templates.append(os.path.relpath(os.path.dirname(directory), "govuk_frontend_wtf") + "/*.html")
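# For example, a file "govuk_frontend_wtf/templates/macros.html" (name used
# for illustration only) contributes the package-data pattern "templates/*.html".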
setuptools.setup(
name="govuk-frontend-wtf",
version="1.0.0",
author="Matt Shaw",
author_email="matthew.shaw@landregistry.gov.uk",
description="GOV.UK Frontend WTForms Widgets",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/LandRegistry/govuk-frontend-wtf",
packages=setuptools.find_packages(exclude=["tests"]),
package_data={"govuk_frontend_wtf": templates},
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Environment :: Web Environment",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: User Interfaces",
"Topic :: Text Processing :: Markup :: HTML",
],
python_requires=">=3.6",
install_requires=[
"deepmerge",
"flask",
"flask-wtf",
"govuk-frontend-jinja<2.0.0",
"jinja2",
"wtforms",
],
)
avg_line_length: 31.391304 | max_line_length: 99 | alphanum_fraction: 0.644044
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 714 | score_documentation: 0.49446
hexsha: 02ebe98586fb9a06d031ee215ed1a172f2753298 | size: 2,930 | ext: py | lang: Python
path: project/experiments/exp_003_best_Walker2D/src/4.plot_1.py | repo: liusida/thesis-bodies | head_hexsha: dceb8a36efd2cefc611f6749a52b56b9d3572f7a | licenses: ["MIT"]
stars: null | issues: null | forks: null
import os
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from common.tflogs2pandas import tflog2pandas, many_logs2pandas
from common.gym_interface import template
bodies = [300]
all_seeds = list(range(20))
all_stackframe = [0,4]
cache_filename = "output_data/tmp/plot0"
# Load the concatenated eval results from the cache when available; otherwise
# rebuild them from the TensorBoard logs below and cache the result.
try:
    df = pd.read_pickle(cache_filename)
except Exception:
    # if True:  # replace the try/except above with this to force a rebuild
    dfs = []
for body in bodies:
for seed in all_seeds:
for stackframe in all_stackframe:
path = f"output_data/tensorboard/model-{body}"
                if stackframe > 0:
path += f"-stack{stackframe}"
path += f"-sd{seed}/SAC_1"
print(f"Loading {path}")
if not os.path.exists(path):
continue
df = tflog2pandas(path)
df["body"] = body
df["seed"] = seed
df["stackframe"] = stackframe
df = df[df["metric"] == f"eval/{body}_mean_reward"]
print(df.shape)
print(df.head())
dfs.append(df)
df = pd.concat(dfs)
df.to_pickle(cache_filename)
print(df.shape)
# df = df[::100]
print(df[df["seed"]==0].head())
print(df[df["seed"]==1].head())
print(df[df["seed"]==2].head())
print(df[df["seed"]==3].head())
df1 = pd.DataFrame(columns=df.columns)
print(df1)
for body in bodies:
for seed in all_seeds:
for stackframe in all_stackframe:
df2 = df[(df["body"]==body) & (df["seed"]==seed) & (df["stackframe"]==stackframe)]
print(df2.shape)
x = df2.iloc[df2["value"].argsort().iloc[-1]]
df1 = df1.append(x)
# for i in range(30):
if False:
step_number = 60000
x = df2.iloc[(df2["step"] - step_number).abs().argsort()[0]]
if abs(x["step"]-step_number)>1500:
print("no")
else:
# print(x)
x = x.copy()
# x["step"] = step_number
df1 = df1.append(x)
df1 = df1[df1["step"]>550000]
print(df1)
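# Report mean +/- 2*std (roughly a 95% interval under normality) for the
# control (no frame stacking) and treatment (stackframe=4) groups.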
print("control")
df2 = df1[df1["stackframe"]==0]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print("treatment: stackframe")
df2 = df1[df1["stackframe"]==4]
print(f"{df2['value'].mean():.03f} +- {2*df2['value'].std():.03f}")
print(df1.shape, df.shape)
df = df1
fig, axes = plt.subplots(nrows=1, ncols=1, sharey=True, figsize=[10,10])
sns.barplot(ax=axes, data=df1, x="stackframe", y="value")
# axes = [axes]
# axes = axes.flatten()
# for idx, body in enumerate(bodies):
# sns.lineplot(
# ax=axes[idx],
# data=df[df["body"]==body],
# x="step", y="value", hue="stackframe",
# markers=True, dashes=False
# ).set_title(template(body))
plt.legend()
plt.tight_layout()
plt.savefig("output_data/plots/0.png")
# plt.show()
avg_line_length: 31.505376 | max_line_length: 94 | alphanum_fraction: 0.543686
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 837 | score_documentation: 0.285666
hexsha: 02ec3adf599332a9c2e8596007821b919933d4a9 | size: 167 | ext: py | lang: Python
path: wsgi.py | repo: emilan21/macvert | head_hexsha: ac219507a6b20372861667f4ade8084c9902a231 | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/python
# wsgi.py - WSGI entry point for the macvert MAC address converter
from macvert.web import app
if __name__ == '__main__':
app.run()
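# Typical deployment (an illustrative sketch, not part of the original file):
#   gunicorn wsgi:app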
avg_line_length: 20.875 | max_line_length: 79 | alphanum_fraction: 0.742515
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 106 | score_documentation: 0.634731
hexsha: 02ed59dd65e3f0007ed59a3660fc0e47a1a878ad | size: 461 | ext: py | lang: Python
path: config/dotenv.py | repo: CharuchithRanjit/open-pos | head_hexsha: ac749a0f2a6c59077d2c13f13e776963e130501f | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
Loads dotenv variables
Classes:
None
Functions:
None
Misc variables:
DATABASE_KEY (str) -- The key for the database
DATABASE_PASSWORD (str) -- The password for the database
DATABASE_URL (str) -- The URL for the database (read from the SUPABASE_URL environment variable)
"""
import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
DATABASE_KEY = os.environ.get("DATABASE_KEY")
DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD")
DATABASE_URL = os.environ.get("SUPABASE_URL")
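# Example .env (illustrative placeholder values):
#   DATABASE_KEY=<key>
#   DATABASE_PASSWORD=<password>
#   SUPABASE_URL=<project-url>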
avg_line_length: 20.954545 | max_line_length: 56 | alphanum_fraction: 0.776573
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 277 | score_documentation: 0.600868
hexsha: 02eda03f6cdc3686e65cef76614261ad8f2d5c5b | size: 462 | ext: py | lang: Python
path: 617. Merge Two Binary Trees/solution1.py | repo: zhoudaxia233/LeetCode | head_hexsha: 66e8087e982f0517e22926a5574557ff006c9e4d | licenses: ["MIT"]
stars: 3 (2019-08-18T15:56:13.000Z to 2019-11-29T10:20:05.000Z) | issues: null | forks: null
class Solution:
def mergeTrees(self, t1, t2):
"""
:type t1: TreeNode
:type t2: TreeNode
:rtype: TreeNode
"""
# This method changes the existing tree instead of returning a new one
if t1 and t2:
t1.val += t2.val
t1.left = self.mergeTrees(t1.left, t2.left)
t1.right = self.mergeTrees(t1.right, t2.right)
return t1
else:
return t1 or t2
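# Worked example (the classic LeetCode 617 case, assuming the usual TreeNode
# with .val/.left/.right):
#   t1 = [1, 3, 2, 5], t2 = [2, 1, 3, null, 4, null, 7]
#   mergeTrees(t1, t2) -> [3, 4, 5, 5, 4, null, 7], reusing t1's nodes.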
avg_line_length: 28.875 | max_line_length: 78 | alphanum_fraction: 0.52381
count_classes: 461 | score_classes: 0.997835 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 164 | score_documentation: 0.354978
hexsha: 02ef52ac7a4592df5ce1f94d82e027c617d780cc | size: 1,094 | ext: py | lang: Python
path: tests/zero_model_test.py | repo: shatadru99/archai | head_hexsha: 8501080f8ecc73327979c02387e02011efb4c335 | licenses: ["MIT"]
stars: 1 (2020-01-29T18:45:42.000Z) | issues: null | forks: 1 (2020-01-31T15:51:53.000Z)
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from archai.nas.model import Model
from archai.nas.macro_builder import MacroBuilder
from archai.common.common import common_init
def _check_zero_model(config_filepath):
    # Shared body of the two tests below: build a zero model from the given
    # config and check the output shape and auxiliary head.
    conf = common_init(config_filepath=config_filepath)
    conf_search = conf['nas']['search']
    model_desc = conf_search['model_desc']
    macro_builder = MacroBuilder(model_desc)
    model_desc = macro_builder.build()
    m = Model(model_desc, False, True)
    y, aux = m(torch.rand((1, 3, 32, 32)))
    assert isinstance(y, torch.Tensor) and y.shape == (1, 10) and aux is None
def test_darts_zero_model():
    _check_zero_model('confs/algos/darts.yaml')
def test_petridish_zero_model():
    _check_zero_model('confs/petridish_cifar.yaml')
avg_line_length: 36.466667 | max_line_length: 75 | alphanum_fraction: 0.698355
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 175 | score_documentation: 0.159963
hexsha: 02f1e0ca932dc0360686a58ec3b261b9a83d5c58 | size: 13,461 | ext: py | lang: Python
path: reports/dataset.py | repo: TexasDigitalLibrary/dataverse-reports | head_hexsha: 90f849a1b6c0d772d19de336f9f48cd290256392 | licenses: ["MIT"]
stars: 5 (2018-10-07T14:37:40.000Z to 2021-09-14T08:57:19.000Z) | issues: 11 (2019-08-30T15:29:37.000Z to 2021-12-20T19:44:37.000Z) | forks: 4 (2018-01-30T18:20:54.000Z to 2021-09-30T09:04:44.000Z)
import logging
import datetime
class DatasetReports(object):
    def __init__(self, dataverse_api=None, dataverse_database=None, config=None):
        # Fail fast rather than leaving a half-constructed object behind.
        if dataverse_api is None:
            raise ValueError('Dataverse API required to create dataset reports.')
        if dataverse_database is None:
            raise ValueError('Dataverse database required to create dataset reports.')
        if config is None:
            raise ValueError('Dataverse configuration required to create dataset reports.')
self.dataverse_api = dataverse_api
self.dataverse_database = dataverse_database
# Ensure trailing slash on work_dir
if config['work_dir'][len(config['work_dir'])-1] != '/':
config['work_dir'] = config['work_dir'] + '/'
self.config = config
self.logger = logging.getLogger('dataverse-reports')
def report_datasets_recursive(self, dataverse_identifier):
# List of datasets
datasets = []
self.logger.info("Begin loading datasets for %s.", dataverse_identifier)
self.load_datasets_recursive(datasets, dataverse_identifier)
self.logger.info("Finished loading %s datasets for %s", str(len(datasets)), dataverse_identifier)
return datasets
def load_datasets_recursive(self, datasets={}, dataverse_identifier=None):
if dataverse_identifier is None:
self.logger.error("Dataverse identifier is required.")
return
self.logger.info("Loading dataverse: %s.", dataverse_identifier)
# Load dataverse
dataverse_response = self.dataverse_api.get_dataverse(identifier=dataverse_identifier)
response_json = dataverse_response.json()
if 'data' in response_json:
dataverse = response_json['data']
self.logger.info("Dataverse name: %s", dataverse['name'])
# Retrieve dvObjects for this dataverse
dataverse_contents = self.dataverse_api.get_dataverse_contents(identifier=dataverse_identifier)
self.logger.info('Total dvObjects in this dataverse: ' + str(len(dataverse_contents)))
for dvObject in dataverse_contents:
if dvObject['type'] == 'dataset':
# Add dataset to this dataverse
self.logger.info("Adding dataset %s to dataverse %s.", str(dvObject['id']), str(dataverse_identifier))
self.add_dataset(datasets, dataverse_identifier, dvObject['id'], dvObject['identifier'])
if dvObject['type'] == 'dataverse':
self.logger.info("Found new dataverse %s.", str(dvObject['id']))
self.load_datasets_recursive(datasets, dvObject['id'])
else:
            self.logger.warning("Dataverse was empty.")
def add_dataset(self, datasets, dataverse_identifier, dataset_id, dataset_identifier):
# Load dataset
self.logger.info("Dataset id: %s", dataset_id)
self.logger.info("Dataset identifier: %s", dataset_identifier)
dataset_response = self.dataverse_api.get_dataset(identifier=dataset_id)
response_json = dataset_response.json()
if 'data' in response_json:
dataset = response_json['data']
if 'latestVersion' in dataset:
latest_version = dataset['latestVersion']
metadata_blocks = latest_version['metadataBlocks']
# Flatten the latest_version information
for key, value in latest_version.items():
if key != 'metadataBlocks':
dataset[key] = value
# Flatten the nested citation fields information
citation = metadata_blocks['citation']
fields = citation['fields']
for item in fields:
self.logger.debug("Looking at field: %s.", item['typeName'])
valuesString = self.get_value_recursive('', item)
if valuesString.endswith(' ; '):
valuesString = valuesString[:-len(' ; ')]
typeName = item['typeName']
dataset[typeName] = valuesString
# Remove nested information
dataset.pop('latestVersion')
                if self.config['include_dataset_metrics']:
# Calculate previous month
last_month = self.get_last_month()
# Use Make Data Count endpoints to gather views and downloads statistics
dataset_metrics_options = ['viewsUnique', 'viewsMonth', 'viewsTotal', 'downloadsUnique', 'downloadsMonth', 'downloadsTotal']
for dataset_metrics_option in dataset_metrics_options:
self.logger.debug("Calling endpoint for dataset metric: " + dataset_metrics_option)
if dataset_metrics_option == 'viewsMonth':
dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option='viewsTotal',doi=dataset_identifier,date=last_month)
elif dataset_metrics_option == 'downloadsMonth':
dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option='downloadsTotal',doi=dataset_identifier,date=last_month)
else:
dataset_metrics_response = self.dataverse_api.get_dataset_metric(identifier=dataset_id,option=dataset_metrics_option,doi=dataset_identifier)
dataset_metrics_json = dataset_metrics_response.json()
if dataset_metrics_json['status'] == 'OK':
if dataset_metrics_option == 'viewsMonth':
if 'viewsTotal' in dataset_metrics_json['data']:
self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data']['viewsTotal']))
dataset[dataset_metrics_option] = dataset_metrics_json['data']['viewsTotal']
else:
self.logger.debug("Unable to find viewsTotal in response.")
elif dataset_metrics_option == 'downloadsMonth':
if 'downloadsTotal' in dataset_metrics_json['data']:
self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data']['downloadsTotal']))
dataset[dataset_metrics_option] = dataset_metrics_json['data']['downloadsTotal']
else:
self.logger.debug("Unable to find downloadsTotal in response.")
elif dataset_metrics_option in dataset_metrics_json['data']:
self.logger.info("MDC metric (" + dataset_metrics_option + "): " + str(dataset_metrics_json['data'][dataset_metrics_option]))
dataset[dataset_metrics_option] = dataset_metrics_json['data'][dataset_metrics_option]
else:
self.logger.error("Unable to find dataset metric in response.")
else:
self.logger.error("API call was unsuccessful.")
self.logger.error(dataset_metrics_json)
dataset[dataset_metrics_option] = 0
# Use dataverse_database to retrieve cumulative download count of file in this dataset
download_count = self.dataverse_database.get_download_count(dataset_id=dataset_id)
self.logger.info("Download count for dataset: %s", str(download_count))
dataset['downloadCount'] = download_count
if 'files' in dataset:
contentSize = 0
count_restricted = 0
files = dataset['files']
for file in files:
if 'dataFile' in file:
if file['restricted']:
count_restricted += 1
dataFile = file['dataFile']
filesize = int(dataFile['filesize'])
contentSize += filesize
                    self.logger.info('Total size (bytes) of all files in this dataset: %s', str(contentSize))
# Convert to megabytes for reports
dataset['contentSize (MB)'] = (contentSize/1048576)
dataset['totalFiles'] = len(files)
dataset['totalRestrictedFiles'] = count_restricted
# Retrieve dataverse to get alias
dataverse_response = self.dataverse_api.get_dataverse(identifier=dataverse_identifier)
response_json = dataverse_response.json()
dataverse = response_json['data']
self.logger.info("Adding dataset to dataverse with alias: %s", str(dataverse['alias']))
dataset['dataverse'] = dataverse['alias']
datasets.append(dataset)
else:
            self.logger.warning("Dataset was empty.")
def get_value_recursive(self, valuesString, field):
if not field['multiple']:
if field['typeClass'] == 'primitive':
valuesString += field['value']
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
elif field['typeClass'] == 'controlledVocabulary':
subValue = ''
for value in field['value']:
subValue += value + ', '
subValue = subValue[:-2]
valuesString += subValue
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
elif field['typeClass'] == 'compound':
subValue = ''
            if isinstance(field['value'], list):
                for value in field['value']:
                    if isinstance(value, str):
                        # Plain strings carry no nested fields to unpack.
                        self.logger.debug("Value: %s", value)
                        continue
                    for key, elements in value.items():
                        if not elements['multiple']:
                            subValue += elements['value']
                        else:
                            # Recurse into the nested field. (The original call
                            # passed three arguments to this two-argument
                            # method; this fix assumes 'elements' is itself a
                            # field dict.)
                            subValue = self.get_value_recursive(subValue, elements)
                        self.logger.debug("New subValue: %s", subValue)
                        subValue += " - "
                    valuesString += subValue + " ; "
            else:
                value = field['value']
                for key, elements in value.items():
                    if not elements['multiple']:
                        subValue += elements['value']
                    else:
                        # Same arity fix as above.
                        subValue = self.get_value_recursive(subValue, elements)
                    self.logger.debug("New subValue: %s", subValue)
                    subValue += " - "
                valuesString += subValue + " ; "
if valuesString.endswith(' ; '):
valuesString = valuesString[:-len(' ; ')]
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
        else:
            self.logger.debug("Unrecognized typeClass: %s", field['typeClass'])
            return valuesString
else:
if field['typeClass'] == 'primitive':
subValue = ''
for value in field['value']:
subValue += value + ', '
subValue = subValue[:-2]
valuesString += subValue
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
elif field['typeClass'] == 'controlledVocabulary':
subValue = ''
for value in field['value']:
subValue += value + ', '
subValue = subValue[:-2]
valuesString += subValue
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
elif field['typeClass'] == 'compound':
subValue = ''
for value in field['value']:
for key, elements in value.items():
self.logger.debug("Key: %s", key)
                        if not elements['multiple']:
                            subValue += elements['value']
                        else:
                            # Same arity fix as in the non-multiple branch.
                            subValue = self.get_value_recursive(subValue, elements)
                        self.logger.debug("New subValue: %s", subValue)
subValue += " - "
subValue = subValue[:-3]
valuesString += subValue + " ; "
self.logger.debug("New value of valuesString: %s", str(valuesString))
return valuesString
            else:
                self.logger.debug("Unrecognized typeClass: %s", field['typeClass'])
                return valuesString
def get_last_month(self):
now = datetime.datetime.now()
previous = now.date().replace(day=1) - datetime.timedelta(days=1)
last_month = previous.strftime("%Y-%m")
return last_month
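# Example usage (a minimal sketch; the api/database/config objects and the
# 'root' identifier are assumptions about the surrounding application):
#
#   reports = DatasetReports(dataverse_api=api, dataverse_database=db,
#                            config=config)
#   datasets = reports.report_datasets_recursive('root')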
avg_line_length: 51.18251 | max_line_length: 174 | alphanum_fraction: 0.553079
count_classes: 13,429 | score_classes: 0.997623 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 2,897 | score_documentation: 0.215214
hexsha: 02f1e521d0c60cd1bdde651eb786414631bc4c55 | size: 1,377 | ext: py | lang: Python
path: classifier.py | repo: hemu243/focus-web-crawler | head_hexsha: 8e882315d947f04b207ec76a64fa952f18105d73 | licenses: ["MIT"]
stars: 2 (2020-02-03T02:31:09.000Z to 2021-02-03T11:54:44.000Z) | issues: null | forks: null
#
from abc import ABCMeta
import metapy
class WebClassifier(metaclass=ABCMeta):
    """
    Abstract base classifier: subclasses must implement getClassifier()
    and score().
    """
def __init__(self, configFile):
"""
Initialized basic of classifier like fwd index, index
:param configFile:
"""
#metapy.log_to_stderr()
# Loading indexes
self.invertedIndex = metapy.index.make_inverted_index(configFile)
self.fwdIndex = metapy.index.make_forward_index(configFile)
# Define multi class data set
self.multiClassDataset = metapy.classify.MulticlassDataset(self.fwdIndex)
self.classifier = self.getClassifier(self.multiClassDataset, self.fwdIndex, self.invertedIndex)
    def getClassifier(self, training, fwdIndex, invertedIndex):
        """
        Must be implemented by the subclass.
        :param training: training set
        :param fwdIndex: forward index created by metapy
        :param invertedIndex: inverted index created by metapy
        :return: classifier instance
        """
        raise NotImplementedError("Must be implemented by the subclass")
    def score(self, link_text, page_title, body_text):
        """
        Must be implemented by the subclass.
        :param link_text: URL link text (anchor text)
        :param page_title: page title
        :param body_text: body text
        :return: float score value between 0 and 1
        """
        raise NotImplementedError("Must be implemented by the subclass")
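# Example subclass (a minimal sketch; the Naive Bayes choice and the constant
# score are illustrative, not part of the original module):
#
#   class NaiveBayesWebClassifier(WebClassifier):
#       def getClassifier(self, training, fwdIndex, invertedIndex):
#           return metapy.classify.NaiveBayes(training)
#       def score(self, link_text, page_title, body_text):
#           return 1.0  # placeholder relevance score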
avg_line_length: 30.6 | max_line_length: 97 | alphanum_fraction: 0.760349
count_classes: 1,334 | score_classes: 0.968773 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 759 | score_documentation: 0.551198
hexsha: 02f2dc9948709df77cd05687fd7477b4be25fe0c | size: 609 | ext: py | lang: Python
path: backend/tests/access/test_access_event_publish.py | head_hexsha: b65f28eb6fdeafa265796b6190a4264a5eac54ce | licenses: ["MIT"]
stars: null (repo fjacob21/mididecweb) | issues: 88 (repo fjacob21/mididecweb, 2016-11-12T14:54:38.000Z to 2018-08-02T00:25:07.000Z) | forks: null (repo mididecouverte/mididecweb)
from src.access import EventPublishAccess
from generate_access_data import generate_access_data
def test_publish_event_access():
sessions = generate_access_data()
event = sessions['user'].events.get('test')
useraccess = EventPublishAccess(sessions['user'], event)
manageraccess = EventPublishAccess(sessions['manager'], event)
superaccess = EventPublishAccess(sessions['super'], event)
noneaccess = EventPublishAccess(sessions['none'], event)
assert not useraccess.granted()
assert manageraccess.granted()
assert superaccess.granted()
assert not noneaccess.granted()
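# The access rule under test: only manager- and super-level sessions may
# publish an event; a plain user session and a sessionless caller are denied.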
avg_line_length: 38.0625 | max_line_length: 66 | alphanum_fraction: 0.760263
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0
count_documentation: 40 | score_documentation: 0.065681
hexsha: 02f3815c21333fd777c5f7c2b3c081090f107885 | size: 4,296 | ext: py | lang: Python
path: aiorabbitmq_admin/base.py | repo: miili/aiorabbitmq-admin | head_hexsha: 38df67a77cd029429af9add12ead3152f58ed748 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import json
import aiohttp
from copy import deepcopy
class Resource(object):
"""
A base class for API resources
"""
# """List of allowed methods, allowed values are
# ```['GET', 'PUT', 'POST', 'DELETE']``"""
# ALLOWED_METHODS = []
def __init__(self, url, auth):
"""
:param url: The RabbitMQ API url to connect to. This should include the
protocol and port number.
:type url: str
:param auth: The authentication to pass to the request. See
`aiohttp' authentication`_ documentation. For the simplest case of
a username and password, simply pass in a tuple of
``('username', 'password')``
:type auth: Requests auth
.. _Requests' authentication: https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.BasicAuth
""" # noqa
self.url = url.rstrip('/')
if isinstance(auth, tuple):
auth = aiohttp.BasicAuth(*auth)
self.auth = auth
self.headers = {
'Content-type': 'application/json',
}
self.session_args = {
'raise_for_status': True
}
async def _api_get(self, url, **kwargs):
"""
A convenience wrapper for _get. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
return await self._get(**kwargs)
async def _get(self, *args, **kwargs):
"""
A wrapper for getting things
:returns: The response of your get
:rtype: dict
"""
async with aiohttp.ClientSession(**self.session_args) as session:
async with session.get(*args, **kwargs) as resp:
return await resp.json()
async def _api_put(self, url, **kwargs):
"""
A convenience wrapper for _put. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
await self._put(**kwargs)
    async def _put(self, *args, **kwargs):
        """
        A wrapper for putting things. JSON-encodes the 'data' parameter if
        present; raises on HTTP error and returns nothing.
        """
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data']).encode()
async with aiohttp.ClientSession(**self.session_args) as session:
await session.put(*args, **kwargs)
async def _api_post(self, url, **kwargs):
"""
A convenience wrapper for _post. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
await self._post(**kwargs)
    async def _post(self, *args, **kwargs):
        """
        A wrapper for posting things. JSON-encodes the 'data' parameter if
        present; raises on HTTP error and returns nothing.
        """
if 'data' in kwargs:
kwargs['data'] = json.dumps(kwargs['data']).encode()
async with aiohttp.ClientSession(**self.session_args) as session:
await session.post(*args, **kwargs)
async def _api_delete(self, url, **kwargs):
"""
A convenience wrapper for _delete. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
await self._delete(**kwargs)
    async def _delete(self, *args, **kwargs):
        """
        A wrapper for deleting things; raises on HTTP error and returns
        nothing.
        """
async with aiohttp.ClientSession(**self.session_args) as session:
await session.delete(*args, **kwargs)
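# Example usage (a minimal sketch; the URL, credentials, and '/overview'
# endpoint are illustrative -- adjust to your RabbitMQ management API):
#
#   import asyncio
#
#   async def main():
#       api = Resource('http://localhost:15672/api', auth=('guest', 'guest'))
#       print(await api._api_get('/overview'))
#
#   asyncio.run(main())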
avg_line_length: 30.253521 | max_line_length: 112 | alphanum_fraction: 0.570531
count_classes: 4,240 | score_classes: 0.986965 | count_generators: 0 | score_generators: 0
count_decorators: 0 | score_decorators: 0 | count_async_functions: 3,079 | score_async_functions: 0.716713
count_documentation: 1,922 | score_documentation: 0.447393
hexsha: 02f390bbfb313d944ca9d6c202d4c1f28b3a192e | size: 115,464 | ext: py | lang: Python
path: from_3b1b/old/highD.py | repo: tigerking/manim2 | head_hexsha: 93e8957e433b8e59acb5a5213a4074ee0125b823 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from manim2.imports import *
##########
#force_skipping
#revert_to_original_skipping_status
##########
class Slider(NumberLine):
CONFIG = {
"color" : WHITE,
"x_min" : -1,
"x_max" : 1,
"unit_size" : 2,
"center_value" : 0,
"number_scale_val" : 0.75,
"label_scale_val" : 1,
"numbers_with_elongated_ticks" : [],
"line_to_number_vect" : LEFT,
"line_to_number_buff" : MED_LARGE_BUFF,
"dial_radius" : 0.1,
"dial_color" : YELLOW,
"include_real_estate_ticks" : True,
}
def __init__(self, **kwargs):
NumberLine.__init__(self, **kwargs)
self.rotate(np.pi/2)
self.init_dial()
if self.include_real_estate_ticks:
self.add_real_estate_ticks()
def init_dial(self):
dial = Dot(
radius = self.dial_radius,
color = self.dial_color,
)
dial.move_to(self.number_to_point(self.center_value))
re_dial = dial.copy()
re_dial.set_fill(opacity = 0)
self.add(dial, re_dial)
self.dial = dial
self.re_dial = re_dial
self.last_sign = -1
def add_label(self, tex):
label = TexMobject(tex)
label.scale(self.label_scale_val)
label.move_to(self.get_top())
label.shift(MED_LARGE_BUFF*UP)
self.add(label)
self.label = label
def add_real_estate_ticks(
self,
re_per_tick = 0.05,
colors = [BLUE, RED],
max_real_estate = 1,
):
self.real_estate_ticks = VGroup(*[
self.get_tick(self.center_value + u*np.sqrt(x + re_per_tick))
for x in np.arange(0, max_real_estate, re_per_tick)
for u in [-1, 1]
])
self.real_estate_ticks.set_stroke(width = 3)
self.real_estate_ticks.set_color_by_gradient(*colors)
self.add(self.real_estate_ticks)
self.add(self.dial)
return self.real_estate_ticks
def set_value(self, x):
re = (x - self.center_value)**2
for dial, val in (self.dial, x), (self.re_dial, re):
dial.move_to(self.number_to_point(val))
return self
def set_center_value(self, x):
self.center_value = x
return self
def change_real_estate(self, d_re):
left_over = 0
curr_re = self.get_real_estate()
if d_re < -curr_re:
left_over = d_re + curr_re
d_re = -curr_re
self.set_real_estate(curr_re + d_re)
return left_over
def set_real_estate(self, target_re):
if target_re < 0:
raise Exception("Cannot set real estate below 0")
self.re_dial.move_to(self.number_to_point(target_re))
self.update_dial_by_re_dial()
return self
def get_dial_supplement_animation(self):
return UpdateFromFunc(self.dial, self.update_dial_by_re_dial)
def update_dial_by_re_dial(self, dial = None):
dial = dial or self.dial
re = self.get_real_estate()
sign = np.sign(self.get_value() - self.center_value)
if sign == 0:
sign = -self.last_sign
self.last_sign *= -1
dial.move_to(self.number_to_point(
self.center_value + sign*np.sqrt(abs(re))
))
return dial
def get_value(self):
return self.point_to_number(self.dial.get_center())
def get_real_estate(self):
return self.point_to_number(self.re_dial.get_center())
def copy(self):
return self.deepcopy()
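# Note on Slider: besides its visible value x, each slider tracks its "real
# estate" (x - center_value)**2 via an invisible re_dial.  The ticks from
# add_real_estate_ticks sit at center_value +/- sqrt(k * re_per_tick), so
# successive ticks mark equal increments of squared distance, not of distance.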
class SliderScene(Scene):
CONFIG = {
"n_sliders" : 4,
"slider_spacing" : MED_LARGE_BUFF,
"slider_config" : {},
"center_point" : None,
"total_real_estate" : 1,
"ambiently_change_sliders" : False,
"ambient_velocity_magnitude" : 1.0,
"ambient_acceleration_magnitude" : 1.0,
"ambient_jerk_magnitude" : 1.0/2,
}
def setup(self):
if self.center_point is None:
self.center_point = np.zeros(self.n_sliders)
sliders = VGroup(*[
Slider(center_value = cv, **self.slider_config)
for cv in self.center_point
])
sliders.arrange(RIGHT, buff = self.slider_spacing)
sliders[0].add_numbers()
sliders[0].set_value(
self.center_point[0] + np.sqrt(self.total_real_estate)
)
self.sliders = sliders
self.add_labels_to_sliders()
self.add(sliders)
def add_labels_to_sliders(self):
if len(self.sliders) <= 4:
for slider, char in zip(self.sliders, "xyzw"):
slider.add_label(char)
for slider in self.sliders[1:]:
slider.label.align_to(self.sliders[0].label, UP)
else:
for i, slider in enumerate(self.sliders):
slider.add_label("x_{%d}"%(i+1))
return self
def reset_dials(self, values, run_time = 1, **kwargs):
target_vector = self.get_target_vect_from_subset_of_values(values, **kwargs)
radius = np.sqrt(self.total_real_estate)
def update_sliders(sliders):
curr_vect = self.get_vector()
curr_vect -= self.center_point
curr_vect *= radius/get_norm(curr_vect)
curr_vect += self.center_point
self.set_to_vector(curr_vect)
return sliders
self.play(*[
ApplyMethod(slider.set_value, value)
for value, slider in zip(target_vector, self.sliders)
] + [
UpdateFromFunc(self.sliders, update_sliders)
], run_time = run_time)
def get_target_vect_from_subset_of_values(self, values, fixed_indices = None):
if fixed_indices is None:
fixed_indices = []
curr_vector = self.get_vector()
target_vector = np.array(self.center_point, dtype = 'float')
unspecified_vector = np.array(self.center_point, dtype = 'float')
unspecified_indices = []
for i in range(len(curr_vector)):
if i < len(values) and values[i] is not None:
target_vector[i] = values[i]
else:
unspecified_indices.append(i)
unspecified_vector[i] = curr_vector[i]
used_re = get_norm(target_vector - self.center_point)**2
left_over_re = self.total_real_estate - used_re
if left_over_re < -0.001:
raise Exception("Overspecified reset")
uv_norm = get_norm(unspecified_vector - self.center_point)
if uv_norm == 0 and left_over_re > 0:
unspecified_vector[unspecified_indices] = 1
uv_norm = get_norm(unspecified_vector - self.center_point)
if uv_norm > 0:
unspecified_vector -= self.center_point
unspecified_vector *= np.sqrt(left_over_re)/uv_norm
unspecified_vector += self.center_point
return target_vector + unspecified_vector - self.center_point
def set_to_vector(self, vect):
assert len(vect) == len(self.sliders)
for slider, value in zip(self.sliders, vect):
slider.set_value(value)
def get_vector(self):
return np.array([slider.get_value() for slider in self.sliders])
def get_center_point(self):
return np.array([slider.center_value for slider in self.sliders])
def set_center_point(self, new_center_point):
self.center_point = np.array(new_center_point)
for x, slider in zip(new_center_point, self.sliders):
slider.set_center_value(x)
return self
def get_current_total_real_estate(self):
return sum([
slider.get_real_estate()
for slider in self.sliders
])
def get_all_dial_supplement_animations(self):
return [
slider.get_dial_supplement_animation()
for slider in self.sliders
]
def initialize_ambiant_slider_movement(self):
self.ambiently_change_sliders = True
self.ambient_change_end_time = np.inf
self.ambient_change_time = 0
self.ambient_velocity, self.ambient_acceleration, self.ambient_jerk = [
self.get_random_vector(magnitude)
for magnitude in [
self.ambient_velocity_magnitude,
self.ambient_acceleration_magnitude,
self.ambient_jerk_magnitude,
]
]
##Ensure counterclockwise rotations in 2D
if len(self.ambient_velocity) == 2:
cross = np.cross(self.get_vector(), self.ambient_velocity)
if cross < 0:
self.ambient_velocity *= -1
self.add_foreground_mobjects(self.sliders)
def wind_down_ambient_movement(self, time = 1, wait = True):
self.ambient_change_end_time = self.ambient_change_time + time
if wait:
self.wait(time)
if self.skip_animations:
self.ambient_change_time += time
def ambient_slider_movement_update(self):
#Set velocity_magnitude based on start up or wind down
velocity_magnitude = float(self.ambient_velocity_magnitude)
if self.ambient_change_time <= 1:
velocity_magnitude *= smooth(self.ambient_change_time)
time_until_end = self.ambient_change_end_time - self.ambient_change_time
if time_until_end <= 1:
velocity_magnitude *= smooth(time_until_end)
if time_until_end < 0:
self.ambiently_change_sliders = False
return
center_point = self.get_center_point()
target_vector = self.get_vector() - center_point
if get_norm(target_vector) == 0:
return
vectors_and_magnitudes = [
(self.ambient_acceleration, self.ambient_acceleration_magnitude),
(self.ambient_velocity, velocity_magnitude),
(target_vector, np.sqrt(self.total_real_estate)),
]
jerk = self.get_random_vector(self.ambient_jerk_magnitude)
deriv = jerk
for vect, mag in vectors_and_magnitudes:
vect += self.frame_duration*deriv
if vect is self.ambient_velocity:
unit_r_vect = target_vector / get_norm(target_vector)
vect -= np.dot(vect, unit_r_vect)*unit_r_vect
vect *= mag/get_norm(vect)
deriv = vect
self.set_to_vector(target_vector + center_point)
self.ambient_change_time += self.frame_duration
def get_random_vector(self, magnitude):
result = 2*np.random.random(len(self.sliders)) - 1
result *= magnitude / get_norm(result)
return result
def update_frame(self, *args, **kwargs):
if self.ambiently_change_sliders:
self.ambient_slider_movement_update()
Scene.update_frame(self, *args, **kwargs)
def wait(self, time = 1):
if self.ambiently_change_sliders:
self.play(Animation(self.sliders, run_time = time))
else:
            Scene.wait(self, time)
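# Note on the ambient movement: a random jerk vector is integrated into
# acceleration, then velocity, with the velocity projected tangent to the
# sphere of radius sqrt(total_real_estate), so the configuration wanders
# smoothly while staying on the sphere.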
##########
class MathIsATease(Scene):
def construct(self):
randy = Randolph()
lashes = VGroup()
for eye in randy.eyes:
for angle in np.linspace(-np.pi/3, np.pi/3, 12):
lash = Line(ORIGIN, RIGHT)
lash.set_stroke(DARK_GREY, 2)
lash.set_width(0.27)
lash.next_to(ORIGIN, RIGHT, buff = 0)
lash.rotate(angle + np.pi/2)
lash.shift(eye.get_center())
lashes.add(lash)
lashes.do_in_place(lashes.stretch, 0.8, 1)
lashes.shift(0.04*DOWN)
fan = SVGMobject(
file_name = "fan",
fill_opacity = 1,
fill_color = YELLOW,
stroke_width = 2,
stroke_color = YELLOW,
height = 0.7,
)
VGroup(*fan[-12:]).set_fill(YELLOW_E)
fan.rotate(-np.pi/4)
fan.move_to(randy)
fan.shift(0.85*UP+0.25*LEFT)
self.add(randy)
self.play(
ShowCreation(lashes, lag_ratio = 0),
randy.change, "tease",
randy.look, OUT,
)
self.add_foreground_mobjects(fan)
eye_bottom_y = randy.eyes.get_bottom()[1]
self.play(
ApplyMethod(
lashes.apply_function,
lambda p : [p[0], eye_bottom_y, p[2]],
rate_func = Blink.CONFIG["rate_func"],
),
Blink(randy),
DrawBorderThenFill(fan),
)
self.play(
ApplyMethod(
lashes.apply_function,
lambda p : [p[0], eye_bottom_y, p[2]],
rate_func = Blink.CONFIG["rate_func"],
),
Blink(randy),
)
self.wait()
class TODODeterminants(TODOStub):
CONFIG = {
"message" : "Determinants clip"
}
class CircleToPairsOfPoints(Scene):
def construct(self):
plane = NumberPlane(written_coordinate_height = 0.3)
plane.scale(2)
plane.add_coordinates(y_vals = [-1, 1])
background_plane = plane.copy()
background_plane.set_color(GREY)
background_plane.fade()
circle = Circle(radius = 2, color = YELLOW)
x, y = [np.sqrt(2)/2]*2
dot = Dot(2*x*RIGHT + 2*y*UP, color = LIGHT_GREY)
equation = TexMobject("x", "^2", "+", "y", "^2", "=", "1")
equation.set_color_by_tex("x", GREEN)
equation.set_color_by_tex("y", RED)
equation.to_corner(UP+LEFT)
equation.add_background_rectangle()
coord_pair = TexMobject("(", "-%.02f"%x, ",", "-%.02f"%y, ")")
fixed_numbers = coord_pair.get_parts_by_tex("-")
fixed_numbers.set_fill(opacity = 0)
coord_pair.add_background_rectangle()
coord_pair.next_to(dot, UP+RIGHT, SMALL_BUFF)
numbers = VGroup(*[
DecimalNumber(val).replace(num, dim_to_match = 1)
for val, num in zip([x, y], fixed_numbers)
])
numbers[0].set_color(GREEN)
numbers[1].set_color(RED)
def get_update_func(i):
return lambda t : dot.get_center()[i]/2.0
self.add(background_plane, plane)
self.play(ShowCreation(circle))
self.play(
FadeIn(coord_pair),
Write(numbers, run_time = 1),
ShowCreation(dot),
)
self.play(
Write(equation),
*[
Transform(
number.copy(),
equation.get_parts_by_tex(tex),
remover = True
)
for tex, number in zip("xy", numbers)
]
)
self.play(FocusOn(dot, run_time = 1))
self.play(
Rotating(
dot, run_time = 7, in_place = False,
rate_func = smooth,
),
MaintainPositionRelativeTo(coord_pair, dot),
*[
ChangingDecimal(
num, get_update_func(i),
tracked_mobject = fixed_num
)
for num, i, fixed_num in zip(
numbers, (0, 1), fixed_numbers
)
]
)
self.wait()
######### Rotation equations ##########
rot_equation = TexMobject(
"\\Rightarrow"
"\\big(\\cos(\\theta)x - \\sin(\\theta)y\\big)^2 + ",
"\\big(\\sin(\\theta)x + \\cos(\\theta)y\\big)^2 = 1",
)
rot_equation.scale(0.9)
rot_equation.next_to(equation, RIGHT)
rot_equation.add_background_rectangle()
words = TextMobject("Rotational \\\\ symmetry")
words.next_to(ORIGIN, UP)
words.to_edge(RIGHT)
words.add_background_rectangle()
arrow = Arrow(
words.get_left(), rot_equation.get_bottom(),
path_arc = -np.pi/6
)
randy = Randolph(color = GREY_BROWN)
randy.to_corner(DOWN+LEFT)
self.play(
Write(rot_equation, run_time = 2),
FadeOut(coord_pair),
FadeOut(numbers),
FadeOut(dot),
FadeIn(randy)
)
self.play(randy.change, "confused", rot_equation)
self.play(Blink(randy))
self.play(
Write(words, run_time = 1),
ShowCreation(arrow),
randy.look_at, words
)
plane.remove(*plane.coordinate_labels)
self.play(
Rotate(
plane, np.pi/3,
run_time = 4,
rate_func = there_and_back
),
Animation(equation),
Animation(rot_equation),
Animation(words),
Animation(arrow),
Animation(circle),
randy.change, "hooray"
)
self.wait()
class GreatSourceOfMaterial(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"It's a great source \\\\ of material.",
target_mode = "hooray"
)
self.change_student_modes(*["happy"]*3)
self.wait(3)
class CirclesSpheresSumsSquares(ExternallyAnimatedScene):
pass
class BackAndForth(Scene):
def construct(self):
analytic = TextMobject("Analytic")
analytic.shift(FRAME_X_RADIUS*LEFT/2)
analytic.to_edge(UP, buff = MED_SMALL_BUFF)
geometric = TextMobject("Geometric")
geometric.shift(FRAME_X_RADIUS*RIGHT/2)
geometric.to_edge(UP, buff = MED_SMALL_BUFF)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
h_line.to_edge(UP, LARGE_BUFF)
v_line = Line(UP, DOWN).scale(FRAME_Y_RADIUS)
self.add(analytic, geometric, h_line, v_line)
pair = TexMobject("(", "x", ",", "y", ")")
pair.shift(FRAME_X_RADIUS*LEFT/2 + FRAME_Y_RADIUS*UP/3)
triplet = TexMobject("(", "x", ",", "y", ",", "z", ")")
triplet.shift(FRAME_X_RADIUS*LEFT/2 + FRAME_Y_RADIUS*DOWN/2)
for mob in pair, triplet:
arrow = DoubleArrow(LEFT, RIGHT)
arrow.move_to(mob)
arrow.shift(2*RIGHT)
mob.arrow = arrow
circle_eq = TexMobject("x", "^2", "+", "y", "^2", "=", "1")
circle_eq.move_to(pair)
sphere_eq = TexMobject("x", "^2", "+", "y", "^2", "+", "z", "^2", "=", "1")
sphere_eq.move_to(triplet)
plane = NumberPlane(x_unit_size = 2, y_unit_size = 2)
circle = Circle(radius = 2, color = YELLOW)
plane_group = VGroup(plane, circle)
plane_group.scale(0.4)
plane_group.next_to(h_line, DOWN, SMALL_BUFF)
plane_group.shift(FRAME_X_RADIUS*RIGHT/2)
self.play(Write(pair))
# self.play(ShowCreation(pair.arrow))
self.play(ShowCreation(plane, run_time = 3))
self.play(Write(triplet))
# self.play(ShowCreation(triplet.arrow))
self.wait(3)
for tup, eq, to_draw in (pair, circle_eq, circle), (triplet, sphere_eq, VMobject()):
for mob in tup, eq:
mob.xyz = VGroup(*[sm for sm in map(mob.get_part_by_tex, "xyz") if sm is not None])
self.play(
ReplacementTransform(tup.xyz, eq.xyz),
FadeOut(VGroup(*[sm for sm in tup if sm not in tup.xyz])),
)
self.play(
Write(VGroup(*[sm for sm in eq if sm not in eq.xyz])),
ShowCreation(to_draw)
)
self.wait(3)
class SphereForming(ExternallyAnimatedScene):
pass
class PreviousVideos(Scene):
def construct(self):
titles = VGroup(*list(map(TextMobject, [
"Pi hiding in prime regularities",
"Visualizing all possible pythagorean triples",
"Borsuk-Ulam theorem",
])))
titles.to_edge(UP, buff = MED_SMALL_BUFF)
screen = ScreenRectangle(height = 6)
screen.next_to(titles, DOWN)
title = titles[0]
self.add(title, screen)
self.wait(2)
for new_title in titles[1:]:
self.play(Transform(title, new_title))
self.wait(2)
class TODOTease(TODOStub):
CONFIG = {
"message" : "Tease"
}
class AskAboutLongerLists(TeacherStudentsScene):
def construct(self):
question = TextMobject(
"What about \\\\",
"$(x_1, x_2, x_3, x_4)?$"
)
tup = question[1]
alt_tups = list(map(TextMobject, [
"$(x_1, x_2, x_3, x_4, x_5)?$",
"$(x_1, x_2, \\dots, x_{99}, x_{100})?$"
]))
self.student_says(question, run_time = 1)
self.wait()
for alt_tup in alt_tups:
alt_tup.move_to(tup)
self.play(Transform(tup, alt_tup))
self.wait()
self.wait()
self.play(
RemovePiCreatureBubble(self.students[1]),
self.teacher.change, "raise_right_hand"
)
self.change_student_modes(
*["confused"]*3,
look_at_arg = self.teacher.get_top() + 2*UP
)
self.play(self.teacher.look, UP)
self.wait(5)
self.student_says(
"I...don't see it.",
target_mode = "maybe",
student_index = 0
)
self.wait(3)
class FourDCubeRotation(ExternallyAnimatedScene):
pass
class HypersphereRotation(ExternallyAnimatedScene):
pass
class FourDSurfaceRotating(ExternallyAnimatedScene):
pass
class Professionals(PiCreatureScene):
def construct(self):
self.introduce_characters()
self.add_equation()
self.analogies()
def introduce_characters(self):
titles = VGroup(*list(map(TextMobject, [
"Mathematician",
"Computer scientist",
"Physicist",
])))
self.remove(*self.pi_creatures)
for title, pi in zip(titles, self.pi_creatures):
title.next_to(pi, DOWN)
self.play(
Animation(VectorizedPoint(pi.eyes.get_center())),
FadeIn(pi),
Write(title, run_time = 1),
)
self.wait()
def add_equation(self):
quaternion = TexMobject(
"\\frac{1}{2}", "+",
"0", "\\textbf{i}", "+",
"\\frac{\\sqrt{6}}{4}", "\\textbf{j}", "+",
"\\frac{\\sqrt{6}}{4}", "\\textbf{k}",
)
quaternion.scale(0.7)
quaternion.next_to(self.mathy, UP)
quaternion.set_color_by_tex_to_color_map({
"i" : RED,
"j" : GREEN,
"k" : BLUE,
})
array = TexMobject("[a_1, a_2, \\dots, a_{100}]")
array.next_to(self.compy, UP)
kets = TexMobject(
"\\alpha",
"|\\!\\uparrow\\rangle + ",
"\\beta",
"|\\!\\downarrow\\rangle"
)
kets.set_color_by_tex_to_color_map({
"\\alpha" : GREEN,
"\\beta" : RED,
})
kets.next_to(self.physy, UP)
terms = VGroup(quaternion, array, kets)
for term, pi in zip(terms, self.pi_creatures):
self.play(
Write(term, run_time = 1),
pi.change, "pondering", term
)
self.wait(2)
self.terms = terms
def analogies(self):
examples = VGroup()
plane = ComplexPlane(
x_radius = 2.5,
y_radius = 1.5,
)
plane.add_coordinates()
plane.add(Circle(color = YELLOW))
plane.scale(0.75)
examples.add(plane)
examples.add(Circle())
examples.arrange(RIGHT, buff = 2)
examples.to_edge(UP, buff = LARGE_BUFF)
labels = VGroup(*list(map(TextMobject, ["2D", "3D"])))
title = TextMobject("Fly by instruments")
title.scale(1.5)
title.to_edge(UP)
for label, example in zip(labels, examples):
label.next_to(example, DOWN)
self.play(
ShowCreation(example),
Write(label, run_time = 1)
)
example.add(label)
self.wait()
self.wait()
self.play(
FadeOut(examples),
self.terms.shift, UP,
Write(title, run_time = 2)
)
self.play(*[
ApplyMethod(
pi.change, mode, self.terms.get_left(),
run_time = 2,
rate_func = squish_rate_func(smooth, a, a+0.5)
)
for pi, mode, a in zip(
self.pi_creatures,
["confused", "sassy", "erm"],
np.linspace(0, 0.5, len(self.pi_creatures))
)
])
self.wait()
self.play(Animation(self.terms[-1]))
self.wait(2)
######
def create_pi_creatures(self):
self.mathy = Mathematician()
self.physy = PiCreature(color = PINK)
self.compy = PiCreature(color = PURPLE)
pi_creatures = VGroup(self.mathy, self.compy, self.physy)
for pi in pi_creatures:
pi.scale(0.7)
pi_creatures.arrange(RIGHT, buff = 3)
pi_creatures.to_edge(DOWN, buff = LARGE_BUFF)
return pi_creatures
class OfferAHybrid(SliderScene):
CONFIG = {
"n_sliders" : 3,
}
def construct(self):
self.remove(self.sliders)
titles = self.get_titles()
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
h_line.next_to(titles, DOWN)
v_lines = VGroup(*[
Line(UP, DOWN).scale(FRAME_Y_RADIUS)
for x in range(2)
])
v_lines.generate_target()
for line, vect in zip(v_lines.target, [LEFT, RIGHT]):
line.shift(vect*FRAME_X_RADIUS/3)
equation = TexMobject("x^2 + y^2 + z^2 = 1")
equation.generate_target()
equation.shift(FRAME_X_RADIUS*LEFT/2)
equation.target.shift(FRAME_WIDTH*LEFT/3)
self.add(titles, h_line, v_lines, equation)
self.wait()
self.play(*list(map(MoveToTarget, [titles, v_lines, equation])))
self.play(Write(self.sliders, run_time = 1))
self.initialize_ambiant_slider_movement()
self.wait(10)
self.wind_down_ambient_movement()
self.wait()
def get_titles(self):
titles = VGroup(*list(map(TextMobject, [
"Analytic", "Hybrid", "Geometric"
])))
titles.to_edge(UP)
titles[1].set_color(BLUE)
titles.generate_target()
titles[1].scale_in_place(0.001)
titles[0].shift(FRAME_X_RADIUS*LEFT/2)
titles.target[0].shift(FRAME_WIDTH*LEFT/3)
titles[2].shift(FRAME_X_RADIUS*RIGHT/2)
titles.target[2].shift(FRAME_WIDTH*RIGHT/3)
return titles
class TODOBoxExample(TODOStub):
CONFIG = {
"message" : "Box Example"
}
class RotatingSphereWithWanderingPoint(ExternallyAnimatedScene):
pass
class DismissProjection(PiCreatureScene):
CONFIG = {
"screen_rect_color" : WHITE,
"example_vect" : np.array([0.52, 0.26, 0.53, 0.60]),
}
def construct(self):
self.remove(self.pi_creature)
self.show_all_spheres()
self.discuss_4d_sphere_definition()
self.talk_through_animation()
self.transition_to_next_scene()
def show_all_spheres(self):
equations = VGroup(*list(map(TexMobject, [
"x^2 + y^2 = 1",
"x^2 + y^2 + z^2 = 1",
"x^2 + y^2 + z^2 + w^2 = 1",
])))
colors = [YELLOW, GREEN, BLUE]
for equation, edge, color in zip(equations, [LEFT, ORIGIN, RIGHT], colors):
equation.set_color(color)
equation.shift(3*UP)
equation.to_edge(edge)
equations[1].shift(LEFT)
spheres = VGroup(
self.get_circle(equations[0]),
self.get_sphere_screen(equations[1], DOWN),
self.get_sphere_screen(equations[2], DOWN),
)
for equation, sphere in zip(equations, spheres):
self.play(
Write(equation),
LaggedStartMap(ShowCreation, sphere),
)
self.wait()
self.equations = equations
self.spheres = spheres
def get_circle(self, equation):
result = VGroup(
NumberPlane(
x_radius = 2.5,
y_radius = 2,
).fade(0.4),
Circle(color = YELLOW, radius = 1),
)
result.scale(0.7)
result.next_to(equation, DOWN)
return result
def get_sphere_screen(self, equation, vect):
square = Rectangle()
square.set_width(equation.get_width())
square.stretch_to_fit_height(3)
square.next_to(equation, vect)
square.set_color(self.screen_rect_color)
return square
def discuss_4d_sphere_definition(self):
sphere = self.spheres[-1]
equation = self.equations[-1]
sphere_words = TextMobject("``4-dimensional sphere''")
sphere_words.next_to(sphere, DOWN+LEFT, buff = LARGE_BUFF)
arrow = Arrow(
sphere_words.get_right(), sphere.get_bottom(),
path_arc = np.pi/3,
color = BLUE
)
descriptor = TexMobject(
"\\text{Just lists of numbers like }",
"(%.02f \\,, %.02f \\,, %.02f \\,, %.02f \\,)"%tuple(self.example_vect)
)
descriptor[1].set_color(BLUE)
descriptor.next_to(sphere_words, DOWN)
dot = Dot(descriptor[1].get_top())
dot.set_fill(WHITE, opacity = 0.75)
self.play(
Write(sphere_words),
ShowCreation(
arrow,
rate_func = squish_rate_func(smooth, 0.5, 1)
),
run_time = 3,
)
self.wait()
self.play(Write(descriptor, run_time = 2))
self.wait()
self.play(
dot.move_to, equation.get_left(),
dot.set_fill, None, 0,
path_arc = -np.pi/12
)
self.wait(2)
self.sphere_words = sphere_words
self.sphere_arrow = arrow
self.descriptor = descriptor
def talk_through_animation(self):
sphere = self.spheres[-1]
morty = self.pi_creature
alt_dims = VGroup(*list(map(TextMobject, ["5D", "6D", "7D"])))
alt_dims.next_to(morty.eyes, UP, SMALL_BUFF)
alt_dim = alt_dims[0]
self.play(FadeIn(morty))
self.play(morty.change, "raise_right_hand", sphere)
self.wait(3)
self.play(morty.change, "confused", sphere)
self.wait(3)
self.play(
morty.change, "erm", alt_dims,
FadeIn(alt_dim)
)
for new_alt_dim in alt_dims[1:]:
self.wait()
self.play(Transform(alt_dim, new_alt_dim))
self.wait()
self.play(morty.change, "concerned_musician")
self.play(FadeOut(alt_dim))
self.wait()
self.play(morty.change, "angry", sphere)
self.wait(2)
def transition_to_next_scene(self):
equation = self.equations[-1]
self.equations.remove(equation)
tup = self.descriptor[1]
self.descriptor.remove(tup)
equation.generate_target()
equation.target.center().to_edge(UP)
tup.generate_target()
tup.target.next_to(equation.target, DOWN)
tup.target.set_color(WHITE)
self.play(LaggedStartMap(FadeOut, VGroup(*[
self.equations, self.spheres,
self.sphere_words, self.sphere_arrow,
self.descriptor,
self.pi_creature
])))
self.play(*list(map(MoveToTarget, [equation, tup])))
self.wait()
###
def create_pi_creature(self):
return Mortimer().scale(0.8).to_corner(DOWN+RIGHT)
class RotatingSphere(ExternallyAnimatedScene):
pass
class Introduce4DSliders(SliderScene):
CONFIG = {
"slider_config" : {
"include_real_estate_ticks" : False,
"numbers_with_elongated_ticks" : [-1, 0, 1],
"tick_frequency" : 0.25,
"tick_size" : 0.05,
"dial_color" : YELLOW,
},
"slider_spacing" : LARGE_BUFF,
}
def construct(self):
self.match_last_scene()
self.introduce_sliders()
self.ask_about_constraint()
def match_last_scene(self):
self.start_vect = DismissProjection.CONFIG["example_vect"]
self.remove(self.sliders)
equation = TexMobject("x^2 + y^2 + z^2 + w^2 = 1")
x, y, z, w = self.start_vect
tup = TexMobject(
"(", "%.02f \\,"%x,
",", "%.02f \\,"%y,
",", "%.02f \\,"%z,
",", "%.02f \\,"%w, ")"
)
equation.center().to_edge(UP)
equation.set_color(BLUE)
tup.next_to(equation, DOWN)
self.sliders.next_to(tup, DOWN)
self.sliders.shift(0.8*LEFT)
self.add(equation, tup)
self.wait()
self.equation = equation
self.tup = tup
def introduce_sliders(self):
self.set_to_vector(self.start_vect)
numbers = self.tup.get_parts_by_tex(".")
self.tup.remove(*numbers)
dials = VGroup(*[slider.dial for slider in self.sliders])
dial_copies = dials.copy()
dials.set_fill(opacity = 0)
self.play(LaggedStartMap(FadeIn, self.sliders))
self.play(*[
Transform(
num, dial,
run_time = 3,
rate_func = squish_rate_func(smooth, a, a+0.5),
remover = True
)
for num, dial, a in zip(
numbers, dial_copies,
np.linspace(0, 0.5, len(numbers))
)
])
dials.set_fill(opacity = 1)
self.initialize_ambiant_slider_movement()
self.play(FadeOut(self.tup))
self.wait(10)
def ask_about_constraint(self):
equation = self.equation
rect = SurroundingRectangle(equation, color = GREEN)
randy = Randolph().scale(0.5)
randy.next_to(rect, DOWN+LEFT, LARGE_BUFF)
self.play(ShowCreation(rect))
self.play(FadeIn(randy))
self.play(randy.change, "pondering", rect)
self.wait()
for mob in self.sliders, rect:
self.play(randy.look_at, mob)
self.play(Blink(randy))
self.wait()
self.wait()
class TwoDimensionalCase(Introduce4DSliders):
CONFIG = {
"n_sliders" : 2,
}
def setup(self):
SliderScene.setup(self)
self.sliders.shift(RIGHT)
for number in self.sliders[0].numbers:
value = int(number.get_tex_string())
number.move_to(center_of_mass([
slider.number_to_point(value)
for slider in self.sliders
]))
plane = NumberPlane(
x_radius = 2.5,
y_radius = 2.5,
)
plane.fade(0.25)
plane.axes.set_color(GREY)
plane.add_coordinates()
plane.to_edge(LEFT)
origin = plane.coords_to_point(0, 0)
circle = Circle(radius = 1, color = WHITE)
circle.move_to(plane.coords_to_point(*self.center_point))
dot = Dot(color = YELLOW)
dot.move_to(plane.coords_to_point(1, 0))
equation = TexMobject("x^2 + y^2 = 1")
equation.to_corner(UP + RIGHT)
self.add(plane, circle, dot, equation)
self.add_foreground_mobjects(dot)
self.plane = plane
self.circle = circle
self.dot = dot
self.equation = equation
def construct(self):
self.let_values_wander()
self.introduce_real_estate()
self.let_values_wander(6)
self.comment_on_cheap_vs_expensive_real_estate()
self.nudge_x_from_one_example()
self.note_circle_steepness()
self.add_tick_marks()
self.write_distance_squared()
def let_values_wander(self, total_time = 5):
self.initialize_ambiant_slider_movement()
self.wait(total_time - 1)
self.wind_down_ambient_movement()
def introduce_real_estate(self):
x_squared_mob = VGroup(*self.equation[:2])
y_squared_mob = VGroup(*self.equation[3:5])
x_rect = SurroundingRectangle(x_squared_mob)
y_rect = SurroundingRectangle(y_squared_mob)
rects = VGroup(x_rect, y_rect)
decimals = VGroup(*[
DecimalNumber(num**2)
for num in self.get_vector()
])
decimals.arrange(RIGHT, buff = LARGE_BUFF)
decimals.next_to(rects, DOWN, LARGE_BUFF)
real_estate_word = TextMobject("``Real estate''")
real_estate_word.next_to(decimals, DOWN, MED_LARGE_BUFF)
self.play(FadeIn(real_estate_word))
colors = GREEN, RED
arrows = VGroup()
for rect, decimal, color in zip(rects, decimals, colors):
rect.set_color(color)
decimal.set_color(color)
arrow = Arrow(
rect.get_bottom()+SMALL_BUFF*UP, decimal.get_top(),
tip_length = 0.2,
)
arrow.set_color(color)
arrows.add(arrow)
self.play(ShowCreation(rect))
self.play(
ShowCreation(arrow),
Write(decimal)
)
self.wait()
sliders = self.sliders
def create_update_func(i):
return lambda alpha : sliders[i].get_real_estate()
self.add_foreground_mobjects(decimals)
self.decimals = decimals
self.decimal_update_anims = [
ChangingDecimal(decimal, create_update_func(i))
for i, decimal in enumerate(decimals)
]
self.real_estate_word = real_estate_word
def comment_on_cheap_vs_expensive_real_estate(self):
blue_rects = VGroup()
red_rects = VGroup()
for slider in self.sliders:
for x1, x2 in (-0.5, 0.5), (0.75, 1.0), (-1.0, -0.75):
p1, p2 = list(map(slider.number_to_point, [x1, x2]))
rect = Rectangle(
stroke_width = 0,
fill_opacity = 0.5,
width = 0.25,
height = (p2-p1)[1]
)
rect.move_to((p1+p2)/2)
if np.mean([x1, x2]) == 0:
rect.set_color(BLUE)
blue_rects.add(rect)
else:
rect.set_color(RED)
red_rects.add(rect)
blue_rects.save_state()
self.play(DrawBorderThenFill(blue_rects))
self.wait()
self.play(ReplacementTransform(blue_rects, red_rects))
self.wait()
self.play(FadeOut(red_rects))
blue_rects.restore()
self.real_estate_rects = VGroup(blue_rects, red_rects)
def nudge_x_from_one_example(self):
x_re = self.decimals[0]
rect = SurroundingRectangle(x_re)
self.reset_dials([1, 0])
self.wait()
self.play(ShowCreation(rect))
self.play(FadeOut(rect))
self.play(FocusOn(self.dot))
self.wait()
self.reset_dials([0.9, -np.sqrt(0.19)])
x_brace, y_brace = [
Brace(
VGroup(slider.dial, Dot(slider.number_to_point(0))),
vect
)
for slider, vect in zip(self.sliders, [LEFT, RIGHT])
]
x_text = x_brace.get_tex("0.9")
y_text = y_brace.get_tex("%.02f"%self.sliders[1].get_value())
self.play(
GrowFromCenter(x_brace),
Write(x_text)
)
self.play(ReplacementTransform(
VGroup(x_text.copy()), x_re
))
self.wait(2)
self.play(
GrowFromCenter(y_brace),
Write(y_text),
)
self.wait(2)
self.play(FadeIn(self.real_estate_rects))
self.reset_dials([1, 0], run_time = 1)
self.reset_dials([0.9, -np.sqrt(0.19)], run_time = 2)
self.play(FadeOut(self.real_estate_rects))
self.play(*list(map(FadeOut, [x_brace, y_brace, x_text, y_text])))
self.wait()
def note_circle_steepness(self):
line = Line(
self.plane.coords_to_point(0.5, 1),
self.plane.coords_to_point(1.5, -1),
)
rect = Rectangle(
stroke_width = 0,
fill_color = BLUE,
fill_opacity = 0.5,
)
rect.replace(line, stretch = True)
self.play(DrawBorderThenFill(rect, stroke_color = YELLOW))
for x, u in (1, 1), (0.8, 1), (1, 1), (0.8, -1), (1, 1):
self.reset_dials([x, u*np.sqrt(1 - x**2)])
self.play(FadeOut(rect))
def add_tick_marks(self):
self.remove_foreground_mobjects(self.sliders)
self.add(self.sliders)
old_ticks = VGroup()
all_ticks = VGroup()
for slider in self.sliders:
slider.tick_size = 0.1
slider.add_real_estate_ticks()
slider.remove(slider.get_tick_marks())
all_ticks.add(*slider.real_estate_ticks)
old_ticks.add(*slider.get_tick_marks()[:-3])
self.play(
FadeOut(old_ticks),
ShowCreation(all_ticks, run_time = 3),
Animation(VGroup(*[slider.dial for slider in self.sliders])),
)
self.add_foreground_mobjects(self.sliders)
self.wait()
for x in np.arange(0.95, 0.05, -0.05):
self.reset_dials(
[np.sqrt(x), np.sqrt(1-x)],
run_time = 0.5
)
self.wait(0.5)
self.initialize_ambiant_slider_movement()
self.wait(10)
def write_distance_squared(self):
d_squared = TexMobject("(\\text{Distance})^2")
d_squared.next_to(self.real_estate_word, DOWN)
d_squared.set_color(YELLOW)
self.play(Write(d_squared))
self.wait(3)
#####
def update_frame(self, *args, **kwargs):
if hasattr(self, "dot"):
x, y = self.get_vector()
self.dot.move_to(self.plane.coords_to_point(x, y))
if hasattr(self, "decimals"):
for anim in self.decimal_update_anims:
anim.update(0)
SliderScene.update_frame(self, *args, **kwargs)
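# Minimal numeric sketch (assumed helper, not used by the scenes) of the
# "real estate" readout the decimals animate: a point on the unit circle
# always distributes exactly one unit of squared-coordinate real estate.
def _real_estate_shares_2d(theta=0.7):
    import numpy as np
    x, y = np.cos(theta), np.sin(theta)
    return x**2, y**2  # the two shares; they sum to 1.0 for any theta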
class TwoDimensionalCaseIntro(TwoDimensionalCase):
def construct(self):
self.initialize_ambiant_slider_movement()
self.wait(10)
class ThreeDCase(TwoDimensionalCase):
CONFIG = {
"n_sliders" : 3,
"slider_config" : {
"include_real_estate_ticks" : True,
"numbers_with_elongated_ticks" : [],
"tick_frequency" : 1,
"tick_size" : 0.1,
},
}
def setup(self):
SliderScene.setup(self)
self.equation = TexMobject(
"x^2", "+", "y^2", "+", "z^2", "=", "1"
)
self.equation.to_corner(UP+RIGHT)
self.add(self.equation)
def construct(self):
self.force_skipping()
self.add_real_estate_decimals()
self.initialize_ambiant_slider_movement()
self.point_out_third_slider()
self.wait(3)
self.hold_x_at(0.5, 12)
self.revert_to_original_skipping_status()
self.hold_x_at(0.85, 12)
return
self.hold_x_at(1, 5)  # unreachable: skipped by the early return above
def add_real_estate_decimals(self):
rects = VGroup(*[
SurroundingRectangle(self.equation.get_part_by_tex(char))
for char in "xyz"
])
decimals = VGroup(*[
DecimalNumber(num**2)
for num in self.get_vector()
])
decimals.arrange(RIGHT, buff = LARGE_BUFF)
decimals.next_to(rects, DOWN, LARGE_BUFF)
colors = [GREEN, RED, BLUE]
arrows = VGroup()
for rect, decimal, color in zip(rects, decimals, colors):
rect.set_color(color)
decimal.set_color(color)
arrow = Arrow(
rect.get_bottom()+SMALL_BUFF*UP, decimal.get_top(),
tip_length = 0.2,
color = color
)
arrows.add(arrow)
real_estate_word = TextMobject("``Real estate''")
real_estate_word.next_to(decimals, DOWN, MED_LARGE_BUFF)
sliders = self.sliders
def create_update_func(i):
return lambda alpha : sliders[i].get_real_estate()
self.add_foreground_mobjects(decimals)
self.decimals = decimals
self.decimal_update_anims = [
ChangingDecimal(decimal, create_update_func(i))
for i, decimal in enumerate(decimals)
]
self.add(rects, arrows, real_estate_word)
self.rects = rects
self.arrows = arrows
self.real_estate_word = real_estate_word
def point_out_third_slider(self):
rect = SurroundingRectangle(self.sliders[-1])
self.wait(4)
self.play(ShowCreation(rect))
self.play(FadeOut(rect))
self.wait(8)
def hold_x_at(self, x_val, wait_time):
#Save these
all_sliders = self.sliders
original_total_real_estate = self.total_real_estate
self.reset_dials([x_val], run_time = 3)
self.sliders = VGroup(*self.sliders[1:])
self.total_real_estate = self.total_real_estate-x_val**2
self.initialize_ambiant_slider_movement()
self.wait(wait_time-2)
self.wind_down_ambient_movement()
self.sliders = all_sliders
self.total_real_estate = original_total_real_estate
self.initialize_ambiant_slider_movement()
####
class ThreeDCaseInsert(ThreeDCase):
def construct(self):
self.add_real_estate_decimals()
self.reset_dials([0.85, np.sqrt(1-0.85**2)], run_time = 0)
self.reset_dials([1], run_time = 3)
self.wait()
class SphereAtRest(ExternallyAnimatedScene):
pass
class BugOnASurface(TeacherStudentsScene):
def construct(self):
self.teacher_says("You're a bug \\\\ on a surface")
self.wait(3)
class SphereWithWanderingDotAtX0point5(ExternallyAnimatedScene):
pass
class MoveSphereSliceFromPoint5ToPoint85(ExternallyAnimatedScene):
pass
class SphereWithWanderingDotAtX0point85(ExternallyAnimatedScene):
pass
class MoveSphereSliceFromPoint85To1(ExternallyAnimatedScene):
pass
class BugOnTheSurfaceSlidersPart(ThreeDCase):
CONFIG = {
"run_time" : 30
}
def construct(self):
self.add_real_estate_decimals()
self.reset_dials([0.9], run_time = 0)
time_range = np.arange(0, self.run_time, self.frame_duration)
for time in ProgressDisplay(time_range):
    # Spherical angles (t, u) keep (x, y, z) on the unit sphere, so the
    # three sliders always share exactly one unit of real estate.
    t = 0.3*np.sin(2*np.pi*time/7.0) + 1
    u = 0.3*np.sin(4*np.pi*time/7.0) + 1.5
self.set_to_vector([
np.cos(u),
np.sin(u)*np.cos(t),
np.sin(u)*np.sin(t),
])
self.wait(self.frame_duration)
class BugOnTheSurfaceSpherePart(ExternallyAnimatedScene):
pass
class FourDCase(SliderScene, TeacherStudentsScene):
def setup(self):
TeacherStudentsScene.setup(self)
SliderScene.setup(self)
self.sliders.scale(0.9)
self.sliders.to_edge(UP)
self.sliders.shift(2*RIGHT)
self.initialize_ambiant_slider_movement()
def construct(self):
self.show_initial_exchange()
self.fix_one_slider()
self.ask_now_what()
self.set_aside_sliders()
def show_initial_exchange(self):
dot = Dot(fill_opacity = 0)
dot.to_corner(UP+LEFT, buff = 2)
self.play(Animation(dot))
self.wait()
self.play(
Animation(self.sliders),
self.teacher.change, "raise_right_hand",
)
self.change_student_modes(
*["pondering"]*3,
look_at_arg = self.sliders
)
self.wait(4)
def fix_one_slider(self):
x_slider = self.sliders[0]
dial = x_slider.dial
self.wind_down_ambient_movement(wait = False)
self.play(self.teacher.change, "speaking")
self.sliders.remove(x_slider)
self.total_real_estate = get_norm(self.get_vector())**2
self.initialize_ambiant_slider_movement()
arrow = Arrow(LEFT, RIGHT, color = GREEN)
arrow.next_to(dial, LEFT)
self.play(
ShowCreation(arrow),
dial.set_color, arrow.get_color()
)
self.change_student_modes(
"erm", "confused", "hooray",
look_at_arg = self.sliders,
added_anims = [self.teacher.change, "plain"]
)
self.wait(5)
self.x_slider = x_slider
self.x_arrow = arrow
def ask_now_what(self):
self.student_says(
"Okay...now what?",
target_mode = "raise_left_hand",
student_index = 0,
added_anims = [self.teacher.change, "plain"]
)
self.change_student_modes(
None, "pondering", "pondering",
look_at_arg = self.students[0].bubble,
)
self.wait(4)
self.play(RemovePiCreatureBubble(self.students[0]))
def set_aside_sliders(self):
self.sliders.add(self.x_slider)
self.total_real_estate = 1
self.initialize_ambiant_slider_movement()
self.play(
self.sliders.scale, 0.5,
self.sliders.to_corner, UP+RIGHT,
FadeOut(self.x_arrow)
)
self.teacher_says(
"Time for some \\\\ high-dimensional \\\\ strangeness!",
target_mode = "hooray",
)
self.wait(7)
#####
def non_blink_wait(self, time = 1):
SliderScene.wait(self, time)
class TwoDBoxExample(Scene):
def setup(self):
scale_factor = 1.7
self.plane = NumberPlane()
self.plane.scale(scale_factor)
self.plane.add_coordinates()
self.plane.axes.set_color(GREY)
self.add(self.plane)
def construct(self):
self.add_box()
self.label_corner_coordinates()
self.add_corner_circles()
self.add_center_circle()
self.compute_radius()
def add_box(self):
box = Square(color = RED, stroke_width = 6)
line = Line(
self.plane.coords_to_point(-1, -1),
self.plane.coords_to_point(1, 1),
)
box.replace(line, stretch = True)
self.play(ShowCreation(box))
self.wait()
def label_corner_coordinates(self):
corner_dots = VGroup()
coords_group = VGroup()
for x, y in it.product(*[[1, -1]]*2):
point = self.plane.coords_to_point(x, y)
dot = Dot(point, color = WHITE)
coords = TexMobject("(%d, %d)"%(x, y))
coords.add_background_rectangle()
coords.next_to(point, point, SMALL_BUFF)
corner_dots.add(dot)
coords_group.add(coords)
self.play(
ShowCreation(dot),
Write(coords, run_time = 1)
)
self.add_foreground_mobjects(coords_group)
self.corner_dots = corner_dots
self.coords_group = coords_group
def add_corner_circles(self):
line = Line(
self.plane.coords_to_point(-1, 0),
self.plane.coords_to_point(1, 0),
)
circle = Circle(color = YELLOW)
circle.replace(line, dim_to_match = 0)
circles = VGroup(*[
circle.copy().move_to(dot)
for dot in self.corner_dots
])
radius = Line(ORIGIN, self.plane.coords_to_point(1, 0))
radius.set_stroke(GREY, 6)
radius.rotate(-np.pi/4)
c0_center = circles[0].get_center()
radius.shift(c0_center)
r_equals_1 = TexMobject("r = 1")
r_equals_1.add_background_rectangle()
r_equals_1.next_to(
radius.point_from_proportion(0.75),
UP+RIGHT, SMALL_BUFF
)
self.play(LaggedStartMap(ShowCreation, circles))
self.play(
ShowCreation(radius),
Write(r_equals_1)
)
for angle in -np.pi/4, -np.pi/2, 3*np.pi/4:
self.play(Rotating(
radius, about_point = c0_center,
radians = angle,
run_time = 1,
rate_func = smooth,
))
self.wait(0.5)
self.play(*list(map(FadeOut, [radius, r_equals_1])))
self.wait()
self.corner_radius = radius
self.corner_circles = circles
def add_center_circle(self):
r = np.sqrt(2) - 1
radius = Line(ORIGIN, self.plane.coords_to_point(r, 0))
radius.set_stroke(WHITE)
circle = Circle(color = GREEN)
circle.replace(
VGroup(radius, radius.copy().rotate(np.pi)),
dim_to_match = 0
)
radius.rotate(np.pi/4)
r_equals_q = TexMobject("r", "= ???")
r_equals_q[1].add_background_rectangle()
r_equals_q.next_to(radius, RIGHT, buff = -SMALL_BUFF)
self.play(GrowFromCenter(circle, run_time = 2))
self.play(circle.scale, 1.2, rate_func = wiggle)
self.play(ShowCreation(radius))
self.play(Write(r_equals_q))
self.wait(2)
self.play(FadeOut(r_equals_q[1]))
self.inner_radius = radius
self.inner_circle = circle
self.inner_r = r_equals_q[0]
def compute_radius(self):
triangle = Polygon(
ORIGIN,
self.plane.coords_to_point(1, 0),
self.plane.coords_to_point(1, 1),
fill_color = BLUE,
fill_opacity = 0.5,
stroke_width = 6,
stroke_color = WHITE,
)
bottom_one = TexMobject("1")
bottom_one.next_to(triangle.get_bottom(), UP, SMALL_BUFF)
bottom_one.shift(MED_SMALL_BUFF*RIGHT)
side_one = TexMobject("1")
side_one.next_to(triangle, RIGHT)
sqrt_1_plus_1 = TexMobject("\\sqrt", "{1^2 + 1^2}")
sqrt_2 = TexMobject("\\sqrt", "{2}")
for sqrt in sqrt_1_plus_1, sqrt_2:
sqrt.add_background_rectangle()
sqrt.next_to(ORIGIN, UP, SMALL_BUFF)
sqrt.rotate(np.pi/4)
sqrt.shift(triangle.get_center())
root_2_value = TexMobject("\\sqrt{2} \\approx 1.414")
root_2_value.to_corner(UP+RIGHT)
root_2_value.add_background_rectangle()
root_2_minus_1_value = TexMobject(
"\\sqrt{2} - 1 \\approx 0.414"
)
root_2_minus_1_value.next_to(root_2_value, DOWN)
root_2_minus_1_value.to_edge(RIGHT)
root_2_minus_1_value.add_background_rectangle()
corner_radius = self.corner_radius
c0_center = self.corner_circles[0].get_center()
corner_radius.rotate(-np.pi/2, about_point = c0_center)
rhs = TexMobject("=", "\\sqrt", "{2}", "-1")
rhs.next_to(self.inner_r, RIGHT, SMALL_BUFF, DOWN)
rhs.shift(0.5*SMALL_BUFF*DOWN)
sqrt_2_target = VGroup(*rhs[1:3])
rhs.add_background_rectangle()
self.play(
FadeIn(triangle),
Write(VGroup(bottom_one, side_one, sqrt_1_plus_1))
)
self.wait(2)
self.play(ReplacementTransform(sqrt_1_plus_1, sqrt_2))
self.play(
Write(root_2_value, run_time = 1),
*list(map(FadeOut, [bottom_one, side_one]))
)
self.wait()
self.play(ShowCreation(corner_radius))
self.play(Rotating(
corner_radius, about_point = c0_center,
run_time = 2,
rate_func = smooth
))
self.play(FadeOut(triangle), Animation(corner_radius))
self.wait()
self.play(
Write(rhs),
Transform(sqrt_2, sqrt_2_target),
)
self.play(Write(root_2_minus_1_value))
self.wait(2)
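# The radius computed in the scene above, as a one-liner (assumed helper
# name): the inner circle spans from the box center to the nearest corner
# circle, i.e. the corner distance minus the corner radius 1.
def _inner_radius_2d():
    import numpy as np
    return np.sqrt(1**2 + 1**2) - 1  # sqrt(2) - 1 ~ 0.414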
class ThreeDBoxExample(ExternallyAnimatedScene):
pass
class ThreeDCubeCorners(Scene):
def construct(self):
coordinates = VGroup(*[
TexMobject("(%d,\\, %d,\\, %d)"%(x, y, z))
for x, y, z in it.product(*3*[[1, -1]])
])
coordinates.arrange(DOWN, aligned_edge = LEFT)
name = TextMobject("Corners: ")
name.next_to(coordinates[0], LEFT)
group = VGroup(name, coordinates)
group.set_height(FRAME_HEIGHT - 1)
group.to_edge(LEFT)
self.play(Write(name, run_time = 2))
self.play(LaggedStartMap(FadeIn, coordinates, run_time = 3))
self.wait()
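# The corner list above generalizes directly (sketch, helper name assumed):
# a side-length-2 box centered at the origin in n dimensions has 2**n
# corners at every sign combination of (+-1, ..., +-1).
def _box_corners(n):
    import itertools as it
    return list(it.product(*n * [[1, -1]]))  # e.g. 8 corners for n = 3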
class ShowDistanceFormula(TeacherStudentsScene):
def construct(self):
rule = TexMobject(
"||(", "x_1", ", ", "x_2", "\\dots, ", "x_n", ")||",
"=",
"\\sqrt", "{x_1^2", " + ", "x_2^2", " +\\cdots", "x_n^2", "}"
)
rule.set_color_by_tex_to_color_map({
"x_1" : GREEN,
"x_2" : RED,
"x_n" : BLUE,
})
for part in rule.get_parts_by_tex("x_"):
if len(part) > 2:
part[1].set_color(WHITE)
rule.next_to(self.teacher, UP, LARGE_BUFF)
rule.to_edge(RIGHT)
rule.shift(UP)
rule.save_state()
rule.shift(2*DOWN)
rule.set_fill(opacity = 0)
self.play(
rule.restore,
self.teacher.change, "raise_right_hand",
)
self.wait(3)
self.student_says("Why?", student_index = 0)
self.play(self.teacher.change, "thinking")
self.wait(3)
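# The distance formula on screen, written as a plain function (a sketch,
# not an API from this codebase):
def _norm(coords):
    return sum(x**2 for x in coords) ** 0.5  # ||(x_1, ..., x_n)||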
class GeneralizePythagoreanTheoremBeyondTwoD(ThreeDScene):
def construct(self):
tex_to_color_map = {
"x" : GREEN,
"y" : RED,
"z" : BLUE,
}
rect = Rectangle(
height = 4, width = 5,
fill_color = WHITE,
fill_opacity = 0.2,
)
diag = Line(
rect.get_corner(DOWN+LEFT),
rect.get_corner(UP+RIGHT),
color = YELLOW
)
bottom = Line(rect.get_left(), rect.get_right())
bottom.move_to(rect.get_bottom())
bottom.set_color(tex_to_color_map["x"])
side = Line(rect.get_bottom(), rect.get_top())
side.move_to(rect.get_right())
side.set_color(tex_to_color_map["y"])
x = TexMobject("x")
x.next_to(rect.get_bottom(), UP, SMALL_BUFF)
y = TexMobject("y")
y.next_to(rect.get_right(), LEFT, SMALL_BUFF)
hyp = TexMobject("\\sqrt", "{x", "^2 + ", "y", "^2}")
hyp.set_color_by_tex_to_color_map(tex_to_color_map)
hyp.next_to(ORIGIN, UP)
hyp.rotate(diag.get_angle())
hyp.shift(diag.get_center())
group = VGroup(rect, bottom, side, diag, x, y, hyp)
self.add(rect)
for line, tex in (bottom, x), (side, y), (diag, hyp):
self.play(
ShowCreation(line),
Write(tex, run_time = 1)
)
self.wait()
self.play(
group.rotate, 0.45*np.pi, LEFT,
group.shift, 2*DOWN
)
corner = diag.get_end()
z_line = Line(corner, corner + 3*UP)
z_line.set_color(tex_to_color_map["z"])
z = TexMobject("z")
z.set_color(tex_to_color_map["z"])
z.next_to(z_line, RIGHT)
dot = Dot(z_line.get_end())
three_d_diag = Line(diag.get_start(), z_line.get_end())
three_d_diag.set_color(MAROON_B)
self.play(
ShowCreation(z_line),
ShowCreation(dot),
Write(z, run_time = 1)
)
self.play(ShowCreation(three_d_diag))
self.wait()
full_group = VGroup(group, z_line, z, three_d_diag, dot)
self.play(Rotating(
full_group, radians = -np.pi/6,
axis = UP,
run_time = 10,
))
self.wait()
class ThreeDBoxFormulas(Scene):
def construct(self):
question = TexMobject(
"||(1, 1, 1)||", "=", "???"
)
answer = TexMobject(
"||(1, 1, 1)||", "&=", "\\sqrt{1^2 + 1^2 + 1^2}\\\\",
"&= \\sqrt{3}\\\\", "&\\approx", "1.73",
)
for mob in question, answer:
mob.to_corner(UP+LEFT)
inner_r = TexMobject(
"\\text{Inner radius}", "&=", "\\sqrt{3} - 1\\\\",
"&\\approx", "0.73"
)
inner_r.next_to(answer, DOWN, LARGE_BUFF, LEFT)
inner_r.set_color(GREEN_C)
VGroup(question, answer).shift(0.55*RIGHT)
self.play(Write(question))
self.wait(2)
self.play(ReplacementTransform(question, answer))
self.wait(2)
self.play(Write(inner_r))
self.wait(2)
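# The pattern these scenes build toward, written out (assumed helper name):
# the corner (1, ..., 1) of the n-dimensional box sits sqrt(n) from the
# origin, so the inner sphere has radius sqrt(n) - 1, giving roughly
# 0.41, 0.73, 1.0, ... for n = 2, 3, 4, ...
def _inner_radius(n):
    import numpy as np
    return np.sqrt(n) - 1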
class AskAboutHigherDimensions(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"What happens for \\\\ higher dimensions?"
)
self.change_student_modes(*["pondering"]*3)
self.wait(2)
self.student_thinks(
"$\\sqrt{N} - 1$",
target_mode = "happy",
student_index = 1
)
self.wait()
pi = self.students[1]
self.play(pi.change, "confused", pi.bubble)
self.wait(3)
class TenSliders(SliderScene):
CONFIG = {
"n_sliders" : 10,
"run_time": 30,
"slider_spacing" : 0.75,
"ambient_acceleration_magnitude" : 2.0,
}
def construct(self):
self.initialize_ambiant_slider_movement()
self.wait(self.run_time)
self.wind_down_ambient_movement()
class TwoDBoxWithSliders(TwoDimensionalCase):
CONFIG = {
"slider_config" : {
"include_real_estate_ticks" : True,
"tick_frequency" : 1,
"numbers_with_elongated_ticks" : [],
"tick_size" : 0.1,
"dial_color" : YELLOW,
"x_min" : -2,
"x_max" : 2,
"unit_size" : 1.5,
},
"center_point" : [1, -1],
}
def setup(self):
TwoDimensionalCase.setup(self)
##Correct from previous setup
self.remove(self.equation)
self.sliders.shift(RIGHT)
VGroup(*self.get_top_level_mobjects()).shift(RIGHT)
x_slider = self.sliders[0]
for number in x_slider.numbers:
value = int(number.get_tex_string())
number.next_to(
x_slider.number_to_point(value),
LEFT, MED_SMALL_BUFF
)
self.plane.axes.set_color(BLUE)
##Add box material
corner_circles = VGroup(*[
self.circle.copy().move_to(
self.plane.coords_to_point(*coords)
).set_color(GREY)
for coords in ((1, 1), (-1, 1), (-1, -1))
])
line = Line(
self.plane.coords_to_point(-1, -1),
self.plane.coords_to_point(1, 1),
)
box = Square(color = RED)
box.replace(line, stretch = True)
self.add(box, corner_circles)
self.box = box
self.corner_circles = corner_circles
def construct(self):
self.ask_about_off_center_circle()
self.recenter_circle()
self.write_x_and_y_real_estate()
self.swap_with_top_right_circle()
self.show_center_circle()
self.describe_tangent_point()
self.perturb_point()
self.wander_on_inner_circle()
self.ask_about_inner_real_estate()
def ask_about_off_center_circle(self):
question = TextMobject("Off-center circles?")
question.next_to(self.plane, UP)
self.initialize_ambiant_slider_movement()
self.play(Write(question))
self.wait(4)
self.wind_down_ambient_movement()
self.question = question
def recenter_circle(self):
original_center_point = self.center_point
self.play(
self.circle.move_to, self.plane.coords_to_point(0, 0),
Animation(self.sliders),
*[
ApplyMethod(
mob.shift,
slider.number_to_point(0)-slider.number_to_point(slider.center_value)
)
for slider in self.sliders
for mob in [slider.real_estate_ticks, slider.dial]
]
)
self.center_point = [0, 0]
for x, slider in zip(self.center_point, self.sliders):
slider.center_value = x
self.initialize_ambiant_slider_movement()
self.wait(7)
self.wind_down_ambient_movement()
self.play(
self.circle.move_to,
self.plane.coords_to_point(*original_center_point),
Animation(self.sliders),
*[
ApplyMethod(
mob.shift,
slider.number_to_point(x)-slider.number_to_point(0)
)
for x, slider in zip(original_center_point, self.sliders)
for mob in [slider.real_estate_ticks, slider.dial]
]
)
self.center_point = original_center_point
for x, slider in zip(self.center_point, self.sliders):
slider.center_value = x
self.initialize_ambiant_slider_movement()
self.wait(5)
def write_x_and_y_real_estate(self):
phrases = VGroup(
TextMobject("$x$", "real estate:", "$(x-1)^2$"),
TextMobject("$y$", "real estate:", "$(y+1)^2$"),
)
phrases.next_to(self.plane, UP)
phrases[0].set_color_by_tex("x", GREEN)
phrases[1].set_color_by_tex("y", RED)
x_brace, y_brace = [
Brace(slider.real_estate_ticks, RIGHT)
for slider in self.sliders
]
x_brace.set_color(GREEN)
y_brace.set_color(RED)
self.play(FadeOut(self.question))
self.play(
Write(phrases[0]),
GrowFromCenter(x_brace)
)
self.wait(3)
self.play(
Transform(*phrases),
Transform(x_brace, y_brace)
)
self.wait(5)
self.wind_down_ambient_movement(wait = False)
self.play(*list(map(FadeOut, [x_brace, phrases[0]])))
def swap_with_top_right_circle(self):
alt_circle = self.corner_circles[0]
slider = self.sliders[1]
self.play(
self.circle.move_to, alt_circle,
alt_circle.move_to, self.circle,
Animation(slider),
*[
ApplyMethod(
mob.shift,
slider.number_to_point(1) - slider.number_to_point(-1)
)
for mob in (slider.real_estate_ticks, slider.dial)
]
)
slider.center_value = 1
self.center_point[1] = 1
self.initialize_ambiant_slider_movement()
self.wait(3)
def show_center_circle(self):
origin = self.plane.coords_to_point(0, 0)
radius = get_norm(
self.plane.coords_to_point(np.sqrt(2)-1, 0) - origin
)
circle = Circle(radius = radius, color = GREEN)
circle.move_to(origin)
self.play(FocusOn(circle))
self.play(GrowFromCenter(circle, run_time = 2))
self.wait(3)
def describe_tangent_point(self):
target_vector = np.array([
1-np.sqrt(2)/2, 1-np.sqrt(2)/2
])
point = self.plane.coords_to_point(*target_vector)
origin = self.plane.coords_to_point(0, 0)
h_line = Line(point[1]*UP + origin[0]*RIGHT, point)
v_line = Line(point[0]*RIGHT+origin[1]*UP, point)
while get_norm(self.get_vector()-target_vector) > 0.5:
self.wait()
self.wind_down_ambient_movement(0)
self.reset_dials(target_vector)
self.play(*list(map(ShowCreation, [h_line, v_line])))
self.wait()
re_line = DashedLine(
self.sliders[0].dial.get_left() + MED_SMALL_BUFF*LEFT,
self.sliders[1].dial.get_right() + MED_SMALL_BUFF*RIGHT,
)
words = TextMobject("Evenly shared \\\\ real estate")
words.scale(0.8)
words.next_to(re_line, RIGHT)
self.play(ShowCreation(re_line))
self.play(Write(words))
self.wait()
self.evenly_shared_words = words
self.re_line = re_line
def perturb_point(self):
    # Perturb dials
target_vector = np.array([
1 - np.sqrt(0.7),
1 - np.sqrt(0.3),
])
ghost_dials = VGroup(*[
slider.dial.copy()
for slider in self.sliders
])
ghost_dials.set_fill(WHITE, opacity = 0.75)
self.add_foreground_mobjects(ghost_dials)
self.reset_dials(target_vector)
self.wait()
#Comment on real estate exchange
x_words = TextMobject("Gain expensive \\\\", "real estate")
y_words = TextMobject("Give up cheap \\\\", "real estate")
VGroup(x_words, y_words).scale(0.8)
x_words.next_to(self.re_line, UP+LEFT)
x_words.shift(SMALL_BUFF*(DOWN+LEFT))
y_words.next_to(self.re_line, UP+RIGHT)
y_words.shift(MED_LARGE_BUFF*UP)
x_arrow, y_arrow = [
Arrow(
words[1].get_edge_center(vect), self.sliders[i].dial,
tip_length = 0.15,
)
for i, words, vect in zip(
(0, 1), [x_words, y_words], [RIGHT, LEFT]
)
]
self.play(
Write(x_words, run_time = 2),
ShowCreation(x_arrow)
)
self.wait()
self.play(FadeOut(self.evenly_shared_words))
self.play(
Write(y_words, run_time = 2),
ShowCreation(y_arrow)
)
self.wait(2)
#Swap perspective
word_starts = VGroup(y_words[0], x_words[0])
crosses = VGroup()
new_words = VGroup()
for w1, w2 in zip(word_starts, reversed(word_starts)):
crosses.add(Cross(w1))
w1_copy = w1.copy()
w1_copy.generate_target()
w1_copy.target.next_to(w2, UP, SMALL_BUFF)
new_words.add(w1_copy)
self.play(*[
ApplyMethod(
slider.real_estate_ticks.shift,
slider.number_to_point(0)-slider.number_to_point(1)
)
for slider in self.sliders
])
self.wait()
self.play(ShowCreation(crosses))
self.play(
LaggedStartMap(MoveToTarget, new_words),
Animation(crosses)
)
self.wait(3)
#Return to original position
target_vector = np.array(2*[1-np.sqrt(0.5)])
self.play(LaggedStartMap(FadeOut, VGroup(*[
ghost_dials,
x_words, y_words,
x_arrow, y_arrow,
crosses, new_words,
])))
self.remove_foreground_mobjects(ghost_dials)
self.reset_dials(target_vector)
self.center_point = np.zeros(2)
for x, slider in zip(self.center_point, self.sliders):
slider.center_value = x
self.set_to_vector(target_vector)
self.total_real_estate = self.get_current_total_real_estate()
self.wait(2)
def wander_on_inner_circle(self):
self.initialize_ambiant_slider_movement()
self.wait(9)
def ask_about_inner_real_estate(self):
question = TextMobject("What is \\\\ $x^2 + y^2$?")
question.next_to(self.re_line, RIGHT)
rhs = TexMobject("<0.5^2 + 0.5^2")
rhs.scale(0.8)
rhs.next_to(question, DOWN)
rhs.to_edge(RIGHT)
half_line = Line(*[
slider.number_to_point(0.5) + MED_LARGE_BUFF*vect
for slider, vect in zip(self.sliders, [LEFT, RIGHT])
])
half = TexMobject("0.5")
half.scale(self.sliders[0].number_scale_val)
half.next_to(half_line, LEFT, SMALL_BUFF)
target_vector = np.array(2*[1-np.sqrt(0.5)])
while get_norm(target_vector - self.get_vector()) > 0.5:
self.wait()
self.wind_down_ambient_movement(0)
self.reset_dials(target_vector)
self.play(Write(question))
self.wait(3)
self.play(
ShowCreation(half_line),
Write(half)
)
self.wait()
self.play(Write(rhs))
self.wait(3)
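# Numeric check of the question posed above (assumed helper name): at the
# tangent point both coordinates equal 1 - sqrt(0.5) ~ 0.293 < 0.5, so the
# inner-circle real estate stays strictly below 0.5**2 + 0.5**2 = 0.5.
def _inner_real_estate_2d():
    import numpy as np
    x = 1 - np.sqrt(0.5)
    return 2 * x**2  # equals (sqrt(2) - 1)**2 ~ 0.172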
class AskWhy(TeacherStudentsScene):
def construct(self):
self.student_says(
"Wait, why?",
target_mode = "confused"
)
self.wait(3)
class MentionComparisonToZeroPointFive(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Comparing to $0.5$ will \\\\"+\
"be surprisingly useful!",
target_mode = "hooray"
)
self.change_student_modes(*["happy"]*3)
self.wait(3)
class ThreeDBoxExampleWithSliders(SliderScene):
CONFIG = {
"n_sliders" : 3,
"slider_config" : {
"x_min" : -2,
"x_max" : 2,
"unit_size" : 1.5,
},
"center_point" : np.ones(3),
}
def setup(self):
SliderScene.setup(self)
self.sliders.shift(2*RIGHT)
def construct(self):
self.initialize_ambiant_slider_movement()
self.name_corner_sphere()
self.point_out_closest_point()
self.compare_to_halfway_point()
self.reframe_as_inner_sphere_point()
self.place_bound_on_inner_real_estate()
self.comment_on_inner_sphere_smallness()
def name_corner_sphere(self):
sphere_name = TextMobject(
"""Sphere with radius 1\\\\
centered at (1, 1, 1)"""
)
sphere_name.to_corner(UP+LEFT)
arrow = Arrow(
sphere_name, VGroup(*self.sliders[0].numbers[-2:]),
color = BLUE
)
self.play(
LaggedStartMap(FadeIn, sphere_name,),
ShowCreation(arrow, rate_func = squish_rate_func(smooth, 0.7, 1)),
run_time = 3
)
self.wait(5)
self.sphere_name = sphere_name
self.arrow = arrow
def point_out_closest_point(self):
target_x = 1-np.sqrt(1./3)
target_vector = np.array(3*[target_x])
re_words = TextMobject(
"$x$, $y$ and $z$ each have \\\\",
"$\\frac{1}{3}$", "units of real estate"
)
re_words.to_corner(UP+LEFT)
re_line = DashedLine(*[
self.sliders[i].number_to_point(target_x) + MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (2, RIGHT)]
])
new_arrow = Arrow(
re_words.get_corner(DOWN+RIGHT), re_line.get_left(),
color = BLUE
)
self.wind_down_ambient_movement()
self.play(*[
ApplyMethod(slider.set_value, x)
for x, slider in zip(target_vector, self.sliders)
])
self.play(ShowCreation(re_line))
self.play(
FadeOut(self.sphere_name),
Transform(self.arrow, new_arrow),
)
self.play(LaggedStartMap(FadeIn, re_words))
self.wait(2)
self.re_words = re_words
self.re_line = re_line
def compare_to_halfway_point(self):
half_line = Line(*[
self.sliders[i].number_to_point(0.5)+MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (2, RIGHT)]
])
half_line.set_stroke(MAROON_B, 6)
half_label = TexMobject("0.5")
half_label.scale(self.sliders[0].number_scale_val)
half_label.next_to(half_line, LEFT, MED_SMALL_BUFF)
half_label.set_color(half_line.get_color())
curr_vector = self.get_vector()
target_vector = 0.5*np.ones(3)
ghost_dials = VGroup(*[
slider.dial.copy().set_fill(WHITE, 0.5)
for slider in self.sliders
])
cross = Cross(self.re_words.get_parts_by_tex("frac"))
new_re = TexMobject("(0.5)^2 = 0.25")
new_re.next_to(cross, DOWN, MED_SMALL_BUFF, LEFT)
new_re.set_color(MAROON_B)
self.play(
FadeOut(self.arrow),
Write(half_label, run_time = 1),
ShowCreation(half_line)
)
self.wait()
self.add(ghost_dials)
self.play(*[
ApplyMethod(slider.set_value, 0.5)
for slider in self.sliders
])
self.play(ShowCreation(cross))
self.play(Write(new_re))
self.wait(3)
self.play(
FadeOut(new_re),
FadeOut(cross),
*[
ApplyMethod(slider.set_value, x)
for x, slider in zip(curr_vector, self.sliders)
]
)
def reframe_as_inner_sphere_point(self):
s = self.sliders[0]
shift_vect = s.number_to_point(0)-s.number_to_point(1)
curr_vector = self.get_vector()
self.set_center_point(np.zeros(3))
self.set_to_vector(curr_vector)
self.total_real_estate = self.get_current_total_real_estate()
all_re_ticks = VGroup(*[
slider.real_estate_ticks
for slider in self.sliders
])
inner_sphere_words = TextMobject(
"Also a point on \\\\", "the inner sphere"
)
inner_sphere_words.next_to(self.re_line, RIGHT)
question = TextMobject("How much \\\\", "real estate?")
question.next_to(self.re_line, RIGHT, MED_LARGE_BUFF)
self.play(
Animation(self.sliders),
FadeOut(self.re_words),
LaggedStartMap(
ApplyMethod, all_re_ticks,
lambda m : (m.shift, shift_vect)
)
)
self.initialize_ambiant_slider_movement()
self.play(Write(inner_sphere_words))
self.wait(5)
self.wind_down_ambient_movement(0)
self.play(*[
ApplyMethod(slider.set_value, x)
for slider, x in zip(self.sliders, curr_vector)
])
self.play(ReplacementTransform(
inner_sphere_words, question
))
self.wait(2)
self.re_question = question
def place_bound_on_inner_real_estate(self):
bound = TexMobject(
"&< 3(0.5)^2 ",
"= 0.75"
)
bound.next_to(self.re_question, DOWN)
bound.to_edge(RIGHT)
self.play(Write(bound))
self.wait(2)
def comment_on_inner_sphere_smallness(self):
self.initialize_ambiant_slider_movement()
self.wait(15)
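# Check of the bound written in the scene (assumed helper name): each
# tangent-point coordinate is 1 - sqrt(1/3) ~ 0.42 < 0.5, so the inner
# real estate 3*(1 - sqrt(1/3))**2 = (sqrt(3) - 1)**2 ~ 0.54 is < 0.75.
def _inner_real_estate_3d():
    import numpy as np
    x = 1 - np.sqrt(1. / 3)
    return 3 * x**2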
class Rotating3DCornerSphere(ExternallyAnimatedScene):
pass
class FourDBoxExampleWithSliders(ThreeDBoxExampleWithSliders):
CONFIG = {
"n_sliders" : 4,
"center_point" : np.ones(4),
}
def construct(self):
self.list_corner_coordinates()
self.show_16_corner_spheres()
self.show_closest_point()
self.show_real_estate_at_closest_point()
self.reframe_as_inner_sphere_point()
self.compute_inner_radius_numerically()
self.inner_sphere_touches_box()
def list_corner_coordinates(self):
title = TextMobject(
"$2 \\!\\times\\! 2 \\!\\times\\! 2 \\!\\times\\! 2$ box vertices:"
)
title.shift(FRAME_X_RADIUS*LEFT/2)
title.to_edge(UP)
coordinates = list(it.product(*4*[[1, -1]]))
coordinate_mobs = VGroup(*[
TexMobject("(%d, %d, %d, %d)"%tup)
for tup in coordinates
])
coordinate_mobs.arrange(DOWN, aligned_edge = LEFT)
coordinate_mobs.scale(0.8)
left_column = VGroup(*coordinate_mobs[:8])
right_column = VGroup(*coordinate_mobs[8:])
right_column.next_to(left_column, RIGHT)
coordinate_mobs.next_to(title, DOWN)
self.play(Write(title))
self.play(LaggedStartMap(FadeIn, coordinate_mobs))
self.wait()
self.coordinate_mobs = coordinate_mobs
self.coordinates = coordinates
self.box_vertices_title = title
def show_16_corner_spheres(self):
sphere_words = VGroup(TextMobject("Sphere centered at"))
sphere_words.scale(0.8)
sphere_words.next_to(self.sliders, RIGHT)
sphere_words.shift(2*UP)
self.add(sphere_words)
pairs = list(zip(self.coordinate_mobs, self.coordinates))
for coord_mob, coords in pairs[1:] + [pairs[0]]:
coord_mob.set_color(GREEN)
coord_mob_copy = coord_mob.copy()
coord_mob_copy.next_to(sphere_words, DOWN)
for slider, x in zip(self.sliders, coords):
point = slider.number_to_point(x)
slider.real_estate_ticks.move_to(point)
slider.dial.move_to(point)
self.sliders[0].dial.move_to(
self.sliders[0].number_to_point(coords[0]+1)
)
self.add(coord_mob_copy)
self.wait()
self.remove(coord_mob_copy)
coord_mob.set_color(WHITE)
self.add(coord_mob_copy)
sphere_words.add(coord_mob_copy)
self.sphere_words = sphere_words
self.play(
self.sliders.center,
sphere_words.shift, LEFT,
*list(map(FadeOut, [
self.coordinate_mobs, self.box_vertices_title
]))
)
self.initialize_ambiant_slider_movement()
self.wait(4)
def show_closest_point(self):
target_vector = 0.5*np.ones(4)
re_line = DashedLine(*[
self.sliders[i].number_to_point(0.5)+MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (-1, RIGHT)]
])
half_label = TexMobject("0.5")
half_label.scale(self.sliders[0].number_scale_val)
half_label.next_to(re_line, LEFT, MED_SMALL_BUFF)
half_label.set_color(MAROON_B)
self.wind_down_ambient_movement()
self.play(*[
ApplyMethod(slider.set_value, x)
for x, slider in zip(target_vector, self.sliders)
])
self.play(ShowCreation(re_line))
self.play(Write(half_label))
self.wait(2)
self.re_line = re_line
self.half_label = half_label
def show_real_estate_at_closest_point(self):
words = TextMobject("Total real estate:")
value = TexMobject("4(0.5)^2 = 4(0.25) = 1")
value.next_to(words, DOWN)
re_words = VGroup(words, value)
re_words.scale(0.8)
re_words.next_to(self.sphere_words, DOWN, MED_LARGE_BUFF)
re_rects = VGroup()
for slider in self.sliders:
rect = Rectangle(
width = 2*slider.tick_size,
height = 0.5*slider.unit_size,
stroke_width = 0,
fill_color = MAROON_B,
fill_opacity = 0.75,
)
rect.move_to(slider.number_to_point(0.75))
re_rects.add(rect)
self.play(FadeIn(re_words))
self.play(LaggedStartMap(DrawBorderThenFill, re_rects, run_time = 3))
self.wait(2)
self.re_words = re_words
self.re_rects = re_rects
def reframe_as_inner_sphere_point(self):
sphere_words = self.sphere_words
sphere_words.generate_target()
sphere_words.target.shift(2*DOWN)
old_coords = sphere_words.target[1]
new_coords = TexMobject("(0, 0, 0, 0)")
new_coords.replace(old_coords, dim_to_match = 1)
new_coords.set_color(old_coords.get_color())
Transform(old_coords, new_coords).update(1)  # apply instantly, without animating
self.play(Animation(self.sliders), *[
ApplyMethod(
s.real_estate_ticks.move_to, s.number_to_point(0),
run_time = 2,
rate_func = squish_rate_func(smooth, a, a+0.5)
)
for s, a in zip(self.sliders, np.linspace(0, 0.5, 4))
])
self.play(
MoveToTarget(sphere_words),
self.re_words.next_to, sphere_words.target, UP, MED_LARGE_BUFF,
path_arc = np.pi
)
self.wait(2)
re_shift_vect = 0.5*self.sliders[0].unit_size*DOWN
self.play(LaggedStartMap(
ApplyMethod, self.re_rects,
lambda m : (m.shift, re_shift_vect),
path_arc = np.pi
))
self.wait()
re_words_rect = SurroundingRectangle(self.re_words)
self.play(ShowCreation(re_words_rect))
self.wait()
self.play(FadeOut(re_words_rect))
self.wait()
self.set_center_point(np.zeros(4))
self.initialize_ambiant_slider_movement()
self.wait(4)
def compute_inner_radius_numerically(self):
computation = TexMobject(
"R_\\text{Inner}",
"&= ||(1, 1, 1, 1)|| - 1 \\\\",
# "&= \\sqrt{1^2 + 1^2 + 1^2 + 1^2} - 1 \\\\",
"&= \\sqrt{4} - 1 \\\\",
"&= 1"
)
computation.scale(0.8)
computation.to_corner(UP+LEFT)
computation.shift(DOWN)
brace = Brace(VGroup(*computation[1][1:-2]), UP)
brace_text = brace.get_text("Distance to corner")
brace_text.scale(0.8, about_point = brace_text.get_bottom())
VGroup(brace, brace_text).set_color(RED)
self.play(LaggedStartMap(FadeIn, computation, run_time = 3))
self.play(GrowFromCenter(brace))
self.play(Write(brace_text, run_time = 2))
self.wait(16)
computation.add(brace, brace_text)
self.computation = computation
def inner_sphere_touches_box(self):
touching_words = TextMobject(
"This point touches\\\\",
"the $2 \\!\\times\\! 2 \\!\\times\\! 2 \\!\\times\\! 2$ box!"
)
touching_words.to_corner(UP+LEFT)
arrow = Arrow(MED_SMALL_BUFF*DOWN, 3*RIGHT+DOWN)
arrow.set_color(BLUE)
arrow.shift(touching_words.get_bottom())
self.wind_down_ambient_movement(wait = False)
self.play(FadeOut(self.computation))
self.reset_dials([1])
self.play(Write(touching_words))
self.play(ShowCreation(arrow))
self.wait(2)
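# The 4D coincidence the arrow points at, as a check (sketch): the inner
# radius sqrt(4) - 1 equals 1, exactly the corner-sphere radius, so the
# inner sphere reaches the faces of the 2x2x2x2 box.
def _touches_box_4d():
    import numpy as np
    return np.sqrt(4) - 1 == 1.0  # True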
class TwoDInnerSphereTouchingBox(TwoDBoxWithSliders, PiCreatureScene):
def setup(self):
TwoDBoxWithSliders.setup(self)
PiCreatureScene.setup(self)
self.remove(self.sliders)
self.remove(self.dot)
self.circle.set_color(GREY)
self.randy.next_to(self.plane, RIGHT, LARGE_BUFF, DOWN)
def construct(self):
little_inner_circle, big_inner_circle = [
Circle(
radius = radius*self.plane.x_unit_size,
color = GREEN
).move_to(self.plane.coords_to_point(0, 0))
for radius in (np.sqrt(2)-1, 1)
]
randy = self.randy
tangency_points = VGroup(*[
Dot(self.plane.coords_to_point(x, y))
for x, y in [(1, 0), (0, 1), (-1, 0), (0, -1)]
])
tangency_points.set_fill(YELLOW, 0.5)
self.play(
ShowCreation(little_inner_circle),
randy.change, "pondering", little_inner_circle
)
self.wait()
self.play(
ReplacementTransform(
little_inner_circle.copy(), big_inner_circle
),
little_inner_circle.fade,
randy.change, "confused"
)
big_inner_circle.save_state()
self.play(big_inner_circle.move_to, self.circle)
self.play(big_inner_circle.restore)
self.wait()
self.play(LaggedStartMap(
DrawBorderThenFill, tangency_points,
rate_func = double_smooth
))
self.play(randy.change, "maybe")
self.play(randy.look_at, self.circle)
self.wait()
self.play(randy.look_at, little_inner_circle)
self.wait()
####
def create_pi_creature(self):
self.randy = Randolph().flip()
return self.randy
class FiveDBoxExampleWithSliders(FourDBoxExampleWithSliders):
CONFIG = {
"n_sliders" : 5,
"center_point" : np.ones(5),
}
def setup(self):
FourDBoxExampleWithSliders.setup(self)
self.sliders.center()
def construct(self):
self.show_32_corner_spheres()
self.show_closest_point()
self.show_halfway_point()
self.reframe_as_inner_sphere_point()
self.compute_radius()
self.poke_out_of_box()
def show_32_corner_spheres(self):
sphere_words = VGroup(TextMobject("Sphere centered at"))
sphere_words.next_to(self.sliders, RIGHT, MED_LARGE_BUFF)
sphere_words.shift(2.5*UP)
self.add(sphere_words)
n_sphere_words = TextMobject("32 corner spheres")
n_sphere_words.to_edge(LEFT)
n_sphere_words.shift(2*UP)
self.add(n_sphere_words)
for coords in it.product(*5*[[-1, 1]]):
s = str(tuple(coords))
s = s.replace("1", "+1")    # write explicit plus signs, e.g. (+1, -1, ...)
s = s.replace("-+1", "-1")  # then undo the damage done to the minus signs
coords_mob = TexMobject(s)
coords_mob.set_color(GREEN)
coords_mob.next_to(sphere_words, DOWN)
for slider, x in zip(self.sliders, coords):
for mob in slider.real_estate_ticks, slider.dial:
mob.move_to(slider.number_to_point(x))
self.sliders[0].dial.move_to(
self.sliders[0].number_to_point(coords[0]+1)
)
self.add(coords_mob)
self.wait(0.25)
self.remove(coords_mob)
self.add(coords_mob)
sphere_words.add(coords_mob)
self.sphere_words = sphere_words
self.initialize_ambiant_slider_movement()
self.play(FadeOut(n_sphere_words))
self.wait(3)
def show_closest_point(self):
target_x = 1-np.sqrt(0.2)
re_line = DashedLine(*[
self.sliders[i].number_to_point(target_x)+MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (-1, RIGHT)]
])
re_words = TextMobject(
"$0.2$", "units of real \\\\ estate each"
)
re_words.next_to(self.sphere_words, DOWN, MED_LARGE_BUFF)
re_rects = VGroup()
for slider in self.sliders:
rect = Rectangle(
width = 2*slider.tick_size,
height = (1-target_x)*slider.unit_size,
stroke_width = 0,
fill_color = GREEN,
fill_opacity = 0.75,
)
rect.move_to(slider.number_to_point(1), UP)
re_rects.add(rect)
self.wind_down_ambient_movement()
self.reset_dials(5*[target_x])
self.play(
ShowCreation(re_line),
Write(re_words, run_time = 2)
)
self.play(LaggedStartMap(
DrawBorderThenFill, re_rects,
rate_func = double_smooth
))
self.wait()
self.re_rects = re_rects
self.re_words = re_words
self.re_line = re_line
def show_halfway_point(self):
half_line = Line(*[
self.sliders[i].number_to_point(0.5)+MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (-1, RIGHT)]
])
half_line.set_color(MAROON_B)
half_label = TexMobject("0.5")
half_label.scale(self.sliders[0].number_scale_val)
half_label.next_to(half_line, LEFT, MED_SMALL_BUFF)
half_label.set_color(half_line.get_color())
curr_vector = self.get_vector()
ghost_dials = VGroup(*[
slider.dial.copy().set_fill(WHITE, 0.75)
for slider in self.sliders
])
point_25 = TexMobject("0.25")
point_25.set_color(half_label.get_color())
point_25.move_to(self.re_words[0], RIGHT)
self.re_words.save_state()
self.play(
Write(half_label),
ShowCreation(half_line)
)
self.wait(2)
self.add(ghost_dials)
self.play(*[
ApplyMethod(slider.set_value, 0.5)
for slider in self.sliders
])
self.play(Transform(self.re_words[0], point_25))
self.wait(2)
self.play(*[
ApplyMethod(slider.set_value, x)
for x, slider in zip(curr_vector, self.sliders)
])
self.play(self.re_words.restore)
def reframe_as_inner_sphere_point(self):
s = self.sliders[0]
shift_vect = s.number_to_point(0)-s.number_to_point(1)
re_ticks = VGroup(*[
slider.real_estate_ticks
for slider in self.sliders
])
re_rects = self.re_rects
re_rects.generate_target()
for rect, slider in zip(re_rects.target, self.sliders):
height = slider.unit_size*(1-np.sqrt(0.2))
rect.set_height(height)
rect.move_to(slider.number_to_point(0), DOWN)
self.sphere_words.generate_target()
old_coords = self.sphere_words.target[1]
new_coords = TexMobject(str(tuple(5*[0])))
new_coords.replace(old_coords, dim_to_match = 1)
new_coords.set_color(old_coords.get_color())
Transform(old_coords, new_coords).update(1)
self.re_words.generate_target()
new_re = TexMobject("0.31")
new_re.set_color(GREEN)
old_re = self.re_words.target[0]
new_re.move_to(old_re, RIGHT)
Transform(old_re, new_re).update(1)
self.play(
Animation(self.sliders),
LaggedStartMap(
ApplyMethod, re_ticks,
lambda m : (m.shift, shift_vect),
path_arc = np.pi
),
MoveToTarget(self.sphere_words),
)
self.play(
MoveToTarget(
re_rects,
run_time = 2,
lag_ratio = 0.5,
path_arc = np.pi
),
MoveToTarget(self.re_words),
)
self.wait(2)
self.set_center_point(np.zeros(5))
self.total_real_estate = (np.sqrt(5)-1)**2
self.initialize_ambiant_slider_movement()
self.wait(12)
def compute_radius(self):
computation = TexMobject(
"R_{\\text{inner}} &= \\sqrt{5}-1 \\\\",
"&\\approx 1.24"
)
computation.to_corner(UP+LEFT)
self.play(Write(computation, run_time = 2))
self.wait(12)
def poke_out_of_box(self):
self.wind_down_ambient_movement(0)
self.reset_dials([np.sqrt(5)-1])
words = TextMobject("Poking outside \\\\ the box!")
words.to_edge(LEFT)
words.set_color(RED)
arrow = Arrow(
words.get_top(),
self.sliders[0].dial,
path_arc = -np.pi/3,
color = words.get_color()
)
self.play(
ShowCreation(arrow),
Write(words)
)
self.wait(2)
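# Check for the 5D claim above (assumed helper name): sqrt(5) - 1 ~ 1.24
# exceeds 1, the distance from the center to a face, so the inner sphere
# pokes outside the side-length-2 box.
def _pokes_outside_box_5d():
    import numpy as np
    return np.sqrt(5) - 1 > 1  # True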
class SkipAheadTo10(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Let's skip ahead \\\\ to 10 dimensions",
target_mode = "hooray"
)
self.change_student_modes(
"pleading", "confused", "horrified"
)
self.wait(3)
class TenDBoxExampleWithSliders(FiveDBoxExampleWithSliders):
CONFIG = {
"n_sliders" : 10,
"center_point" : np.ones(10),
"ambient_velocity_magnitude" : 2.0,
"ambient_acceleration_magnitude" : 3.0,
}
def setup(self):
FourDBoxExampleWithSliders.setup(self)
self.sliders.to_edge(RIGHT)
def construct(self):
self.initial_wandering()
self.show_closest_point()
self.reframe_as_inner_sphere_point()
self.compute_inner_radius_numerically()
self.wander_on_inner_sphere()
self.poke_outside_outer_box()
def initial_wandering(self):
self.initialize_ambiant_slider_movement()
self.wait(9)
def show_closest_point(self):
target_x = 1-np.sqrt(1./self.n_sliders)
re_line = DashedLine(*[
self.sliders[i].number_to_point(target_x)+MED_SMALL_BUFF*vect
for i, vect in [(0, LEFT), (-1, RIGHT)]
])
re_rects = VGroup()
for slider in self.sliders:
rect = Rectangle(
width = 2*slider.tick_size,
height = (1-target_x)*slider.unit_size,
stroke_width = 0,
fill_color = GREEN,
fill_opacity = 0.75,
)
rect.move_to(slider.number_to_point(1), UP)
re_rects.add(rect)
self.wind_down_ambient_movement()
self.reset_dials(self.n_sliders*[target_x])
self.play(ShowCreation(re_line))
self.play(LaggedStartMap(
DrawBorderThenFill, re_rects,
rate_func = double_smooth
))
self.wait(2)
self.re_line = re_line
self.re_rects = re_rects
def reframe_as_inner_sphere_point(self):
s = self.sliders[0]
shift_vect = s.number_to_point(0)-s.number_to_point(1)
re_ticks = VGroup(*[
slider.real_estate_ticks
for slider in self.sliders
])
re_rects = self.re_rects
re_rects.generate_target()
for rect, slider in zip(re_rects.target, self.sliders):
height = slider.unit_size*(1-np.sqrt(1./self.n_sliders))
rect.stretch_to_fit_height(height)
rect.move_to(slider.number_to_point(0), DOWN)
self.play(
Animation(self.sliders),
LaggedStartMap(
ApplyMethod, re_ticks,
lambda m : (m.shift, shift_vect),
path_arc = np.pi
),
)
self.play(
MoveToTarget(
re_rects,
run_time = 2,
lag_ratio = 0.5,
path_arc = np.pi
),
)
self.wait(2)
self.set_center_point(np.zeros(self.n_sliders))
self.total_real_estate = (np.sqrt(self.n_sliders)-1)**2
self.initialize_ambiant_slider_movement()
self.wait(5)
def compute_inner_radius_numerically(self):
computation = TexMobject(
"R_{\\text{inner}} &= \\sqrt{10}-1 \\\\",
"&\\approx 2.16"
)
computation.to_corner(UP+LEFT)
self.play(Write(computation, run_time = 2))
def wander_on_inner_sphere(self):
self.wait(10)
def poke_outside_outer_box(self):
self.wind_down_ambient_movement()
self.reset_dials([np.sqrt(10)-1])
words = TextMobject(
"Outside the \\emph{outer} \\\\",
"bounding box!"
)
words.to_edge(LEFT)
words.set_color(RED)
arrow = Arrow(
words.get_top(),
self.sliders[0].dial,
path_arc = -np.pi/3,
color = words.get_color()
)
self.play(
Write(words, run_time = 2),
ShowCreation(arrow)
)
self.wait(3)
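# Check for the 10D claim (assumed helper name): sqrt(10) - 1 ~ 2.16 even
# exceeds 2, the half-width of the 4x4x...x4 outer bounding box, and n = 10
# is the first dimension where that happens (sqrt(n) - 1 > 2 requires n > 9).
def _pokes_outside_outer_box(n=10):
    import numpy as np
    return np.sqrt(n) - 1 > 2  # True for every n >= 10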
class TwoDOuterBox(TwoDInnerSphereTouchingBox):
def construct(self):
words = TextMobject("$4 \\!\\times\\! 4$ outer bounding box")
words.next_to(self.plane, UP)
words.set_color(MAROON_B)
line = Line(
self.plane.coords_to_point(-2, -2),
self.plane.coords_to_point(2, 2),
)
box = Square(color = words.get_color())
box.replace(line, stretch = True)
box.set_stroke(width = 8)
self.play(
Write(words),
ShowCreation(box),
self.randy.change, "pondering",
)
self.wait(3)
self.outer_box = box
class ThreeDOuterBoundingBox(ExternallyAnimatedScene):
pass
class ThreeDOuterBoundingBoxWords(Scene):
def construct(self):
words = TextMobject(
"$4 \\!\\times\\! 4\\!\\times\\! 4$ outer\\\\",
"bounding box"
)
words.set_width(FRAME_WIDTH-1)
words.to_edge(DOWN)
words.set_color(MAROON_B)
self.play(Write(words))
self.wait(4)
class FaceDistanceDoesntDependOnDimension(TwoDOuterBox):
def construct(self):
self.force_skipping()
TwoDOuterBox.construct(self)
self.randy.change("confused")
self.revert_to_original_skipping_status()
line = Line(
self.plane.coords_to_point(0, 0),
self.outer_box.get_right(),
buff = 0,
stroke_width = 6,
color = YELLOW
)
length_words = TextMobject("Always 2, in all dimensions")
length_words.next_to(self.plane, RIGHT, MED_LARGE_BUFF, UP)
arrow = Arrow(length_words[4].get_bottom(), line.get_center())
self.play(ShowCreation(line))
self.play(
Write(length_words),
ShowCreation(arrow)
)
self.play(self.randy.change, "thinking")
self.wait(3)
class TenDCornerIsVeryFarAway(TenDBoxExampleWithSliders):
CONFIG = {
"center_point" : np.zeros(10)
}
def construct(self):
self.show_re_rects()
def show_re_rects(self):
re_rects = VGroup()
for slider in self.sliders:
rect = Rectangle(
width = 2*slider.tick_size,
height = slider.unit_size,
stroke_width = 0,
fill_color = GREEN,
fill_opacity = 0.75,
)
rect.move_to(slider.number_to_point(0), DOWN)
re_rects.add(rect)
rect.save_state()
rect.stretch_to_fit_height(0)
rect.move_to(rect.saved_state, DOWN)
self.set_to_vector(np.zeros(10))
self.play(
LaggedStartMap(
ApplyMethod, re_rects,
lambda m : (m.restore,),
lag_ratio = 0.3,
),
LaggedStartMap(
ApplyMethod, self.sliders,
lambda m : (m.set_value, 1),
lag_ratio = 0.3,
),
run_time = 10,
)
self.wait()
class InnerRadiusIsUnbounded(TeacherStudentsScene):
def construct(self):
self.teacher_says("Inner radius \\\\ is unbounded")
self.change_student_modes(*["erm"]*3)
self.wait(3)
class ProportionOfSphereInBox(GraphScene):
CONFIG = {
"x_axis_label" : "Dimension",
"y_axis_label" : "",
"y_max" : 1.5,
"y_min" : 0,
"y_tick_frequency" : 0.25,
"y_labeled_nums" : np.linspace(0.25, 1, 4),
"x_min" : 0,
"x_max" : 50,
"x_tick_frequency" : 5,
"x_labeled_nums" : list(range(10, 50, 10)),
"num_graph_anchor_points" : 100,
}
def construct(self):
self.setup_axes()
title = TextMobject(
"Proportion of inner sphere \\\\ inside box"
)
title.next_to(self.y_axis, RIGHT, MED_SMALL_BUFF, UP)
self.add(title)
graph = self.get_graph(lambda x : np.exp(0.1*(9-x)))
max_y = self.coords_to_point(0, 1)[1]
too_high = graph.points[:,1] > max_y
graph.points[too_high, 1] = max_y  # clamp the curve so it never exceeds proportion 1
footnote = TextMobject("""
\\begin{flushleft}
*I may or may not have used an easy-to-compute \\\\
but not-totally-accurate curve here, due to \\\\
the surprising difficulty in computing the real \\\\
proportion :)
\\end{flushleft}
""",)
footnote.scale(0.75)
footnote.next_to(
graph.point_from_proportion(0.3),
UP+RIGHT, SMALL_BUFF
)
footnote.set_color(YELLOW)
self.play(ShowCreation(graph, run_time = 5, rate_func=linear))
self.wait()
self.add(footnote)
self.wait(0.25)
class ShowingToFriend(PiCreatureScene, SliderScene):
CONFIG = {
"n_sliders" : 10,
"ambient_acceleration_magnitude" : 3.0,
"seconds_to_blink" : 4,
}
def setup(self):
PiCreatureScene.setup(self)
SliderScene.setup(self)
self.sliders.scale(0.75)
self.sliders.next_to(
self.morty.get_corner(UP+LEFT), UP, MED_LARGE_BUFF
)
self.initialize_ambiant_slider_movement()
def construct(self):
morty, randy = self.morty, self.randy
self.play(morty.change, "raise_right_hand", self.sliders)
self.play(randy.change, "happy", self.sliders)
self.wait(7)
self.play(randy.change, "skeptical", morty.eyes)
self.wait(3)
self.play(randy.change, "thinking", self.sliders)
self.wait(6)
###
def create_pi_creatures(self):
self.morty = Mortimer()
self.morty.to_edge(DOWN).shift(4*RIGHT)
self.randy = Randolph()
self.randy.to_edge(DOWN).shift(4*LEFT)
return VGroup(self.morty, self.randy)
def non_blink_wait(self, time = 1):
SliderScene.wait(self, time)
class QuestionsFromStudents(TeacherStudentsScene):
def construct(self):
self.student_says(
"Is 10-dimensional \\\\ space real?",
target_mode = "sassy",
run_time = 2,
)
self.wait()
self.teacher_says(
"No less real \\\\ than reals",
target_mode = "shruggie",
content_introduction_class = FadeIn,
)
self.wait(2)
self.student_says(
"How do you think \\\\ about volume?",
student_index = 0,
content_introduction_class = FadeIn,
)
self.wait()
self.student_says(
"How do cubes work?",
student_index = 2,
run_time = 2,
)
self.wait(2)
class FunHighDSpherePhenomena(Scene):
def construct(self):
title = TextMobject(
"Fun high-D sphere phenomena"
)
title.to_edge(UP)
title.set_color(BLUE)
h_line = Line(LEFT, RIGHT).scale(5)
h_line.next_to(title, DOWN)
self.add(title, h_line)
items = VGroup(*list(map(TextMobject, [
"$\\cdot$ Most volume is near the equator",
"$\\cdot$ Most volume is near the surface",
"$\\cdot$ Sphere packing in 8 dimensions",
"$\\cdot$ Sphere packing in 24 dimensions",
])))
items.arrange(
DOWN, buff = MED_LARGE_BUFF, aligned_edge = LEFT
)
items.next_to(h_line, DOWN)
for item in items:
self.play(LaggedStartMap(FadeIn, item, run_time = 2))
self.wait()
class TODOBugOnSurface(TODOStub):
CONFIG = {
"message" : "Bug on surface"
}
class CoordinateFree(PiCreatureScene):
def construct(self):
plane = NumberPlane(x_radius = 2.5, y_radius = 2.5)
plane.add_coordinates()
plane.to_corner(UP+LEFT)
self.add(plane)
circles = VGroup(*[
Circle(color = YELLOW).move_to(
plane.coords_to_point(*coords)
)
for coords in it.product(*2*[[-1, 1]])
])
inner_circle = Circle(
radius = np.sqrt(2)-1,
color = GREEN
).move_to(plane.coords_to_point(0, 0))
self.add_foreground_mobjects(circles, inner_circle)
self.play(PiCreatureSays(
self.pi_creature, "Lose the \\\\ coordinates!",
target_mode = "hooray"
))
self.play(FadeOut(plane, run_time = 2))
self.wait(3)
class Skeptic(TeacherStudentsScene, SliderScene):
def setup(self):
SliderScene.setup(self)
TeacherStudentsScene.setup(self)
self.sliders.scale(0.7)
self.sliders.next_to(self.teacher, UP, aligned_edge = LEFT)
self.sliders.to_edge(UP)
self.initialize_ambiant_slider_movement()
def construct(self):
analytic_thought = VGroup(TextMobject("No different from"))
equation = TexMobject(
"x", "^2 + ", "y", "^2 + ", "z", "^2 + ", "w", "^2 = 1"
)
variables = VGroup(*[
equation.get_part_by_tex(tex)
for tex in "xyzw"
])
slider_labels = VGroup(*[
slider.label for slider in self.sliders
])
equation.next_to(analytic_thought, DOWN)
analytic_thought.add(equation)
all_real_estate_ticks = VGroup(*it.chain(*[
slider.real_estate_ticks
for slider in self.sliders
]))
box = Square(color = RED)
box.next_to(self.sliders, LEFT)
line = Line(box.get_center(), box.get_corner(UP+RIGHT))
line.set_color(YELLOW)
self.student_says(
analytic_thought,
student_index = 0,
target_mode = "sassy",
added_anims = [self.teacher.change, "guilty"]
)
self.wait(2)
equation.remove(*variables)
self.play(ReplacementTransform(variables, slider_labels))
self.play(
self.teacher.change, "pondering", slider_labels,
RemovePiCreatureBubble(
self.students[0], target_mode = "hesitant"
),
)
self.wait(4)
bubble = self.teacher.get_bubble(
"It's much \\\\ more playful!",
bubble_class = SpeechBubble
)
bubble.resize_to_content()
VGroup(bubble, bubble.content).next_to(self.teacher, UP+LEFT)
self.play(
self.teacher.change, "hooray",
ShowCreation(bubble),
Write(bubble.content)
)
self.wait(3)
self.play(
RemovePiCreatureBubble(
self.teacher, target_mode = "raise_right_hand",
look_at_arg = self.sliders
),
*[
ApplyMethod(pi.change, "pondering")
for pi in self.students
]
)
self.play(Animation(self.sliders), LaggedStartMap(
ApplyMethod, all_real_estate_ticks,
lambda m : (m.shift, SMALL_BUFF*LEFT),
rate_func = wiggle,
lag_ratio = 0.3,
run_time = 4,
))
self.play(
ShowCreation(box),
self.teacher.change, "happy"
)
self.play(ShowCreation(line))
self.wait(3)
#####
def non_blink_wait(self, time = 1):
SliderScene.wait(self, time)
class ClipFrom4DBoxExampleTODO(TODOStub):
CONFIG = {
"message" : "Clip from 4d box example"
}
class JustBecauseYouCantVisualize(Scene):
def construct(self):
phrase = "\\raggedright "
phrase += "Just because you can't visualize\\\\ "
phrase += "something doesn't mean you can't\\\\ "
phrase += "still think about it visually."
phrase_mob = TextMobject(*phrase.split(" "))
phrase_mob.set_color_by_tex("visual", YELLOW)
phrase_mob.next_to(ORIGIN, UP)
for part in phrase_mob:
self.play(LaggedStartMap(
FadeIn, part,
run_time = 0.05*len(part)
))
self.wait(2)
class Announcements(TeacherStudentsScene):
def construct(self):
title = TextMobject("Announcements")
title.scale(1.5)
title.to_edge(UP, buff = MED_SMALL_BUFF)
h_line = Line(LEFT, RIGHT).scale(3)
h_line.next_to(title, DOWN)
self.add(title, h_line)
items = VGroup(*list(map(TextMobject, [
"$\\cdot$ Where to learn more",
"$\\cdot$ Q\\&A Followup (podcast!)",
])))
items.arrange(DOWN, aligned_edge = LEFT)
items.next_to(h_line, DOWN)
self.play(
Write(items[0], run_time = 2),
)
self.play(*[
ApplyMethod(pi.change, "hooray", items)
for pi in self.pi_creatures
])
self.play(Write(items[1], run_time = 2))
self.wait(2)
class Promotion(PiCreatureScene):
CONFIG = {
"seconds_to_blink" : 5,
}
def construct(self):
url = TextMobject("https://brilliant.org/3b1b/")
url.to_corner(UP+LEFT)
rect = Rectangle(height = 9, width = 16)
rect.set_height(5.5)
rect.next_to(url, DOWN)
rect.to_edge(LEFT)
self.play(
Write(url),
self.pi_creature.change, "raise_right_hand"
)
self.play(ShowCreation(rect))
self.wait(2)
self.change_mode("thinking")
self.wait()
self.look_at(url)
self.wait(10)
self.change_mode("happy")
self.wait(10)
self.change_mode("raise_right_hand")
self.wait(10)
self.remove(rect)
self.play(
url.next_to, self.pi_creature, UP+LEFT
)
url_rect = SurroundingRectangle(url)
self.play(ShowCreation(url_rect))
self.play(FadeOut(url_rect))
self.wait(3)
class BrilliantGeometryQuiz(ExternallyAnimatedScene):
pass
class BrilliantScrollThroughCourses(ExternallyAnimatedScene):
pass
class Podcast(TeacherStudentsScene):
def construct(self):
title = TextMobject("Podcast!")
title.scale(1.5)
title.to_edge(UP)
title.shift(FRAME_X_RADIUS*LEFT/2)
self.add(title)
q_and_a = TextMobject("Q\\&A Followup")
q_and_a.next_to(self.teacher.get_corner(UP+LEFT), UP, LARGE_BUFF)
self.play(
LaggedStartMap(
ApplyMethod, self.pi_creatures,
lambda pi : (pi.change, "hooray", title)
),
Write(title)
)
self.wait(5)
self.play(
Write(q_and_a),
self.teacher.change, "raise_right_hand",
)
self.wait(4)
class HighDPatreonThanks(PatreonThanks):
CONFIG = {
"specific_patrons" : [
"Desmos",
"Burt Humburg",
"CrypticSwarm",
"Juan Benet",
"Ali Yahya",
"William",
"Mayank M. Mehrotra",
"Lukas Biewald",
"Samantha D. Suplee",
"James Park",
"Yana Chernobilsky",
"Kaustuv DeBiswas",
"Kathryn Schmiedicke",
"Yu Jun",
"dave nicponski",
"Damion Kistler",
"Markus Persson",
"Yoni Nazarathy",
"Corey Ogburn",
"Ed Kellett",
"Joseph John Cox",
"Dan Buchoff",
"Luc Ritchie",
"Erik Sundell",
"Xueqi Li",
"David Stork",
"Tianyu Ge",
"Ted Suzman",
"Amir Fayazi",
"Linh Tran",
"Andrew Busey",
"Michael McGuffin",
"John Haley",
"Ankalagon",
"Eric Lavault",
"Tomohiro Furusawa",
"Boris Veselinovich",
"Julian Pulgarin",
"Jeff Linse",
"Cooper Jones",
"Ryan Dahl",
"Mark Govea",
"Robert Teed",
"Jason Hise",
"Meshal Alshammari",
"Bernd Sing",
"Nils Schneider",
"James Thornton",
"Mustafa Mahdi",
"Mathew Bramson",
"Jerry Ling",
"Vecht",
"Shimin Kuang",
"Rish Kundalia",
"Achille Brighton",
"Ripta Pasay",
]
}
class Thumbnail(SliderScene):
CONFIG = {
"n_sliders" : 10,
}
def construct(self):
for slider in self.sliders:
self.remove(slider.label)
slider.remove(slider.label)
vect = np.random.random(10) - 0.5
vect /= get_norm(vect)
self.set_to_vector(vect)
title = TextMobject("10D Sphere?")
title.scale(2)
title.to_edge(UP)
self.add(title)
| 31.817029
| 99
| 0.561275
| 115,176
| 0.997506
| 0
| 0
| 0
| 0
| 0
| 0
| 8,180
| 0.070845
|
02f4fc8fa710340e57d5ba18128bb096623e09a7
| 871
|
py
|
Python
|
start_palpeo.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
start_palpeo.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
start_palpeo.py
|
RealDebian/Palpeo
|
23be184831a3c529cf933277944e7aacda08cdad
|
[
"MIT"
] | null | null | null |
from link_extractor import run_enumeration
from colorama import Fore
from time import sleep
import colorama

colorama.init()  # enable ANSI color codes on Windows terminals as well
print(Fore.GREEN + '-----------------------------------' + Fore.RESET, Fore.RED)
print('尸闩㇄尸㠪龱 - Website Link Extractor')
print(' by @RealDebian | V0.02')
print(Fore.GREEN + '-----------------------------------' + Fore.RESET)
print()
sleep(1)
print('Example:')
print('  Target Site: example.com   (hostname only, no protocol prefix)')
print()
target_host = str(input('Target Site: '))
print('Select the Protocol (http|https)')
sleep(.5)
while True:
    # keep asking until a valid protocol choice is entered
    protocol = str(input('http=0 | https=1: '))
    if protocol == '0':
        run_enumeration('http://' + target_host)
        break
    elif protocol == '1':
        run_enumeration('https://' + target_host)
        break
    else:
        print('Wrong option!')
| 24.194444
| 80
| 0.624569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.302378
|
02f5826c6c30c33aa057a91cc4e4070320f7be69
| 4,994
|
py
|
Python
|
tests/test_scores_das_01.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | 5
|
2021-12-16T14:08:00.000Z
|
2022-03-02T14:08:10.000Z
|
tests/test_scores_das_01.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scores_das_01.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | null | null | null |
import xarray
import numpy
from enstools.scores import DisplacementAmplitudeScore
def test_embed_image():
"""
test of embed_image from match_pyramid_ic
"""
# create test image
test_im = xarray.DataArray(numpy.random.randn(5, 3))
# new array should have shape (8, 4)
result = DisplacementAmplitudeScore.match_pyramid_ic.embed_image(test_im, 4)
numpy.testing.assert_array_equal(numpy.array(result.shape), numpy.array((8, 4)))
# new array should have shape (24, 6)
result = DisplacementAmplitudeScore.match_pyramid_ic.embed_image(test_im, 4, 3, 3)
numpy.testing.assert_array_equal(numpy.array(result.shape), numpy.array((24, 6)))
# input image should be part of result image
numpy.testing.assert_array_equal(test_im, result[:5, :3])
def test_map_backwards():
"""
    test of backward mapping from match_pyramid_ic
"""
# create test image
test_im = numpy.zeros((5, 5))
test_im[2, 2] = 1
# create displacement vectors
xdis = numpy.ones((5, 5))
ydis = xdis
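    # map_backward samples the input at (position + displacement), so the
    # single nonzero pixel at (2, 2) should end up at (1, 1) under this
    # uniform (+1, +1) displacement field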
# apply mapping
result = DisplacementAmplitudeScore.match_pyramid_ic.map_backward(test_im, xdis, ydis)
expected = numpy.zeros((5, 5))
expected[1, 1] = 1
numpy.testing.assert_array_equal(result, expected)
def test_gauss_kern():
"""
    test of gauss_kern from match_pyramid_ic
"""
result = DisplacementAmplitudeScore.match_pyramid_ic.gauss_kern(1, 1)
numpy.testing.assert_equal(result.sum(), 1)
def test_downsize():
"""
    test of downsize from match_pyramid_ic
"""
# create test image
test_image = numpy.random.randn(4, 4)
# downsize by factor 2
result = DisplacementAmplitudeScore.match_pyramid_ic.downsize(test_image, 2)
numpy.testing.assert_equal(result[0, 0], test_image[0:2, 0:2].mean())
def test_match_pyramid():
"""
    test of match_pyramid from match_pyramid_ic
"""
# create two test images
im1 = numpy.zeros((5, 5))
im1[1:3, 1:3] = 1
im2 = numpy.zeros((5, 5))
im2[2:4, 2:4] = 1
result, xdis, ydis, lse = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(im1, im2)
numpy.testing.assert_array_almost_equal(numpy.round(result), im2)
def test_calc_das():
"""
test of pure das calculation calc_das from calc_das.py
"""
# create two test images
obs = numpy.zeros((5, 5))
obs[1:3, 1:3] = 1
fct = numpy.zeros((5, 5))
fct[2:4, 2:4] = 1
# morph fct to obs,obs-space
morph_o, xdis_o, ydis_o, lse_o = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(fct, obs)
# morph obs to fct,fct-space
morph_f, xdis_f, ydis_f, lse_f = DisplacementAmplitudeScore.match_pyramid_ic.match_pyramid(obs, fct)
# reproduce expected values
das, dis, amp, rms_obs = DisplacementAmplitudeScore.calc_das.calc_das(obs, fct, xdis_o, ydis_o,
lse_o, xdis_f, ydis_f, lse_f,
dis_max=5, threshold=0.5)
expected = (0.48602544875444409, 0.35238775926722798, 0.1336376894872161, 1.0)
numpy.testing.assert_array_almost_equal((das, dis, amp, rms_obs), expected)
def test_threshold_data():
"""
test of threshold data from calc_das
"""
# create test data
obs = numpy.random.randn(10, 10)
sum_obs = numpy.sum(obs)
# set everything below 1 to zero
filtered = DisplacementAmplitudeScore.calc_das.threshold_data(obs, 1)
for x in range(10):
for y in range(10):
numpy.testing.assert_equal(filtered[x, y] == 0 or filtered[x, y] > 1, True)
# the input array should remain unchanged
numpy.testing.assert_equal(numpy.sum(obs), sum_obs)
def test_das():
"""
test of the actual DAS score
"""
# create test data
obs = numpy.zeros((100, 100))
obs[50:52, 50:52] = 2
fct = numpy.zeros((100, 100))
fct[51:53, 51:53] = 2
# perform calculation
das = DisplacementAmplitudeScore.das(obs, fct)
numpy.testing.assert_array_almost_equal(das["das"], 0.857092469745)
numpy.testing.assert_array_almost_equal(das["dis"], 0.027265825324)
numpy.testing.assert_array_almost_equal(das["amp"], 0.829826644421)
numpy.testing.assert_array_almost_equal(das["rms_obs"], 0.11111111)
# perfect score
das = DisplacementAmplitudeScore.das(obs, obs)
numpy.testing.assert_array_almost_equal(das["das"], 0.0)
numpy.testing.assert_array_almost_equal(das["dis"], 0.0)
numpy.testing.assert_array_almost_equal(das["amp"], 0.0)
# only values below threshold
obs[50:52, 50:52] = 1
fct[51:53, 51:53] = 1
das = DisplacementAmplitudeScore.das(obs, fct, threshold=1)
numpy.testing.assert_array_equal(das["das"], numpy.nan)
numpy.testing.assert_array_equal(das["dis"], numpy.nan)
numpy.testing.assert_array_equal(das["amp"], numpy.nan)
numpy.testing.assert_array_equal(das["rms_obs"], numpy.nan)
| 33.293333
| 119
| 0.665999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,059
| 0.212054
|
02f6d5351b6d28ac6a5a83e1bce309686a5a07fc
| 833
|
py
|
Python
|
src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py
|
tejpratap545/E-Commerce-Application
|
c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a
|
[
"MIT"
] | null | null | null |
src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py
|
tejpratap545/E-Commerce-Application
|
c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a
|
[
"MIT"
] | 7
|
2021-08-13T23:05:47.000Z
|
2022-02-27T10:23:46.000Z
|
src/backend/backend/shopit/migrations/0024_auto_20201028_2008.py
|
tejpratap545/E-Commerce-Application
|
c1aada5d86f231e5acd6ba4c6c9b88ff4b351f7a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-28 14:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shopit', '0023_availablefilterselectoptions_value'),
]
operations = [
migrations.RemoveField(
model_name='productinfo',
name='is_available',
),
migrations.RemoveField(
model_name='productinfo',
name='stock',
),
migrations.AddField(
model_name='product',
name='popularity',
field=models.SmallIntegerField(blank=True, default=5, null=True),
),
migrations.AddField(
model_name='product',
name='stock',
field=models.PositiveIntegerField(blank=True, default=1, null=True),
),
]
| 26.03125
| 80
| 0.57503
| 740
| 0.888355
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.216086
|
02f729610f64d4759bc9416f6b95eedcf29070ca
| 1,804
|
py
|
Python
|
aoc/event2019/day19/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
aoc/event2019/day19/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
aoc/event2019/day19/solve.py
|
rjbatista/AoC
|
5c6ca4bcb376c24ec730eb12fd7044f5326ee473
|
[
"MIT"
] | null | null | null |
from event2019.day13.computer_v4 import Computer_v4
########
# PART 1
computer = Computer_v4([])
computer.load_code("event2019/day19/input.txt")
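# Query the Intcode drone for point (x, y): the program outputs 1 when the
# point is inside the tractor beam and 0 otherwise (the code is reloaded
# before every run).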
def get_value(x, y):
out = []
computer.reload_code()
computer.run([x, y], out)
return out[0]
def get_area(side=50):
area = []
min_x, width = 0, 0
for y in range(side):
row_min_x = None
x = min_x
# first '#'
while True:
val = get_value(x, y)
if val != 1:
x += 1
else:
row_min_x = x
min_x = x
break
if x == side:
break
        if row_min_x is not None:
# last '#'
if width > 0:
x += width - 1
while True:
val = get_value(x, y)
if val == 1:
x += 1
else:
width = x - row_min_x
break
assert x != side
area += [(row_min_x, width)]
return area
answer = sum(width for x, width in get_area() if x is not None)
print("Part 1 =", answer)
assert answer == 203 # check with accepted answer
########
# PART 2
def get_top_right_in_beam(square_size=100):
# skip ahead depending on square size
x, y = (square_size * 5, square_size * 10)
while True:
if get_value(x, y) == 1:
if get_value(x + square_size - 1, y - square_size + 1) == 1: # True implies top_left and bottom_right
return x, y - square_size + 1
y += 1
else:
x += 1
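# The beam widens monotonically with distance, so once the bottom-left
# corner (x, y) and the top-right corner (x + size - 1, y - size + 1) are
# both inside it, the whole size x size square fits between them.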
x, y = get_top_right_in_beam()
answer = 10000 * x + y
print("Part 2 =", 10000 * x + y)
assert answer == 8771057 # check with accepted answer
| 21.73494
| 114
| 0.471729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 233
| 0.129157
|
02f79e3624d623adc544da46b4a6554d6c1bfa3b
| 849
|
py
|
Python
|
fileo/accounts/forms.py
|
Tiqur/Fileo
|
0c663f3bb28985d2d7b4cb475a95b1592cfb2013
|
[
"MIT"
] | null | null | null |
fileo/accounts/forms.py
|
Tiqur/Fileo
|
0c663f3bb28985d2d7b4cb475a95b1592cfb2013
|
[
"MIT"
] | null | null | null |
fileo/accounts/forms.py
|
Tiqur/Fileo
|
0c663f3bb28985d2d7b4cb475a95b1592cfb2013
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm
from .models import FileoUser
User = FileoUser  # the custom user model class, not an instance
class UserLoginForm(forms.ModelForm):
password = forms.CharField(label='Password', widget=forms.PasswordInput)
class Meta:
model = FileoUser
fields = ('email', 'password')
    def clean(self):
        cleaned_data = super().clean()
        email = cleaned_data.get('email')
        password = cleaned_data.get('password')
        # only attempt authentication once both fields passed field validation
        if email and password and not authenticate(email=email, password=password):
            raise forms.ValidationError('Invalid login')
        return cleaned_data
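# Note: authenticate(email=..., password=...) assumes FileoUser declares
# USERNAME_FIELD = 'email' (or that a custom backend accepts an ``email``
# keyword); Django's default ModelBackend otherwise expects ``username``.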
class UserRegisterForm(UserCreationForm):
email = forms.EmailField(max_length=60, help_text='Add a valid email address')
class Meta:
model = FileoUser
fields = ('email', 'username', 'password1', 'password2')
| 29.275862
| 82
| 0.69258
| 661
| 0.778563
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.147232
|
02f7dfdc4c7be780ca3def3290b1d78bbe909246
| 959
|
py
|
Python
|
setup.py
|
jnsgruk/lightkube-models
|
7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d
|
[
"MIT"
] | 1
|
2021-10-14T08:49:10.000Z
|
2021-10-14T08:49:10.000Z
|
setup.py
|
jnsgruk/lightkube-models
|
7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d
|
[
"MIT"
] | 2
|
2021-10-14T18:09:31.000Z
|
2021-10-14T18:09:52.000Z
|
setup.py
|
jnsgruk/lightkube-models
|
7fce1ed1d00ee599eaa4fad82868ec6b55c84c8d
|
[
"MIT"
] | 1
|
2021-10-13T15:08:58.000Z
|
2021-10-13T15:08:58.000Z
|
from setuptools import setup
from pathlib import Path
from lightkube.models import __version__
setup(
name='lightkube-models',
version=__version__,
description='Models and Resources for lightkube module',
long_description=Path("README.md").read_text(),
long_description_content_type="text/markdown",
author='Giuseppe Tribulato',
author_email='gtsystem@gmail.com',
license='Apache Software License',
url='https://github.com/gtsystem/lightkube-models',
packages=['lightkube.models', 'lightkube.resources'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
)
| 33.068966
| 60
| 0.667362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 542
| 0.565172
|
02f8318053016bd127b7feb86e89f4c704276dce
| 465
|
py
|
Python
|
kagi/upper/west/_capital/four.py
|
jedhsu/kagi
|
1301f7fc437bb445118b25ca92324dbd58d6ad2d
|
[
"MIT"
] | null | null | null |
kagi/upper/west/_capital/four.py
|
jedhsu/kagi
|
1301f7fc437bb445118b25ca92324dbd58d6ad2d
|
[
"MIT"
] | null | null | null |
kagi/upper/west/_capital/four.py
|
jedhsu/kagi
|
1301f7fc437bb445118b25ca92324dbd58d6ad2d
|
[
"MIT"
] | null | null | null |
"""
*Upper-West Capital 4* ⠨
The upper-west capital four gi.
"""
from dataclasses import dataclass
from ....._gi import Gi
from ....capital import CapitalGi
from ...._gi import StrismicGi
from ....west import WesternGi
from ...._number import FourGi
from ..._gi import UpperGi
__all__ = ["UpperWestCapital4"]
@dataclass
class UpperWestCapital4(
Gi,
StrismicGi,
UpperGi,
WesternGi,
CapitalGi,
FourGi,
):
symbol = "\u2828"
| 15
| 33
| 0.668817
| 128
| 0.27409
| 0
| 0
| 139
| 0.297645
| 0
| 0
| 104
| 0.222698
|
02f87c91bee648002483bc9254e7698d4ec9f8f2
| 5,626
|
py
|
Python
|
tests/test_dictattr.py
|
atsuoishimoto/jashin
|
6705839461dd9fdfe50cbc6f93fe9ba2da889f0a
|
[
"MIT"
] | 1
|
2020-06-04T23:44:48.000Z
|
2020-06-04T23:44:48.000Z
|
tests/test_dictattr.py
|
sojin-project/jashin
|
6705839461dd9fdfe50cbc6f93fe9ba2da889f0a
|
[
"MIT"
] | null | null | null |
tests/test_dictattr.py
|
sojin-project/jashin
|
6705839461dd9fdfe50cbc6f93fe9ba2da889f0a
|
[
"MIT"
] | null | null | null |
import enum
from typing import Any, Dict
from jashin.dictattr import DictModel, ItemAttr, MappingAttr, SequenceAttr
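# ItemAttr is a descriptor over the dict returned by __dictattr_get__()
# (DictModel supplies it and exposes the dict as ``.values``): the optional
# first argument is a loader applied on read, and ``dump`` converts on write.
# A minimal sketch of the pattern exercised below:
#
#     class Point(DictModel):
#         x = ItemAttr(int)
#
#     Point({"x": "1"}).x  # -> 1 (loader applied on read)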
def test_dictattr() -> None:
class Test:
field1 = ItemAttr[str]()
field2 = ItemAttr[str](name="field_2")
field3 = ItemAttr[str](default="default_field3")
def __dictattr_get__(self) -> Dict[str, Any]:
return self._dict
_dict = {
"field1": "field1_value",
"field_2": "field2_value",
}
d = Test()
assert d.field1 == "field1_value"
assert d.field2 == "field2_value"
assert d.field3 == "default_field3"
def test_loader() -> None:
class Child(DictModel):
field1 = ItemAttr(int)
class Label(enum.Enum):
A = 100
B = 200
class Parent(DictModel):
field1 = ItemAttr(Child)
field2 = ItemAttr(Label)
d = Parent({"field1": {"field1": "100", "field2": "200",}, "field2": 200})
assert isinstance(d.field1, Child)
assert d.field1.field1 == 100
assert d.field2 is Label.B
def test_set() -> None:
class Parent(DictModel):
field1 = ItemAttr[str]()
d = Parent({})
d.field1 = "abc"
assert d.values == {"field1": "abc"}
class Parent2(DictModel):
field1 = ItemAttr[str](dump=int)
d2 = Parent2({})
d2.field1 = "1000"
assert d2.values == {"field1": 1000}
class Child3(DictModel):
field31 = ItemAttr[str]()
class Parent3(DictModel):
field1 = ItemAttr(Child3)
d3 = Parent3({"field1": {"field31": "value"}})
c = Child3({"field31": "value2"})
d3.field1 = c
assert d3.field1.field31 == "value2"
assert d3.values == {"field1": {"field31": "value2"}}
def test_del() -> None:
class Parent(DictModel):
field1 = ItemAttr[str]()
d = Parent({"field1": "abc"})
del d.field1
assert d.values == {}
def test_list_get() -> None:
class Parent1(DictModel):
field1 = SequenceAttr[int]()
d = Parent1({"field1": [1, 2]})
assert len(d.field1) == 2
assert d.field1[0] == 1
assert d.field1[1] == 2
class Child2(DictModel):
field1 = ItemAttr[str]()
class Parent2(DictModel):
field1 = SequenceAttr(Child2)
d2 = Parent2({"field1": [{"field1": "100",}, {"field1": "200",}]})
assert len(d2.field1) == 2
assert d2.field1[0].field1 == "100"
assert d2.field1[1].field1 == "200"
def test_list_set() -> None:
class Parent1(DictModel):
field1 = SequenceAttr[int]()
d = Parent1({"field1": [1, 2]})
d.field1 = [3, 4, 5]
assert len(d.field1) == 3
assert list(d.field1) == [3, 4, 5]
d.field1[0] = 100
assert d.values == {"field1": [100, 4, 5]}
d.field1[:] = [1]
assert d.values == {"field1": [1]}
del d.field1[0]
assert d.values == {"field1": []}
class Parent2(DictModel):
field1 = SequenceAttr[int](dump=lambda v: v * 2)
d2 = Parent2({"field1": [1, 2]})
d2.field1 = [3, 4, 5]
assert len(d2.field1) == 3
assert list(d2.field1) == [6, 8, 10]
d2.field1[1] = 10
assert d2.values == {"field1": [6, 20, 10]}
class Child3(DictModel):
field1 = ItemAttr[str]()
class Parent3(DictModel):
field1 = SequenceAttr(Child3)
d3 = Parent3({"field1": [{"field1": "100",}, {"field1": "200",}]})
seq = [
Child3({"field1": "300"}),
Child3({"field1": "400"}),
Child3({"field1": "500"}),
]
d3.field1 = seq
assert len(d3.field1) == 3
assert d3.field1[0].field1 == "300"
assert d3.field1[1].field1 == "400"
assert d3.field1[2].field1 == "500"
c = Child3({"field1": "600"})
d3.field1[1] = c
assert d3.values["field1"][1]["field1"] == "600"
d3.field1[:] = [c]
assert d3.values == {"field1": [{"field1": "600"}]}
def test_dict_get() -> None:
class Parent1(DictModel):
field1 = MappingAttr[str, int]()
d = Parent1({"field1": {"k1": 1, "k2": 2}})
assert len(d.field1) == 2
assert d.field1["k1"] == 1
assert d.field1["k2"] == 2
class Child2(DictModel):
field1 = ItemAttr[str]()
class Parent2(DictModel):
field1 = MappingAttr[int, Child2](Child2)
d2 = Parent2({"field1": {0: {"field1": "100",}, 1: {"field1": "200",}}})
assert len(d2.field1) == 2
assert d2.field1[0].field1 == "100"
assert d2.field1[1].field1 == "200"
def test_dict_set() -> None:
class Parent1(DictModel):
field1 = MappingAttr[str, int]()
d = Parent1({"field1": {"k1": 1, "k2": 2}})
d.field1 = {"k3": 3, "k4": 4, "k5": 5}
assert len(d.field1) == 3
assert dict(d.field1) == {"k3": 3, "k4": 4, "k5": 5}
assert d.field1["k3"] == 3
class Parent2(DictModel):
field1 = MappingAttr[str, int](dump=lambda v: v * 2)
d2 = Parent2({"field1": {"k1": 1, "k2": 2}})
d2.field1 = {"k3": 3, "k4": 4, "k5": 5}
assert len(d2.field1) == 3
assert list(d2.field1.values()) == [6, 8, 10]
assert d2.field1["k3"] == 6
class Child3(DictModel):
field1 = ItemAttr[str]()
class Parent3(DictModel):
field1 = MappingAttr[str, Child3](Child3)
d3 = Parent3({"field1": {"a": {"field1": "100",}, "b": {"field1": "200",}}})
v = {
"x": Child3({"field1": "300"}),
"y": Child3({"field1": "400"}),
"z": Child3({"field1": "500"}),
}
d3.field1 = v
assert len(d3.field1) == 3
assert d3.field1["x"].field1 == "300"
assert d3.field1["y"].field1 == "400"
assert d3.field1["z"].field1 == "500"
c = Child3({"field1": "600"})
d3.field1["1"] = c
assert d3.values["field1"]["1"]["field1"] == "600"
| 24.25
| 80
| 0.545325
| 1,763
| 0.313367
| 0
| 0
| 0
| 0
| 0
| 0
| 814
| 0.144685
|
02f8b65e136d03ceacb32c0a454b3d2ad573a0cb
| 191
|
py
|
Python
|
acmicpc/5612.py
|
juseongkr/BOJ
|
8f10a2bf9a7d695455493fbe7423347a8b648416
|
[
"Apache-2.0"
] | 7
|
2020-02-03T10:00:19.000Z
|
2021-11-16T11:03:57.000Z
|
acmicpc/5612.py
|
juseongkr/Algorithm-training
|
8f10a2bf9a7d695455493fbe7423347a8b648416
|
[
"Apache-2.0"
] | 1
|
2021-01-03T06:58:24.000Z
|
2021-01-03T06:58:24.000Z
|
acmicpc/5612.py
|
juseongkr/Algorithm-training
|
8f10a2bf9a7d695455493fbe7423347a8b648416
|
[
"Apache-2.0"
] | 1
|
2020-01-22T14:34:03.000Z
|
2020-01-22T14:34:03.000Z
|
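# BOJ 5612: maintain a running count (start at m, add a and subtract b per
# record); if it ever drops below zero, print 0 and stop, otherwise report
# the maximum count reached.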
n = int(input())
m = int(input())
r = m
for i in range(n):
a, b = map(int, input().split())
m += a
m -= b
if m < 0:
print(0)
exit()
r = max(r, m)
print(r)
| 14.692308
| 36
| 0.418848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02f9422687e1cf10a5083c7345c12d1a45915872
| 66,679
|
py
|
Python
|
tests/function/test_func_partition.py
|
ddimatos/zhmc-ansible-modules
|
6eb29056052f499021a4bab26539872b25050640
|
[
"Apache-2.0"
] | null | null | null |
tests/function/test_func_partition.py
|
ddimatos/zhmc-ansible-modules
|
6eb29056052f499021a4bab26539872b25050640
|
[
"Apache-2.0"
] | null | null | null |
tests/function/test_func_partition.py
|
ddimatos/zhmc-ansible-modules
|
6eb29056052f499021a4bab26539872b25050640
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017-2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Function tests for the 'zhmc_partition' Ansible module.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import mock
import re
from zhmcclient import Client
from zhmcclient_mock import FakedSession
from plugins.modules import zhmc_partition
from .func_utils import mock_ansible_module
# FakedSession() init arguments
FAKED_SESSION_KWARGS = dict(
host='fake-host',
hmc_name='faked-hmc-name',
hmc_version='2.13.1',
api_version='1.8'
)
# Faked Console that is used for all tests
# (with property names as specified in HMC data model)
FAKED_CONSOLE_URI = '/api/console'
FAKED_CONSOLE = {
'object-uri': FAKED_CONSOLE_URI,
'class': 'console',
'name': 'hmc-1',
'description': 'Console HMC1',
'version': '2.13.0',
}
# Faked CPC in DPM mode that is used for all tests
# (with property names as specified in HMC data model)
FAKED_CPC_1_OID = 'fake-cpc-1'
FAKED_CPC_1_URI = '/api/cpcs/' + FAKED_CPC_1_OID
FAKED_CPC_1 = {
'object-id': FAKED_CPC_1_OID,
'object-uri': FAKED_CPC_1_URI,
'class': 'cpc',
'name': 'cpc-name-1',
'description': 'CPC #1 in DPM mode',
'status': 'active',
'dpm-enabled': True,
'is-ensemble-member': False,
'iml-mode': 'dpm',
}
# Faked partition that is used for these tests. Most properties are set to
# their default values. Note, we are prepping a faked partition; we are not
# passing these properties to PartitionManager.create().
FAKED_PARTITION_1_NAME = 'part-name-1'
FAKED_PARTITION_1_OID = 'fake-part-1'
FAKED_PARTITION_1_URI = '/api/partitions/' + FAKED_PARTITION_1_OID
FAKED_PARTITION_1 = {
'object-id': FAKED_PARTITION_1_OID,
'object-uri': FAKED_PARTITION_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'partition',
'name': FAKED_PARTITION_1_NAME,
'description': 'Partition #1',
'short-name': 'PART1',
'partition-id': '4F',
'ifl-processors': 1,
'initial-memory': 1024,
'maximum-memory': 2048,
'status': 'stopped',
'acceptable-status': ['active', 'stopped'],
'has-unacceptable-status': False,
# The remaining properties get their default values:
'is-locked': False,
'type': 'linux',
'autogenerate-partition-id': True,
'os-name': '',
'os-type': '',
'os-version': '',
'reserve-resources': False,
'degraded-adapters': [],
'processor-mode': 'shared',
'cp-processors': 0,
'ifl-absolute-processor-capping': False,
'cp-absolute-processor-capping': False,
'ifl-absolute-processor-capping-value': 1.0,
'cp-absolute-processor-capping-value': 1.0,
'ifl-processing-weight-capped': False,
'cp-processing-weight-capped': False,
'minimum-ifl-processing-weight': 1,
'minimum-cp-processing-weight': 1,
'initial-ifl-processing-weight': 100,
'initial-cp-processing-weight': 100,
'current-ifl-processing-weight': 42,
'current-cp-processing-weight': 100,
'maximum-ifl-processing-weight': 999,
'maximum-cp-processing-weight': 999,
'processor-management-enabled': False,
'reserved-memory': 1024,
'auto-start': False,
'boot-device': 'none',
'boot-network-device': None,
'boot-ftp-host': None,
'boot-ftp-username': None,
'boot-ftp-password': None,
'boot-ftp-insfile': None,
'boot-removable-media': None,
'boot-removable-media-type': None,
'boot-timeout': 60,
'boot-storage-device': None,
'boot-logical-unit-number': '',
'boot-world-wide-port-name': '',
'boot-configuration-selector': 0,
'boot-record-lba': None,
'boot-os-specific-parameters': None,
'boot-iso-image-name': None,
'boot-iso-ins-file': None,
'access-global-performance-data': False,
'permit-cross-partition-commands': False,
'access-basic-counter-set': False,
'access-problem-state-counter-set': False,
'access-crypto-activity-counter-set': False,
'access-extended-counter-set': False,
'access-coprocessor-group-set': False,
'access-basic-sampling': False,
'access-diagnostic-sampling': False,
'permit-des-key-import-functions': True,
'permit-aes-key-import-functions': True,
'threads-per-processor': 0,
'virtual-function-uris': [],
'nic-uris': [],
'hba-uris': [],
'storage-group-uris': [],
'crypto-configuration': None,
# SSC-only properties; they are not present for type='linux'
# 'ssc-host-name': None,
# 'ssc-boot-selection': None,
# 'ssc-ipv4-gateway': None,
# 'ssc-dns-servers': None,
# 'ssc-master-userid': None,
# 'ssc-master-pw': None,
}
# Faked HBA that is used for these tests (for partition boot from storage).
# Most properties are set to their default values.
FAKED_HBA_1_NAME = 'hba-1'
FAKED_HBA_1_OID = 'fake-hba-1'
FAKED_HBA_1_URI = FAKED_PARTITION_1_URI + '/hbas/' + FAKED_HBA_1_OID
FAKED_HBA_1 = {
'element-id': FAKED_HBA_1_OID,
'element-uri': FAKED_HBA_1_URI,
'parent': FAKED_PARTITION_1_URI,
'class': 'hba',
'name': FAKED_HBA_1_NAME,
'description': 'HBA #1',
'device_number': '012F',
'wwpn': 'abcdef0123456789',
'adapter-port-uri': 'faked-adapter-port-uri',
}
# Faked adapter, port and vswitch used for the OSA NIC.
FAKED_ADAPTER_1_NAME = 'osa adapter #1'
FAKED_ADAPTER_1_OID = 'fake-osa-adapter-1'
FAKED_ADAPTER_1_URI = '/api/adapters/' + FAKED_ADAPTER_1_OID
FAKED_ADAPTER_1_ID = '110'
FAKED_PORT_1_INDEX = 0
FAKED_PORT_1_NAME = 'Port #1'
FAKED_PORT_1_OID = 'fake-port-1'
FAKED_PORT_1_URI = '/api/adapters/' + FAKED_ADAPTER_1_OID + '/ports/' + \
FAKED_PORT_1_OID
FAKED_VSWITCH_1_NAME = 'vswitch-1'
FAKED_VSWITCH_1_OID = 'fake-vswitch-1'
FAKED_VSWITCH_1_URI = '/api/virtual-switches/' + FAKED_VSWITCH_1_OID
FAKED_ADAPTER_1 = {
'object-id': FAKED_ADAPTER_1_OID,
'object-uri': FAKED_ADAPTER_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'adapter',
'name': FAKED_ADAPTER_1_NAME,
'description': 'OSA adapter #1',
'type': 'osd',
'adapter-family': 'osa',
'port-count': 1,
'network-port-uris': [FAKED_PORT_1_URI],
'adapter-id': FAKED_ADAPTER_1_ID,
}
FAKED_PORT_1 = {
'element-id': FAKED_PORT_1_OID,
'element-uri': FAKED_PORT_1_URI,
'parent': FAKED_ADAPTER_1_URI,
'class': 'network-port',
'name': FAKED_PORT_1_NAME,
'description': 'Port #1 of OSA adapter #1',
'index': FAKED_PORT_1_INDEX,
}
FAKED_VSWITCH_1 = {
'object-id': FAKED_VSWITCH_1_OID,
'object-uri': FAKED_VSWITCH_1_URI,
'parent': FAKED_CPC_1_URI,
'class': 'virtual-switch',
'name': FAKED_VSWITCH_1_NAME,
'description': 'vswitch for OSA adapter #1',
'type': 'osd',
'backing-adapter-uri': FAKED_ADAPTER_1_URI,
'port': FAKED_PORT_1_INDEX,
}
# Faked OSA NIC that is used for these tests (for partition boot from storage).
# Most properties are set to their default values.
FAKED_NIC_1_NAME = 'nic-1'
FAKED_NIC_1_OID = 'fake-nic-1'
FAKED_NIC_1_URI = FAKED_PARTITION_1_URI + '/nics/' + FAKED_NIC_1_OID
FAKED_NIC_1 = {
'element-id': FAKED_NIC_1_OID,
'element-uri': FAKED_NIC_1_URI,
'parent': FAKED_PARTITION_1_URI,
'class': 'nic',
'name': FAKED_NIC_1_NAME,
'description': 'NIC #1',
'device_number': '022F',
'virtual-switch-uri': FAKED_VSWITCH_1_URI,
'type': 'osd',
'ssc-management-nic': False,
'mac-address': 'fa:ce:da:dd:6e:55',
}
# Faked crypto adapters
# (with property names as specified in HMC data model)
FAKED_CRYPTO_ADAPTER_1 = {
'object-id': 'crypto-adapter-oid-1',
# We need object-uri for the assertions
'object-uri': '/api/cpcs/cpc-oid-1/adapters/crypto-adapter-oid-1',
'parent': '/api/cpcs/cpc-oid-1',
'class': 'adapter',
'name': 'crypto-adapter-name-1',
'crypto-number': 1,
'crypto-type': 'ep11-coprocessor',
'udx-loaded': True,
'description': 'Crypto adapter #1',
'status': 'active',
'type': 'crypto',
'adapter-id': '02A',
'adapter-family': 'crypto',
'detected-card-type': 'crypto-express-5s',
'card-location': 'vvvv-wwww',
'state': 'online',
'physical-channel-status': 'operating',
}
FAKED_CRYPTO_ADAPTER_2 = {
'object-id': 'crypto-adapter-oid-2',
# We need object-uri for the assertions
'object-uri': '/api/cpcs/cpc-oid-1/adapters/crypto-adapter-oid-2',
'parent': '/api/cpcs/cpc-oid-1',
'class': 'adapter',
'name': 'crypto-adapter-name-2',
'crypto-number': 2,
'crypto-type': 'cca-coprocessor',
'udx-loaded': True,
'description': 'Crypto adapter #2',
'status': 'active',
'type': 'crypto',
'adapter-id': '02B',
'adapter-family': 'crypto',
'detected-card-type': 'crypto-express-5s',
'card-location': 'vvvv-wwww',
'state': 'online',
'physical-channel-status': 'operating',
}
# Translation table from 'state' module input parameter to corresponding
# desired partition 'status' property value. 'None' means the partition
# does not exist.
PARTITION_STATUS_FROM_STATE = {
'absent': None,
'stopped': 'stopped',
'active': 'active',
}
def get_failure_msg(mod_obj):
"""
Return the module failure message, as a string (i.e. the 'msg' argument
of the call to fail_json()).
If the module succeeded, return None.
"""
def func(msg):
return msg
if not mod_obj.fail_json.called:
return None
call_args = mod_obj.fail_json.call_args
# The following makes sure we get the arguments regardless of whether they
# were specified as positional or keyword arguments:
return func(*call_args[0], **call_args[1])
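# Example: whether the module called fail_json('boom') or fail_json(msg='boom'),
# the call above resolves to func('boom') / func(msg='boom') and returns 'boom'.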
def get_module_output(mod_obj):
"""
Return the module output as a tuple (changed, partition_properties) (i.e.
the arguments of the call to exit_json()).
If the module failed, return None.
"""
def func(changed, partition):
return changed, partition
if not mod_obj.exit_json.called:
return None
call_args = mod_obj.exit_json.call_args
# The following makes sure we get the arguments regardless of whether they
# were specified as positional or keyword arguments:
return func(*call_args[0], **call_args[1])
CRYPTO_CONFIG_SUCCESS_TESTCASES = [
(
"No_change_to_empty_config",
# adapters:
[],
# initial_config:
None,
# input_props:
None,
# exp_config:
None,
# exp_changed:
False
),
(
"Add adapter to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
],
},
# exp_changed:
True
),
(
"Add domain to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Add adapter+domain to empty config",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
None,
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Change access mode of domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"No change to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
False
),
(
"Add adapter to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
FAKED_CRYPTO_ADAPTER_2['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Add domain to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
dict(domain_index=3, access_mode='control'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# exp_changed:
True
),
(
"Add adapter+domain to adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
FAKED_CRYPTO_ADAPTER_2['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
dict(domain_index=3, access_mode='control'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# exp_changed:
True
),
(
"Remove adapter+domain from adapter+domain",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
],
crypto_domain_configurations=[
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
],
'crypto-domain-configurations': [
],
},
# exp_changed:
True
),
(
"Remove adapter+domain from 2 adapters + 2 domains",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=2, access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
(
"Check domain index numbers provided as strings",
# adapters:
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
# initial_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
FAKED_CRYPTO_ADAPTER_2['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
{'domain-index': 3, 'access-mode': 'control'},
],
},
# input_props:
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
# Here we provide the domain index as a string:
dict(domain_index="2", access_mode='control-usage'),
],
),
),
# exp_config:
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 2, 'access-mode': 'control-usage'},
],
},
# exp_changed:
True
),
]
class TestPartition(object):
"""
All tests for partitions.
"""
def setup_method(self):
"""
        Using the zhmcclient mock support, set up a CPC in DPM mode that has
        no partitions.
"""
self.session = FakedSession(**FAKED_SESSION_KWARGS)
self.client = Client(self.session)
self.console = self.session.hmc.consoles.add(FAKED_CONSOLE)
self.faked_cpc = self.session.hmc.cpcs.add(FAKED_CPC_1)
cpcs = self.client.cpcs.list()
assert len(cpcs) == 1
self.cpc = cpcs[0]
self.faked_crypto_adapters = []
self.faked_crypto_adapter_names = []
def setup_partition(self, initial_state, additional_props=None):
"""
Prepare the faked partition, on top of the CPC created by
setup_method().
"""
self.partition_name = FAKED_PARTITION_1_NAME
if initial_state in ('stopped', 'active'):
# Create the partition (it is in stopped state by default)
partition_props = FAKED_PARTITION_1.copy()
if additional_props:
partition_props.update(additional_props)
self.faked_partition = self.faked_cpc.partitions.add(
partition_props)
partitions = self.cpc.partitions.list()
assert len(partitions) == 1
self.partition = partitions[0]
if initial_state == 'active':
self.partition.start()
self.partition.pull_full_properties()
else:
self.faked_partition = None
self.partition = None
def setup_hba(self):
"""
Prepare the faked HBA, on top of the faked partition created by
setup_partition().
"""
self.hba_name = FAKED_HBA_1_NAME
if self.partition:
# Create the HBA
self.faked_hba = self.faked_partition.hbas.add(FAKED_HBA_1)
hbas = self.partition.hbas.list(full_properties=True)
assert len(hbas) == 1
self.hba = hbas[0]
else:
self.faked_hba = None
self.hba = None
def setup_nic(self):
"""
Prepare the faked NIC, on top of the faked partition created by
setup_partition().
"""
self.faked_adapter = self.faked_cpc.adapters.add(FAKED_ADAPTER_1)
self.faked_vswitch = self.faked_cpc.virtual_switches.add(
FAKED_VSWITCH_1)
self.nic_name = FAKED_NIC_1_NAME
if self.partition:
# Create the NIC
self.faked_nic = self.faked_partition.nics.add(FAKED_NIC_1)
nics = self.partition.nics.list(full_properties=True)
assert len(nics) == 1
self.nic = nics[0]
else:
self.faked_nic = None
self.nic = None
def setup_crypto_adapter(self, adapter_props):
"""
Prepare a faked crypto adapter, on top of the faked CPC created by
setup_method().
"""
faked_adapter = self.faked_cpc.adapters.add(adapter_props)
self.faked_crypto_adapters.append(faked_adapter)
self.faked_crypto_adapter_names.append(
faked_adapter.properties['name'])
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['absent', 'stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['absent', 'stopped', 'active'])
@pytest.mark.parametrize(
"properties, props_changed", [
# Note: properties is a dict of property values, with the property
# names as keys (with underscores, as needed for the 'properties'
# Ansible module input parameter). If a dict value is a tuple, its
# first item is the input value, and its second item is the
# expected value. Otherwise, the dict value is both input value and
# expected value.
# Note: The required properties are always added if not specified.
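        # Example: ({'initial_memory': ("2048", 2048)}, True) feeds the
        # string "2048" to the module and expects the integer 2048 on the HMC.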
# special cases:
({}, True),
# Note: Property 'crypto_configuration' is tested in separate meth.
# allowed update-only properties:
# TODO: Add a test for boot_network_nic_name (requires NIC):
# ({'boot_network_nic_name': 'fake-nic-name'}, True),
# TODO: Add a test for boot_storage_hba_name (requires HBA):
# ({'boot_storage_hba_name': 'fake-hba-name'}, True),
({'acceptable_status': ['active', 'stopped', 'degraded']}, True),
({'processor_management_enabled': True}, True),
({'ifl_absolute_processor_capping': True}, True),
({'ifl_absolute_processor_capping_value': 0.9}, True),
({'ifl_absolute_processor_capping_value': ("0.9", 0.9)}, True),
({'ifl_processing_weight_capped': True}, True),
({'minimum_ifl_processing_weight': 10}, True),
({'minimum_ifl_processing_weight': ("10", 10)}, True),
({'maximum_ifl_processing_weight': 200}, True),
({'maximum_ifl_processing_weight': ("200", 200)}, True),
({'initial_ifl_processing_weight': 50}, True),
({'initial_ifl_processing_weight': ("50", 50)}, True),
({'cp_absolute_processor_capping': True}, True),
({'cp_absolute_processor_capping_value': 0.9}, True),
({'cp_absolute_processor_capping_value': ("0.9", 0.9)}, True),
({'cp_processing_weight_capped': True}, True),
({'minimum_cp_processing_weight': 10}, True),
({'minimum_cp_processing_weight': ("10", 10)}, True),
({'maximum_cp_processing_weight': 200}, True),
({'maximum_cp_processing_weight': ("200", 200)}, True),
({'initial_cp_processing_weight': 50}, True),
({'initial_cp_processing_weight': ("50", 50)}, True),
({'boot_logical_unit_number': '0123'}, True),
({'boot_world_wide_port_name': '0123456789abcdef'}, True),
({'boot_os_specific_parameters': u'fak\u00E9'}, True),
({'boot_iso_ins_file': u'fak\u00E9'}, True),
({'ssc_boot_selection': 'fake'}, True),
# allowed create+update properties:
({'description': 'fake'}, True),
({'description': u'fak\u00E9'}, True),
({'short_name': 'FAKE'}, True),
({'partition_id': '7F'}, True),
({'autogenerate_partition_id': False}, True),
({'ifl_processors': 1}, True),
({'ifl_processors': 2}, True),
({'ifl_processors': ("3", 3)}, True),
({'cp_processors': 0}, True),
({'cp_processors': 10}, True),
({'cp_processors': ("3", 3)}, True),
({'processor_mode': 'dedicated'}, True),
({'initial_memory': 2048}, True),
({'initial_memory': ("2048", 2048)}, True),
({'maximum_memory': 4096}, True),
({'maximum_memory': ("4096", 4096)}, True),
({'reserve_resources': True}, True),
({'boot_device': 'ftp'}, True),
({'boot_timeout': 120}, True),
({'boot_timeout': ("120", 120)}, True),
({'boot_ftp_host': u'fak\u00E9'}, True),
({'boot_ftp_username': u'fak\u00E9'}, True),
({'boot_ftp_password': u'fak\u00E9'}, True),
({'boot_ftp_insfile': u'fak\u00E9'}, True),
({'boot_removable_media': u'fak\u00E9'}, True),
({'boot_removable_media_type': 'fake'}, True),
({'boot_configuration_selector': 4}, True),
({'boot_configuration_selector': ("4", 4)}, True),
({'boot_record_lba': "12ff"}, True),
({'access_global_performance_data': True}, True),
({'permit_cross_partition_commands': True}, True),
({'access_basic_counter_set': True}, True),
({'access_problem_state_counter_set': True}, True),
({'access_crypto_activity_counter_set': True}, True),
({'access_extended_counter_set': True}, True),
({'access_coprocessor_group_set': True}, True),
({'access_basic_sampling': True}, True),
({'access_diagnostic_sampling': True}, True),
({'permit_des_key_import_functions': False}, True),
({'permit_aes_key_import_functions': False}, True),
({'ssc_host_name': u'fak\u00E9'}, True),
({'ssc_ipv4_gateway': u'fak\u00E9'}, True),
({'ssc_dns_servers': [u'fak\u00E9']}, True),
({'ssc_master_userid': u'fak\u00E9'}, True),
({'ssc_master_pw': u'fak\u00E9'}, True),
])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_success(
self, ansible_mod_cls, properties, props_changed, desired_state,
initial_state, check_mode):
"""
Tests for successful operations on partition, dependent on
parametrization. The fact gathering is not tested here.
"""
# Prepare the initial partition before the test is run
self.setup_partition(initial_state)
# Set some expectations for this test from its parametrization
exp_status = (PARTITION_STATUS_FROM_STATE[initial_state] if check_mode
else PARTITION_STATUS_FROM_STATE[desired_state])
exp_part_exists = (initial_state != 'absent' if check_mode
else desired_state != 'absent')
exp_part_returned = (desired_state != 'absent' and exp_part_exists)
        exp_changed = (initial_state != desired_state or
                       (props_changed and desired_state != 'absent'))
input_props = dict()
exp_props = dict()
for prop_name in properties:
hmc_prop_name = prop_name.replace('_', '-')
value = properties[prop_name]
if isinstance(value, tuple):
assert len(value) == 2
input_props[prop_name] = value[0]
exp_props[hmc_prop_name] = value[1]
else:
input_props[prop_name] = value
exp_props[hmc_prop_name] = value
# Set up required input properties:
if 'ifl_processors' not in properties and \
'cp_processors' not in properties:
input_props['ifl_processors'] = 1
exp_props['ifl-processors'] = 1
if 'initial_memory' not in properties:
input_props['initial_memory'] = 512
exp_props['initial-memory'] = 512
if 'maximum_memory' not in properties:
input_props['maximum_memory'] = 512
exp_props['maximum-memory'] = 512
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': input_props,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 0, \
"Module unexpectedly failed with this message:\n{0}". \
format(get_failure_msg(mod_obj))
# Assert module output
changed, part_props = get_module_output(mod_obj)
assert changed == exp_changed
if exp_part_returned:
assert part_props != {}
if not check_mode:
assert part_props['status'] == exp_status
assert part_props['name'] == params['name']
if exp_props:
for hmc_prop_name in exp_props:
assert part_props[hmc_prop_name] == \
exp_props[hmc_prop_name], \
"Property: {0}".format(hmc_prop_name)
else:
assert part_props == {}
# Assert the partition resource
if not check_mode:
parts = self.cpc.partitions.list()
if exp_part_exists:
assert len(parts) == 1
part = parts[0]
part.pull_full_properties()
assert part.properties['status'] == exp_status
assert part.properties['name'] == params['name']
if properties:
for hmc_prop_name in exp_props:
assert part.properties[hmc_prop_name] == \
exp_props[hmc_prop_name], \
"Property: {0}".format(hmc_prop_name)
else:
assert len(parts) == 0
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['facts'])
@pytest.mark.parametrize(
"expand_storage_groups", [False, True])
@pytest.mark.parametrize(
"expand_crypto_adapters", [False, True])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_facts_success(
self, ansible_mod_cls, expand_crypto_adapters,
expand_storage_groups, desired_state, initial_state, check_mode):
"""
Tests for successful fact gathering on partitions, dependent on
parametrization.
"""
# Prepare the initial partition before the test is run
self.setup_partition(initial_state)
self.setup_hba()
self.setup_nic()
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'expand_storage_groups': expand_storage_groups,
'expand_crypto_adapters': expand_crypto_adapters,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 0, \
"Module unexpectedly failed with this message:\n{0}". \
format(get_failure_msg(mod_obj))
# Assert module output
changed, part_props = get_module_output(mod_obj)
assert changed is False
assert isinstance(part_props, dict)
assert 'nics' in part_props
assert 'hbas' in part_props
assert 'virtual-functions' in part_props
if expand_storage_groups:
assert 'storage-groups' in part_props
else:
assert 'storage-groups' not in part_props
if part_props['crypto-configuration']:
if expand_crypto_adapters:
assert 'crypto-adapters' in part_props['crypto-configuration']
else:
assert 'crypto-adapters' not in \
part_props['crypto-configuration']
for pname in part_props:
pvalue = part_props[pname]
if pname == 'nics':
assert len(pvalue) == 1
nic_props = pvalue[0]
exp_nic_props = dict(self.nic.properties)
exp_nic_props['adapter-name'] = FAKED_ADAPTER_1_NAME
exp_nic_props['adapter-port'] = FAKED_PORT_1_INDEX
exp_nic_props['adapter-id'] = FAKED_ADAPTER_1_ID
assert nic_props == exp_nic_props
elif pname == 'hbas':
assert len(pvalue) == 1
hba_props = pvalue[0]
assert hba_props == self.hba.properties
elif pname == 'virtual-functions':
assert len(pvalue) == 0
elif pname == 'storage-groups':
assert len(pvalue) == 0 # Not set up
else:
if pname == 'crypto-configuration' and pvalue and \
'crypto-adapters' in pvalue:
ca_value = pvalue['crypto-adapters']
assert len(ca_value) == 0 # Not set up
exp_value = self.partition.properties[pname]
assert pvalue == exp_value
@pytest.mark.parametrize(
"check_mode", [False])
@pytest.mark.parametrize(
"initial_state", ['absent', 'stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"properties, test_when_created, test_when_modified", [
# invalid properties (according to data model):
({None: 1}, True, True),
({'': 1}, True, True),
({'boo_invalid_prop': 1}, True, True),
# valid properties specified with hyphens instead of underscores:
({'ifl-processors': 4}, True, True),
# properties provided as module input parameter:
({'name': 'new-name'}, True, True),
# create-only properties (tested only when modified):
({'type': 'ssc'}, False, True),
# properties handled via their artificial properties:
({'boot_network_device': '/api/faked-nic-uri'}, True, True),
({'boot_storage_device': '/api/faked-hba-uri'}, True, True),
# update-only properties (tested only when created):
({'boot_network_nic_name': 'faked-nic-name'}, True, False),
({'boot_storage_hba_name': 'faked-hba-name'}, True, False),
# read-only properties:
({'object_uri': '/api/fake-partition-uri'}, True, True),
({'object_id': 'fake-oid'}, True, True),
({'parent': 'fake-parent'}, True, True),
({'class': 'fake-partition'}, True, True),
({'status': 'new-status'}, True, True),
({'has_unacceptable_status': False}, True, True),
({'is_locked': False}, True, True),
({'os_name': 'MyLinux'}, True, True),
({'os_type': 'Linux'}, True, True),
({'os_version': '3.10'}, True, True),
({'degraded_adapters': ''}, True, True),
({'current_ifl_processing_weight': 50}, True, True),
({'current_cp_processing_weight': 50}, True, True),
({'reserved_memory': 1024}, True, True),
({'auto_start': True}, True, True),
({'boot_iso_image_name': 'fake-iso-image-name'}, True, True),
({'threads_per_processor': 2}, True, True),
({'virtual_function_uris': ['/api/fake-vf-uri']}, True, True),
({'nic_uris': ['/api/fake-nic-uri']}, True, True),
({'hba_uris': ['/api/fake-hba-uri']}, True, True),
])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_error_properties(
self, ansible_mod_cls, properties, test_when_created,
test_when_modified, desired_state, initial_state, check_mode):
"""
Test a property in the 'properties' module input parameter that is
valid according to the data model, but not allowed for some reason.
The invalidity is detected by the Ansible module, causing a module
failure to be indicated with a "ParameterError" failure message.
"""
# Skip tests for properties that are not to be tested when the
# partition is being created or is being modified.
is_created = (initial_state in ('absent',) and
desired_state in ('stopped', 'active'))
if is_created and not test_when_created:
return
is_modified = (initial_state in ('stopped', 'active') and
desired_state in ('stopped', 'active'))
if is_modified and not test_when_modified:
return
# Prepare the initial partition before the test is run
self.setup_partition(initial_state)
# Set up required input properties:
props = properties.copy()
if 'ifl_processors' not in props and 'cp_processors' not in props:
props['ifl_processors'] = 1
if 'initial_memory' not in props:
props['initial_memory'] = 512
if 'maximum_memory' not in props:
props['maximum_memory'] = 512
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': props,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 1, \
"Module unexpectedly succeeded with this output:\n" \
"changed: {!r}, partition: {!r}". \
format(*get_module_output(mod_obj))
# Assert the failure message
msg = get_failure_msg(mod_obj)
assert msg.startswith("ParameterError:")
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_boot_storage_success(
self, ansible_mod_cls, desired_state, initial_state, check_mode):
"""
Tests for successful configuration of boot from storage.
"""
# Prepare the initial partition and HBA before the test is run
self.setup_partition(initial_state)
assert self.partition
self.setup_hba()
# Set some expectations for this test from its parametrization
exp_status = (PARTITION_STATUS_FROM_STATE[initial_state] if check_mode
else PARTITION_STATUS_FROM_STATE[desired_state])
properties = {
'boot_device': 'storage-adapter',
'boot_storage_hba_name': self.hba_name, # artif. prop.
'boot_logical_unit_number': '0002',
'boot_world_wide_port_name': '1023456789abcdef',
}
exp_properties = {
'boot_device': 'storage-adapter',
'boot_storage_device': self.hba.uri, # real prop for artif. prop.
'boot_logical_unit_number': '0002',
'boot_world_wide_port_name': '1023456789abcdef',
}
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': properties,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 0, \
"Module unexpectedly failed with this message:\n{0}". \
format(get_failure_msg(mod_obj))
# Assert module output
changed, part_props = get_module_output(mod_obj)
assert changed
assert part_props != {}
if not check_mode:
assert part_props['status'] == exp_status
assert part_props['name'] == params['name']
for prop_name in exp_properties:
hmc_prop_name = prop_name.replace('_', '-')
assert part_props[hmc_prop_name] == \
exp_properties[prop_name], \
"Property: {0}".format(prop_name)
# Assert the partition resource
if not check_mode:
parts = self.cpc.partitions.list()
assert len(parts) == 1
part = parts[0]
part.pull_full_properties()
assert part.properties['status'] == exp_status
assert part.properties['name'] == params['name']
for prop_name in exp_properties:
hmc_prop_name = prop_name.replace('_', '-')
assert part.properties[hmc_prop_name] == \
exp_properties[prop_name], \
"Property: {0}".format(prop_name)
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_boot_storage_error_hba_not_found(
self, ansible_mod_cls, desired_state, initial_state, check_mode):
"""
        Tests that configuring boot from storage fails when the specified
        HBA name cannot be found.
"""
# Prepare the initial partition and HBA before the test is run
self.setup_partition(initial_state)
assert self.partition
self.setup_hba()
properties = {
'boot_device': 'storage-adapter',
'boot_storage_hba_name': 'invalid-hba-name', # artif. prop.
'boot_logical_unit_number': '0002',
'boot_world_wide_port_name': '1023456789abcdef',
}
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': properties,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 1, \
"Module unexpectedly succeeded with this output:\n" \
"changed: {!r}, partition: {!r}". \
format(*get_module_output(mod_obj))
# Assert the failure message
msg = get_failure_msg(mod_obj)
assert msg.startswith("ParameterError:")
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_boot_network_success(
self, ansible_mod_cls, desired_state, initial_state, check_mode):
"""
Tests for successful configuration of boot from network.
"""
        # Prepare the initial partition and NIC before the test is run
self.setup_partition(initial_state)
assert self.partition
self.setup_nic()
# Set some expectations for this test from its parametrization
exp_status = (PARTITION_STATUS_FROM_STATE[initial_state] if check_mode
else PARTITION_STATUS_FROM_STATE[desired_state])
properties = {
'boot_device': 'network-adapter',
'boot_network_nic_name': self.nic_name, # artif. prop.
}
exp_properties = {
'boot_device': 'network-adapter',
'boot_network_device': self.nic.uri, # real prop for artif. prop.
}
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': properties,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 0, \
"Module unexpectedly failed with this message:\n{0}". \
format(get_failure_msg(mod_obj))
# Assert module output
changed, part_props = get_module_output(mod_obj)
assert changed
assert part_props != {}
if not check_mode:
assert part_props['status'] == exp_status
assert part_props['name'] == params['name']
for prop_name in exp_properties:
hmc_prop_name = prop_name.replace('_', '-')
assert part_props[hmc_prop_name] == \
exp_properties[prop_name], \
"Property: {0}".format(prop_name)
# Assert the partition resource
if not check_mode:
parts = self.cpc.partitions.list()
assert len(parts) == 1
part = parts[0]
part.pull_full_properties()
assert part.properties['status'] == exp_status
assert part.properties['name'] == params['name']
for prop_name in exp_properties:
hmc_prop_name = prop_name.replace('_', '-')
assert part.properties[hmc_prop_name] == \
exp_properties[prop_name], \
"Property: {0}".format(prop_name)
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
    def test_boot_network_error_nic_not_found(
            self, ansible_mod_cls, desired_state, initial_state, check_mode):
        """
        Tests for failed configuration of boot from network when the
        specified NIC cannot be found.
        """
# Prepare the initial partition and HBA before the test is run
self.setup_partition(initial_state)
assert self.partition
self.setup_nic()
properties = {
'boot_device': 'network-adapter',
'boot_network_nic_name': 'invalid-nic-name', # artif. prop.
}
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': properties,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 1, \
"Module unexpectedly succeeded with this output:\n" \
"changed: {0!r}, partition: {1!r}". \
format(*get_module_output(mod_obj))
# Assert the failure message
msg = get_failure_msg(mod_obj)
assert msg.startswith("ParameterError:")
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
# We omit initial state 'absent' due to limitations in the mock support
# (when creating partitions, it does not populate them with all
# properties).
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desc, adapters, initial_config, input_props, exp_config, exp_changed",
CRYPTO_CONFIG_SUCCESS_TESTCASES)
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_crypto_config_success(
self, ansible_mod_cls, desc, adapters, initial_config, input_props,
exp_config, exp_changed, desired_state, initial_state, check_mode):
"""
Tests for successful crypto configuration.
"""
# Prepare the initial partition and crypto adapters
self.setup_partition(initial_state,
{'crypto-configuration': initial_config})
for adapter_props in adapters:
self.setup_crypto_adapter(adapter_props)
# Set some expectations for this test from its parametrization
exp_status = (PARTITION_STATUS_FROM_STATE[initial_state] if check_mode
else PARTITION_STATUS_FROM_STATE[desired_state])
# Adjust expected changes - the exp_changed argument only indicates the
# expectation for changes to the crypto config property.
if desired_state != initial_state:
exp_changed = True
properties = input_props
if self.partition:
self.partition.pull_full_properties()
exp_properties = self.partition.properties.copy()
else:
exp_properties = {}
exp_properties['crypto-configuration'] = exp_config
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': properties,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 0, \
"Module unexpectedly failed with this message:\n{0}". \
format(get_failure_msg(mod_obj))
# Assert module output
changed, part_props = get_module_output(mod_obj)
assert changed == exp_changed
assert part_props != {}
if not check_mode:
assert part_props['status'] == exp_status
assert part_props['name'] == params['name']
for prop_name in exp_properties:
# Because we built the expected properties from the initial
# properties (adding the crypto_config property we test),
# we need to skip the 'status' property (it would still show
# the initial value).
if prop_name == 'status':
continue
hmc_prop_name = prop_name.replace('_', '-')
assert hmc_prop_name in part_props
result_property = part_props[hmc_prop_name]
exp_property = exp_properties[prop_name]
assert result_property == exp_property, \
"Property: {0}".format(prop_name)
# Assert the partition resource
if not check_mode:
parts = self.cpc.partitions.list()
assert len(parts) == 1
part = parts[0]
part.pull_full_properties()
assert part.properties['status'] == exp_status
assert part.properties['name'] == params['name']
for prop_name in exp_properties:
# Because we built the expected properties from the initial
# properties (adding the crypto_config property we test),
# we need to skip the 'status' property (it would still show
# the initial value).
if prop_name == 'status':
continue
hmc_prop_name = prop_name.replace('_', '-')
assert hmc_prop_name in part.properties
part_property = part.properties[hmc_prop_name]
exp_property = exp_properties[prop_name]
assert part_property == exp_property, \
"Property: {0}".format(prop_name)
@pytest.mark.parametrize(
"check_mode", [False, True])
@pytest.mark.parametrize(
# We omit initial state 'absent' due to limitations in the mock support
# (when creating partitions, it does not populate them with all
# properties).
"initial_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"desired_state", ['stopped', 'active'])
@pytest.mark.parametrize(
"adapters, initial_config", [
(
[
FAKED_CRYPTO_ADAPTER_1,
FAKED_CRYPTO_ADAPTER_2,
],
{
'crypto-adapter-uris': [
FAKED_CRYPTO_ADAPTER_1['object-uri'],
],
'crypto-domain-configurations': [
{'domain-index': 3, 'access-mode': 'control'},
],
},
),
])
@pytest.mark.parametrize(
"input_props, error_msg_pattern", [
(
dict(
crypto_configuration='abc', # error: no dictionary
),
"ParameterError: .*",
),
(
dict(
crypto_configuration=dict(
# error: no crypto_adapter_names field
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
"ParameterError: .*crypto_adapter_names.*",
),
(
dict(
crypto_configuration=dict(
crypto_adapter_names=[
'invalid-adapter-name', # error: not found
],
crypto_domain_configurations=[
dict(domain_index=3, access_mode='control-usage'),
],
),
),
"ParameterError: .*invalid-adapter-name.*",
),
(
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
# error: no crypto_domain_configurations field
),
),
"ParameterError: .*crypto_domain_configurations.*",
),
(
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(access_mode='control-usage'),
# error: no domain_index field
],
),
),
"ParameterError: .*domain_index.*",
),
(
dict(
crypto_configuration=dict(
crypto_adapter_names=[
FAKED_CRYPTO_ADAPTER_1['name'],
],
crypto_domain_configurations=[
dict(domain_index=3),
# error: no access_mode field
],
),
),
"ParameterError: .*access_mode.*",
),
])
@mock.patch("plugins.modules.zhmc_partition.AnsibleModule",
autospec=True)
def test_crypto_config_parm_errors(
self, ansible_mod_cls, input_props, error_msg_pattern, adapters,
initial_config, desired_state, initial_state, check_mode):
"""
Tests for 'crypto_configuration' property with parameter errors.
"""
# Prepare the initial partition and crypto adapters
self.setup_partition(initial_state,
{'crypto-configuration': initial_config})
for adapter_props in adapters:
self.setup_crypto_adapter(adapter_props)
# Prepare module input parameters
params = {
'hmc_host': 'fake-host',
'hmc_auth': dict(userid='fake-userid',
password='fake-password'),
'cpc_name': self.cpc.name,
'name': self.partition_name,
'state': desired_state,
'properties': input_props,
'expand_storage_groups': False,
'expand_crypto_adapters': False,
'log_file': None,
'faked_session': self.session,
}
# Prepare mocks for AnsibleModule object
mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)
# Exercise the code to be tested
with pytest.raises(SystemExit) as exc_info:
zhmc_partition.main()
exit_code = exc_info.value.args[0]
# Assert module exit code
assert exit_code == 1, \
"Module unexpectedly succeeded with this output:\n" \
"changed: {0!r}, partition: {1!r}". \
format(*get_module_output(mod_obj))
# Assert the failure message
msg = get_failure_msg(mod_obj)
pattern = r'^{0}$'.format(error_msg_pattern)
assert re.match(pattern, msg)
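    # Editor's note: mock_ansible_module(), get_module_output() and
    # get_failure_msg() are helpers defined elsewhere in this test suite.
    # A minimal sketch of what such helpers plausibly look like (names and
    # behavior assumed here, not taken from this file): the mocked
    # AnsibleModule records the kwargs passed to exit_json()/fail_json() and
    # raises SystemExit, so the tests can assert on the outcome afterwards.
    #
    # def mock_ansible_module(ansible_mod_cls, params, check_mode):
    #     mod_obj = ansible_mod_cls.return_value  # the mocked module instance
    #     mod_obj.params = params
    #     mod_obj.check_mode = check_mode
    #     mod_obj.exit_json.side_effect = SystemExit(0)
    #     mod_obj.fail_json.side_effect = SystemExit(1)
    #     return mod_obj
    #
    # def get_module_output(mod_obj):
    #     _, kwargs = mod_obj.exit_json.call_args
    #     return kwargs['changed'], kwargs['partition']
    #
    # def get_failure_msg(mod_obj):
    #     _, kwargs = mod_obj.fail_json.call_args
    #     return kwargs['msg']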
| 36.238587
| 79
| 0.558182
| 43,848
| 0.657598
| 0
| 0
| 40,581
| 0.608602
| 0
| 0
| 24,955
| 0.374256
|
02f942ae72f558610fdbd2e0d719bb8a1bc37d6c
| 1,849
|
py
|
Python
|
users/models.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
users/models.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
users/models.py
|
uoe-compsci-grp30/campusgame
|
d2d7ba99210f352a7b45a1db06cea0a09e3b8c31
|
[
"MIT"
] | null | null | null |
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
"""
The user model that represents a user participating in the game.
Implemented using the built-in Django user model: AbstractUser.
"""
class User(AbstractUser):
""" The User class that represents a user that has created an account.
Implemented using the built-in Django user model 'AbstractUser'.
The User class consists of an id that uniquely identifies a user. It uses a uuid in order to be more secure.
    It also carries an is_gamekeeper flag that marks whether the user is a gamekeeper.
"""
id = models.UUIDField(default=uuid.uuid4, primary_key=True) # id uniquely identifies a user
is_gamekeeper = models.BooleanField(default=False) # is the user a gamekeeper?
class GameParticipation(models.Model):
"""
    Represents information about a user who is currently participating in a game,
    providing an easy way to store per-game player data. It consists of: the User
    who is playing, the Game they are participating in, the Zone they are currently
    in, their score, and boolean flags for whether they are alive or eliminated.
"""
user = models.ForeignKey(User, on_delete=models.CASCADE) # User that is currently participating in a game
game = models.ForeignKey("games.Game", on_delete=models.CASCADE) # What game is the user currently participating in
current_zone = models.ForeignKey("games.Zone", on_delete=models.DO_NOTHING) # What zone is the user currently in
score = models.IntegerField(default=0) # User score
is_alive = models.BooleanField(default=False) # Is the player alive
is_eliminated = models.BooleanField(default=False) # Is the player eliminated
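# A hedged usage sketch of GameParticipation (illustrative only; the Game and
# Zone instances below are assumed to exist, the ORM calls are standard Django):
#
# user = User.objects.create_user(username="alice", password="...")
# participation = GameParticipation.objects.create(
#     user=user,
#     game=some_game,           # a games.Game instance
#     current_zone=start_zone,  # a games.Zone instance
#     is_alive=True,
# )
# participation.score += 10     # award points for an in-game event
# participation.save()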
| 52.828571
| 449
| 0.760411
| 1,610
| 0.870741
| 0
| 0
| 0
| 0
| 0
| 0
| 1,202
| 0.650081
|
02fa655762a8c5f87ff87bed426342d23902e763
| 4,743
|
py
|
Python
|
slidingwindow_generator/slidingwindow_generator.py
|
flashspys/SlidingWindowGenerator
|
bdcefd9506732ea9c9734bd4e8e81a884b78f08c
|
[
"Apache-2.0"
] | 3
|
2021-03-27T12:50:36.000Z
|
2022-01-16T15:30:22.000Z
|
slidingwindow_generator/slidingwindow_generator.py
|
flashspys/SlidingWindowGenerator
|
bdcefd9506732ea9c9734bd4e8e81a884b78f08c
|
[
"Apache-2.0"
] | 3
|
2020-10-07T05:28:46.000Z
|
2020-11-05T08:32:01.000Z
|
slidingwindow_generator/slidingwindow_generator.py
|
flashspys/SlidingWindowGenerator
|
bdcefd9506732ea9c9734bd4e8e81a884b78f08c
|
[
"Apache-2.0"
] | 1
|
2020-11-08T23:39:20.000Z
|
2020-11-08T23:39:20.000Z
|
import numpy as np
import tensorflow as tf
class SlidingWindowGenerator:
def __init__(self, input_width, label_width, shift,
train_df, val_df, test_df,
label_columns=None):
        # Store the raw data as pandas DataFrames
self.train_df = train_df
self.val_df = val_df
self.test_df = test_df
# Work out the label column indices.
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in
enumerate(label_columns)}
self.column_indices = {name: i for i, name in
enumerate(train_df.columns)}
# Work out the window parameters.
self.input_width = input_width
self.label_width = label_width
self.shift = shift # indicates the label offset far from input
self.total_window_size = input_width + shift
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[
self.input_slice]
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[
self.labels_slice]
def __repr__(self):
return '\n'.join([
f'Total window size: {self.total_window_size}',
f'Input indices: {self.input_indices}',
f'Label indices: {self.label_indices}',
f'Label column name(s): {self.label_columns}'])
def train(self, sequence_stride=1, shuffle=True, batch_size=32):
"""
make train time series dataset
:param sequence_stride: int
:param shuffle: boolean
:param batch_size: int
:return: time_series data
"""
return self.make_dataset(self.train_df,
sequence_stride=sequence_stride,
shuffle=shuffle, batch_size=batch_size)
def val(self, sequence_stride=1, shuffle=False, batch_size=32):
"""
make validation time series dataset
:param sequence_stride: int
:param shuffle: boolean
:param batch_size: int
:return: time_series data
"""
return self.make_dataset(self.val_df, sequence_stride=sequence_stride,
shuffle=shuffle, batch_size=batch_size)
def test(self, sequence_stride=1, shuffle=False, batch_size=32):
"""
make test time series dataset
:param sequence_stride: int
:param shuffle: boolean
:param batch_size: int
:return: time_series data
"""
return self.make_dataset(self.test_df, sequence_stride=sequence_stride,
shuffle=shuffle, batch_size=batch_size)
def example(self, sequence_stride=1, shuffle=True, batch_size=32):
"""
Get and cache an example batch of `inputs, labels` for checking shape
"""
result = getattr(self, '_example', None)
if result is None:
# No example batch was found, so get one from the `.train` dataset
result = next(iter(
self.train(sequence_stride=sequence_stride, shuffle=shuffle,
batch_size=batch_size)))
# And cache it for next time
self._example = result
return result
def split_window(self, features):
"""
        Split a window of features into an inputs tensor made of the input
        columns and a labels tensor made of the label columns.
"""
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack(
[labels[:, :, self.column_indices[name]] for name in
self.label_columns],
axis=-1)
# Slicing doesn't preserve static shape information, so set the shapes
# manually. This way the `tf.data.Datasets` are easier to inspect.
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
def make_dataset(self, data, sequence_stride=1, shuffle=True,
batch_size=32):
data = np.array(data, dtype=np.float32)
ds = tf.keras.preprocessing.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=sequence_stride,
shuffle=shuffle,
batch_size=batch_size, )
ds = ds.map(self.split_window)
return ds
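# A minimal usage sketch (illustrative only; the column names and the 70/20/10
# split below are assumptions, not part of this module):
#
# import pandas as pd
# df = pd.DataFrame({'T': range(100), 'p': range(100)}, dtype='float32')
# n = len(df)
# w = SlidingWindowGenerator(
#     input_width=6, label_width=1, shift=1,
#     train_df=df[:int(n * 0.7)],
#     val_df=df[int(n * 0.7):int(n * 0.9)],
#     test_df=df[int(n * 0.9):],
#     label_columns=['T'])
# inputs, labels = w.example()  # inputs: (32, 6, 2), labels: (32, 1, 1)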
| 37.346457
| 79
| 0.59709
| 4,735
| 0.990379
| 0
| 0
| 0
| 0
| 0
| 0
| 1,357
| 0.283832
|
02fb4db8ebfb72289be41e8479130a4d82ec14a9
| 1,737
|
py
|
Python
|
carla/util.py
|
dixantmittal/intelligent-autonomous-vehicle-controller
|
7ccebabe8ecb972780a492c36f48ef8f1671be71
|
[
"MIT"
] | 1
|
2019-12-18T06:23:19.000Z
|
2019-12-18T06:23:19.000Z
|
carla/util.py
|
dixantmittal/intelligent-autonomous-vehicle-controller
|
7ccebabe8ecb972780a492c36f48ef8f1671be71
|
[
"MIT"
] | null | null | null |
carla/util.py
|
dixantmittal/intelligent-autonomous-vehicle-controller
|
7ccebabe8ecb972780a492c36f48ef8f1671be71
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB), and the INTEL Visual Computing Lab.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import datetime
import sys
from contextlib import contextmanager
@contextmanager
def make_connection(client_type, *args, **kwargs):
"""Context manager to create and connect a networking client object."""
client = None
try:
client = client_type(*args, **kwargs)
client.connect()
yield client
finally:
if client is not None:
client.disconnect()
class StopWatch(object):
def __init__(self):
self.start = datetime.datetime.now()
self.end = None
def stop(self):
self.end = datetime.datetime.now()
def milliseconds(self):
return 1000.0 * (self.end - self.start).total_seconds()
def to_hex_str(header):
return ':'.join('{:02x}'.format(ord(c)) for c in header)
if sys.version_info >= (3, 3):
import shutil
def print_over_same_line(text):
terminal_width = shutil.get_terminal_size((80, 20)).columns
empty_space = max(0, terminal_width - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
else:
# Workaround for older Python versions.
def print_over_same_line(text):
line_length = max(print_over_same_line._last_line_length, len(text))
empty_space = max(0, line_length - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
print_over_same_line._last_line_length = line_length
print_over_same_line._last_line_length = 0
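# A hedged usage sketch (SomeTcpClient is hypothetical; any class exposing
# connect()/disconnect() works with make_connection):
#
# timer = StopWatch()
# with make_connection(SomeTcpClient, 'localhost', 2000, timeout=15) as client:
#     client.do_something()
# timer.stop()
# print_over_same_line('round trip: %.1f ms' % timer.milliseconds())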
| 28.016129
| 80
| 0.663788
| 274
| 0.157743
| 321
| 0.184801
| 337
| 0.194013
| 0
| 0
| 385
| 0.221647
|
02fc1e3721895fe496443e7ceaa950d900683542
| 3,002
|
py
|
Python
|
examples/session2-fi/start2.py
|
futurice/PythonInBrowser
|
066ab28ffad265efc7968b87f33dab2c68216d9d
|
[
"MIT"
] | 4
|
2015-12-08T19:34:49.000Z
|
2019-09-08T22:11:05.000Z
|
examples/session2-fi/start2.py
|
futurice/PythonInBrowser
|
066ab28ffad265efc7968b87f33dab2c68216d9d
|
[
"MIT"
] | 18
|
2016-10-14T13:48:39.000Z
|
2019-10-11T12:14:21.000Z
|
examples/session2-fi/start2.py
|
futurice/PythonInBrowser
|
066ab28ffad265efc7968b87f33dab2c68216d9d
|
[
"MIT"
] | 4
|
2015-11-18T15:18:43.000Z
|
2018-03-02T09:36:23.000Z
|
# Let's go through what we learned last week (and also something new)
# If anything puzzles you, don't hesitate to ask for help!
##### INFO #####
# The most important things last week were:
# 1. the print command
# 2. using a variable
# 3. using the turtle for drawing
# Programming often requires looking up information from other sources
# and applying that new knowledge to your own program.
# In practice, the Internet is a good place to find information about programming.
# Use last week's exercises as a reference when doing the following tasks
##### TASKS #####
##### TASK 1 #####
# 1. write a piece of code that prints two lines
# The first line must contain the text:
# "My favorite color is 'your favorite color'"
# The second line must contain an equation that calculates
# the days remaining in this month
# HINT: check on your computer what day of the month it is today and how many days this month has.
# The printout must contain only one number: the solution of the equation
# <------ write your code here (and click 'Run' to print) ------->
##### TASK 2 #####
# One day your favorite color might be green and another day orange.
# Create a variable named lempivari and set its value to your favorite color
# <------ write the variable here ------->
# Then write code that prints the text "My favorite color is 'your favorite color'"
# This time, use the variable lempivari to express your favorite color
# <------ write your code here (and click 'Run' to print) ------->
# As a check, change the value of the lempivari variable and click 'Run'
# and verify that the favorite color has changed in the printout
##### TASK 3 #####
# To be able to draw on the drawing area next to the code, we need to use the turtle
# For this we need to import the turtle and assign it to a variable.
# <------ import the turtle here ------->
# like this: import turtle
# <------ assign the turtle to the variable 'jane', do you remember how? ------>
# Draw the following figure
#
# forward 50 pixels, turn 135 degrees right
# forward 100 pixels, turn 135 degrees right, forward 100 pixels,
# turn 135 degrees right and move 50 pixels forward.
#
# Can you guess what shape the turtle draws?
# <------ write your code here ------->
# It is also possible to draw with other colors. Black is just the default color.
# You can change the turtle's color by adding the following line before drawing:
# jane.color("pink")
# You can also use a variable to define the drawing color.
# Change the value of the lempivari variable to an English color name, e.g. "green", "blue" or "yellow",
# and replace the color-changing code with the following line
#
# jane.color(lempivari)
#
# Remember that when using variables you don't need quotation marks
# Congratulations! You have gone through the most important things from last week
# and learned how to draw with different colors
##### BONUS TASKS #####
# What would be the easiest way to finish drawing the triangle?
# Change the value of the lempivari variable and check that it works.
# How could you draw another triangle in a different direction and with a different color?
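# A possible sketch for the Task 3 drawing (one way among many; it assumes the
# turtle was assigned to 'jane' and lempivari holds an English color name):
#
# import turtle
# jane = turtle.Turtle()
# jane.color(lempivari)
# jane.forward(50)
# jane.right(135)
# jane.forward(100)
# jane.right(135)
# jane.forward(100)
# jane.right(135)
# jane.forward(50)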
| 37.525
| 110
| 0.758161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,112
| 0.975243
|
02fcd2548a49becf32a01085ecf16e34635af225
| 32,807
|
py
|
Python
|
train.py
|
EdwardLeeMacau/PFFNet
|
dfa6e45062627ce6ab7a1b1a37bada5cccae7167
|
[
"MIT"
] | null | null | null |
train.py
|
EdwardLeeMacau/PFFNet
|
dfa6e45062627ce6ab7a1b1a37bada5cccae7167
|
[
"MIT"
] | null | null | null |
train.py
|
EdwardLeeMacau/PFFNet
|
dfa6e45062627ce6ab7a1b1a37bada5cccae7167
|
[
"MIT"
] | null | null | null |
"""
FileName [ train.py ]
PackageName [ PFFNet ]
Synopsis [ Train the model ]
Usage:
>>> python train.py --normalized --cuda
"""
import argparse
import os
import shutil
from datetime import date
import matplotlib
import numpy as np
import pandas as pd
import torch
import torchvision
import torchvision.models
from torchvision import transforms
from matplotlib import pyplot as plt
from matplotlib import gridspec
from skimage.measure import compare_psnr, compare_ssim
from torch import nn, optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision.transforms import (
CenterCrop, Compose, Normalize, RandomCrop, Resize, ToTensor)
from torchvision.utils import make_grid
import cmdparser
import graphs
import utils
from model import lossnet
from data import DatasetFromFolder
from model.rpnet import Net
from model.rpnet_improve import ImproveNet
from model.lossnet import LossNetwork
# Select Device
device = utils.selectDevice()
cudnn.benchmark = True
# Normalization(Mean Shift)
mean = torch.Tensor([0.485, 0.456, 0.406]).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).to(device)
def getDataset(opt, transform):
"""
Return the dataloader object
Parameters
----------
opt : namespace
transform : torchvision.transform
Return
------
train_loader, val_loader : torch.utils.data.DataLoader
"""
train_dataset = DatasetFromFolder(opt.train, transform=transform)
val_dataset = DatasetFromFolder(opt.val, transform=transform)
train_loader = DataLoader(
dataset=train_dataset,
num_workers=opt.threads,
batch_size=opt.batchsize,
pin_memory=True,
shuffle=True
)
val_loader = DataLoader(
dataset=val_dataset,
num_workers=opt.threads,
batch_size=opt.batchsize,
pin_memory=True,
shuffle=True
)
return train_loader, val_loader
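# DatasetFromFolder is defined in data.py (not shown here). For this kind of
# image restoration task it plausibly returns (input, label) image pairs; a
# hedged sketch under that assumption (paths and layout are hypothetical):
#
# class DatasetFromFolderSketch(torch.utils.data.Dataset):
#     def __init__(self, root, transform=None):
#         self.data = sorted(glob.glob(os.path.join(root, 'data', '*.png')))
#         self.label = sorted(glob.glob(os.path.join(root, 'label', '*.png')))
#         self.transform = transform
#
#     def __len__(self):
#         return len(self.data)
#
#     def __getitem__(self, index):
#         x = Image.open(self.data[index]).convert('RGB')
#         y = Image.open(self.label[index]).convert('RGB')
#         if self.transform is not None:
#             x, y = self.transform(x), self.transform(y)
#         return x, y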
def getOptimizer(model, opt):
"""
Return the optimizer (and schedular)
Parameters
----------
model : torch.nn.Model
opt : namespace
Return
------
optimizer : torch.optim
"""
if opt.optimizer == "Adam":
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "SGD":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "ASGD":
optimizer = optim.ASGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
lambd=1e-4,
alpha=0.75,
t0=1000000.0,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "Adadelta":
optimizer = optim.Adadelta(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
rho=0.9,
eps=1e-06,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "Adagrad":
optimizer = optim.Adagrad(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
lr_decay=0,
weight_decay=opt.weight_decay,
initial_accumulator_value=0
)
elif opt.optimizer == "Adam":
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "SGD":
optimizer = optim.SGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "ASGD":
optimizer = optim.ASGD(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
lambd=1e-4,
alpha=0.75,
t0=1000000.0,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "Adadelta":
optimizer = optim.Adadelta(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
rho=0.9,
eps=1e-06,
weight_decay=opt.weight_decay
)
elif opt.optimizer == "Adagrad":
optimizer = optim.Adagrad(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
lr_decay=0,
weight_decay=opt.weight_decay,
initial_accumulator_value=0
)
elif opt.optimizer == "SparseAdam":
optimizer = optim.SparseAdam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
betas=(opt.b1, opt.b2),
eps=1e-08
)
elif opt.optimizer == "Adamax":
optimizer = optim.Adamax(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr,
betas=(opt.b1, opt.b2),
eps=1e-08,
            weight_decay=opt.weight_decay
)
else:
raise ValueError(opt.optimizer, " doesn't exist.")
return optimizer
# TODO: Developing
def logMsg(epoch, iteration, train_loader, perceptual, trainloss, perceloss):
msg = "===> [Epoch {}] [{:4d}/{:4d}] ImgLoss: (Mean: {:.6f}, Std: {:.6f})".format(
epoch, iteration, len(train_loader), np.mean(trainloss), np.std(trainloss)
)
    if perceptual is not None:
msg = "\t".join([msg, "PerceptualLoss: (Mean: {:.6f}, Std: {:.6f})".format(np.mean(perceloss), np.std(perceloss))])
return msg
def getFigureSpec(iteration: int, perceptual: bool):
"""
Get 2x2 Figure And Axis
Parameters
----------
iterations : int
perceptual : bool
If true, generate the axis of perceptual loss
Return
------
fig, axis : matplotlib.figure.Figure, matplotlib.axes.Axes
The plotting instance.
"""
fig, grids = plt.figure(figsize=(19.2, 10.8)), gridspec.GridSpec(2, 2)
axis = [ fig.add_subplot(gs) for gs in grids ]
for ax in axis:
ax.set_xlabel("Epoch(s) / Iteration: {}".format(iteration))
# Linear scale of Loss
axis[0].set_ylabel("Image Loss")
axis[0].set_title("Loss")
# Log scale of Loss
axis[1].set_yscale("log")
axis[1].set_ylabel("Image Loss")
axis[1].set_title("Loss (Log scale)")
# PSNR
axis[2].set_title("Average PSNR")
# Learning Rate
axis[3].set_yscale('log')
axis[3].set_title("Learning Rate")
# Add TwinScale for Perceptual Loss
if perceptual:
axis.append( axis[0].twinx() )
axis[4].set_ylabel("Perceptual Loss")
axis.append( axis[1].twinx() )
axis[5].set_ylabel("Perceptual Loss")
return fig, axis
def getPerceptualModel(model):
"""
Return the Perceptual Model
Parameters
----------
model : str
The name of the perceptual Model.
Return
------
perceptual : {nn.Module, None}
Not None if the perceptual model is supported.
"""
perceptual = None
if opt.perceptual == 'vgg16':
print("==========> Using VGG16 as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.vgg16(pretrained=True),
lossnet.VGG16_Layer
)
if opt.perceptual == 'vgg16_bn':
print("==========> Using VGG16 with Batch Normalization as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.vgg16_bn(pretrained=True),
lossnet.VGG16_bn_Layer
)
if opt.perceptual == 'vgg19':
print("==========> Using VGG19 as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.vgg19(pretrained=True),
lossnet.VGG19_Layer
)
if opt.perceptual == 'vgg19_bn':
print("==========> Using VGG19 with Batch Normalization as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.vgg19_bn(pretrained=True),
lossnet.VGG19_bn_Layer
)
if opt.perceptual == "resnet18":
print("==========> Using Resnet18 as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.resnet18(pretrained=True),
lossnet.Resnet18_Layer
)
if opt.perceptual == "resnet34":
print("==========> Using Resnet34 as Perceptual Loss Model")
perceptual = LossNetwork(
torchvision.models.resnet34(pretrained=True),
lossnet.Resnet34_Layer
)
if opt.perceptual == "resnet50":
print("==========> Using Resnet50 as Perceptual Loss Model")
perceptual = LossNetwork(
            torchvision.models.resnet50(pretrained=True),
lossnet.Resnet50_Layer
)
return perceptual
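# LossNetwork is defined in model/lossnet.py (not shown here). A perceptual
# loss of this kind typically runs output and target through a frozen
# pretrained backbone and compares intermediate feature maps; a minimal
# sketch under that assumption (the layer choice is illustrative):
#
# class PerceptualLossSketch(nn.Module):
#     def __init__(self, backbone, layer_ids):
#         super().__init__()
#         self.features = backbone.features   # e.g. the VGG feature stack
#         self.layer_ids = set(layer_ids)     # indices of layers to compare
#         for p in self.features.parameters():
#             p.requires_grad = False         # keep the backbone frozen
#
#     def forward(self, output, target):
#         loss, x, y = 0.0, output, target
#         for i, layer in enumerate(self.features):
#             x, y = layer(x), layer(y)
#             if i in self.layer_ids:
#                 loss = loss + nn.functional.mse_loss(x, y)
#         return loss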
# TODO: Developing
def getTrainSpec(opt):
"""
Initialize the objects needs at Training.
Parameters
----------
opt : namespace
(...)
Return
------
model
optimizer
criterion
perceptual
train_loader, val_loader
scheduler
epoch,
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter
iterations, opt,
name,
fig,
axis,
saveCheckpoint
"""
if opt.fixrandomseed:
seed = 1334
torch.manual_seed(seed)
if opt.cuda: torch.cuda.manual_seed(seed)
print("==========> Loading datasets")
img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()
# Dataset
train_loader, val_loader = getDataset(opt, img_transform)
# TODO: Parameters Selection
# TODO: Mean shift Layer Handling
# Load Model
print("==========> Building model")
model = ImproveNet(opt.rb)
# ----------------------------------------------- #
# Loss: L1 Norm / L2 Norm #
# Perceptual Model (Optional) #
# TODO Append Layer (Optional) #
# ----------------------------------------------- #
criterion = nn.MSELoss(reduction='mean')
perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()
# ----------------------------------------------- #
# Optimizer and learning rate scheduler #
# ----------------------------------------------- #
print("==========> Setting Optimizer: {}".format(opt.optimizer))
optimizer = getOptimizer(model, opt)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
# ----------------------------------------------- #
# Option: resume training process from checkpoint #
# ----------------------------------------------- #
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
else:
raise Exception("=> no checkpoint found at '{}'".format(opt.resume))
# ----------------------------------------------- #
# Option: load weights from a pretrain network #
# ----------------------------------------------- #
if opt.pretrained:
if os.path.isfile(opt.pretrained):
print("=> loading pretrained model '{}'".format(opt.pretrained))
model = utils.loadModel(opt.pretrained, model, True)
else:
raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))
# Select training device
if opt.cuda:
print("==========> Setting GPU")
model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
criterion = criterion.cuda()
if perceptual is not None: perceptual = perceptual.cuda()
else:
print("==========> Setting CPU")
model = model.cpu()
criterion = criterion.cpu()
if perceptual is not None: perceptual = perceptual.cpu()
# Create container
length = opt.epochs * len(train_loader) // opt.val_interval
loss_iter = np.empty(length, dtype=float)
perc_iter = np.empty(length, dtype=float)
psnr_iter = np.empty(length, dtype=float)
ssim_iter = np.empty(length, dtype=float)
mse_iter = np.empty(length, dtype=float)
lr_iter = np.empty(length, dtype=float)
iterations = np.empty(length, dtype=float)
loss_iter[:] = np.nan
perc_iter[:] = np.nan
psnr_iter[:] = np.nan
ssim_iter[:] = np.nan
mse_iter[:] = np.nan
lr_iter[:] = np.nan
iterations[:] = np.nan
# Set plotter to plot the loss curves
twinx = (opt.perceptual is not None)
fig, axis = getFigureSpec(len(train_loader), twinx)
# Set Model Saving Function
if opt.save_item == "model":
print("==========> Save Function: saveModel()")
saveCheckpoint = utils.saveModel
elif opt.save_item == "checkpoint":
print("==========> Save Function: saveCheckpoint()")
saveCheckpoint = utils.saveCheckpoint
else:
raise ValueError("Save Checkpoint Function Error")
return (
model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch,
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, opt,
name, fig, axis, saveCheckpoint
)
def main(opt):
"""
Main process of train.py
Parameters
----------
opt : namespace
The option (hyperparameters) of these model
"""
if opt.fixrandomseed:
seed = 1334
torch.manual_seed(seed)
if opt.cuda:
torch.cuda.manual_seed(seed)
print("==========> Loading datasets")
img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()
# Dataset
train_loader, val_loader = getDataset(opt, img_transform)
# TODO: Parameters Selection
# TODO: Mean shift Layer Handling
# Load Model
print("==========> Building model")
model = ImproveNet(opt.rb)
# ----------------------------------------------- #
# Loss: L1 Norm / L2 Norm #
# Perceptual Model (Optional) #
# TODO Append Layer (Optional) #
# ----------------------------------------------- #
criterion = nn.MSELoss(reduction='mean')
perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()
# ----------------------------------------------- #
# Optimizer and learning rate scheduler #
# ----------------------------------------------- #
print("==========> Setting Optimizer: {}".format(opt.optimizer))
optimizer = getOptimizer(model, opt)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
# ----------------------------------------------- #
# Option: resume training process from checkpoint #
# ----------------------------------------------- #
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
else:
raise Exception("=> no checkpoint found at '{}'".format(opt.resume))
# ----------------------------------------------- #
# Option: load weights from a pretrain network #
# ----------------------------------------------- #
if opt.pretrained:
if os.path.isfile(opt.pretrained):
print("=> loading pretrained model '{}'".format(opt.pretrained))
model = utils.loadModel(opt.pretrained, model, True)
else:
raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))
# Select training device
if opt.cuda:
print("==========> Setting GPU")
model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
criterion = criterion.cuda()
if perceptual is not None:
perceptual = perceptual.cuda()
else:
print("==========> Setting CPU")
model = model.cpu()
criterion = criterion.cpu()
if perceptual is not None:
perceptual = perceptual.cpu()
# Create container
length = opt.epochs * len(train_loader) // opt.val_interval
loss_iter = np.empty(length, dtype=float)
perc_iter = np.empty(length, dtype=float)
psnr_iter = np.empty(length, dtype=float)
ssim_iter = np.empty(length, dtype=float)
mse_iter = np.empty(length, dtype=float)
lr_iter = np.empty(length, dtype=float)
iterations = np.empty(length, dtype=float)
loss_iter[:] = np.nan
perc_iter[:] = np.nan
psnr_iter[:] = np.nan
ssim_iter[:] = np.nan
mse_iter[:] = np.nan
lr_iter[:] = np.nan
iterations[:] = np.nan
# Set plotter to plot the loss curves
twinx = (opt.perceptual is not None)
fig, axis = getFigureSpec(len(train_loader), twinx)
# Set Model Saving Function
if opt.save_item == "model":
print("==========> Save Function: saveModel()")
saveCheckpoint = utils.saveModel
elif opt.save_item == "checkpoint":
print("==========> Save Function: saveCheckpoint()")
saveCheckpoint = utils.saveCheckpoint
else:
raise ValueError("Save Checkpoint Function Error")
# Start Training
print("==========> Training")
for epoch in range(opt.starts, opt.epochs + 1):
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, _, _ = train(
model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch,
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations,
opt, name, fig, axis, saveCheckpoint
)
scheduler.step()
# Save the last checkpoint for resume training
utils.saveCheckpoint(os.path.join(opt.checkpoints, name, "final.pth"), model, optimizer, scheduler, epoch, len(train_loader))
# TODO: Fine tuning
return
def train(model, optimizer, criterion, perceptual, train_loader, val_loader,
scheduler: optim.lr_scheduler.MultiStepLR, epoch: int, loss_iter,
perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iters, opt, name,
fig: matplotlib.figure.Figure, ax: matplotlib.axes.Axes,
saveCheckpoint=utils.saveCheckpoint):
"""
Main function of training and vaildation
Parameters
----------
model, optimizer, criterion : nn.Module, optim.Optimizer, nn.Module
The main elements of the Neural Network
perceptual : {nn.Module, None} optional
Pass None or a pretrained Neural Network to calculate perceptual loss
train_loader, val_loader : DataLoader
The training and validation dataset
scheduler : optim.lr_scheduler.MultiStepLR
Learning rate scheduler
epoch : int
The processing train epoch
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, iters : 1D-Array like
The container to record the training performance
opt : namespace
The training option
name : str
(...)
fig, ax : matplotlib.figure.Figure, matplotlib.axes.Axes
(...)
saveCheckpoint : callable
(...)
"""
trainloss, perceloss = [], []
for iteration, (data, label) in enumerate(train_loader, 1):
steps = len(train_loader) * (epoch - 1) + iteration
model.train()
# ----------------------------------------------------- #
# Handling: #
# 1. Perceptual Loss #
# 2. Multiscaling #
# 2.0 Without Multiscaling (multiscaling = [1.0]) #
# 2.1 Regular Multiscaling #
# 2.2 Random Multiscaling #
# ----------------------------------------------------- #
# 2.0 Without Multiscaling
if opt.multiscale == [1.0]:
optimizer.zero_grad()
data, label = data.to(device), label.to(device)
output = model(data)
# Calculate loss
image_loss = criterion(output, label)
if perceptual is not None: perceptual_loss = perceptual(output, label)
# Backpropagation
            loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
loss.backward()
optimizer.step()
# Record the training loss
trainloss.append(image_loss.item())
if perceptual is not None: perceloss.append(perceptual_loss.item())
# TODO: Efficient Issue
# TODO: Resizing Loss
# 2.1 Regular Multiscaling
elif not opt.multiscaleShuffle:
data, label = data.to(device), label.to(device)
originWidth, originHeight = data.shape[1:3]
for scale in opt.multiscale:
optimizer.zero_grad()
if scale != 1.0:
newSize = (int(originWidth * scale), int(originHeight * scale))
data, label = Resize(size=newSize)(data), Resize(size=newSize)(label)
output = model(data)
# Calculate loss
image_loss = criterion(output, label)
if perceptual is not None: perceptual_loss = perceptual(output, label)
# Backpropagation
                loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
loss.backward()
optimizer.step()
# Record the training loss
trainloss.append(image_loss.item())
if perceptual is not None: perceloss.append(perceptual_loss.item())
# TODO: Check Usage
# 2.2 Random Multiscaling
else:
optimizer.zero_grad()
data, label = data.to(device), label.to(device)
originWidth, originHeight = data.shape[1:3]
scale = np.random.choice(opt.multiscale, 1)
if scale != 1.0:
newSize = (int(originWidth * scale), int(originHeight * scale))
data, label = Resize(size=newSize)(data), Resize(size=newSize)(label)
output = model(data)
# Calculate loss
image_loss = criterion(output, label)
if perceptual is not None: perceptual_loss = perceptual(output, label)
# Backpropagation
            loss = image_loss if (perceptual is None) else image_loss + opt.perceptual_weight * perceptual_loss
loss.backward()
optimizer.step()
# Record the training loss
trainloss.append(image_loss.item())
if perceptual is not None: perceloss.append(perceptual_loss.item())
# ----------------------------------------------------- #
# Execute for a period #
# 1. Print the training message #
# 2. Plot the gradient of each layer (Deprecated) #
# 3. Validate the model #
# 4. Saving the network #
# ----------------------------------------------------- #
# 1. Print the training message
if steps % opt.log_interval == 0:
msg = "===> [Epoch {}] [{:4d}/{:4d}] ImgLoss: (Mean: {:.6f}, Std: {:.6f})".format(
epoch, iteration, len(train_loader), np.mean(trainloss), np.std(trainloss)
)
            if perceptual is not None:
msg = "\t".join([msg, "PerceptualLoss: (Mean: {:.6f}, Std: {:.6f})".format(np.mean(perceloss), np.std(perceloss))])
print(msg)
# 2. Print the gradient statistic message for each layer
# graphs.draw_gradient()
# 3. Save the model
if steps % opt.save_interval == 0:
checkpoint_path = os.path.join(opt.checkpoints, name, "{}.pth".format(steps))
saveCheckpoint(checkpoint_path, model, optimizer, scheduler, epoch, iteration)
# 4. Validating the network
if steps % opt.val_interval == 0:
mse, psnr = validate(model, val_loader, criterion, epoch, iteration, normalize=opt.normalize)
idx = steps // opt.val_interval - 1
loss_iter[idx] = np.mean(trainloss)
mse_iter[idx] = mse
psnr_iter[idx] = psnr
lr_iter[idx] = optimizer.param_groups[0]["lr"]
iters[idx] = steps / len(train_loader)
if perceptual is not None: perc_iter[idx] = np.mean(perceloss)
# Clean up the list
            trainloss, perceloss = [], []
# Save the loss
df = pd.DataFrame(data={
'Iterations': iters * len(train_loader),
'TrainL2Loss': loss_iter,
'TrainPerceptual': perc_iter,
'ValidationLoss': mse_iter,
'ValidationPSNR': psnr_iter
})
# Loss (Training Curve) Message
df = df.nlargest(5, 'ValidationPSNR').append(df)
df.to_excel(os.path.join(opt.detail, name, "statistical.xlsx"))
# Show images in grid with validation set
# graphs.grid_show()
# Plot TrainLoss, ValidationLoss
fig, ax = training_curve(
loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, iters, lr_iter,
epoch, len(train_loader), fig, ax
)
plt.tight_layout()
plt.savefig(os.path.join(opt.detail, name, "loss.png"))
return loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iters, fig, ax
def training_curve(train_loss, perc_iter, val_loss, psnr, ssim, x, lr, epoch, iters_per_epoch,
fig: matplotlib.figure.Figure, axis: matplotlib.axes.Axes, linewidth=0.25):
"""
Plot out learning rate, training loss, validation loss and PSNR.
Parameters
----------
train_loss, perc_iter, val_loss, psnr, ssim, lr, x: 1D-array like
(...)
iters_per_epoch : int
To show the iterations in the epoch
fig, axis : matplotlib.figure.Figure, matplotlib.axes.Axes
Matplotlib plotting object.
linewidth : float
Default linewidth
Return
------
fig, axis : matplotlib.figure.Figure, matplotlib.axes.Axes
The training curve
"""
# Linear scale of loss curve
ax = axis[0]
ax.clear()
line1, = ax.plot(x, val_loss, label="Validation Loss", color='red', linewidth=linewidth)
line2, = ax.plot(x, train_loss, label="Train Loss", color='blue', linewidth=linewidth)
ax.plot(x, np.repeat(np.amin(val_loss), len(x)), linestyle=':', linewidth=linewidth)
ax.set_xlabel("Epoch(s) / Iteration: {}".format(iters_per_epoch))
ax.set_ylabel("Image Loss")
ax.set_title("Loss")
if not np.isnan(perc_iter).all():
ax = axis[4]
ax.clear()
line4, = ax.plot(x, perc_iter, label="Perceptual Loss", color='green', linewidth=linewidth)
ax.set_ylabel("Perceptual Loss")
ax.legend(handles=(line1, line2, line4, )) if not np.isnan(perc_iter).all() else ax.legend(handles=(line1, line2, ))
# Log scale of loss curve
ax = axis[1]
ax.clear()
line1, = ax.plot(x, val_loss, label="Validation Loss", color='red', linewidth=linewidth)
line2, = ax.plot(x, train_loss, label="Train Loss", color='blue', linewidth=linewidth)
ax.plot(x, np.repeat(np.amin(val_loss), len(x)), linestyle=':', linewidth=linewidth)
ax.set_xlabel("Epoch(s) / Iteration: {}".format(iters_per_epoch))
ax.set_yscale('log')
ax.set_title("Loss(Log scale)")
if not np.isnan(perc_iter).all():
ax = axis[5]
ax.clear()
line4, = ax.plot(x, perc_iter, label="Perceptual Loss", color='green', linewidth=linewidth)
ax.set_ylabel("Perceptual Loss")
ax.legend(handles=(line1, line2, line4, )) if not np.isnan(perc_iter).all() else ax.legend(handles=(line1, line2, ))
# Linear scale of PSNR, SSIM
ax = axis[2]
ax.clear()
line1, = ax.plot(x, psnr, label="PSNR", color='blue', linewidth=linewidth)
ax.plot(x, np.repeat(np.amax(psnr), len(x)), linestyle=':', linewidth=linewidth)
ax.set_xlabel("Epochs(s) / Iteration: {}".format(iters_per_epoch))
ax.set_ylabel("Average PSNR")
ax.set_title("Validation Performance")
ax.legend(handles=(line1, ))
# Learning Rate Curve
ax = axis[3]
ax.clear()
line1, = ax.plot(x, lr, label="Learning Rate", color='cyan', linewidth=linewidth)
ax.set_xlabel("Epochs(s) / Iteration: {}".format(iters_per_epoch))
ax.set_title("Learning Rate")
ax.set_yscale('log')
ax.legend(handles=(line1, ))
return fig, axis
def validate(model: nn.Module, loader: DataLoader, criterion: nn.Module, epoch, iteration, normalize=False):
"""
Validate the model
Parameters
----------
model : nn.Module
The neural networks to train
loader : torch.utils.data.DataLoader
The training data
epoch : int
The training epoch
criterion : nn.Module
Loss function
normalize : bool
If true, normalize the image before and after the NN.
Return
------
mse, psnr : np.float
np.mean(mse) and np.mean(psnr)
"""
psnrs, mses = [], []
model.eval()
with torch.no_grad():
for index, (data, label) in enumerate(loader, 1):
data, label = data.to(device), label.to(device)
output = model(data)
mse = criterion(output, label).item()
if normalize:
data = data * std[:, None, None] + mean[:, None, None]
label = label * std[:, None, None] + mean[:, None, None]
output = output * std[:, None, None] + mean[:, None, None]
mse = criterion(output, label).item()
psnr = 10 * np.log10(1.0 / mse)
mses.append(mse)
psnrs.append(psnr)
print("===> [Epoch {}] [ Vaild ] MSE: {:.6f}, PSNR: {:.4f}".format(epoch, np.mean(mses), np.mean(psnrs)))
return np.mean(mses), np.mean(psnrs)
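# Worked example of the PSNR formula used above: for images scaled to [0, 1],
# PSNR = 10 * log10(MAX**2 / MSE) with MAX = 1.0, so e.g.
#
# mse = 0.001
# psnr = 10 * np.log10(1.0 / mse)  # -> 30.0 dB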
if __name__ == "__main__":
# Clean up OS screen
os.system('clear')
# Cmd Parser
parser = cmdparser.parser
opt = parser.parse_args()
# Check arguments
if opt.cuda and not torch.cuda.is_available():
raise Exception("No GPU found, please run without --cuda")
if opt.resume and opt.pretrained:
raise ValueError("opt.resume and opt.pretrain should not be True in the same time.")
if opt.resume and (not os.path.isfile(opt.resume)):
raise ValueError("{} doesn't not exists".format(opt.resume))
if opt.pretrained and (not os.path.isfile(opt.pretrained)):
raise ValueError("{} doesn't not exists".format(opt.pretrained))
# Check training dataset directory
for path in opt.train:
if not os.path.exists(path):
raise ValueError("{} doesn't exist".format(path))
# Check validation dataset directory
for path in opt.val:
if not os.path.exists(path):
raise ValueError("{} doesn't exist".format(path))
# Make checkpoint storage directory
name = "{}_{}".format(opt.tag, date.today().strftime("%Y%m%d"))
os.makedirs(os.path.join(opt.checkpoints, name), exist_ok=True)
# Copy the code of model to logging file
if os.path.exists(os.path.join(opt.detail, name, 'model')):
shutil.rmtree(os.path.join(opt.detail, name, 'model'))
if os.path.exists(os.path.join(opt.checkpoints, name, 'model')):
shutil.rmtree(os.path.join(opt.checkpoints, name, 'model'))
shutil.copytree('./model', os.path.join(opt.detail, name, 'model'))
shutil.copytree('./model', os.path.join(opt.checkpoints, name, 'model'))
shutil.copyfile(__file__, os.path.join(opt.detail, name, os.path.basename(__file__)))
# Show Detail
print('==========> Training setting')
utils.details(opt, os.path.join(opt.detail, name, 'args.txt'))
# Execute main process
main(opt)
| 33.648205
| 140
| 0.572073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10,195
| 0.310757
|
02fe1589d692043102c05d5d014222183830f3c7
| 45,373
|
py
|
Python
|
clients/python/core_pb2.py
|
cloudwheels/grpc-test-gateway
|
5fe6564804cc1dfd2761138977d9282519b8ffc6
|
[
"MIT"
] | 3
|
2020-05-01T15:27:18.000Z
|
2020-05-28T15:11:34.000Z
|
clients/python/core_pb2.py
|
cloudwheels/grpc-test-gateway
|
5fe6564804cc1dfd2761138977d9282519b8ffc6
|
[
"MIT"
] | null | null | null |
clients/python/core_pb2.py
|
cloudwheels/grpc-test-gateway
|
5fe6564804cc1dfd2761138977d9282519b8ffc6
|
[
"MIT"
] | 3
|
2020-09-15T17:24:52.000Z
|
2021-07-07T10:01:25.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: core.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='core.proto',
package='org.dash.platform.dapi.v0',
syntax='proto3',
serialized_pb=_b('\n\ncore.proto\x12\x19org.dash.platform.dapi.v0\"\x12\n\x10GetStatusRequest\"\xe5\x01\n\x11GetStatusResponse\x12\x14\n\x0c\x63ore_version\x18\x01 \x01(\r\x12\x18\n\x10protocol_version\x18\x02 \x01(\r\x12\x0e\n\x06\x62locks\x18\x03 \x01(\r\x12\x13\n\x0btime_offset\x18\x04 \x01(\r\x12\x13\n\x0b\x63onnections\x18\x05 \x01(\r\x12\r\n\x05proxy\x18\x06 \x01(\t\x12\x12\n\ndifficulty\x18\x07 \x01(\x01\x12\x0f\n\x07testnet\x18\x08 \x01(\x08\x12\x11\n\trelay_fee\x18\t \x01(\x01\x12\x0e\n\x06\x65rrors\x18\n \x01(\t\x12\x0f\n\x07network\x18\x0b \x01(\t\"<\n\x0fGetBlockRequest\x12\x10\n\x06height\x18\x01 \x01(\rH\x00\x12\x0e\n\x04hash\x18\x02 \x01(\tH\x00\x42\x07\n\x05\x62lock\"!\n\x10GetBlockResponse\x12\r\n\x05\x62lock\x18\x01 \x01(\x0c\"]\n\x16SendTransactionRequest\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\x12\x17\n\x0f\x61llow_high_fees\x18\x02 \x01(\x08\x12\x15\n\rbypass_limits\x18\x03 \x01(\x08\"1\n\x17SendTransactionResponse\x12\x16\n\x0etransaction_id\x18\x01 \x01(\t\"#\n\x15GetTransactionRequest\x12\n\n\x02id\x18\x01 \x01(\t\"-\n\x16GetTransactionResponse\x12\x13\n\x0btransaction\x18\x01 \x01(\x0c\"x\n!BlockHeadersWithChainLocksRequest\x12\x19\n\x0f\x66rom_block_hash\x18\x01 \x01(\x0cH\x00\x12\x1b\n\x11\x66rom_block_height\x18\x02 \x01(\rH\x00\x12\r\n\x05\x63ount\x18\x03 \x01(\rB\x0c\n\nfrom_block\"\xd3\x01\n\"BlockHeadersWithChainLocksResponse\x12@\n\rblock_headers\x18\x01 \x01(\x0b\x32\'.org.dash.platform.dapi.v0.BlockHeadersH\x00\x12^\n\x1d\x63hain_lock_signature_messages\x18\x02 \x01(\x0b\x32\x35.org.dash.platform.dapi.v0.ChainLockSignatureMessagesH\x00\x42\x0b\n\tresponses\"\x1f\n\x0c\x42lockHeaders\x12\x0f\n\x07headers\x18\x01 \x03(\x0c\".\n\x1a\x43hainLockSignatureMessages\x12\x10\n\x08messages\x18\x01 \x03(\x0c\"3\n!GetEstimatedTransactionFeeRequest\x12\x0e\n\x06\x62locks\x18\x01 \x01(\r\"1\n\"GetEstimatedTransactionFeeResponse\x12\x0b\n\x03\x66\x65\x65\x18\x01 \x01(\x01\x32\x89\x06\n\x04\x43ore\x12\x66\n\tgetStatus\x12+.org.dash.platform.dapi.v0.GetStatusRequest\x1a,.org.dash.platform.dapi.v0.GetStatusResponse\x12\x63\n\x08getBlock\x12*.org.dash.platform.dapi.v0.GetBlockRequest\x1a+.org.dash.platform.dapi.v0.GetBlockResponse\x12x\n\x0fsendTransaction\x12\x31.org.dash.platform.dapi.v0.SendTransactionRequest\x1a\x32.org.dash.platform.dapi.v0.SendTransactionResponse\x12u\n\x0egetTransaction\x12\x30.org.dash.platform.dapi.v0.GetTransactionRequest\x1a\x31.org.dash.platform.dapi.v0.GetTransactionResponse\x12\x99\x01\n\x1agetEstimatedTransactionFee\x12<.org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest\x1a=.org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse\x12\xa6\x01\n%subscribeToBlockHeadersWithChainLocks\x12<.org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest\x1a=.org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse0\x01\x62\x06proto3')
)
_GETSTATUSREQUEST = _descriptor.Descriptor(
name='GetStatusRequest',
full_name='org.dash.platform.dapi.v0.GetStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=59,
)
_GETSTATUSRESPONSE = _descriptor.Descriptor(
name='GetStatusResponse',
full_name='org.dash.platform.dapi.v0.GetStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='core_version', full_name='org.dash.platform.dapi.v0.GetStatusResponse.core_version', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protocol_version', full_name='org.dash.platform.dapi.v0.GetStatusResponse.protocol_version', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blocks', full_name='org.dash.platform.dapi.v0.GetStatusResponse.blocks', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='time_offset', full_name='org.dash.platform.dapi.v0.GetStatusResponse.time_offset', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='connections', full_name='org.dash.platform.dapi.v0.GetStatusResponse.connections', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='proxy', full_name='org.dash.platform.dapi.v0.GetStatusResponse.proxy', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='difficulty', full_name='org.dash.platform.dapi.v0.GetStatusResponse.difficulty', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='testnet', full_name='org.dash.platform.dapi.v0.GetStatusResponse.testnet', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relay_fee', full_name='org.dash.platform.dapi.v0.GetStatusResponse.relay_fee', index=8,
number=9, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='errors', full_name='org.dash.platform.dapi.v0.GetStatusResponse.errors', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='network', full_name='org.dash.platform.dapi.v0.GetStatusResponse.network', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=291,
)
_GETBLOCKREQUEST = _descriptor.Descriptor(
name='GetBlockRequest',
full_name='org.dash.platform.dapi.v0.GetBlockRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='org.dash.platform.dapi.v0.GetBlockRequest.height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hash', full_name='org.dash.platform.dapi.v0.GetBlockRequest.hash', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='block', full_name='org.dash.platform.dapi.v0.GetBlockRequest.block',
index=0, containing_type=None, fields=[]),
],
serialized_start=293,
serialized_end=353,
)
_GETBLOCKRESPONSE = _descriptor.Descriptor(
name='GetBlockResponse',
full_name='org.dash.platform.dapi.v0.GetBlockResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='block', full_name='org.dash.platform.dapi.v0.GetBlockResponse.block', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=355,
serialized_end=388,
)
_SENDTRANSACTIONREQUEST = _descriptor.Descriptor(
name='SendTransactionRequest',
full_name='org.dash.platform.dapi.v0.SendTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_high_fees', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.allow_high_fees', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bypass_limits', full_name='org.dash.platform.dapi.v0.SendTransactionRequest.bypass_limits', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=390,
serialized_end=483,
)
_SENDTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='SendTransactionResponse',
full_name='org.dash.platform.dapi.v0.SendTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction_id', full_name='org.dash.platform.dapi.v0.SendTransactionResponse.transaction_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=485,
serialized_end=534,
)
_GETTRANSACTIONREQUEST = _descriptor.Descriptor(
name='GetTransactionRequest',
full_name='org.dash.platform.dapi.v0.GetTransactionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.dash.platform.dapi.v0.GetTransactionRequest.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=536,
serialized_end=571,
)
_GETTRANSACTIONRESPONSE = _descriptor.Descriptor(
name='GetTransactionResponse',
full_name='org.dash.platform.dapi.v0.GetTransactionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transaction', full_name='org.dash.platform.dapi.v0.GetTransactionResponse.transaction', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=573,
serialized_end=618,
)
_BLOCKHEADERSWITHCHAINLOCKSREQUEST = _descriptor.Descriptor(
name='BlockHeadersWithChainLocksRequest',
full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='from_block_hash', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='from_block_height', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block_height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='count', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.count', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='from_block', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest.from_block',
index=0, containing_type=None, fields=[]),
],
serialized_start=620,
serialized_end=740,
)
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE = _descriptor.Descriptor(
name='BlockHeadersWithChainLocksResponse',
full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='block_headers', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.block_headers', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chain_lock_signature_messages', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.chain_lock_signature_messages', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='responses', full_name='org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse.responses',
index=0, containing_type=None, fields=[]),
],
serialized_start=743,
serialized_end=954,
)
_BLOCKHEADERS = _descriptor.Descriptor(
name='BlockHeaders',
full_name='org.dash.platform.dapi.v0.BlockHeaders',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='headers', full_name='org.dash.platform.dapi.v0.BlockHeaders.headers', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=956,
serialized_end=987,
)
_CHAINLOCKSIGNATUREMESSAGES = _descriptor.Descriptor(
name='ChainLockSignatureMessages',
full_name='org.dash.platform.dapi.v0.ChainLockSignatureMessages',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='messages', full_name='org.dash.platform.dapi.v0.ChainLockSignatureMessages.messages', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=989,
serialized_end=1035,
)
_GETESTIMATEDTRANSACTIONFEEREQUEST = _descriptor.Descriptor(
name='GetEstimatedTransactionFeeRequest',
full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='blocks', full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest.blocks', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1037,
serialized_end=1088,
)
_GETESTIMATEDTRANSACTIONFEERESPONSE = _descriptor.Descriptor(
name='GetEstimatedTransactionFeeResponse',
full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fee', full_name='org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse.fee', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1139,
)
_GETBLOCKREQUEST.oneofs_by_name['block'].fields.append(
_GETBLOCKREQUEST.fields_by_name['height'])
_GETBLOCKREQUEST.fields_by_name['height'].containing_oneof = _GETBLOCKREQUEST.oneofs_by_name['block']
_GETBLOCKREQUEST.oneofs_by_name['block'].fields.append(
_GETBLOCKREQUEST.fields_by_name['hash'])
_GETBLOCKREQUEST.fields_by_name['hash'].containing_oneof = _GETBLOCKREQUEST.oneofs_by_name['block']
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_hash'])
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_hash'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block']
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_height'])
_BLOCKHEADERSWITHCHAINLOCKSREQUEST.fields_by_name['from_block_height'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSREQUEST.oneofs_by_name['from_block']
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'].message_type = _BLOCKHEADERS
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'].message_type = _CHAINLOCKSIGNATUREMESSAGES
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'])
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['block_headers'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses']
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses'].fields.append(
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'])
_BLOCKHEADERSWITHCHAINLOCKSRESPONSE.fields_by_name['chain_lock_signature_messages'].containing_oneof = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE.oneofs_by_name['responses']
DESCRIPTOR.message_types_by_name['GetStatusRequest'] = _GETSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GetStatusResponse'] = _GETSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['GetBlockRequest'] = _GETBLOCKREQUEST
DESCRIPTOR.message_types_by_name['GetBlockResponse'] = _GETBLOCKRESPONSE
DESCRIPTOR.message_types_by_name['SendTransactionRequest'] = _SENDTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['SendTransactionResponse'] = _SENDTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['GetTransactionRequest'] = _GETTRANSACTIONREQUEST
DESCRIPTOR.message_types_by_name['GetTransactionResponse'] = _GETTRANSACTIONRESPONSE
DESCRIPTOR.message_types_by_name['BlockHeadersWithChainLocksRequest'] = _BLOCKHEADERSWITHCHAINLOCKSREQUEST
DESCRIPTOR.message_types_by_name['BlockHeadersWithChainLocksResponse'] = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE
DESCRIPTOR.message_types_by_name['BlockHeaders'] = _BLOCKHEADERS
DESCRIPTOR.message_types_by_name['ChainLockSignatureMessages'] = _CHAINLOCKSIGNATUREMESSAGES
DESCRIPTOR.message_types_by_name['GetEstimatedTransactionFeeRequest'] = _GETESTIMATEDTRANSACTIONFEEREQUEST
DESCRIPTOR.message_types_by_name['GetEstimatedTransactionFeeResponse'] = _GETESTIMATEDTRANSACTIONFEERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetStatusRequest = _reflection.GeneratedProtocolMessageType('GetStatusRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSTATUSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetStatusRequest)
))
_sym_db.RegisterMessage(GetStatusRequest)
GetStatusResponse = _reflection.GeneratedProtocolMessageType('GetStatusResponse', (_message.Message,), dict(
DESCRIPTOR = _GETSTATUSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetStatusResponse)
))
_sym_db.RegisterMessage(GetStatusResponse)
GetBlockRequest = _reflection.GeneratedProtocolMessageType('GetBlockRequest', (_message.Message,), dict(
DESCRIPTOR = _GETBLOCKREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetBlockRequest)
))
_sym_db.RegisterMessage(GetBlockRequest)
GetBlockResponse = _reflection.GeneratedProtocolMessageType('GetBlockResponse', (_message.Message,), dict(
DESCRIPTOR = _GETBLOCKRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetBlockResponse)
))
_sym_db.RegisterMessage(GetBlockResponse)
SendTransactionRequest = _reflection.GeneratedProtocolMessageType('SendTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _SENDTRANSACTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.SendTransactionRequest)
))
_sym_db.RegisterMessage(SendTransactionRequest)
SendTransactionResponse = _reflection.GeneratedProtocolMessageType('SendTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _SENDTRANSACTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.SendTransactionResponse)
))
_sym_db.RegisterMessage(SendTransactionResponse)
GetTransactionRequest = _reflection.GeneratedProtocolMessageType('GetTransactionRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTRANSACTIONREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetTransactionRequest)
))
_sym_db.RegisterMessage(GetTransactionRequest)
GetTransactionResponse = _reflection.GeneratedProtocolMessageType('GetTransactionResponse', (_message.Message,), dict(
DESCRIPTOR = _GETTRANSACTIONRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetTransactionResponse)
))
_sym_db.RegisterMessage(GetTransactionResponse)
BlockHeadersWithChainLocksRequest = _reflection.GeneratedProtocolMessageType('BlockHeadersWithChainLocksRequest', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERSWITHCHAINLOCKSREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeadersWithChainLocksRequest)
))
_sym_db.RegisterMessage(BlockHeadersWithChainLocksRequest)
BlockHeadersWithChainLocksResponse = _reflection.GeneratedProtocolMessageType('BlockHeadersWithChainLocksResponse', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERSWITHCHAINLOCKSRESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeadersWithChainLocksResponse)
))
_sym_db.RegisterMessage(BlockHeadersWithChainLocksResponse)
BlockHeaders = _reflection.GeneratedProtocolMessageType('BlockHeaders', (_message.Message,), dict(
DESCRIPTOR = _BLOCKHEADERS,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BlockHeaders)
))
_sym_db.RegisterMessage(BlockHeaders)
ChainLockSignatureMessages = _reflection.GeneratedProtocolMessageType('ChainLockSignatureMessages', (_message.Message,), dict(
DESCRIPTOR = _CHAINLOCKSIGNATUREMESSAGES,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.ChainLockSignatureMessages)
))
_sym_db.RegisterMessage(ChainLockSignatureMessages)
GetEstimatedTransactionFeeRequest = _reflection.GeneratedProtocolMessageType('GetEstimatedTransactionFeeRequest', (_message.Message,), dict(
DESCRIPTOR = _GETESTIMATEDTRANSACTIONFEEREQUEST,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetEstimatedTransactionFeeRequest)
))
_sym_db.RegisterMessage(GetEstimatedTransactionFeeRequest)
GetEstimatedTransactionFeeResponse = _reflection.GeneratedProtocolMessageType('GetEstimatedTransactionFeeResponse', (_message.Message,), dict(
DESCRIPTOR = _GETESTIMATEDTRANSACTIONFEERESPONSE,
__module__ = 'core_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetEstimatedTransactionFeeResponse)
))
_sym_db.RegisterMessage(GetEstimatedTransactionFeeResponse)
_CORE = _descriptor.ServiceDescriptor(
name='Core',
full_name='org.dash.platform.dapi.v0.Core',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1142,
serialized_end=1919,
methods=[
_descriptor.MethodDescriptor(
name='getStatus',
full_name='org.dash.platform.dapi.v0.Core.getStatus',
index=0,
containing_service=None,
input_type=_GETSTATUSREQUEST,
output_type=_GETSTATUSRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getBlock',
full_name='org.dash.platform.dapi.v0.Core.getBlock',
index=1,
containing_service=None,
input_type=_GETBLOCKREQUEST,
output_type=_GETBLOCKRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='sendTransaction',
full_name='org.dash.platform.dapi.v0.Core.sendTransaction',
index=2,
containing_service=None,
input_type=_SENDTRANSACTIONREQUEST,
output_type=_SENDTRANSACTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getTransaction',
full_name='org.dash.platform.dapi.v0.Core.getTransaction',
index=3,
containing_service=None,
input_type=_GETTRANSACTIONREQUEST,
output_type=_GETTRANSACTIONRESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='getEstimatedTransactionFee',
full_name='org.dash.platform.dapi.v0.Core.getEstimatedTransactionFee',
index=4,
containing_service=None,
input_type=_GETESTIMATEDTRANSACTIONFEEREQUEST,
output_type=_GETESTIMATEDTRANSACTIONFEERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='subscribeToBlockHeadersWithChainLocks',
full_name='org.dash.platform.dapi.v0.Core.subscribeToBlockHeadersWithChainLocks',
index=5,
containing_service=None,
input_type=_BLOCKHEADERSWITHCHAINLOCKSREQUEST,
output_type=_BLOCKHEADERSWITHCHAINLOCKSRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_CORE)
DESCRIPTOR.services_by_name['Core'] = _CORE
try:
  # THESE ELEMENTS WILL BE DEPRECATED.
  # Please use the generated *_pb2_grpc.py files instead.
  import grpc
  from grpc.beta import implementations as beta_implementations
  from grpc.beta import interfaces as beta_interfaces
  from grpc.framework.common import cardinality
  from grpc.framework.interfaces.face import utilities as face_utilities

  class CoreStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
      """Constructor.

      Args:
        channel: A grpc.Channel.
      """
      self.getStatus = channel.unary_unary(
          '/org.dash.platform.dapi.v0.Core/getStatus',
          request_serializer=GetStatusRequest.SerializeToString,
          response_deserializer=GetStatusResponse.FromString,
          )
      self.getBlock = channel.unary_unary(
          '/org.dash.platform.dapi.v0.Core/getBlock',
          request_serializer=GetBlockRequest.SerializeToString,
          response_deserializer=GetBlockResponse.FromString,
          )
      self.sendTransaction = channel.unary_unary(
          '/org.dash.platform.dapi.v0.Core/sendTransaction',
          request_serializer=SendTransactionRequest.SerializeToString,
          response_deserializer=SendTransactionResponse.FromString,
          )
      self.getTransaction = channel.unary_unary(
          '/org.dash.platform.dapi.v0.Core/getTransaction',
          request_serializer=GetTransactionRequest.SerializeToString,
          response_deserializer=GetTransactionResponse.FromString,
          )
      self.getEstimatedTransactionFee = channel.unary_unary(
          '/org.dash.platform.dapi.v0.Core/getEstimatedTransactionFee',
          request_serializer=GetEstimatedTransactionFeeRequest.SerializeToString,
          response_deserializer=GetEstimatedTransactionFeeResponse.FromString,
          )
      self.subscribeToBlockHeadersWithChainLocks = channel.unary_stream(
          '/org.dash.platform.dapi.v0.Core/subscribeToBlockHeadersWithChainLocks',
          request_serializer=BlockHeadersWithChainLocksRequest.SerializeToString,
          response_deserializer=BlockHeadersWithChainLocksResponse.FromString,
          )

  class CoreServicer(object):
    # missing associated documentation comment in .proto file
    pass

    def getStatus(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def getBlock(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def sendTransaction(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def getTransaction(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def getEstimatedTransactionFee(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def subscribeToBlockHeadersWithChainLocks(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

  def add_CoreServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'getStatus': grpc.unary_unary_rpc_method_handler(
            servicer.getStatus,
            request_deserializer=GetStatusRequest.FromString,
            response_serializer=GetStatusResponse.SerializeToString,
        ),
        'getBlock': grpc.unary_unary_rpc_method_handler(
            servicer.getBlock,
            request_deserializer=GetBlockRequest.FromString,
            response_serializer=GetBlockResponse.SerializeToString,
        ),
        'sendTransaction': grpc.unary_unary_rpc_method_handler(
            servicer.sendTransaction,
            request_deserializer=SendTransactionRequest.FromString,
            response_serializer=SendTransactionResponse.SerializeToString,
        ),
        'getTransaction': grpc.unary_unary_rpc_method_handler(
            servicer.getTransaction,
            request_deserializer=GetTransactionRequest.FromString,
            response_serializer=GetTransactionResponse.SerializeToString,
        ),
        'getEstimatedTransactionFee': grpc.unary_unary_rpc_method_handler(
            servicer.getEstimatedTransactionFee,
            request_deserializer=GetEstimatedTransactionFeeRequest.FromString,
            response_serializer=GetEstimatedTransactionFeeResponse.SerializeToString,
        ),
        'subscribeToBlockHeadersWithChainLocks': grpc.unary_stream_rpc_method_handler(
            servicer.subscribeToBlockHeadersWithChainLocks,
            request_deserializer=BlockHeadersWithChainLocksRequest.FromString,
            response_serializer=BlockHeadersWithChainLocksResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'org.dash.platform.dapi.v0.Core', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))

  class BetaCoreServicer(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    # missing associated documentation comment in .proto file
    pass

    def getStatus(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def getBlock(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def sendTransaction(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def getTransaction(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def getEstimatedTransactionFee(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def subscribeToBlockHeadersWithChainLocks(self, request, context):
      # missing associated documentation comment in .proto file
      pass
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

  class BetaCoreStub(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    # missing associated documentation comment in .proto file
    pass

    def getStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()
    getStatus.future = None

    def getBlock(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()
    getBlock.future = None

    def sendTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()
    sendTransaction.future = None

    def getTransaction(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()
    getTransaction.future = None

    def getEstimatedTransactionFee(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()
    getEstimatedTransactionFee.future = None

    def subscribeToBlockHeadersWithChainLocks(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      # missing associated documentation comment in .proto file
      pass
      raise NotImplementedError()

  def beta_create_Core_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    request_deserializers = {
      ('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockRequest.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeRequest.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusRequest.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionRequest.FromString,
      ('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionRequest.FromString,
      ('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksRequest.FromString,
    }
    response_serializers = {
      ('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockResponse.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeResponse.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusResponse.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionResponse.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionResponse.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksResponse.SerializeToString,
    }
    method_implementations = {
      ('org.dash.platform.dapi.v0.Core', 'getBlock'): face_utilities.unary_unary_inline(servicer.getBlock),
      ('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): face_utilities.unary_unary_inline(servicer.getEstimatedTransactionFee),
      ('org.dash.platform.dapi.v0.Core', 'getStatus'): face_utilities.unary_unary_inline(servicer.getStatus),
      ('org.dash.platform.dapi.v0.Core', 'getTransaction'): face_utilities.unary_unary_inline(servicer.getTransaction),
      ('org.dash.platform.dapi.v0.Core', 'sendTransaction'): face_utilities.unary_unary_inline(servicer.sendTransaction),
      ('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): face_utilities.unary_stream_inline(servicer.subscribeToBlockHeadersWithChainLocks),
    }
    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)

  def beta_create_Core_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    request_serializers = {
      ('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockRequest.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeRequest.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusRequest.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionRequest.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionRequest.SerializeToString,
      ('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksRequest.SerializeToString,
    }
    response_deserializers = {
      ('org.dash.platform.dapi.v0.Core', 'getBlock'): GetBlockResponse.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getEstimatedTransactionFee'): GetEstimatedTransactionFeeResponse.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getStatus'): GetStatusResponse.FromString,
      ('org.dash.platform.dapi.v0.Core', 'getTransaction'): GetTransactionResponse.FromString,
      ('org.dash.platform.dapi.v0.Core', 'sendTransaction'): SendTransactionResponse.FromString,
      ('org.dash.platform.dapi.v0.Core', 'subscribeToBlockHeadersWithChainLocks'): BlockHeadersWithChainLocksResponse.FromString,
    }
    cardinalities = {
      'getBlock': cardinality.Cardinality.UNARY_UNARY,
      'getEstimatedTransactionFee': cardinality.Cardinality.UNARY_UNARY,
      'getStatus': cardinality.Cardinality.UNARY_UNARY,
      'getTransaction': cardinality.Cardinality.UNARY_UNARY,
      'sendTransaction': cardinality.Cardinality.UNARY_UNARY,
      'subscribeToBlockHeadersWithChainLocks': cardinality.Cardinality.UNARY_STREAM,
    }
    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
    return beta_implementations.dynamic_stub(channel, 'org.dash.platform.dapi.v0.Core', cardinalities, options=stub_options)
except ImportError:
  pass
# @@protoc_insertion_point(module_scope)
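# --- Usage sketch (editor's addition, not part of the generated module). ---
# A minimal way to exercise the generated stub above over a gRPC channel.
# The address/port below is a hypothetical placeholder, not an official DAPI
# endpoint; the stub and message names come from this file. Kept commented so
# the module itself is unchanged when imported.
#
# import grpc
# channel = grpc.insecure_channel('localhost:3005')  # placeholder address
# stub = CoreStub(channel)
# status = stub.getStatus(GetStatusRequest())        # unary-unary call
# request = BlockHeadersWithChainLocksRequest(from_block_height=1, count=10)
# for response in stub.subscribeToBlockHeadersWithChainLocks(request):
#     if response.HasField('block_headers'):         # oneof: headers or chain locks
#         print(len(response.block_headers.headers), 'headers received')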
avg_line_length: 42.885633 | max_line_length: 2847 | alphanum_fraction: 0.760518 | count_classes: 7037 | score_classes: 0.155092 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 14506 | score_documentation: 0.319706

hexsha: 02fe97635bdf12eb93fa73109a7854ea036f69bf | size: 546 | ext: py | lang: Python
max_stars_repo_path: python_high/chapter_3/3.1.py | max_stars_repo_name: Rolling-meatballs/deepshare | max_stars_repo_head_hexsha: 47c1e599c915ccd0a123fa9ab26e1f20738252ef | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python_high/chapter_3/3.1.py | max_issues_repo_name: Rolling-meatballs/deepshare | max_issues_repo_head_hexsha: 47c1e599c915ccd0a123fa9ab26e1f20738252ef | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python_high/chapter_3/3.1.py | max_forks_repo_name: Rolling-meatballs/deepshare | max_forks_repo_head_hexsha: 47c1e599c915ccd0a123fa9ab26e1f20738252ef | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
name = " alberT"
one = name.rsplit()
print("one:", one)
two = name.index('al', 0)
print("two:", two)
three = name.index('T', -1)
print("three:", three)
four = name.replace('l', 'p')
print("four:", four)
five = name.split('l')
print("five:", five)
six = name.upper()
print("six:", six)
seven = name.lower()
print("seven:", seven)
eight = name[1]
print("eight:", eight )
nine = name[:3]
print("nine:", nine)
ten = name[-2:]
print("ten:", ten)
eleven = name.index("e")
print("eleven:", eleven)
twelve = name[:-1]
print("twelve:", twelve)
avg_line_length: 14.756757 | max_line_length: 29 | alphanum_fraction: 0.598901 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 115 | score_documentation: 0.210623

hexsha: 02feb42fde4ca975bc72c9c78d9e0931c5f1d4a2 | size: 384 | ext: py | lang: Python
max_stars_repo_path: src/views/simplepage/models.py | max_stars_repo_name: svenvandescheur/svenv.nl-new | max_stars_repo_head_hexsha: c448714853d96ad31d26c825d8b35c4890be40a1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/views/simplepage/models.py | max_issues_repo_name: svenvandescheur/svenv.nl-new | max_issues_repo_head_hexsha: c448714853d96ad31d26c825d8b35c4890be40a1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/views/simplepage/models.py | max_forks_repo_name: svenvandescheur/svenv.nl-new | max_forks_repo_head_hexsha: c448714853d96ad31d26c825d8b35c4890be40a1 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from cms.extensions import PageExtension
from cms.extensions.extension_pool import extension_pool
from django.utils.translation import ugettext as _
from filer.fields.image import FilerImageField


class SimplePageExtension(PageExtension):
    """
    A generic website page.
    """
    image = FilerImageField(verbose_name=_("image"))


extension_pool.register(SimplePageExtension)
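# Editor's note (hedged): with django-CMS page extensions, a registered
# extension is conventionally reachable from a Page instance via the
# lowercased class name; the lookup below is illustrative, not from this file.
# image = page.simplepageextension.image  # the FilerImageField declared above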
avg_line_length: 25.6 | max_line_length: 56 | alphanum_fraction: 0.796875 | count_classes: 138 | score_classes: 0.359375 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 46 | score_documentation: 0.119792

hexsha: f301917c422d9318495feced737c153caa8bd9a9 | size: 290 | ext: py | lang: Python
max_stars_repo_path: baekjoon/not-classified/10844/10844.py | max_stars_repo_name: honux77/algorithm | max_stars_repo_head_hexsha: 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2019-02-08T01:23:07.000Z | max_stars_repo_stars_event_max_datetime: 2020-11-19T12:23:52.000Z
max_issues_repo_path: baekjoon/not-classified/10844/10844.py | max_issues_repo_name: honux77/algorithm | max_issues_repo_head_hexsha: 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: baekjoon/not-classified/10844/10844.py | max_forks_repo_name: honux77/algorithm | max_forks_repo_head_hexsha: 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
n = int(input())
s = [[0] * 10 for _ in range(n + 1)]
s[1] = [0] + [1] * 9
mod = 1000 ** 3
for i in range(2, n + 1):
    for j in range(0, 9 + 1):
        if j >= 1:
            s[i][j] += s[i - 1][j - 1]
        if j <= 8:
            s[i][j] += s[i - 1][j + 1]
print(sum(s[n]) % mod)
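# The table above counts "stair numbers": s[i][j] is the number of i-digit
# numbers ending in digit j whose adjacent digits differ by exactly 1, so
# s[i][j] = s[i-1][j-1] + s[i-1][j+1], with one-sided sums at j = 0 and j = 9.
# A self-contained cross-check of the same recurrence (editor's addition):
def stair_count(n, mod=1000 ** 3):
    s = [0] + [1] * 9  # one-digit stair numbers: 1..9
    for _ in range(n - 1):
        s = [(s[j - 1] if j > 0 else 0) + (s[j + 1] if j < 9 else 0)
             for j in range(10)]
    return sum(s) % mod

assert stair_count(1) == 9
assert stair_count(2) == 17  # 10, 12, 21, 23, 32, ..., 98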
avg_line_length: 19.333333 | max_line_length: 38 | alphanum_fraction: 0.358621 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: f302cba30df57e2c4fa0a9201628774e666043a8 | size: 3021 | ext: py | lang: Python
max_stars_repo_path: Ideas/cricket-umpire-assistance-master/visualization/test2.py | max_stars_repo_name: hsspratt/Nott-Hawkeye1 | max_stars_repo_head_hexsha: 178f4f0fef62e8699f6057d9d50adfd61a851047 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Ideas/cricket-umpire-assistance-master/visualization/test2.py | max_issues_repo_name: hsspratt/Nott-Hawkeye1 | max_issues_repo_head_hexsha: 178f4f0fef62e8699f6057d9d50adfd61a851047 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-11-11T22:15:36.000Z | max_issues_repo_issues_event_max_datetime: 2021-11-11T22:15:36.000Z
max_forks_repo_path: Ideas/cricket-umpire-assistance-master/visualization/test2.py | max_forks_repo_name: hsspratt/Nott-Hawkeye1 | max_forks_repo_head_hexsha: 178f4f0fef62e8699f6057d9d50adfd61a851047 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
### INITIALIZE VPYTHON
# -----------------------------------------------------------------------
from __future__ import division
from visual import *
from physutil import *
from visual.graph import *
### SETUP ELEMENTS FOR GRAPHING, SIMULATION, VISUALIZATION, TIMING
# ------------------------------------------------------------------------
# Set window title
scene.title = "Projectile Motion Particle Model"
# Make scene background black
scene.background = color.black
# Define scene objects (units are in meters)
field = box(pos = vector(0, 0, 0), size = (300, 10, 100), color = color.green, opacity = 0.3)
ball = sphere(radius = 5, color = color.blue)
# Define axis marks the field with a specified number of tick marks
xaxis = PhysAxis(field, 10) # 10 tick marks
yaxis = PhysAxis(field, 5, # 5 tick marks
axisType = "y",
labelOrientation = "left",
startPos = vector(-150, 0, 0), # start the y axis at the left edge of the scene
length = 100) # units are in meters
# Set up graph with two plots
posgraph = PhysGraph(2)
# Set up trail to mark the ball's trajectory
trail = curve(color = color.yellow, radius = 1) # units are in meters
# Set up motion map for ball
motionMap = MotionMap(ball, 8.163, # expected end time in seconds
10, # number of markers to draw
labelMarkerOffset = vector(0, -20, 0),
dropTime = False)
# Set timer in top right of screen
timerDisplay = PhysTimer(140, 150) # timer position (units are in meters)
### SETUP PARAMETERS AND INITIAL CONDITIONS
# ----------------------------------------------------------------------------------------
# Define parameters
ball.m = 0.6 # mass of ball in kg
ball.pos = vector(-150, 0, 0) # initial position of the ball in(x, y, z) form, units are in meters
ball.v = vector(30, 40, 0) # initial velocity of car in (vx, vy, vz) form, units are m/s
g = vector(0, -9.8, 0) # acceleration due to gravity; units are m/s/s
# Define time parameters
t = 0 # starting time
deltat = 0.001 # time step units are s
### CALCULATION LOOP; perform physics updates and drawing
# ------------------------------------------------------------------------------------
while ball.pos.y >= 0:  # while the ball's y-position is greater than 0 (above the ground)
    # Required to make animation visible / refresh smoothly (keeps program from running faster
    # than 1000 frames/s)
    rate(1000)

    # Compute Net Force
    Fnet = ball.m * g

    # Newton's 2nd Law
    ball.v = ball.v + (Fnet/ball.m * deltat)

    # Position update
    ball.pos = ball.pos + ball.v * deltat

    # Update motion map, graph, timer, and trail
    motionMap.update(t, ball.v)
    posgraph.plot(t, ball.pos.x, ball.pos.y)  # plot x and y position vs. time
    trail.append(pos = ball.pos)
    timerDisplay.update(t)

    # Time update
    t = t + deltat
### OUTPUT
# --------------------------------------------------------------------------------------
# Print the final time and the ball's final position
print t
print ball.pos
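# Editor's cross-check of the 8.163 s "expected end time" handed to MotionMap
# above (kept as comments, since this file targets classic VPython / Python 2):
# with launch velocity (30, 40, 0) m/s and g = 9.8 m/s**2,
#   time of flight = 2 * 40 / 9.8 ~= 8.163 s
#   horizontal range = 30 * 8.163 ~= 244.9 m
# so the ball, launched from x = -150 m, lands near x = +95 m, inside the
# 300 m field set up in the scene above.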
avg_line_length: 32.138298 | max_line_length: 98 | alphanum_fraction: 0.589209 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1813 | score_documentation: 0.600132

hexsha: f3041c623ca233066149adf01d25baef21dbb909 | size: 727 | ext: py | lang: Python
max_stars_repo_path: parking_systems/models.py | max_stars_repo_name: InaraShalfei/parking_system | max_stars_repo_head_hexsha: f1b326f12037808ab80e3b1d6b305235ba59a0db | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: parking_systems/models.py | max_issues_repo_name: InaraShalfei/parking_system | max_issues_repo_head_hexsha: f1b326f12037808ab80e3b1d6b305235ba59a0db | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: parking_systems/models.py | max_forks_repo_name: InaraShalfei/parking_system | max_forks_repo_head_hexsha: f1b326f12037808ab80e3b1d6b305235ba59a0db | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from django.db import models


class Parking(models.Model):
    def __str__(self):
        # user-facing strings translated from Russian
        return f'Parking space no. {self.id}'


class Reservation(models.Model):
    parking_space = models.ForeignKey(Parking, on_delete=models.CASCADE, related_name='reservations',
                                      verbose_name='Parking space number')
    start_time = models.DateTimeField(verbose_name='Reservation start time')
    finish_time = models.DateTimeField(verbose_name='Reservation end time')

    class Meta:
        ordering = ['-start_time']

    def __str__(self):
        format = "%d.%m.%y %H:%M"
        return f'Reservation no. {self.id} (from {self.start_time.strftime(format)} to {self.finish_time.strftime(format)})'
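# Usage sketch (editor's addition; model names from this file, values
# illustrative). Kept commented so the module stays import-clean:
# from django.utils import timezone
# from datetime import timedelta
# spot = Parking.objects.create()
# Reservation.objects.create(
#     parking_space=spot,
#     start_time=timezone.now(),
#     finish_time=timezone.now() + timedelta(hours=2),
# )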
avg_line_length: 33.045455 | max_line_length: 119 | alphanum_fraction: 0.671252 | count_classes: 783 | score_classes: 0.957213 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 338 | score_documentation: 0.413203

hexsha: f30518d94f19b9e7816aaf41734cf24e7b19c736 | size: 4875 | ext: py | lang: Python
max_stars_repo_path: sktime/classification/kernel_based/_rocket_classifier.py | max_stars_repo_name: ltoniazzi/sktime | max_stars_repo_head_hexsha: 0ea07803115c1ec7463dde99f049b131d639f4a7 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-11-02T18:56:12.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-02T18:56:12.000Z
max_issues_repo_path: sktime/classification/kernel_based/_rocket_classifier.py | max_issues_repo_name: ltoniazzi/sktime | max_issues_repo_head_hexsha: 0ea07803115c1ec7463dde99f049b131d639f4a7 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: sktime/classification/kernel_based/_rocket_classifier.py | max_forks_repo_name: ltoniazzi/sktime | max_forks_repo_head_hexsha: 0ea07803115c1ec7463dde99f049b131d639f4a7 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-04-30T08:12:18.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-30T08:12:18.000Z
# -*- coding: utf-8 -*-
"""RandOm Convolutional KErnel Transform (ROCKET)."""
__author__ = "Matthew Middlehurst"
__all__ = ["ROCKETClassifier"]
import numpy as np
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.utils.multiclass import class_distribution
from sktime.classification.base import BaseClassifier
from sktime.transformations.panel.rocket import Rocket
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
class ROCKETClassifier(BaseClassifier):
    """Classifier wrapped for the ROCKET transformer using RidgeClassifierCV.

    Parameters
    ----------
    num_kernels : int, number of kernels for ROCKET transform
        (default=10,000)
    n_jobs : int, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        ``-1`` means using all processors.
    random_state : int or None, seed for random, integer,
        optional (default to no seed)

    Attributes
    ----------
    classifier : ROCKET classifier
    n_classes : extracted from the data

    Notes
    -----
    @article{dempster_etal_2019,
      author  = {Dempster, Angus and Petitjean, Francois and Webb,
                 Geoffrey I},
      title   = {ROCKET: Exceptionally fast and accurate time series
                 classification using random convolutional kernels},
      year    = {2019},
      journal = {arXiv:1910.13051}
    }

    Java version
    https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/
    tsml/classifiers/shapelet_based/ROCKETClassifier.java
    """

    # Capability tags
    capabilities = {
        "multivariate": True,
        "unequal_length": False,
        "missing_values": False,
        "train_estimate": False,
        "contractable": False,
    }

    def __init__(
        self,
        num_kernels=10000,
        n_jobs=1,
        random_state=None,
    ):
        self.num_kernels = num_kernels
        self.n_jobs = n_jobs
        self.random_state = random_state

        self.classifier = None
        self.n_classes = 0
        self.classes_ = []
        self.class_dictionary = {}

        super(ROCKETClassifier, self).__init__()

    def fit(self, X, y):
        """Build a pipeline containing the ROCKET transformer and RidgeClassifierCV.

        Parameters
        ----------
        X : nested pandas DataFrame of shape [n_instances, 1]
            Nested dataframe with univariate time-series in cells.
        y : array-like, shape = [n_instances] The class labels.

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y)

        self.n_classes = np.unique(y).shape[0]
        self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
        for index, classVal in enumerate(self.classes_):
            self.class_dictionary[classVal] = index

        self.classifier = rocket_pipeline = make_pipeline(
            Rocket(
                num_kernels=self.num_kernels,
                random_state=self.random_state,
                n_jobs=self.n_jobs,
            ),
            RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True),
        )
        rocket_pipeline.fit(X, y)
        self._is_fitted = True
        return self

    def predict(self, X):
        """Find predictions for all cases in X.

        Parameters
        ----------
        X : The training input samples. array-like or pandas data frame.
            If a Pandas data frame is passed, a check is performed that it only
            has one column.
            If not, an exception is thrown, since this classifier does not yet have
            multivariate capability.

        Returns
        -------
        output : array of shape = [n_test_instances]
        """
        self.check_is_fitted()
        X = check_X(X)
        return self.classifier.predict(X)

    def predict_proba(self, X):
        """Find probability estimates for each class for all cases in X.

        Parameters
        ----------
        X : The training input samples. array-like or sparse matrix of shape
            = [n_test_instances, series_length]
            If a Pandas data frame is passed (sktime format) a check is
            performed that it only has one column.
            If not, an exception is thrown, since this classifier does not
            yet have multivariate capability.

        Returns
        -------
        output : array of shape = [n_test_instances, num_classes] of
            probabilities
        """
        self.check_is_fitted()
        X = check_X(X)

        dists = np.zeros((X.shape[0], self.n_classes))
        preds = self.classifier.predict(X)
        for i in range(0, X.shape[0]):
            dists[i, np.where(self.classes_ == preds[i])] = 1

        return dists
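# Usage sketch (editor's addition). load_arrow_head is a standard sktime
# dataset loader; any dataset in sktime's nested-DataFrame format works the
# same way. Kept commented so the module stays import-clean:
# from sktime.datasets import load_arrow_head
# X_train, y_train = load_arrow_head(split="train", return_X_y=True)
# X_test, y_test = load_arrow_head(split="test", return_X_y=True)
# clf = ROCKETClassifier(num_kernels=500, random_state=0)
# clf.fit(X_train, y_train)
# accuracy = (clf.predict(X_test) == y_test).mean()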
avg_line_length: 30.85443 | max_line_length: 84 | alphanum_fraction: 0.611487 | count_classes: 4345 | score_classes: 0.891282 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2755 | score_documentation: 0.565128

hexsha: f3052e2208b42e9e168f9e6bcc11e27d4f1b41d3 | size: 9922 | ext: py | lang: Python
max_stars_repo_path: mc/opcodes.py | max_stars_repo_name: iximeow/binja-m16c | max_stars_repo_head_hexsha: debf368e5df90a96d6c8b0bc128626a9d6834bb4 | max_stars_repo_licenses: ["0BSD"] | max_stars_count: 12 | max_stars_repo_stars_event_min_datetime: 2020-01-15T00:51:06.000Z | max_stars_repo_stars_event_max_datetime: 2021-10-02T12:45:50.000Z
max_issues_repo_path: mc/opcodes.py | max_issues_repo_name: iximeow/binja-m16c | max_issues_repo_head_hexsha: debf368e5df90a96d6c8b0bc128626a9d6834bb4 | max_issues_repo_licenses: ["0BSD"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2020-02-03T08:26:26.000Z | max_issues_repo_issues_event_max_datetime: 2020-07-01T19:51:44.000Z
max_forks_repo_path: mc/opcodes.py | max_forks_repo_name: iximeow/binja-m16c | max_forks_repo_head_hexsha: debf368e5df90a96d6c8b0bc128626a9d6834bb4 | max_forks_repo_licenses: ["0BSD"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2020-02-03T07:51:12.000Z | max_forks_repo_forks_event_max_datetime: 2021-02-14T19:13:07.000Z
import re
from . import tables
from .instr import Instruction
from .instr.nop import *
from .instr.alu import *
from .instr.bcd import *
from .instr.bit import *
from .instr.flag import *
from .instr.mov import *
from .instr.smov import *
from .instr.ld_st import *
from .instr.stack import *
from .instr.jmp import *
from .instr.call import *
from .instr.ctx import *
from .instr.trap import *
enumerations = {
'R': tables.rx_ax,
'I': tables.dsp8_dsp16_abs16,
'6': tables.dsp8_abs16,
'7': tables.r0x_r0y_dsp8_abs16,
'8': tables.r0x_dsp8_abs16,
'A': tables.reg16_dsp8_dsp16_dsp20_abs16,
'E': tables.reg8l_dsp8_dsp16_abs16,
'N': tables.reg8_dsp8_dsp16_abs16,
'C': tables.creg,
'J': tables.cnd_j3,
'K': tables.cnd_j4,
'M': tables.cnd_bm4,
}
encodings = {
'0111_011z_1111_dddd': AbsReg,
'0111_011z_0110_dddd': AdcImm,
'1011_000z_ssss_dddd': AdcReg,
'0111_011z_1110_dddd': Adcf,
'0111_011z_0100_dddd': AddImm,
'1100_100z_iiii_dddd': AddImm4,
'1000_0DDD;8': AddImm8,
'1010_000z_ssss_dddd': AddReg,
'0010_0DSS;7': AddReg8,
'0111_110z_1110_1011': AddImmSP,
'0111_1101_1011_iiii': AddImm4SP,
'1111_100z_iiii_dddd': Adjnz,
'0111_011z_0010_dddd': AndImm,
'1001_0DDD;8': AndImm8,
'1001_000z_ssss_dddd': AndReg,
'0001_0DSS;7': AndReg8,
'0111_1110_0100_ssss': Band,
'0111_1110_1000_dddd': Bclr,
'0100_0bbb': BclrSB,
'0111_1110_0010_dddd': Bmcnd,
'0111_1101_1101_CCCC;M': BmcndC,
'0111_1110_0101_ssss': Bnand,
'0111_1110_0111_ssss': Bnor,
'0111_1110_1010_dddd': Bnot,
'0101_0bbb': BnotSB,
'0111_1110_0011_ssss': Bntst,
'0111_1110_1101_ssss': Bnxor,
'0111_1110_0110_ssss': Bor,
'0111_1110_1001_dddd': Bset,
'0100_1bbb': BsetSB,
'0111_1110_1011_ssss': Btst,
'0101_1bbb': BtstSB,
'0111_1110_0000_dddd': Btstc,
'0111_1110_0001_dddd': Btsts,
'0111_1110_1100_ssss': Bxor,
'0000_0000': Brk,
'0111_011z_1000_dddd': CmpImm,
'1101_000z_iiii_dddd': CmpImm4,
'1110_0DDD;8': CmpImm8,
'1100_000z_ssss_dddd': CmpReg,
'0011_1DSS;7': CmpReg8,
'0111_1100_1110_1110': DadcImm8,
'0111_1101_1110_1110': DadcImm16,
'0111_1100_1110_0110': DadcReg8,
'0111_1101_1110_0110': DadcReg16,
'0111_1100_1110_1100': DaddImm8,
'0111_1101_1110_1100': DaddImm16,
'0111_1100_1110_0100': DaddReg8,
'0111_1101_1110_0100': DaddReg16,
'1010_1DDD;8': Dec,
'1111_d010': DecAdr,
'0111_110z_1110_0001': DivImm,
'0111_011z_1101_ssss': DivReg,
'0111_110z_1110_0000': DivuImm,
'0111_011z_1100_ssss': DivuReg,
'0111_110z_1110_0011': DivxImm,
'0111_011z_1001_ssss': DivxReg,
'0111_1100_1110_1111': DsbbImm8,
'0111_1101_1110_1111': DsbbImm16,
'0111_1100_1110_0111': DsbbReg8,
'0111_1101_1110_0111': DsbbReg16,
'0111_1100_1110_1101': DsubImm8,
'0111_1101_1110_1101': DsubImm16,
'0111_1100_1110_0101': DsubReg8,
'0111_1101_1110_0101': DsubReg16,
'0111_1100_1111_0010': Enter,
'0111_1101_1111_0010': Exitd,
'0111_1100_0110_DDDD;E': Exts,
'0111_1100_1111_0011': ExtsR0,
'1110_1011_0fff_0101': Fclr,
'1110_1011_0fff_0100': Fset,
'1010_0DDD;8': Inc,
'1011_d010': IncAdr,
'1110_1011_11ii_iiii': Int,
'1111_0110': Into,
'0110_1CCC;J': Jcnd1,
'0111_1101_1100_CCCC;K': Jcnd2,
'0110_0iii': Jmp3,
'1111_1110': Jmp8,
'1111_0100': Jmp16,
'1111_1100': JmpAbs,
'0111_1101_0010_ssss': Jmpi,
'0111_1101_0000_SSSS;A': JmpiAbs,
'1110_1110': Jmps,
'1111_0101': Jsr16,
'1111_1101': JsrAbs,
'0111_1101_0011_ssss': Jsri,
'0111_1101_0001_SSSS;A': JsriAbs,
'1110_1111': Jsrs,
'1110_1011_0DDD;C_0000': LdcImm,
'0111_1010_1DDD;C_ssss': LdcReg,
'0111_1100_1111_0000': Ldctx,
'0111_010z_1000_dddd': Lde,
'0111_010z_1001_dddd': LdeA0,
'0111_010z_1010_dddd': LdeA1A0,
'0111_1101_1010_0iii': Ldipl,
'0111_010z_1100_dddd': MovImmReg,
'1101_100z_iiii_dddd': MovImm4Reg,
'1100_0DDD;8': MovImm8Reg,
'1110_d010': MovImm8Adr,
'1010_d010': MovImm16Adr,
'1011_0DDD;8': MovZero8Reg,
'0111_001z_ssss_dddd': MovRegReg,
'0011_0dss': MovRegAdr,
'0000_0sDD;6': MovReg8Reg,
'0000_1DSS;7': MovRegReg8,
'0111_010z_1011_dddd': MovIndSPReg,
'0111_010z_0011_ssss': MovRegIndSP,
'1110_1011_0DDD;R_SSSS;I': Mova,
'0111_1100_10rr_DDDD;N': MovdirR0LReg,
'0111_1100_00rr_SSSS;N': MovdirRegR0L,
'0111_110z_0101_dddd': MulImm,
'0111_100z_ssss_dddd': MulReg,
'0111_110z_0100_dddd': MuluImm,
'0111_000z_ssss_dddd': MuluReg,
'0111_010z_0101_dddd': NegReg,
'0000_0100': Nop,
'0111_010z_0111_dddd': NotReg,
'1011_1DDD;8': NotReg8,
'0111_011z_0011_dddd': OrImm,
'1001_1DDD;8': OrImm8,
'1001_100z_ssss_dddd': OrReg,
'0001_1DSS;7': OrReg8,
'0111_010z_1101_dddd': Pop,
'1001_d010': PopReg8,
'1101_d010': PopAdr,
'1110_1011_0DDD;C_0011': Popc,
'1110_1101': Popm,
'0111_110z_1110_0010': PushImm,
'0111_010z_0100_ssss': Push,
'1000_s010': PushReg8,
'1100_s010': PushAdr,
'0111_1101_1001_SSSS;I': Pusha,
'1110_1011_0SSS;C_0010': Pushc,
'1110_1100': Pushm,
'1111_1011': Reit,
'0111_110z_1111_0001': Rmpa,
'1110_000z_iiii_dddd': RotImm4,
'0111_010z_0110_dddd': RotR1H,
'0111_011z_1010_dddd': Rolc,
'0111_011z_1011_dddd': Rorc,
'1111_0011': Rts,
'0111_011z_0111_dddd': SbbImm,
'1011_100z_ssss_dddd': SbbReg,
'1111_000z_iiii_dddd': ShaImm4,
'0111_010z_1111_dddd': ShaR1H,
'1110_1011_101d_iiii': Sha32Imm4,
'1110_1011_001d_0001': Sha32R1H,
'1110_100z_iiii_dddd': ShlImm4,
'0111_010z_1110_dddd': ShlR1H,
'1110_1011_100d_iiii': Shl32Imm4,
'1110_1011_000d_0001': Shl32R1H,
'0111_110z_1110_1001': Smovb,
'0111_110z_1110_1000': Smovf,
'0111_110z_1110_1010': Sstr,
'0111_1011_1SSS;C_dddd': StcReg,
'0111_1100_1100_DDDD;A': StcPc,
'0111_1101_1111_0000': Stctx,
'0111_010z_0000_ssss': Ste,
'0111_010z_0001_ssss': SteA0,
'0111_010z_0010_ssss': SteA1A0,
'1101_0DDD;8': Stnz,
'1100_1DDD;8': Stz,
'1101_1DDD;8': Stzx,
'0111_011z_0101_dddd': SubImm,
'1000_1DDD;8': SubImm8,
'1010_100z_ssss_dddd': SubReg,
'0010_1DSS;7': SubReg8,
'0111_011z_0000_dddd': TstImm,
'1000_000z_ssss_dddd': TstReg,
'1111_1111': Und,
'0111_1101_1111_0011': Wait,
'0111_101z_00ss_dddd': Xchg,
'0111_011z_0001_dddd': XorImm,
'1000_100z_ssss_dddd': XorReg,
}
def generate_tables():
    for encoding, instr in encodings.items():
        def expand_encoding(table, parts):
            part, *parts = parts
            if ';' in part:
                part, enum = part.split(';', 2)
            else:
                enum = ''
            assert len(part) == 4 and len(enum) <= 1

            chunks = []
            try:
                chunks.append(int(part, 2))
            except ValueError:
                wildcard_part = re.sub(r'[A-Z]', '0', part)
                instr_code = int(re.sub(r'[^01]', '0', wildcard_part), 2)
                instr_mask = int(re.sub(r'[^01]', '0', wildcard_part.replace('0', '1')), 2)
                operand_mask = int(re.sub(r'[^01]', '1', wildcard_part.replace('1', '0')), 2)
                operand_code = 0
                while True:
                    chunks.append(instr_code | operand_code)
                    if operand_code == operand_mask:
                        break
                    # The following line cleverly uses carries to make a counter only from the bits
                    # that are set in `operand_mask`. To understand it, consider that `instr_mask`
                    # is the inverse of `operand_mask`, and adding 1 to a 011...1 chunk changes it
                    # into a 100...0 chunk.
                    operand_code = ((operand_code | instr_mask) + 1) & operand_mask

            if enum:
                shift = 4 - re.search(r'[A-Z]+', part).end()
                chunks, chunk_templates = [], chunks
                for template in chunk_templates:
                    for legal_bits in enumerations[enum]:
                        chunks.append(template | (legal_bits << shift))

            for chunk in chunks:
                if parts:
                    try:
                        subtable = table[chunk]
                    except KeyError:
                        subtable = table[chunk] = dict()
                    assert isinstance(subtable, dict)
                    expand_encoding(subtable, parts)
                else:
                    assert chunk not in table, "{} conflicts with {}".format(instr, table[chunk])
                    table[chunk] = instr

        parts = encoding.split('_')
        while re.match(r"^[a-z]+$", parts[-1]):
            parts.pop()
        expand_encoding(Instruction.opcodes, parts)


def print_assigned():
    def contract_encoding(table, parts):
        for part, entry in table.items():
            if isinstance(entry, dict):
                contract_encoding(entry, (*parts, part))
            else:
                encoding = '_'.join('{:04b}'.format(part) for part in (*parts, part))
                mnemonic = entry().name()
                print('{:20s} {}'.format(encoding, mnemonic))
    contract_encoding(Instruction.opcodes, ())


def print_unassigned():
    def contract_encoding(table, parts):
        unassigned = set(range(16))
        for part, entry in table.items():
            unassigned.remove(part)
            if isinstance(entry, dict):
                contract_encoding(entry, (*parts, part))
        for part in unassigned:
            print('_'.join('{:04b}'.format(part) for part in (*parts, part)))
    contract_encoding(Instruction.opcodes, ())


generate_tables()
# print_assigned()
# print_unassigned()
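# Editor's demonstration of the masked-counter trick used in expand_encoding
# above: ((code | instr_mask) + 1) & operand_mask steps a counter through
# exactly the bit patterns covered by operand_mask, in increasing order.
def _subset_values(operand_mask, instr_mask):
    code = 0
    while True:
        yield code
        if code == operand_mask:
            return
        code = ((code | instr_mask) + 1) & operand_mask

# For operand_mask 0b0101 (instr_mask 0b1010) this enumerates every
# assignment of the two wildcard bits: 0b0000, 0b0001, 0b0100, 0b0101.
assert list(_subset_values(0b0101, 0b1010)) == [0b0000, 0b0001, 0b0100, 0b0101]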
avg_line_length: 28.429799 | max_line_length: 99 | alphanum_fraction: 0.621951 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3723 | score_documentation: 0.375227

hexsha: f30593af5391112f0f58041cdf450a938ae282be | size: 797 | ext: py | lang: Python
max_stars_repo_path: class16.py | max_stars_repo_name: SamratAdhikari/Python_class_files | max_stars_repo_head_hexsha: 47053e39b81c0d8f7485790fea8711aa25727caf | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: class16.py | max_issues_repo_name: SamratAdhikari/Python_class_files | max_issues_repo_head_hexsha: 47053e39b81c0d8f7485790fea8711aa25727caf | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: class16.py | max_forks_repo_name: SamratAdhikari/Python_class_files | max_forks_repo_head_hexsha: 47053e39b81c0d8f7485790fea8711aa25727caf | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# import calculate
# import calculate as cal
# from calculate import diff as df
# from calculate import *
# print(cal.pi)
# pi = 3.1415
# print(diff(5,2))
# print(pi)
# print(calculate.pi)
# print(calculate.sum(3))
# print(calculate.div(2,1))
# print(abs(-23.21))
# print(math.ceil(5.23))
# print(dir(math))
# # print(dir(calculate))
# print(calculate.area_peri.__doc__)
import random as rd
# content = dir(rd)
# print(content)
def jumble(x):
xplit = x.split()
result = []
check = []
    while True:
index = rd.randint(0, len(xplit)-1)
if index not in check:
check.append(index)
result.append(xplit[index])
if len(check) == len(xplit):
print(result)
break
txt = input("Enter a string: ")
jumble(txt)
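# Note (hypothetical alternative, not part of the class exercise):
# random.sample draws without replacement and gives the same jumble in one
# call: print(rd.sample(x.split(), len(x.split())))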
| 20.435897
| 43
| 0.604768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 408
| 0.51192
|
f30618f542da8cbd2c4223847a99725100131374
| 901
|
py
|
Python
|
hsir/law.py
|
WenjieZ/wuhan-pneumonia
|
3d26955daa2deedec57cdd3effb3118531bbea7f
|
[
"BSD-3-Clause"
] | 6
|
2020-01-26T07:33:41.000Z
|
2020-02-25T22:15:43.000Z
|
hsir/law.py
|
WenjieZ/wuhan-pneumonia
|
3d26955daa2deedec57cdd3effb3118531bbea7f
|
[
"BSD-3-Clause"
] | 2
|
2020-02-17T16:12:50.000Z
|
2020-02-29T21:31:17.000Z
|
hsir/law.py
|
WenjieZ/wuhan-pneumonia
|
3d26955daa2deedec57cdd3effb3118531bbea7f
|
[
"BSD-3-Clause"
] | 1
|
2020-03-07T00:13:05.000Z
|
2020-03-07T00:13:05.000Z
|
from abc import ABCMeta, abstractmethod
import numpy as np
__all__ = ['Law', 'Bin', 'Poi', 'Gau']
class Law(metaclass=ABCMeta):
@staticmethod
@abstractmethod
def sample(n, d):
pass
@staticmethod
@abstractmethod
def loglikely(n, d, k):
pass
@staticmethod
def likelihood(n, d, k):
return np.exp(loglikely(n, d, k))
class Bin(Law):
def sample(n, d):
return np.random.binomial(n, d)
def loglikely(n, d, k):
return k*np.log(d) + (n-k)*np.log(1-d)
class Poi(Law):
def sample(n, d):
return np.random.poisson(n*d)
def loglikely(n, d, k):
return k*np.log(n*d) - n*d + k - k*np.log(1+k) # - np.sum(np.log(np.arange(k)+1))
class Gau(Law):
def sample(n, d=1):
return n * (1 + 0.1*np.random.randn())
def loglikely(n, d, k):
return -50 * np.log(k/n)**2
| 20.022222
| 89
| 0.54828
| 785
| 0.871254
| 0
| 0
| 226
| 0.250832
| 0
| 0
| 54
| 0.059933
|
f30640fd7966c16ad8a70aa7a32537803f35f977
| 3,172
|
py
|
Python
|
src/dummy/toga_dummy/widgets/canvas.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dummy/toga_dummy/widgets/canvas.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dummy/toga_dummy/widgets/canvas.py
|
Donyme/toga
|
2647c7dc5db248025847e3a60b115ff51d4a0d4a
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from .base import Widget
class Canvas(Widget):
def create(self):
self._action('create Canvas')
def set_on_draw(self, handler):
self._set_value('on_draw', handler)
def set_context(self, context):
self._set_value('context', context)
def line_width(self, width=2.0):
self._set_value('line_width', width)
def fill_style(self, color=None):
if color is not None:
            num = re.search(r'^rgba\((\d*\.?\d*), (\d*\.?\d*), (\d*\.?\d*), (\d*\.?\d*)\)$', color)
if num is not None:
r = num.group(1)
g = num.group(2)
b = num.group(3)
a = num.group(4)
rgba = str(r + ', ' + g + ', ' + b + ', ' + a)
self._set_value('fill_style', rgba)
else:
pass
# Support future colosseum versions
# for named_color, rgb in colors.NAMED_COLOR.items():
# if named_color == color:
            #         self._set_value('fill_style', color)
else:
# set color to black
self._set_value('fill_style', '0, 0, 0, 1')
def stroke_style(self, color=None):
self.fill_style(color)
def close_path(self):
self._action('close path')
def closed_path(self, x, y):
self._action('closed path', x=x, y=y)
def move_to(self, x, y):
self._action('move to', x=x, y=y)
def line_to(self, x, y):
self._action('line to', x=x, y=y)
def bezier_curve_to(self, cp1x, cp1y, cp2x, cp2y, x, y):
self._action('bezier curve to', cp1x=cp1x, cp1y=cp1y, cp2x=cp2x, cp2y=cp2y, x=x, y=y)
def quadratic_curve_to(self, cpx, cpy, x, y):
self._action('quadratic curve to', cpx=cpx, cpy=cpy, x=x, y=y)
def arc(self, x, y, radius, startangle, endangle, anticlockwise):
self._action('arc', x=x, y=y, radius=radius, startangle=startangle, endangle=endangle, anticlockwise=anticlockwise)
def ellipse(self, x, y, radiusx, radiusy, rotation, startangle, endangle, anticlockwise):
self._action('ellipse', x=x, y=y, radiusx=radiusx, radiusy=radiusy, rotation=rotation, startangle=startangle, endangle=endangle, anticlockwise=anticlockwise)
def rect(self, x, y, width, height):
self._action('rect', x=x, y=y, width=width, height=height)
# Drawing Paths
def fill(self, fill_rule, preserve):
self._set_value('fill rule', fill_rule)
if preserve:
self._action('fill preserve')
else:
self._action('fill')
def stroke(self):
self._action('stroke')
# Transformations
def rotate(self, radians):
self._action('rotate', radians=radians)
def scale(self, sx, sy):
self._action('scale', sx=sx, sy=sy)
def translate(self, tx, ty):
self._action('translate', tx=tx, ty=ty)
def reset_transform(self):
self._action('reset transform')
def write_text(self, text, x, y, font):
self._action('write text', text=text, x=x, y=y, font=font)
def rehint(self):
self._action('rehint Canvas')
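# Usage sketch (hypothetical; it relies only on the dummy backend recording
# calls through _set_value/_action as above):
#   canvas = Canvas()
#   canvas.fill_style('rgba(255, 0, 0, 0.5)')   # stores '255, 0, 0, 0.5'
#   canvas.rect(0, 0, 10, 10)
#   canvas.fill('nonzero', preserve=False)      # records the 'fill' action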
| 31.72
| 165
| 0.573455
| 3,133
| 0.987705
| 0
| 0
| 0
| 0
| 0
| 0
| 587
| 0.185057
|
f30707b9e83fba3280f410223a1fd7949422e724
| 164
|
py
|
Python
|
rpncalc/exceptions.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | null | null | null |
rpncalc/exceptions.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | 11
|
2021-11-10T04:28:51.000Z
|
2022-02-25T05:19:22.000Z
|
rpncalc/exceptions.py
|
newmanrs/rpncalc
|
8663e5221efd78c12889b6db4eda20821b27d52a
|
[
"MIT"
] | null | null | null |
class RpnCalcError(Exception):
"""Calculator Generic Exception"""
pass
class StackDepletedError(RpnCalcError):
""" Stack is out of items """
pass
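# Usage sketch (hypothetical caller): a calculator's pop() would raise
# StackDepletedError on an empty operand stack, while callers can catch
# the broader RpnCalcError to handle any calculator failure in one place.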
| 18.222222
| 39
| 0.682927
| 160
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.384146
|
f3075ca7074510343a47f280f9ff997c85f925fa
| 3,815
|
py
|
Python
|
tests/unit/schemas/test_base_schema_class.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 421
|
2015-06-02T16:29:59.000Z
|
2021-06-03T18:44:42.000Z
|
tests/unit/schemas/test_base_schema_class.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 404
|
2015-06-02T20:23:42.000Z
|
2019-08-21T16:59:41.000Z
|
tests/unit/schemas/test_base_schema_class.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 16
|
2015-06-16T17:21:02.000Z
|
2020-03-27T02:27:09.000Z
|
from unittest import TestCase
from schemer import Schema, Array, ValidationException
from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase
class TestDustySchemaClass(TestCase):
def setUp(self):
self.base_schema = Schema({'street': {'type': basestring},
'house_number': {'type': int, 'default': 1}})
self.bigger_schema = Schema({'address': {'type': self.base_schema, 'default': {}},
'first_name': {'type': basestring, 'required': True},
'last_name': {'type': basestring, 'default': 'johnson'}})
def test_init_invalid_doc(self):
doc = {'street': 'dogstoon',
'house_number': '1'}
with self.assertRaises(ValidationException):
DustySchema(self.base_schema, doc)
def test_valid_doc(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults(self):
doc = {'street': 'dogstoon'}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults_more_complicated_1(self):
doc = {'first_name': 'dusty'}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['first_name'], 'dusty')
self.assertEquals(dusty_schema['last_name'], 'johnson')
self.assertEquals(dusty_schema['address'], {'house_number': 1})
def test_setting_defaults_more_complicated_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['address']['street'], 'dogstoon')
self.assertEquals(dusty_schema['address']['house_number'], 1)
def test_in_1(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertTrue('first_name' in dusty_schema)
def test_in_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertFalse('first_names' in dusty_schema)
def test_keys(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['street', 'house_number']), set(dusty_schema.keys()))
def test_values(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['dogstoon', 1]), set(dusty_schema.values()))
class TestDustySpecsClass(DustyTestCase):
def test_finds_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_lib('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_lib('lib-a'), specs['libs']['lib-a'])
def test_raises_without_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
with self.assertRaises(KeyError):
specs.get_app_or_lib('non-existant-thingy')
def test_get_app_or_service(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_service('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_service('service-a'), specs['services']['service-a'])
| 42.865169
| 96
| 0.636173
| 3,616
| 0.947837
| 0
| 0
| 0
| 0
| 0
| 0
| 782
| 0.20498
|
f309247f76f7d18c28aea4b2f1973377cd29af7f
| 5,470
|
py
|
Python
|
Objected-Oriented Systems/Python_OOP_SDA/Task1.py
|
syedwaleedhyder/Freelance_Projects
|
7e2b85fc968850fc018014667b5ce9af0f00cb09
|
[
"MIT"
] | 1
|
2020-08-13T17:26:13.000Z
|
2020-08-13T17:26:13.000Z
|
Objected-Oriented Systems/Python_OOP_SDA/Task1.py
|
syedwaleedhyder/Freelance_Projects
|
7e2b85fc968850fc018014667b5ce9af0f00cb09
|
[
"MIT"
] | null | null | null |
Objected-Oriented Systems/Python_OOP_SDA/Task1.py
|
syedwaleedhyder/Freelance_Projects
|
7e2b85fc968850fc018014667b5ce9af0f00cb09
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod, abstractproperty
from datetime import datetime, date
class Item(metaclass=ABCMeta):
def __init__(self, code, name, quantity, cost, offer):
self.item_code=code
self.item_name=name
self.quantity_on_hand=quantity
self.cost_price=cost
self.on_offer=offer
@property
def quantity_on_hand(self): # implements the get - this name is *the* name
return self._quantity_on_hand
#
@quantity_on_hand.setter
def quantity_on_hand(self, value): # name must be the same
self._quantity_on_hand = value
@property
def cost_price(self): # implements the get - this name is *the* name
return self._cost_price
#
@cost_price.setter
def cost_price(self, value): # name must be the same
self._cost_price = value
    def changeOffer(self):
        if(self.on_offer == "Yes"):
            self.on_offer = "No"
        elif(self.on_offer == "No"):
            self.on_offer = "Yes"
@abstractmethod
def selling_price(self):
pass
@abstractmethod
def offer_price(self):
pass
@abstractmethod
def profit_margin(self):
pass
@abstractmethod
def discount_rate(self):
pass
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer"
else:
offer = "(No Offer)"
        string = self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " " + offer
return string
class Perishable(Item):
def __init__(self, code, name, quantity, cost, offer, expiry):
Item.__init__(self, code, name, quantity, cost, offer)
self.expiry_date = expiry
def profit_margin(self):
return self.cost_price * 0.25
def selling_price(self):
return self.cost_price + self.profit_margin()
def days_before_expiry(self):
now = datetime.now().date()
days = self.expiry_date- now
return days.days
def discount_rate(self):
days = self.days_before_expiry()
price = self.selling_price()
if(days < 15):
return price * 0.3
elif(days < 30):
return price * 0.2
        else:
            return price * 0.1
def offer_price(self):
if(self.on_offer == "No"):
            return self.selling_price()
return self.selling_price() - self.discount_rate()
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer**"
else:
offer = "(No Offer)"
string = self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " Price: $" + str(self.offer_price()) +" " + offer + " Expiry Date: " + self.expiry_date.strftime('%d %b %Y') + " Perishable Item"
return string
class NonPerishable(Item):
def __init__(self, code, name, quantity, cost, offer):
Item.__init__(self, code, name, quantity, cost, offer)
def profit_margin(self):
return self.cost_price * 0.3
def selling_price(self):
return self.cost_price + self.profit_margin()
def discount_rate(self):
return self.selling_price() * 0.1
def offer_price(self):
if(self.on_offer == "No"):
return self.selling_price()
return self.selling_price() - self.discount_rate()
def to_string(self):
if(self.on_offer == "Yes"):
offer = "**Offer**"
else:
offer = "(No Offer)"
string = self.item_code + " " + self.item_name + " Available= " + str(self.quantity_on_hand) + " Price: $" + str(self.offer_price()) +" " + offer + " Non Perishable Item"
return string
class Grocer:
def __init__(self):
self.items_list = []
def print_items(self):
for item in self.items_list:
print(item.to_string())
def add_to_list(self, item_to_be_added):
self.items_list.append(item_to_be_added)
return
def update_quantity_on_hand(self, item_code, new_quantity):
if(new_quantity < 0):
print("Quantity cannot be zero. Failed to update.")
return False
for item in self.items_list:
if(item.item_code == item_code):
item.quantity_on_hand = new_quantity
return True
perishable = Perishable("P101", "Real Raisins", 10, 2, "Yes", date(2018,12, 10))
non_perishable = NonPerishable("NP210", "Tan Baking Paper", 25, 2, "No")
perishable2 = Perishable("P105", "Eggy Soup Tofu", 14, 1.85, "Yes", date(2018,11, 26))
grocer = Grocer()
grocer.add_to_list(perishable)
grocer.add_to_list(non_perishable)
grocer.add_to_list(perishable2)
grocer.print_items()
grocer.update_quantity_on_hand("P105", 10)
print()
grocer.print_items()
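# Worked example of the pricing above (arithmetic only, dates are made up):
# Real Raisins cost 2.0, so the 25% perishable margin is 0.5 and the selling
# price is 2.5; with fewer than 15 days to expiry the discount is 30% of
# 2.5 = 0.75, giving an offer price of 1.75.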
####################################################################
# DISCUSSION
"""
Single Responsibility Principle:
1) In the Perishable class.
2) In the NonPerishable class.
Open Closed Principle
1) Abstract class Item is open to be extended
2) Abstract class Item is closed for modification
Interface Segregation Principle
1) For using Perishable items, users don't have to know anything about Non-perishable items.
2) For using Non-perishable items, users don't have to know the details of Perishable items.
Hence users are not forced to use methods they don't require.
"""
####################################################################
| 31.988304
| 233
| 0.609506
| 4,270
| 0.780622
| 0
| 0
| 700
| 0.127971
| 0
| 0
| 1,162
| 0.212431
|
f30949586393ae32e93e9cb38a2df996aa7486fd
| 1,116
|
py
|
Python
|
compose/production/mongodb_backup/scripts/list_dbs.py
|
IMTEK-Simulation/mongodb-backup-container-image
|
b0e04c03cab9321d6b4277ee88412938fec95726
|
[
"MIT"
] | null | null | null |
compose/production/mongodb_backup/scripts/list_dbs.py
|
IMTEK-Simulation/mongodb-backup-container-image
|
b0e04c03cab9321d6b4277ee88412938fec95726
|
[
"MIT"
] | null | null | null |
compose/production/mongodb_backup/scripts/list_dbs.py
|
IMTEK-Simulation/mongodb-backup-container-image
|
b0e04c03cab9321d6b4277ee88412938fec95726
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
host = 'mongodb'
port = 27017
ssl_ca_cert='/run/secrets/rootCA.pem'
ssl_certfile='/run/secrets/tls_cert.pem'
ssl_keyfile='/run/secrets/tls_key.pem'
# don't turn these signals into exceptions, just die.
# necessary for integrating into bash script pipelines seamlessly.
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# get administrator credentials
with open('/run/secrets/username','r') as f:
username = f.read()
with open('/run/secrets/password','r') as f:
password = f.read()
from pymongo import MongoClient
client = MongoClient(host, port,
ssl=True,
username=username,
password=password,
authSource=username, # assume admin database and admin user share name
ssl_ca_certs=ssl_ca_cert,
ssl_certfile=ssl_certfile,
ssl_keyfile=ssl_keyfile,
tlsAllowInvalidHostnames=True)
# Within the container environment, mongod runs on host 'mongodb'.
# That hostname, however, is not mentioned within the host certificate.
dbs = client.list_database_names()
for db in dbs:
print(db)
client.close()
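# Usage sketch: the SIG_DFL handlers above let this script sit quietly in a
# shell pipeline, e.g. (hypothetical invocation):
#   python3 list_dbs.py | head -n 1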
| 27.9
| 74
| 0.750896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 496
| 0.444444
|
f309f375f4df1f396c2fac2fda0007631441102b
| 1,087
|
py
|
Python
|
host.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | 1
|
2019-09-13T13:08:28.000Z
|
2019-09-13T13:08:28.000Z
|
host.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | null | null | null |
host.py
|
KeePinnnn/social_media_analytic
|
d13580c7dcfc87699bf42c0f870fefccc2f4c78b
|
[
"MIT"
] | null | null | null |
from flask import Flask, send_from_directory, request, Response, render_template, jsonify
from test import demo
import subprocess
import os
app = Flask(__name__, static_folder='static')
@app.route('/')
def home():
return "hello world"
@app.route('/detector')
def detector():
return send_from_directory(app.static_folder, 'index.html')
@app.route('/get_details', methods=['GET'])
def get_info():
username = request.args['username']
content = request.args['content']
if os.path.exists('./result.txt'):
os.remove('./result.txt')
if username and content:
subprocess.Popen(['python', 'test.py', username, content]).pid
while True:
if os.path.exists('./result.txt'):
with open('./result.txt', 'r') as file:
data = file.read()
if data:
return jsonify(result=data), 200
else:
continue
@app.route('/<path:path>', methods=['GET'])
def get_fonts(path):
return app.send_static_file(path)
if __name__ == "__main__":
app.run(debug=True)
| 26.512195
| 89
| 0.620055
| 0
| 0
| 0
| 0
| 831
| 0.764489
| 0
| 0
| 190
| 0.174793
|
f30ad04d785ff96d12b9344dbb04adb8373f99e0
| 5,985
|
py
|
Python
|
venv/lib/python3.6/site-packages/torch/_jit_internal.py
|
databill86/HyperFoods
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
[
"MIT"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
venv/lib/python3.6/site-packages/torch/_jit_internal.py
|
databill86/HyperFoods
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/torch/_jit_internal.py
|
databill86/HyperFoods
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
[
"MIT"
] | null | null | null |
"""
The weak_script annotation needs to be here instead of inside torch/jit/ so it
can be used in other places in torch/ (namely torch.nn) without running into
circular dependency problems
"""
import weakref
import inspect
try:
import builtins # PY3
except Exception:
import __builtin__ as builtins # PY2
# Tracks standalone weak script functions
_compiled_weak_fns = weakref.WeakKeyDictionary()
# Tracks which methods should be converted to strong methods
_weak_script_methods = weakref.WeakKeyDictionary()
# Converted modules and their corresponding WeakScriptModuleProxy objects
_weak_modules = weakref.WeakKeyDictionary()
# Types that have been declared as weak modules
_weak_types = weakref.WeakKeyDictionary()
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
_boolean_dispatched = weakref.WeakKeyDictionary()
COMPILATION_PENDING = object()
COMPILED = object()
def createResolutionCallback(frames_up=0):
"""
Creates a function which, given a string variable name,
returns the value of the variable in the scope of the caller of
the function which called createResolutionCallback (by default).
    This is used to enable access to in-scope Python variables inside
TorchScript fragments.
frames_up is number of additional frames to go up on the stack.
    The default value is 0, which corresponds to the frame of the caller
    of createResolutionCallback. For example, if frames_up is set
    to 1, then the frame of the caller's caller of createResolutionCallback
will be taken.
For example, the following program prints 2::
def bar():
cb = createResolutionCallback(1)
print(cb("foo"))
def baz():
foo = 2
bar()
baz()
"""
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
frame = frame.f_back
i += 1
f_locals = frame.f_locals
f_globals = frame.f_globals
def env(key):
if key in f_locals:
return f_locals[key]
elif key in f_globals:
return f_globals[key]
elif hasattr(builtins, key):
return getattr(builtins, key)
else:
return None
return env
def weak_script(fn, _frames_up=0):
"""
Marks a function as a weak script function. When used in a script function
or ScriptModule, the weak script function will be lazily compiled and
inlined in the graph. When not used in a script function, the weak script
annotation has no effect.
"""
_compiled_weak_fns[fn] = {
"status": COMPILATION_PENDING,
"compiled_fn": None,
"rcb": createResolutionCallback(_frames_up + 1)
}
return fn
def weak_module(cls):
_weak_types[cls] = {
"method_stubs": None
}
return cls
def weak_script_method(fn):
_weak_script_methods[fn] = {
"rcb": createResolutionCallback(frames_up=2),
"original_method": fn
}
return fn
def boolean_dispatch(arg_name, arg_index, default, if_true, if_false):
"""
Dispatches to either of 2 weak script functions based on a boolean argument.
In Torch Script, the boolean argument must be constant so that the correct
function to use can be determined at compile time.
"""
if _compiled_weak_fns.get(if_true) is None or _compiled_weak_fns.get(if_false) is None:
raise RuntimeError("both functions must be weak script")
def fn(*args, **kwargs):
dispatch_flag = False
if arg_name in kwargs:
dispatch_flag = kwargs[arg_name]
elif arg_index < len(args):
dispatch_flag = args[arg_index]
if dispatch_flag:
return if_true(*args, **kwargs)
else:
return if_false(*args, **kwargs)
if if_true.__doc__ is None and if_false.__doc__ is not None:
doc = if_false.__doc__
if_true.__doc__ = doc
elif if_false.__doc__ is None and if_true.__doc__ is not None:
doc = if_true.__doc__
if_false.__doc__ = doc
else:
raise RuntimeError("only one function can have a docstring")
fn.__doc__ = doc
_boolean_dispatched[fn] = {
"if_true": if_true,
"if_false": if_false,
"index": arg_index,
"default": default,
"arg_name": arg_name
}
return fn
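# Usage sketch (hypothetical weak script functions; exactly one branch may
# carry a docstring, as enforced above):
#   @weak_script
#   def _max_pool(input, use_indices):
#       ...
#   @weak_script
#   def _max_pool_with_indices(input, use_indices):
#       """Docstring lives on exactly one branch."""
#       ...
#   max_pool = boolean_dispatch('use_indices', 1, False,
#                               if_true=_max_pool_with_indices,
#                               if_false=_max_pool)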
try:
import typing
from typing import Tuple, List
def is_tuple(ann):
# For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
return ann.__module__ == 'typing' and \
(getattr(ann, '__origin__', None) is typing.Tuple or
getattr(ann, '__origin__', None) is tuple)
except ImportError:
# A minimal polyfill for versions of Python that don't have typing.
# Note that this means that they also don't support the fancy annotation syntax, so
# those instances will only be used in our tiny `type: ` comment interpreter.
# The __getitem__ in typing is implemented using metaclasses, but I'm too lazy for that.
class TupleCls(object):
def __getitem__(self, types):
return TupleInstance(types)
class TupleInstance(object):
def __init__(self, types):
setattr(self, '__args__', types)
class ListInstance(object):
def __init__(self, types):
setattr(self, '__args__', types)
    class ListCls(object):
        def __getitem__(self, types):
            return ListInstance(types)
Tuple = TupleCls()
List = ListCls()
def is_tuple(ann):
return isinstance(ann, TupleInstance)
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls(object):
def __getitem__(self, types):
return
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
globals()["BroadcastingList{}".format(i)] = BroadcastingList1
| 29.628713
| 92
| 0.671846
| 499
| 0.083375
| 0
| 0
| 0
| 0
| 0
| 0
| 2,626
| 0.438764
|
f30afc0871d71087c3fea4199baf57d7f3c9c853
| 706
|
py
|
Python
|
examples/qiushi.py
|
qDonl/Spider
|
ec7e7519b173b004314fc41cf1a65c2a662eb8d5
|
[
"Unlicense"
] | null | null | null |
examples/qiushi.py
|
qDonl/Spider
|
ec7e7519b173b004314fc41cf1a65c2a662eb8d5
|
[
"Unlicense"
] | null | null | null |
examples/qiushi.py
|
qDonl/Spider
|
ec7e7519b173b004314fc41cf1a65c2a662eb8d5
|
[
"Unlicense"
] | null | null | null |
import re, requests
def parse_page(url):
headers = {
'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36"
}
response = requests.get(url, headers=headers)
text = response.content.decode("utf-8")
contents = re.findall(r'<div class="content">.*?<span>(.*?)</span>', text, re.DOTALL)
for content in contents:
x = re.sub(r'<.*?>|\n', '', content)
print(x.strip())
print("===="*20)
def main():
base_url = "https://www.qiushibaike.com/text/page/{}/"
for x in range(1, 10):
url = base_url.format(x)
parse_page(url)
if __name__ == '__main__':
main()
| 30.695652
| 138
| 0.589235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.356941
|
f30c9db8e27b84a58028614e5f7dd98149676ac3
| 4,267
|
py
|
Python
|
benchmark/python/benchmark/benchmark_main.py
|
toschmidt/pg-cv
|
897909fdb2a7824137f2128c6bd98151f6ed3cf4
|
[
"MIT"
] | 3
|
2021-03-19T04:52:26.000Z
|
2021-09-13T14:11:44.000Z
|
benchmark/python/benchmark/benchmark_main.py
|
toschmidt/pg-cv
|
897909fdb2a7824137f2128c6bd98151f6ed3cf4
|
[
"MIT"
] | null | null | null |
benchmark/python/benchmark/benchmark_main.py
|
toschmidt/pg-cv
|
897909fdb2a7824137f2128c6bd98151f6ed3cf4
|
[
"MIT"
] | null | null | null |
from benchmark_query import BenchmarkQuery
from clear import ClearViews, ClearQuery, ClearPublic
from compare_query import CompareQuery
from database import Database
from setup import SetupPublic, SetupViews, SetupQuery
from timing import Timing
# remove all possible side effects of a query
def clear_query(db: Database, config: dict, query_type: str, query: str):
config['query_type'] = query_type
config['query'] = query
ClearQuery(db, config).execute()
ClearViews(db, config).execute()
db.commit()
# setup query and corresponding auxiliary tables needed for the maintenance approach
def setup_query(db: Database, config: dict, query_type: str, query: str):
config['query_type'] = query_type
config['query'] = query
SetupViews(db, config).execute()
SetupQuery(db, config).execute()
db.commit()
# benchmark a query and clear the result after that
def benchmark(db: Database, config: dict, query_type: str, query: str, timing=False):
clear_query(db, config, query_type, query)
setup_query(db, config, query_type, query)
config['query_type'] = query_type
config['query'] = query
BenchmarkQuery(db, config).execute(timing)
clear_query(db, config, query_type, query)
db.commit()
# check if the result of all maintenance approaches is identical
def compare(db: Database, config: dict, query: str):
for query_type in config['query_types']:
clear_query(db, config, query_type, query)
for query_type in config['query_types']:
setup_query(db, config, query_type, query)
config['query'] = query
CompareQuery(db, config).execute()
for query_type in config['query_types']:
clear_query(db, config, query_type, query)
db.commit()
def main(plpy, args: str):
# some config parameter
# the map is passed down to the functions
config = {'src': '/benchmark/sql',
'dataset': 'tpch',
'query_types': ['bv', 'cv', 'dv', 'ev', 'fv'],
'queries': ['q1', 'q3', 'q6', 'q15', 'q20'],
'batch_size': 1000,
'max_batch_size': 10000}
# perform some basic argument parsing
args = args.split()
operation = args[0]
timing = Timing(config)
db = Database(plpy, timing)
# load the TPC-H relations
if operation == 'setup':
SetupPublic(db, config).execute()
db.commit()
# create auxiliary tables, views and functions for the given maintenance approach
elif operation == 'setup_query' and len(args) == 3:
clear_query(db, config, args[1], args[2])
setup_query(db, config, args[1], args[2])
# check for correctness of the given query
elif operation == 'compare' and len(args) == 2:
compare(db, config, args[1])
# check correctness of all available queries
elif operation == 'compare_all' and len(args) == 2:
config['batch_size'] = int(args[1])
for query in config['queries']:
compare(db, config, query)
# benchmark the given query for the obtained batch_size
elif operation == 'benchmark' and len(args) == 3:
benchmark(db, config, args[1], args[2], True)
timing.save(db)
# benchmark all queries and all maintenance approaches for the given batch size
elif operation == 'benchmark_all' and len(args) == 3:
config['batch_size'] = int(args[2])
for query_type in config['query_types']:
for query in config['queries']:
# warmup (discard first three iterations)
benchmark(db, config, query_type, query, False)
benchmark(db, config, query_type, query, False)
benchmark(db, config, query_type, query, False)
for i in range(int(args[1])):
benchmark(db, config, query_type, query, True)
# write execution times to the database
timing.save(db)
# clear everything, including TPC-H relations
elif operation == 'clear':
for query_type in config['query_types']:
for query in config['queries']:
clear_query(db, config, query_type, query)
ClearPublic(db, config).execute()
db.commit()
else:
raise RuntimeError('Missing arguments!')
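# Usage sketch (hypothetical PL/Python invocation): the controller is meant
# to be driven from a plpython function, e.g.
#   main(plpy, 'benchmark_all 5 1000')
# which runs five timed iterations of every query/approach pair at batch
# size 1000 after three warmup runs.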
| 33.865079
| 85
| 0.644715
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,200
| 0.281228
|
f30dee16b7aab145441edae420bc159552e96a76
| 3,787
|
py
|
Python
|
nelpy/plotting/decoding.py
|
shayokdutta/nelpy_modified
|
8f3bd505beed570bfe917ed0a7f1d8c13f31b69a
|
[
"MIT"
] | null | null | null |
nelpy/plotting/decoding.py
|
shayokdutta/nelpy_modified
|
8f3bd505beed570bfe917ed0a7f1d8c13f31b69a
|
[
"MIT"
] | null | null | null |
nelpy/plotting/decoding.py
|
shayokdutta/nelpy_modified
|
8f3bd505beed570bfe917ed0a7f1d8c13f31b69a
|
[
"MIT"
] | null | null | null |
__all__ = ['plot_cum_error_dist']
import numpy as np
# import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import itertools
from . import palettes
# colors = itertools.cycle(npl.palettes.color_palette(palette="sweet", n_colors=15))
# from ..core import *
# from ..auxiliary import *
from .. import decoding
# from . import utils # import plotting/utils
def plot_cum_error_dist(*, cumhist=None, bincenters=None,
bst=None, extern=None, decodefunc=None,
k=None, transfunc=None, n_extern=None,
n_bins = None, extmin=None, extmax=None,
sigma=None, lw=None, ax=None, inset=True,
inset_ax=None, color=None, **kwargs):
"""Plot (and optionally compute) the cumulative distribution of
decoding errors, evaluated using a cross-validation procedure.
See Fig 3.(b) of "Analysis of Hippocampal Memory Replay Using Neural
Population Decoding", Fabian Kloosterman, 2012.
Parameters
----------
Returns
-------
"""
if ax is None:
ax = plt.gca()
if lw is None:
lw=1.5
if decodefunc is None:
decodefunc = decoding.decode1D
if k is None:
k=5
if n_extern is None:
n_extern=100
if n_bins is None:
n_bins = 200
if extmin is None:
extmin=0
if extmax is None:
extmax=100
if sigma is None:
sigma = 3
# Get the color from the current color cycle
if color is None:
line, = ax.plot(0, 0.5)
color = line.get_color()
line.remove()
# if cumhist or bincenters are NOT provided, then compute them
if cumhist is None or bincenters is None:
assert bst is not None, "if cumhist and bincenters are not given, then bst must be provided to recompute them!"
assert extern is not None, "if cumhist and bincenters are not given, then extern must be provided to recompute them!"
cumhist, bincenters = \
decoding.cumulative_dist_decoding_error_using_xval(
bst=bst,
extern=extern,
decodefunc=decoding.decode1D,
k=k,
transfunc=transfunc,
n_extern=n_extern,
extmin=extmin,
extmax=extmax,
sigma=sigma,
n_bins=n_bins)
# now plot results
ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
ax.set_xlim(bincenters[0], bincenters[-1])
ax.set_xlabel('error [cm]')
ax.set_ylabel('cumulative probability')
ax.set_ylim(0)
if inset:
if inset_ax is None:
inset_ax = inset_axes(parent_axes=ax,
width="60%",
height="50%",
loc=4,
borderpad=2)
inset_ax.plot(bincenters, cumhist, lw=lw, color=color, **kwargs)
# annotate inset
thresh1 = 0.7
bcidx = np.asscalar(np.argwhere(cumhist>thresh1)[0]-1)
inset_ax.hlines(thresh1, 0, bincenters[bcidx], color=color, alpha=0.9, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh1, color=color, alpha=0.9, linestyle='--')
inset_ax.set_xlim(0,12*np.ceil(bincenters[bcidx]/10))
thresh2 = 0.5
bcidx = np.asscalar(np.argwhere(cumhist>thresh2)[0]-1)
inset_ax.hlines(thresh2, 0, bincenters[bcidx], color=color, alpha=0.6, linestyle='--')
inset_ax.vlines(bincenters[bcidx], 0, thresh2, color=color, alpha=0.6, linestyle='--')
inset_ax.set_yticks((0,thresh1, thresh2, 1))
inset_ax.set_ylim(0)
return ax, inset_ax
return ax
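# Usage sketch (assumes cumhist and bincenters were precomputed elsewhere):
#   ax, inset_ax = plot_cum_error_dist(cumhist=cumhist, bincenters=bincenters)
# With inset=False only the main axis is returned.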
| 32.930435
| 125
| 0.601532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 930
| 0.245577
|
f30ee9cbdc128ebb414011f1922779899d37a824
| 77
|
py
|
Python
|
code/abc122_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/abc122_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/abc122_a_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
b=input()
print("A" if b=="T" else "T" if b=="A" else "G" if b=="C" else "C")
| 38.5
| 67
| 0.506494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.272727
|
f30f0fecb3a5195d2294443d51e5048fb142c4a9
| 847
|
py
|
Python
|
setup.py
|
carrasquel/wikipit
|
b8d2f870406eef866f68a4f7e5caca5398a671c2
|
[
"MIT"
] | 1
|
2020-05-17T14:53:23.000Z
|
2020-05-17T14:53:23.000Z
|
setup.py
|
carrasquel/wikipit
|
b8d2f870406eef866f68a4f7e5caca5398a671c2
|
[
"MIT"
] | 1
|
2020-05-18T21:58:06.000Z
|
2020-05-18T21:58:06.000Z
|
setup.py
|
carrasquel/wikipit
|
b8d2f870406eef866f68a4f7e5caca5398a671c2
|
[
"MIT"
] | 1
|
2020-05-17T18:15:48.000Z
|
2020-05-17T18:15:48.000Z
|
"""Setup specifications for gitignore project."""
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="wikipit",
version="1.0.4",
description="A Command Line Tool to Search Wikipedia in the terminal.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/carrasquel/wikipit",
author="Nelson Carrasquel",
license='MIT',
author_email="carrasquel@outlook.com",
py_modules=["wikipit"],
entry_points={
"console_scripts": [
"wikipit = wikipit:wiki"
]
},
install_requires=[
"wikipedia",
"Click"
]
)
| 24.911765
| 75
| 0.662338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 358
| 0.422668
|
f31108a183ca826267db22b5fdc9dd872d8b503e
| 1,469
|
py
|
Python
|
samples.py
|
daimeng/py-geode
|
a4146804e4def71a6b430e5a16f6e0b1a65deefe
|
[
"MIT"
] | null | null | null |
samples.py
|
daimeng/py-geode
|
a4146804e4def71a6b430e5a16f6e0b1a65deefe
|
[
"MIT"
] | 9
|
2018-11-15T00:44:11.000Z
|
2019-03-01T02:52:34.000Z
|
samples.py
|
daimeng/py-geode
|
a4146804e4def71a6b430e5a16f6e0b1a65deefe
|
[
"MIT"
] | null | null | null |
import aiohttp
import time
import ujson
import asyncio
import prettyprinter
import numpy as np
import pandas as pd
from geode.dispatcher import AsyncDispatcher
prettyprinter.install_extras(include=['dataclasses'])
pd.set_option('display.float_format', '{:.4f}'.format)
async def main():
client = await AsyncDispatcher.init()
s = time.time()
res = None
async with aiohttp.ClientSession(json_serialize=ujson.dumps) as session:
# if len(sys.argv) > 1 and sys.argv[1] == 'g':
# res = await client.batch_geocode([
# '500 Rutherford Ave, Charlestown MA',
# 'Cake Factory',
# '21 Henr St, Bristol, UK',
# 'TD Bank 250 Cambridge Street Boston, MA 02114',
# m.GeoPoint(lon=-94.5823, lat=34.1368)
# ], session=session)
# else:
res = await client.distance_matrix(
origins=np.array([
(37.1165, -92.2353),
(34.1368, -94.5823),
(37.1165, -92.2353)
]),
destinations=np.array([
(34.1368, -94.5823),
(36.3408, -96.0384),
(32.2834, -92.0286),
(32.2834, -92.0286)
]),
session=session,
provider='google')
t = time.time() - s
prettyprinter.pprint(res)
print('Duration: %dms' % (t * 1000))
if __name__ == '__main__':
asyncio.run(main())
| 27.203704
| 76
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 1,142
| 0.7774
| 408
| 0.27774
|
f3126093965615fe8a8564523762df648831f740
| 171
|
py
|
Python
|
functional_tests.py
|
idanmel/soccer_friends
|
db370c384e99308c5f6a39a18eac1556b83cc786
|
[
"MIT"
] | null | null | null |
functional_tests.py
|
idanmel/soccer_friends
|
db370c384e99308c5f6a39a18eac1556b83cc786
|
[
"MIT"
] | null | null | null |
functional_tests.py
|
idanmel/soccer_friends
|
db370c384e99308c5f6a39a18eac1556b83cc786
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
try:
assert 'Django' in browser.title
finally:
browser.close()
| 17.1
| 36
| 0.730994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.181287
|
f3128d3872baa827767bd09bf278c2956175ee90
| 963
|
py
|
Python
|
lorenzsj/blog/views.py
|
lorenzsj/lorenzsj
|
631c6632f8fe70a021836c52aafd8746e13fc8a8
|
[
"MIT"
] | null | null | null |
lorenzsj/blog/views.py
|
lorenzsj/lorenzsj
|
631c6632f8fe70a021836c52aafd8746e13fc8a8
|
[
"MIT"
] | null | null | null |
lorenzsj/blog/views.py
|
lorenzsj/lorenzsj
|
631c6632f8fe70a021836c52aafd8746e13fc8a8
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.response import Response
from blog.models import Post
from blog.serializers import PostSerializer
from blog.serializers import UserSerializer
from blog.permissions import IsAuthorOrReadOnly
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""This viewset automatically provides `list` and `detail` actions."""
queryset = User.objects.all()
serializer_class = UserSerializer
class PostViewSet(viewsets.ModelViewSet):
"""This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
"""
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = [
permissions.IsAuthenticatedOrReadOnly,
IsAuthorOrReadOnly,
]
def perform_create(self, serializer):
serializer.save(author=self.request.user)
| 31.064516
| 74
| 0.764278
| 627
| 0.65109
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.188993
|
f3133d707d13f1d41040304efdb1e48fd46e0e3f
| 4,270
|
py
|
Python
|
src/piminder_service/resources/db_autoinit.py
|
ZAdamMac/pyminder
|
059f57cb7cea4f517f77b1bbf391ce99f25d83bb
|
[
"MIT"
] | null | null | null |
src/piminder_service/resources/db_autoinit.py
|
ZAdamMac/pyminder
|
059f57cb7cea4f517f77b1bbf391ce99f25d83bb
|
[
"MIT"
] | 3
|
2021-05-05T21:08:24.000Z
|
2021-06-23T10:47:40.000Z
|
src/piminder_service/resources/db_autoinit.py
|
ZAdamMac/pyminder
|
059f57cb7cea4f517f77b1bbf391ce99f25d83bb
|
[
"MIT"
] | null | null | null |
"""
This script is a component of Piminder's back-end controller.
Specifically, it is a helper utility used to initialize a database with the user and message tables.
Author: Zac Adam-MacEwen (zadammac@kenshosec.com)
An Arcana Labs utility.
Produced under license.
Full license and documentation to be found at:
https://github.com/ZAdamMac/Piminder
"""
import bcrypt
import getpass
import os
import pymysql
__version__ = "1.0.0" # This is the version of service that we can init, NOT the version of the script itself.
spec_tables = [
"""CREATE TABLE `messages` (
`id` CHAR(36) NOT NULL,
`name` VARCHAR(255) NOT NULL,
`message` TEXT DEFAULT NULL,
`errorlevel` CHAR(5) DEFAULT NULL,
`time_raised` TIMESTAMP,
`read_flag` BIT DEFAULT 0,
PRIMARY KEY (`id`)
)""",
"""CREATE TABLE `users` (
`username` CHAR(36) NOT NULL,
`password` VARCHAR(255) NOT NULL,
`permlevel` INT(1) DEFAULT 1,
`memo` TEXT DEFAULT NULL,
PRIMARY KEY (`username`)
)"""
]
def connect_to_db():
"""Detects if it is necessary to prompt for the root password, and either way,
establishes the db connection, returning it.
:return:
"""
print("We must now connect to the database.")
try:
db_user = os.environ['PIMINDER_DB_USER']
except KeyError:
print("Missing envvar: Piminder_DB_USER")
exit(1)
root_password = None
try:
root_password = os.environ['PIMINDER_DB_PASSWORD']
except KeyError:
print("Missing envvar: Piminder_DB_PASSWORD")
exit(1)
try:
db_host = os.environ['PIMINDER_DB_HOST']
except KeyError:
print("Missing envvar: Piminder_DB_HOST")
exit(1)
finally:
conn = pymysql.connect(host=db_host, user=db_user,
password=root_password, db='Piminder',
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
return conn
def create_tables(list_tables, connection):
"""Accepts a list of create statements for tables and pushes them to the DB.
:param list_tables: A list of CREATE statements in string form.
:param connection: a pymysql.connect() object, such as returned by connect_to_db
:return:
"""
cursor = connection.cursor()
connection.begin()
for table in list_tables:
try:
cursor.execute(table)
except pymysql.err.ProgrammingError:
print("Error in the following statement; table was skipped.")
print(table)
except pymysql.err.OperationalError as error:
            if error.args[0] == 1050:  # This table already exists
print("%s, skipping" % error.args[1])
else:
print(error)
connection.commit()
def create_administrative_user(connection):
"""Creates an administrative user if it does not already exist.
:param connection:
:return:
"""
print("Validating an admin user exists:")
try:
admin_name = os.environ['PIMINDER_ADMIN_USER']
except KeyError:
print("Missing envvar: Piminder_ADMIN_USER")
exit(1)
cur = connection.cursor()
command = "SELECT count(username) AS howmany FROM users WHERE permlevel like 3;"
# Wait, how many admins are there?
cur.execute(command)
count = cur.fetchone()["howmany"]
if count < 1: # Only do this if no more than 0 exists.
command = "INSERT INTO users (username, password, memo, permlevel) VALUES (%s, %s, 'Default User', 3);"
try:
root_password = os.environ['PIMINDER_ADMIN_PASSWORD']
except KeyError:
print("Missing envvar: Piminder_ADMIN_PASSWORD")
exit(1)
hashed_rootpw = bcrypt.hashpw(root_password.encode('utf8'), bcrypt.gensalt())
cur.execute(command, (admin_name, hashed_rootpw))
print("Created administrative user: %s" % admin_name)
else:
print("Administrative user already exists, skipping.")
connection.commit()
def runtime():
print("Now Creating Tables")
mariadb = connect_to_db()
create_tables(spec_tables, mariadb)
create_administrative_user(mariadb)
mariadb.commit()
mariadb.close()
print("Done.")
| 31.865672
| 111
| 0.646136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,269
| 0.531382
|
f314e1c52a7971b18107dd68a650e6479dbddda8
| 7,455
|
py
|
Python
|
conftest.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | null | null | null |
conftest.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | null | null | null |
conftest.py
|
jirikuncar/renku-python
|
69df9ea1d5db3c63fd2ea3537c7e46d079360c8f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2017 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytest configuration."""
import json
import os
import shutil
import sys
import tempfile
import time
import types
import pytest
import responses
from click.testing import CliRunner
@pytest.fixture(scope='module')
def renku_path(tmpdir_factory):
"""Temporary instance path."""
path = str(tmpdir_factory.mktemp('renku'))
yield path
shutil.rmtree(path)
@pytest.fixture()
def instance_path(renku_path, monkeypatch):
"""Temporary instance path."""
orig_pwd = os.getcwd()
with monkeypatch.context() as m:
m.chdir(renku_path)
yield renku_path
@pytest.fixture()
def runner(monkeypatch):
"""Create a runner on isolated filesystem."""
from renku.cli._config import RENKU_HOME
monkeypatch.setenv('RENKU_CONFIG', RENKU_HOME)
return CliRunner()
@pytest.fixture()
def run(runner, capsys):
"""Return a callable runner."""
import contextlib
from renku import cli
@contextlib.contextmanager
def chdir(path):
"""Change the current working directory."""
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
class redirect_stdin(contextlib.ContextDecorator):
"""Implement missing redirect stdin based on ``contextlib.py``."""
_stream = 'stdin'
def __init__(self, new_target):
"""Keep the original stream."""
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
"""Change the stream value."""
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
"""Restore the stream value."""
setattr(sys, self._stream, self._old_targets.pop())
managers = {
'stdout': lambda path: contextlib.redirect_stdout(path.open('wb')),
'stderr': lambda path: contextlib.redirect_stderr(path.open('wb')),
'stdin':
lambda path: redirect_stdin(
path.open('rb') if not hasattr(path, 'read') else path
),
}
def generate(args=('update', ), cwd=None, **streams):
"""Generate an output."""
with capsys.disabled(), contextlib.ExitStack() as stack:
for name, stream in streams.items():
stack.enter_context(managers[name](stream))
if cwd is not None:
stack.enter_context(chdir(str(cwd)))
try:
cli.cli.main(
args=args,
prog_name=runner.get_default_prog_name(cli.cli),
)
except SystemExit as e:
return 0 if e.code is None else e.code
except Exception:
raise
return generate
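# Usage sketch inside a test (hypothetical args and paths):
#   exit_code = run(args=('status',), cwd=project, stdout=output_file)
#   assert exit_code == 0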
@pytest.fixture()
def isolated_runner(monkeypatch):
"""Create a runner on isolated filesystem."""
from renku.cli._config import RENKU_HOME
monkeypatch.setenv('RENKU_CONFIG', RENKU_HOME)
runner_ = CliRunner()
with runner_.isolated_filesystem():
yield runner_
@pytest.fixture()
def data_file(tmpdir):
"""Create a sample data file."""
p = tmpdir.mkdir('data').join('file')
p.write('1234')
return p
@pytest.fixture(scope='module')
def repository():
"""Yield a Renku repository."""
from renku import cli
from renku.api import LocalClient
runner = CliRunner()
with runner.isolated_filesystem() as project_path:
result = runner.invoke(cli.cli, ['init', '.'], catch_exceptions=False)
assert result.exit_code == 0
yield project_path
@pytest.fixture
def project(repository):
"""Create a test project."""
from git import Repo
repo = Repo(repository)
commit = repo.head.commit
os.chdir(repository)
yield repository
os.chdir(repository)
repo.head.reset(commit, index=True, working_tree=True)
# remove any extra non-tracked files (.pyc, etc)
repo.git.clean('-xdff')
@pytest.fixture()
def client(repository):
"""Return a Renku repository."""
from git import Repo
from renku.api import LocalClient
repo = Repo(repository)
commit = repo.head.commit
os.chdir(repository)
yield LocalClient(path=repository)
os.chdir(repository)
repo.head.reset(commit, index=True, working_tree=True)
# remove any extra non-tracked files (.pyc, etc)
repo.git.clean('-xdff')
@pytest.fixture()
def dataset(client):
"""Create a dataset."""
with client.with_dataset(name='dataset') as dataset:
dataset.authors = {
'name': 'me',
'email': 'me@example.com',
}
return dataset
@pytest.fixture()
def dataset_responses():
"""Authentication responses."""
with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
def request_callback(request):
return (200, {'Content-Type': 'application/text'}, '1234')
rsps.add_callback(
responses.GET,
'http://example.com/file',
callback=request_callback
)
rsps.add_callback(
responses.GET,
'https://example.com/file',
callback=request_callback
)
yield rsps
@pytest.fixture(scope='module')
def directory_tree(tmpdir_factory):
"""Create a test directory tree."""
# initialize
p = tmpdir_factory.mktemp('directory_tree')
p.join('file').write('1234')
p.join('dir2').mkdir()
p.join('dir2/file2').write('5678')
return p
@pytest.fixture(scope='module')
def data_repository(directory_tree):
"""Create a test repo."""
from git import Repo, Actor
# initialize
repo = Repo.init(directory_tree.strpath)
# add a file
repo.index.add([directory_tree.join('file').strpath])
repo.index.commit('test commit', author=Actor('me', 'me@example.com'))
# commit changes to the same file with a different user
directory_tree.join('file').write('5678')
repo.index.add([directory_tree.join('file').strpath])
repo.index.commit('test commit', author=Actor('me2', 'me2@example.com'))
# commit a second file
repo.index.add([directory_tree.join('dir2/file2').strpath])
repo.index.commit('test commit', author=Actor('me', 'me@example.com'))
# return the repo
return repo
@pytest.fixture(autouse=True)
def add_client(doctest_namespace):
"""Add Renku client to doctest namespace."""
from renku.api import LocalClient
doctest_namespace['client'] = LocalClient(path=tempfile.mkdtemp())
| 27.921348
| 78
| 0.643997
| 777
| 0.104155
| 4,399
| 0.589678
| 6,474
| 0.867828
| 0
| 0
| 2,232
| 0.299196
|
f315277c03047d954514d2d9908c6f026aae74fa
| 624
|
py
|
Python
|
kuchinawa/Logger.py
|
threemeninaboat3247/kuchinawa
|
81094e358e4dad9529a15fa526f2307caaceb82e
|
[
"MIT"
] | 4
|
2017-11-29T04:14:19.000Z
|
2022-01-21T13:00:23.000Z
|
kuchinawa/Logger.py
|
threemeninaboat3247/kuchinawa
|
81094e358e4dad9529a15fa526f2307caaceb82e
|
[
"MIT"
] | 3
|
2018-05-07T14:49:29.000Z
|
2018-05-08T11:49:17.000Z
|
kuchinawa/Logger.py
|
threemeninaboat3247/kuchinawa
|
81094e358e4dad9529a15fa526f2307caaceb82e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" --- Description ---
Module:
Logger.py
Abstract:
A module for logging
Modified:
threemeninaboat3247 2018/04/30
--- End ---
"""
# Standard library imports
import logging
logger = logging.getLogger('Kuchinawa Log')
# Set the log level
logger.setLevel(10)
# Set the log file destination
fh = logging.FileHandler('kuchinawa.log')
logger.addHandler(fh)
# Configure console output for the log
sh = logging.StreamHandler()
logger.addHandler(sh)
# Set the log output format
formatter = logging.Formatter('%(asctime)s:%(lineno)d:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
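# Usage sketch: messages sent through the shared logger reach both sinks.
#   logger.info('started')   # appears in kuchinawa.log and on the console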
| 20.129032
| 81
| 0.674679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.626404
|
f3159c44193bd89a772b6f2bca9dbffb2ffaa8bc
| 5,933
|
py
|
Python
|
test/search/capacity.py
|
sbutler/spotseeker_server
|
02bd2d646eab9f26ddbe8536b30e391359796c9c
|
[
"Apache-2.0"
] | null | null | null |
test/search/capacity.py
|
sbutler/spotseeker_server
|
02bd2d646eab9f26ddbe8536b30e391359796c9c
|
[
"Apache-2.0"
] | null | null | null |
test/search/capacity.py
|
sbutler/spotseeker_server
|
02bd2d646eab9f26ddbe8536b30e391359796c9c
|
[
"Apache-2.0"
] | null | null | null |
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo, SpotType
import simplejson as json
from django.test.utils import override_settings
from mock import patch
from django.core import cache
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')
class SpotSearchCapacityTest(TestCase):
def test_capacity(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
spot1 = Spot.objects.create(name="capacity: 1", capacity=1)
spot1.save()
spot2 = Spot.objects.create(name="capacity: 2", capacity=2)
spot2.save()
spot3 = Spot.objects.create(name="capacity: 3", capacity=3)
spot3.save()
spot4 = Spot.objects.create(name="capacity: 4", capacity=4)
spot4.save()
spot5 = Spot.objects.create(name="capacity: 50", capacity=50)
spot5.save()
c = Client()
response = c.get("/api/v1/spot", {'capacity': '', 'name': 'capacity'})
self.assertEquals(response["Content-Type"], "application/json", "Has the json header")
spots = json.loads(response.content)
has_1 = False
has_2 = False
has_3 = False
has_4 = False
has_5 = False
for spot in spots:
if spot['id'] == spot1.pk:
has_1 = True
if spot['id'] == spot2.pk:
has_2 = True
if spot['id'] == spot3.pk:
has_3 = True
if spot['id'] == spot4.pk:
has_4 = True
if spot['id'] == spot5.pk:
has_5 = True
self.assertEquals(has_1, True)
self.assertEquals(has_2, True)
self.assertEquals(has_3, True)
self.assertEquals(has_4, True)
self.assertEquals(has_5, True)
response = c.get("/api/v1/spot", {'capacity': '1'})
self.assertEquals(response["Content-Type"], "application/json", "Has the json header")
spots = json.loads(response.content)
has_1 = False
has_2 = False
has_3 = False
has_4 = False
has_5 = False
for spot in spots:
if spot['id'] == spot1.pk:
has_1 = True
if spot['id'] == spot2.pk:
has_2 = True
if spot['id'] == spot3.pk:
has_3 = True
if spot['id'] == spot4.pk:
has_4 = True
if spot['id'] == spot5.pk:
has_5 = True
self.assertEquals(has_1, True)
self.assertEquals(has_2, True)
self.assertEquals(has_3, True)
self.assertEquals(has_4, True)
self.assertEquals(has_5, True)
response = c.get("/api/v1/spot", {'capacity': '49'})
self.assertEquals(response["Content-Type"], "application/json", "Has the json header")
spots = json.loads(response.content)
has_1 = False
has_2 = False
has_3 = False
has_4 = False
has_5 = False
for spot in spots:
if spot['id'] == spot1.pk:
has_1 = True
if spot['id'] == spot2.pk:
has_2 = True
if spot['id'] == spot3.pk:
has_3 = True
if spot['id'] == spot4.pk:
has_4 = True
if spot['id'] == spot5.pk:
has_5 = True
self.assertEquals(has_1, False)
self.assertEquals(has_2, False)
self.assertEquals(has_3, False)
self.assertEquals(has_4, False)
self.assertEquals(has_5, True)
response = c.get("/api/v1/spot", {'capacity': '501'})
self.assertEquals(response["Content-Type"], "application/json", "Has the json header")
spots = json.loads(response.content)
has_1 = False
has_2 = False
has_3 = False
has_4 = False
has_5 = False
for spot in spots:
if spot['id'] == spot1.pk:
has_1 = True
if spot['id'] == spot2.pk:
has_2 = True
if spot['id'] == spot3.pk:
has_3 = True
if spot['id'] == spot4.pk:
has_4 = True
if spot['id'] == spot5.pk:
has_5 = True
self.assertEquals(has_1, False)
self.assertEquals(has_2, False)
self.assertEquals(has_3, False)
self.assertEquals(has_4, False)
self.assertEquals(has_5, False)
response = c.get("/api/v1/spot", {'capacity': '1', 'distance': '100', 'limit': '4'})
            # testing sorting by distance, which is impossible given no center
self.assertEquals(response.status_code, 400)
| 36.398773
| 98
| 0.532783
| 4,878
| 0.822181
| 0
| 0
| 4,953
| 0.834822
| 0
| 0
| 1,321
| 0.222653
|
f3163b561595dcd3e021c0a5f070a6337bbb8499
| 1,745
|
py
|
Python
|
model/k1_clustering_pre-processing.py
|
not-a-hot-dog/spotify_project
|
b928fecb136cffdd62c650b054ca543047800f11
|
[
"MIT"
] | null | null | null |
model/k1_clustering_pre-processing.py
|
not-a-hot-dog/spotify_project
|
b928fecb136cffdd62c650b054ca543047800f11
|
[
"MIT"
] | 1
|
2019-12-08T17:23:49.000Z
|
2019-12-08T17:23:49.000Z
|
model/k1_clustering_pre-processing.py
|
not-a-hot-dog/spotify_project
|
b928fecb136cffdd62c650b054ca543047800f11
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from model.helper_functions import build_playlist_features
print('Reading data into memory')
pid_list = np.genfromtxt('../data/train_pids.csv', skip_header=1, dtype=int)
playlistfile = '../data/playlists.csv'
playlist_df = pd.read_csv(playlistfile)
trackfile = '../data/songs_100000_feat_cleaned.csv'
track_df = pd.read_csv(trackfile, index_col='track_uri')
print('Finding playlist features')
playlist_features = build_playlist_features(pid_list, playlist_df, track_df)
playlist_features.to_csv('../data/playlist_features_train.csv')
print('Finding top artists')
# Find the artists who most often dominate a playlist and save the full table
top_playlist_defining_artists = playlist_features.artist_uri_top.value_counts()
top_playlist_defining_artists.to_csv('../data/top_playlist_defining_artists_train_all.csv', header=True)
# Keep only the 50 most common playlist-defining artists
top_playlist_defining_artists = top_playlist_defining_artists.index.values[:50]
np.savetxt('../data/top_playlist_defining_artists_train.csv', top_playlist_defining_artists, delimiter=',', fmt="%s")
# One-hot encode the dominant artists; boolean indexing plus index alignment
# leaves NaN for playlists whose top artist is not among the 50 kept
artists_to_keep = playlist_features.artist_uri_top.isin(top_playlist_defining_artists)
playlist_features.artist_uri_top = playlist_features.artist_uri_top[artists_to_keep]
playlist_features.artist_uri_freq = playlist_features.artist_uri_freq[artists_to_keep]
playlist_features.artist_uri_freq.fillna(0, inplace=True)
top_artist_dummies = pd.get_dummies(playlist_features.artist_uri_top)
playlist_features = pd.concat([playlist_features, top_artist_dummies], axis=1)
playlist_features.drop(['artist_uri_top'], axis=1, inplace=True)
playlist_features.to_csv('../data/playlist_features_with_artists_train.csv')
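# A hedged downstream sketch, not part of the original script: the file name
# suggests these features feed a k-means step next. Assuming scikit-learn,
# the saved CSV could be consumed roughly like this (the column handling and
# n_clusters value are illustrative assumptions):
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
features = pd.read_csv('../data/playlist_features_with_artists_train.csv', index_col=0)
X = StandardScaler().fit_transform(features.select_dtypes('number').fillna(0))
labels = KMeans(n_clusters=10, random_state=0).fit_predict(X)  # one cluster label per playlist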
avg_line_length: 52.878788 | max_line_length: 117 | alphanum_fraction: 0.837249
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 495 | score_documentation: 0.283668
hexsha: f3166c7800fb37b00a35784025071d85b46a881a | size: 731 | ext: py | lang: Python
max_stars_repo_path: app/main/__init__.py | max_stars_repo_name: a2hsh/udacity-fsnd-capstone | max_stars_repo_head_hexsha: 545f78111784756f469127bcb4a656306a7fe242 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min/max: null/null
max_issues_repo_path: app/main/__init__.py | max_issues_repo_name: a2hsh/udacity-fsnd-capstone | max_issues_repo_head_hexsha: 545f78111784756f469127bcb4a656306a7fe242 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event_min/max: null/null
max_forks_repo_path: app/main/__init__.py | max_forks_repo_name: a2hsh/udacity-fsnd-capstone | max_forks_repo_head_hexsha: 545f78111784756f469127bcb4a656306a7fe242 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event_min/max: null/null
# routes Blueprint
from flask import Blueprint
from flask_cors import CORS

# initializing the blueprint and allowing cross-origin requests from anywhere
main = Blueprint('main', __name__)
CORS(main, resources={r'*': {'origins': '*'}})


@main.after_request
def after_request(response):
    '''Attach CORS and content-type headers to every response.'''
    response.headers.add('Access-Control-Allow-Headers',
                         'Content-Type,Authorization,true')
    response.headers.add('Access-Control-Allow-Methods',
                         'GET,PATCH,POST,DELETE,OPTIONS')
    response.headers.add('Content-Type', 'application/json')
    return response


# importing routes at the bottom avoids a circular import: the route modules
# need the `main` blueprint defined above
from . import actors, movies, errors  # noqa: E402,F401
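# A hedged usage sketch, an assumption rather than this repository's actual
# factory code: an application module presumably registers the blueprint,
# roughly like this (create_app is a hypothetical name):
from flask import Flask


def create_app():
    # hypothetical factory, shown for illustration only
    app = Flask(__name__)
    app.register_blueprint(main)
    return app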
avg_line_length: 31.782609 | max_line_length: 73 | alphanum_fraction: 0.675787
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 402 | score_decorators: 0.549932 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 273 | score_documentation: 0.373461
hexsha: f316cbca5e61cde2ebe07f8eb9690a7626e13407 | size: 497 | ext: py | lang: Python
max_stars_repo_path: agenda/tests/test_models.py | max_stars_repo_name: migueleichler/django-tdd | max_stars_repo_head_hexsha: 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event_min/max: null/null
max_issues_repo_path: agenda/tests/test_models.py | max_issues_repo_name: migueleichler/django-tdd | max_issues_repo_head_hexsha: 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event_min/max: null/null
max_forks_repo_path: agenda/tests/test_models.py | max_forks_repo_name: migueleichler/django-tdd | max_forks_repo_head_hexsha: 5b8bd6088b5e2de4d70026b761391bce3aa52f32 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event_min/max: null/null
from django.test import TestCase
from agenda.models import Compromisso
from model_mommy import mommy


class CompromissoModelTest(TestCase):
    def setUp(self):
        self.instance = mommy.make('Compromisso')

    def test_string_representation(self):
        self.assertEqual(str(self.instance), self.instance.titulo)

    def test_obligatory_fields(self):
        # 'horario' is the only field needed to create a Compromisso
        created = Compromisso.objects.create(horario=self.instance.horario)
        self.assertIsInstance(created, Compromisso)
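# A hedged sketch, an assumption rather than the repository's actual model:
# a Compromisso definition consistent with these tests would make 'titulo'
# optional, require 'horario', and return the title from __str__, roughly:
#
#     from django.db import models
#
#     class Compromisso(models.Model):
#         titulo = models.CharField(max_length=200, blank=True)  # hypothetical field spec
#         horario = models.DateTimeField()
#
#         def __str__(self):
#             return self.titulo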
avg_line_length: 27.611111 | max_line_length: 75 | alphanum_fraction: 0.750503
count_classes: 392 | score_classes: 0.788732 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 13 | score_documentation: 0.026157
hexsha: f31b214b07d8c2680f0f9e730882cb62c105cf97 | size: 1,868 | ext: py | lang: Python
max_stars_repo_path: tests/test_crypto/test_registry/test_misc.py | max_stars_repo_name: valory-xyz/agents-aea | max_stars_repo_head_hexsha: 8f38efa96041b0156ed1ae328178e395dbabf2fc | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min/max: null/null
max_issues_repo_path: tests/test_crypto/test_registry/test_misc.py | max_issues_repo_name: valory-xyz/agents-aea | max_issues_repo_head_hexsha: 8f38efa96041b0156ed1ae328178e395dbabf2fc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min/max: null/null
max_forks_repo_path: tests/test_crypto/test_registry/test_misc.py | max_forks_repo_name: valory-xyz/agents-aea | max_forks_repo_head_hexsha: 8f38efa96041b0156ed1ae328178e395dbabf2fc | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min/max: null/null
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains misc tests for the registry (crypto/ledger_api/contract)."""
import logging
import pytest
from aea.crypto.registries.base import Registry
from aea.exceptions import AEAException
logger = logging.getLogger(__name__)
@pytest.mark.parametrize(
"current_id,is_valid",
[
("a", True),
("_", True),
("0", False),
("_0", True),
("-", False),
("ABCDE", True),
("author/package:0.1.0", True),
("author/package:0.1.", False),
("0author/package:0.1.0", False),
],
)
def test_validation_item_id(current_id, is_valid):
    """Test validation of item ids."""
registry = Registry()
entrypoint = "some_entrypoint:SomeEntrypoint"
if is_valid:
registry.register(current_id, entry_point=entrypoint)
else:
with pytest.raises(
AEAException,
match=rf"Malformed ItemId: '{current_id}'\. It must be of the form .*\.",
):
registry.register(current_id, entry_point=entrypoint)
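# A hedged illustration, not the library's actual pattern: a regex consistent
# with the parametrized cases above, where a bare item id is a Python-style
# identifier and the extended form is author/package:MAJOR.MINOR.PATCH.
import re

ITEM_ID_SKETCH = re.compile(
    r"^[A-Za-z_][A-Za-z0-9_]*"
    r"(/[A-Za-z_][A-Za-z0-9_]*:\d+\.\d+\.\d+)?$"
)


def is_valid_item_id_sketch(item_id: str) -> bool:
    """Check an item id against the illustrative pattern above."""
    return ITEM_ID_SKETCH.match(item_id) is not None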
avg_line_length: 31.133333 | max_line_length: 85 | alphanum_fraction: 0.599036
count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 796 | score_decorators: 0.426124 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,133 | score_documentation: 0.606531