hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d1e337949743c66a06ba7436ef71c6e7a5e537b | 925 | py | Python | built-in/TensorFlow/Research/recommendation/DeepFM_for_TensorFlow/configs/config.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | built-in/TensorFlow/Research/recommendation/DeepFM_for_TensorFlow/configs/config.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/recommendation/DeepFM_for_TensorFlow/configs/config.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | null | null | null | BASE_DIR = './'
num_gpu = 1
num_inputs = 39
num_features = 200000
batch_size = 16000
multi_hot_flags = [False]
multi_hot_len = 1
n_epoches = 20
iteration_per_loop = 10
#one_step = 60/iteration_per_loop # for one step debug
one_step = 0
line_per_sample = 1000
graph_path = "./"
# n_step_update = 10
#test_record = "/home/guohuifeng/sjtu-1023/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-1023/train.svm.1000w.tfrecord.1000perline"
#test_record = "/home/guohuifeng/sjtu-multi-card/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-multi-card/train.svm.1000w.tfrecord.1000perline"
#record_path = "./tf_record"
record_path = "/autotest/CI_daily/ModelZoo_DeepFM_TF/data/deepfm"
train_tag = 'train_part'
test_tag = 'test_part'
#record_path = "/home/guohuifeng/sjtu-multi-card"
#train_tag = 'train.svm'
#test_tag = 'test.svm'
train_size = 41257636
test_size = 4582981
| 26.428571 | 87 | 0.762162 | BASE_DIR = './'
num_gpu = 1
num_inputs = 39
num_features = 200000
batch_size = 16000
multi_hot_flags = [False]
multi_hot_len = 1
n_epoches = 20
iteration_per_loop = 10
#one_step = 60/iteration_per_loop # for one step debug
one_step = 0
line_per_sample = 1000
graph_path = "./"
# n_step_update = 10
#test_record = "/home/guohuifeng/sjtu-1023/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-1023/train.svm.1000w.tfrecord.1000perline"
#test_record = "/home/guohuifeng/sjtu-multi-card/test.svm.100w.tfrecord.1000perline"
#train_record = "/home/guohuifeng/sjtu-multi-card/train.svm.1000w.tfrecord.1000perline"
#record_path = "./tf_record"
record_path = "/autotest/CI_daily/ModelZoo_DeepFM_TF/data/deepfm"
train_tag = 'train_part'
test_tag = 'test_part'
#record_path = "/home/guohuifeng/sjtu-multi-card"
#train_tag = 'train.svm'
#test_tag = 'test.svm'
train_size = 41257636
test_size = 4582981
| 0 | 0 | 0 |
3f9aaa71d7e7327d95e2013a050f319f927640be | 1,803 | py | Python | app.py | stone-payments/athena | 15f548ce569be35e26ea4fd08488e0120da7a25e | [
"Apache-2.0"
] | 2 | 2019-06-04T17:25:24.000Z | 2019-07-10T15:05:10.000Z | app.py | stone-payments/athena | 15f548ce569be35e26ea4fd08488e0120da7a25e | [
"Apache-2.0"
] | 1 | 2021-06-01T21:45:31.000Z | 2021-06-01T21:45:31.000Z | app.py | stone-payments/athena | 15f548ce569be35e26ea4fd08488e0120da7a25e | [
"Apache-2.0"
] | 2 | 2018-06-11T18:48:01.000Z | 2020-06-22T12:40:29.000Z | from collections_edges import *
from collectors_and_savers.saver import SaverThread
from custom_configurations.config import *
from graphql_queries.graphql_queries import *
from mongodb_queries.mongodb_queries import *
from mongodb_connect.mongraph import *
from collection_modules.log_message import *
db = Mongraph(db_name=db_name, db_url=db_url, username=username, password=password, mongo_port=mongo_port,
hash_indexes=hash_indexes, hash_indexes_unique=hash_indexes_unique,
full_text_indexes=full_text_indexes)
save_queue = Queue(queue_max_size)
save_edges_name_queue = Queue(queue_max_size)
saver = SaverThread(db=db, queue=save_queue, edges_name_queue=save_edges_name_queue)
saver.start()
while True:
start_time = time.time()
job(orgs)
print("--- %s seconds ---" % (time.time() - start_time))
| 40.977273 | 106 | 0.700499 | from collections_edges import *
from collectors_and_savers.saver import SaverThread
from custom_configurations.config import *
from graphql_queries.graphql_queries import *
from mongodb_queries.mongodb_queries import *
from mongodb_connect.mongraph import *
from collection_modules.log_message import *
db = Mongraph(db_name=db_name, db_url=db_url, username=username, password=password, mongo_port=mongo_port,
hash_indexes=hash_indexes, hash_indexes_unique=hash_indexes_unique,
full_text_indexes=full_text_indexes)
save_queue = Queue(queue_max_size)
save_edges_name_queue = Queue(queue_max_size)
saver = SaverThread(db=db, queue=save_queue, edges_name_queue=save_edges_name_queue)
saver.start()
def job(orgs_list):
for org in orgs_list:
log.info(org + " Org")
org_collection(db, org, org_query, "Org")
log.info(org + " Repo")
repo(db, org, repo_query, "Repo")
log.info(org + " Dev")
dev(db, org, dev_query, "Dev")
log.info(org + " Teams")
teams(db, org, teams_query, "Teams")
log.info(org + " Teams_dev")
teams_dev(db, org, teams_dev_query, query_teams_dev_mongo, save_queue)
log.info(org + " Teams_repo")
teams_repo(db, org, teams_repo_query, query_teams_repo_mongo, save_queue)
log.info(org + " Commit")
commit_collector(db, org, commit_query, query_commit_mongo, "Commit", save_edges_name_queue)
log.info(org + " fork")
fork_collector(db, org, fork_query, query_fork_mongo, "Fork", save_edges_name_queue)
log.info(org + " issue")
issue(db, org, issue_query, issue_mongo, "Issue", save_edges_name_queue)
while True:
start_time = time.time()
job(orgs)
print("--- %s seconds ---" % (time.time() - start_time))
| 937 | 0 | 23 |
abb3449a5dfa589d6853531001a544c8fce20201 | 1,472 | py | Python | dragonfire/conversational/corpus/ubuntudata.py | kameranis/Dragonfire | 8ba4e588d3fd40257ed4e7ac9138f88d2bc0b3da | [
"MIT"
] | 1 | 2019-06-12T17:12:29.000Z | 2019-06-12T17:12:29.000Z | dragonfire/conversational/corpus/ubuntudata.py | kameranis/Dragonfire | 8ba4e588d3fd40257ed4e7ac9138f88d2bc0b3da | [
"MIT"
] | 2 | 2022-02-10T06:30:37.000Z | 2022-02-10T06:50:22.000Z | dragonfire/deepconv/corpus/ubuntudata.py | Allyn69/Dragonfire | 4c0e873e0bee3553bf14dfb1dded85e7fa515434 | [
"MIT"
] | 1 | 2019-01-16T05:10:38.000Z | 2019-01-16T05:10:38.000Z | import os
from tqdm import tqdm
"""
Ubuntu Dialogue Corpus
http://arxiv.org/abs/1506.08909
"""
class UbuntuData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.MAX_NUMBER_SUBDIR = 10
self.conversations = []
__dir = os.path.join(dirName, "dialogs")
number_subdir = 0
for sub in tqdm(os.scandir(__dir), desc="Ubuntu dialogs subfolders", total=len(os.listdir(__dir))):
if number_subdir == self.MAX_NUMBER_SUBDIR:
print("WARNING: Early stoping, only extracting {} directories".format(self.MAX_NUMBER_SUBDIR))
return
if sub.is_dir():
number_subdir += 1
for f in os.scandir(sub.path):
if f.name.endswith(".tsv"):
self.conversations.append({"lines": self.loadLines(f.path)})
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.rindex("\t")+1:].strip() # Strip metadata (timestamps, speaker names)
lines.append({"text": l})
return lines
| 26.285714 | 110 | 0.543478 | import os
from tqdm import tqdm
"""
Ubuntu Dialogue Corpus
http://arxiv.org/abs/1506.08909
"""
class UbuntuData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.MAX_NUMBER_SUBDIR = 10
self.conversations = []
__dir = os.path.join(dirName, "dialogs")
number_subdir = 0
for sub in tqdm(os.scandir(__dir), desc="Ubuntu dialogs subfolders", total=len(os.listdir(__dir))):
if number_subdir == self.MAX_NUMBER_SUBDIR:
print("WARNING: Early stoping, only extracting {} directories".format(self.MAX_NUMBER_SUBDIR))
return
if sub.is_dir():
number_subdir += 1
for f in os.scandir(sub.path):
if f.name.endswith(".tsv"):
self.conversations.append({"lines": self.loadLines(f.path)})
def loadLines(self, fileName):
"""
Args:
fileName (str): file to load
Return:
list<dict<str>>: the extracted fields for each line
"""
lines = []
with open(fileName, 'r') as f:
for line in f:
l = line[line.rindex("\t")+1:].strip() # Strip metadata (timestamps, speaker names)
lines.append({"text": l})
return lines
def getConversations(self):
return self.conversations
| 40 | 0 | 27 |
be2da8598b6eb480599d3a639e5beaceb7a6775a | 3,115 | py | Python | cea/interfaces/arcgis/CityEnergyAnalyst.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | cea/interfaces/arcgis/CityEnergyAnalyst.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | cea/interfaces/arcgis/CityEnergyAnalyst.py | pajotca/CityEnergyAnalyst | f3d0a08f7b5f5967961bf831625544a95c7702f0 | [
"MIT"
] | null | null | null | """
ArcGIS Toolbox for integrating the CEA with ArcGIS.
ArcGIS starts by creating an instance of Toolbox, which in turn names the tools to include in the interface.
These tools shell out to ``cli.py`` because the ArcGIS python version is old and can't be updated. Therefore
we would decouple the python version used by CEA from the ArcGIS version.
See the script ``install_toolbox.py`` for the mechanics of installing the toolbox into the ArcGIS system.
"""
import inspect
import cea.config
import cea.inputlocator
import cea.interfaces.arcgis.arcgishelper
reload(cea.interfaces.arcgis.arcgishelper)
from cea.interfaces.arcgis.arcgishelper import *
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Martin Mosteiro Romero", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
arcpy.env.overwriteOutput = True
class Toolbox(object):
"""List the tools to show in the toolbox."""
# here some magic: create the list of script classes based on the ``scripts.yml`` file.
# any tools that need more configuration can just be overwritten below.
import cea.scripts
for cea_script in cea.scripts.for_interface('arcgis'):
tool = create_cea_tool(cea_script)
globals()[tool.__name__] = tool
# ----------------------------------------------------------------------------------------------------------------------
# Redefine tools that need more than just the basic definition below.
# The name of the class should be the same as the name in the scripts.yml file with dashes removed and first letters
# capitalized and ending in "Tool"
class DemandTool(CeaTool):
"""integrate the demand script with ArcGIS"""
def override_parameter_info(self, parameter_info, parameter):
"""Override this method if you need to use a non-default ArcGIS parameter handling"""
if parameter.name == 'buildings':
# ignore this parameter in the ArcGIS interface
return None
return parameter_info
| 37.083333 | 120 | 0.685393 | """
ArcGIS Toolbox for integrating the CEA with ArcGIS.
ArcGIS starts by creating an instance of Toolbox, which in turn names the tools to include in the interface.
These tools shell out to ``cli.py`` because the ArcGIS python version is old and can't be updated. Therefore
we would decouple the python version used by CEA from the ArcGIS version.
See the script ``install_toolbox.py`` for the mechanics of installing the toolbox into the ArcGIS system.
"""
import inspect
import cea.config
import cea.inputlocator
import cea.interfaces.arcgis.arcgishelper
reload(cea.interfaces.arcgis.arcgishelper)
from cea.interfaces.arcgis.arcgishelper import *
__author__ = "Daren Thomas"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Daren Thomas", "Martin Mosteiro Romero", "Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
arcpy.env.overwriteOutput = True
class Toolbox(object):
"""List the tools to show in the toolbox."""
def __init__(self):
self.label = 'City Energy Analyst'
self.alias = 'cea'
self.tools = [tool for tool in globals().values()
if inspect.isclass(tool) and issubclass(tool, CeaTool) and not tool is CeaTool]
# here some magic: create the list of script classes based on the ``scripts.yml`` file.
# any tools that need more configuration can just be overwritten below.
import cea.scripts
for cea_script in cea.scripts.for_interface('arcgis'):
tool = create_cea_tool(cea_script)
globals()[tool.__name__] = tool
# ----------------------------------------------------------------------------------------------------------------------
# Redefine tools that need more than just the basic definition below.
# The name of the class should be the same as the name in the scripts.yml file with dashes removed and first letters
# capitalized and ending in "Tool"
class DemandTool(CeaTool):
"""integrate the demand script with ArcGIS"""
def __init__(self):
self.cea_tool = 'demand'
self.label = 'Demand'
self.description = 'Calculate the Demand'
self.category = 'Demand forecasting'
self.canRunInBackground = False
def override_parameter_info(self, parameter_info, parameter):
"""Override this method if you need to use a non-default ArcGIS parameter handling"""
if parameter.name == 'buildings':
# ignore this parameter in the ArcGIS interface
return None
return parameter_info
class RadiationDaysimTool(CeaTool):
def __init__(self):
self.cea_tool = 'radiation-daysim'
self.label = 'Solar radiation (Daysim engine)'
self.description = 'Use Daysim to calculate solar radiation for a scenario'
self.category = 'Energy potentials'
self.canRunInBackground = False
def override_parameter_info(self, parameter_info, parameter):
if parameter.name == 'buildings':
return None
else:
return parameter_info | 842 | 14 | 130 |
84db050390b63741d2af23cf8bf689b6b1c61075 | 204 | py | Python | Pytest/test/test_setup_teardown.py | koichi210/Python | 9bc0be009bec15499540c1bf9ae802ffe1acfe10 | [
"MIT"
] | null | null | null | Pytest/test/test_setup_teardown.py | koichi210/Python | 9bc0be009bec15499540c1bf9ae802ffe1acfe10 | [
"MIT"
] | null | null | null | Pytest/test/test_setup_teardown.py | koichi210/Python | 9bc0be009bec15499540c1bf9ae802ffe1acfe10 | [
"MIT"
] | null | null | null | from main.greeting import English,Spanish
import pytest
| 22.666667 | 43 | 0.735294 | from main.greeting import English,Spanish
import pytest
def test_English_greeting1():
assert English().greeting1() == 'Hello'
def test_English_greeting2():
assert English().greeting2() == 'Bye'
| 102 | 0 | 46 |
4de0c9a0b5e4267fc00eac9b3f4d73d0299cb74d | 3,928 | py | Python | junos_rest/parser.py | checktheroads/junos-rest | df9df95b13ac9d02e67c025a4e43d0ad112b7897 | [
"WTFPL"
] | 2 | 2020-01-24T21:54:50.000Z | 2020-03-07T23:36:53.000Z | junos_rest/parser.py | checktheroads/junos-rest | df9df95b13ac9d02e67c025a4e43d0ad112b7897 | [
"WTFPL"
] | null | null | null | junos_rest/parser.py | checktheroads/junos-rest | df9df95b13ac9d02e67c025a4e43d0ad112b7897 | [
"WTFPL"
] | null | null | null | """Parsing functions for JunOS XML/HTTP responses."""
# Third Party Imports
import xmltodict
from boltons.iterutils import remap
# Project Imports
from junos_rest.constants import RESULTS
_NAMESPACES = {
"http://xml.juniper.net/xnm/1.1/xnm:error": "error",
"http://xml.juniper.net/xnm/1.1/xnm:token": "token",
"http://xml.juniper.net/xnm/1.1/xnm:message": "message",
"@http://xml.juniper.net/junos/*/junos:style": "style",
"http://xml.juniper.net/xnm/1.1/xnm:line-number": "line-number",
"http://xml.juniper.net/xnm/1.1/xnm:column": "column",
"http://xml.juniper.net/xnm/1.1/xnm:statement": "statement",
"http://xml.juniper.net/xnm/1.1/xnm:edit-path": "edit-path",
"http://xml.juniper.net/xnm/1.1/xnm:source-daemon": "source-daemon",
}
_DELETE_KEYS = ("@xmlns",)
def _fix_keys(path, key, value):
"""Replace XML namespace keys with human-readable keys.
Also deletes unneeded keys. Used by remap function to iterate
through a dictionary, is run per-key.
"""
if key in _NAMESPACES:
return _NAMESPACES[key], value
elif key in _DELETE_KEYS:
return False
return key, value
def _remap_visit(path, key, value):
"""Process input dictionary.
Iterate through one level of child dictionaries, and one level of
list children.
"""
if isinstance(value, dict):
fixed_value = remap(value, visit=_fix_keys)
elif isinstance(value, list):
fixed_value = []
for item in value:
if isinstance(item, dict):
fixed_item = remap(item, visit=_fix_keys)
else:
fixed_item = item
fixed_value.append(fixed_item)
if key in _NAMESPACES:
fixed_key = _NAMESPACES[key]
fixed_value = value
elif key in _DELETE_KEYS:
return False
else:
fixed_key = key
fixed_value = value
return fixed_key, fixed_value
async def parse_xml(xml):
"""Parse raw XML string to dict.
Arguments:
xml {str} -- Raw XML
Returns:
{dict} -- XML as parsed dict
"""
parsed = xmltodict.parse(xml, dict_constructor=dict, process_namespaces=True)
mapped = remap(parsed, visit=_remap_visit)
return mapped
async def parse_results(response):
"""Parse raw HTTP response object for success/failure messages.
Arguments:
response {object} -- Raw httpx response object
Returns:
{dict} -- Constructed results dict
"""
parsed = await parse_xml(xml=RESULTS.format(results=response.content))
status = response.status_code
result = parsed.get("results")
if "error" in result or "error" in result.get("commit-results", {}):
error = result.get("error") or result["commit-results"].get("error")
if error is not None:
details, messages = error
output = {"status": "fail", "data": messages["message"], "detail": details}
else:
output = {
"status": "fail",
"data": "An unknown error occured",
"detail": [],
}
elif (
status == 200
and "commit-results" not in result
and "load-configuration-results" in result
):
load_success = result["load-configuration-results"].get("load-success", 1)
if load_success is None:
output = {"status": "success", "data": None}
elif load_success == 1:
output = {"status": "error", "message": response.text.strip()}
elif (
status == 200
and result["commit-results"]["routing-engine"].get("commit-success") is None
):
output = {"status": "success", "data": None}
elif status in range(200, 300) and not response.text:
output = {"status": "success", "data": None}
elif status in range(400, 600):
output = {"status": "error", "message": response.text}
return output
| 29.757576 | 87 | 0.613289 | """Parsing functions for JunOS XML/HTTP responses."""
# Third Party Imports
import xmltodict
from boltons.iterutils import remap
# Project Imports
from junos_rest.constants import RESULTS
_NAMESPACES = {
"http://xml.juniper.net/xnm/1.1/xnm:error": "error",
"http://xml.juniper.net/xnm/1.1/xnm:token": "token",
"http://xml.juniper.net/xnm/1.1/xnm:message": "message",
"@http://xml.juniper.net/junos/*/junos:style": "style",
"http://xml.juniper.net/xnm/1.1/xnm:line-number": "line-number",
"http://xml.juniper.net/xnm/1.1/xnm:column": "column",
"http://xml.juniper.net/xnm/1.1/xnm:statement": "statement",
"http://xml.juniper.net/xnm/1.1/xnm:edit-path": "edit-path",
"http://xml.juniper.net/xnm/1.1/xnm:source-daemon": "source-daemon",
}
_DELETE_KEYS = ("@xmlns",)
def _fix_keys(path, key, value):
"""Replace XML namespace keys with human-readable keys.
Also deletes unneeded keys. Used by remap function to iterate
through a dictionary, is run per-key.
"""
if key in _NAMESPACES:
return _NAMESPACES[key], value
elif key in _DELETE_KEYS:
return False
return key, value
def _remap_visit(path, key, value):
"""Process input dictionary.
Iterate through one level of child dictionaries, and one level of
list children.
"""
if isinstance(value, dict):
fixed_value = remap(value, visit=_fix_keys)
elif isinstance(value, list):
fixed_value = []
for item in value:
if isinstance(item, dict):
fixed_item = remap(item, visit=_fix_keys)
else:
fixed_item = item
fixed_value.append(fixed_item)
if key in _NAMESPACES:
fixed_key = _NAMESPACES[key]
fixed_value = value
elif key in _DELETE_KEYS:
return False
else:
fixed_key = key
fixed_value = value
return fixed_key, fixed_value
async def parse_xml(xml):
"""Parse raw XML string to dict.
Arguments:
xml {str} -- Raw XML
Returns:
{dict} -- XML as parsed dict
"""
parsed = xmltodict.parse(xml, dict_constructor=dict, process_namespaces=True)
mapped = remap(parsed, visit=_remap_visit)
return mapped
async def parse_results(response):
"""Parse raw HTTP response object for success/failure messages.
Arguments:
response {object} -- Raw httpx response object
Returns:
{dict} -- Constructed results dict
"""
parsed = await parse_xml(xml=RESULTS.format(results=response.content))
status = response.status_code
result = parsed.get("results")
if "error" in result or "error" in result.get("commit-results", {}):
error = result.get("error") or result["commit-results"].get("error")
if error is not None:
details, messages = error
output = {"status": "fail", "data": messages["message"], "detail": details}
else:
output = {
"status": "fail",
"data": "An unknown error occured",
"detail": [],
}
elif (
status == 200
and "commit-results" not in result
and "load-configuration-results" in result
):
load_success = result["load-configuration-results"].get("load-success", 1)
if load_success is None:
output = {"status": "success", "data": None}
elif load_success == 1:
output = {"status": "error", "message": response.text.strip()}
elif (
status == 200
and result["commit-results"]["routing-engine"].get("commit-success") is None
):
output = {"status": "success", "data": None}
elif status in range(200, 300) and not response.text:
output = {"status": "success", "data": None}
elif status in range(400, 600):
output = {"status": "error", "message": response.text}
return output
| 0 | 0 | 0 |
c2a4ae281d2e676fb1c503292ee3797a034eb4ce | 384 | py | Python | chapter-4/timeoutflow.py | outerbounds/dsbook | 411b55c2057a3ba1e1d893cde03d6ec97d529969 | [
"Apache-2.0"
] | 27 | 2021-05-29T14:36:34.000Z | 2022-03-22T10:12:40.000Z | chapter-4/timeoutflow.py | saibaldas/dsbook | be6b4670ed33a2001de8f28f6fb4151111cb26ca | [
"Apache-2.0"
] | null | null | null | chapter-4/timeoutflow.py | saibaldas/dsbook | be6b4670ed33a2001de8f28f6fb4151111cb26ca | [
"Apache-2.0"
] | 6 | 2021-05-29T14:36:40.000Z | 2022-03-09T14:57:46.000Z | from metaflow import FlowSpec, timeout, step, retry
import time
if __name__ == '__main__':
TimeoutFlow() | 19.2 | 51 | 0.578125 | from metaflow import FlowSpec, timeout, step, retry
import time
class TimeoutFlow(FlowSpec):
@retry
@timeout(seconds=5)
@step
def start(self):
for i in range(int(time.time() % 10)):
print(i)
time.sleep(1)
self.next(self.end)
@step
def end(self):
print('success!')
if __name__ == '__main__':
TimeoutFlow() | 136 | 116 | 23 |
07a86dd4b6595df924e8f400e51ac15395925039 | 850 | py | Python | ums/userapp/actions/__init__.py | hookehu/web | 637047ff47bf5df6ee3152e6976162bb8e85531c | [
"MIT"
] | null | null | null | ums/userapp/actions/__init__.py | hookehu/web | 637047ff47bf5df6ee3152e6976162bb8e85531c | [
"MIT"
] | null | null | null | ums/userapp/actions/__init__.py | hookehu/web | 637047ff47bf5df6ee3152e6976162bb8e85531c | [
"MIT"
] | null | null | null | #-*- coding:utf-8 -*-
import os
actions = {}
init_pkg()
| 25 | 49 | 0.515294 | #-*- coding:utf-8 -*-
import os
actions = {}
def init_pkg():
#print os.path.dirname(__file__)
files = os.listdir(os.path.dirname(__file__))
for file in files:
fns = file.split('.')
if len(fns) < 2:
continue
if fns[1] != 'py':
continue
if fns[0] == '__init__':
continue
#__all__.append(fns[0])
pkg_name = __name__ + "." + fns[0]
m = __import__(pkg_name, fromlist=('*'))
_ms = dir(m)
for name in _ms:
if "Action" not in name:
continue
actions[name] = getattr(m, name)
#print actions
init_pkg()
def register_builtin_actions(site):
for k, v in actions.items():
if not hasattr(v, 'url'):
continue
url = getattr(v, 'url')
site.register_view(url, v)
| 747 | 0 | 45 |
56000331a30f5b48371ea12bf125b26ee06aa709 | 1,950 | py | Python | third_party/WebKit/Tools/Scripts/webkitpy/common/html_diff.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | third_party/WebKit/Tools/Scripts/webkitpy/common/html_diff.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | third_party/WebKit/Tools/Scripts/webkitpy/common/html_diff.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for outputting a HTML diff of two multi-line strings.
The main purpose of this utility is to show the difference between
text baselines (-expected.txt files) and actual text results.
Note, in the standard library module difflib, there is also a HtmlDiff class,
although it outputs a larger and more complex HTML table than we need.
"""
import difflib
_TEMPLATE = """<html>
<head>
<style>.del { background: #faa; } .add { background: #afa; }</style>
</head>
<body>
<pre>%s</pre>
</body>
</html>
"""
def html_diff(a_text, b_text):
"""Returns a diff between two strings as HTML."""
# Diffs can be between multiple text files of different encodings
# so we always want to deal with them as byte arrays, not unicode strings.
assert isinstance(a_text, str)
assert isinstance(b_text, str)
a_lines = a_text.splitlines(True)
b_lines = b_text.splitlines(True)
return _TEMPLATE % html_diff_body(a_lines, b_lines)
| 30 | 78 | 0.69641 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for outputting a HTML diff of two multi-line strings.
The main purpose of this utility is to show the difference between
text baselines (-expected.txt files) and actual text results.
Note, in the standard library module difflib, there is also a HtmlDiff class,
although it outputs a larger and more complex HTML table than we need.
"""
import difflib
_TEMPLATE = """<html>
<head>
<style>.del { background: #faa; } .add { background: #afa; }</style>
</head>
<body>
<pre>%s</pre>
</body>
</html>
"""
def html_diff(a_text, b_text):
"""Returns a diff between two strings as HTML."""
# Diffs can be between multiple text files of different encodings
# so we always want to deal with them as byte arrays, not unicode strings.
assert isinstance(a_text, str)
assert isinstance(b_text, str)
a_lines = a_text.splitlines(True)
b_lines = b_text.splitlines(True)
return _TEMPLATE % html_diff_body(a_lines, b_lines)
def html_diff_body(a_lines, b_lines):
matcher = difflib.SequenceMatcher(None, a_lines, b_lines)
output = []
for tag, a_start, a_end, b_start, b_end in matcher.get_opcodes():
a_chunk = ''.join(a_lines[a_start:a_end])
b_chunk = ''.join(b_lines[b_start:b_end])
output.append(_format_chunk(tag, a_chunk, b_chunk))
return ''.join(output)
def _format_chunk(tag, a_chunk, b_chunk):
if tag == 'delete':
return _format_delete(a_chunk)
if tag == 'insert':
return _format_insert(b_chunk)
if tag == 'replace':
return _format_delete(a_chunk) + _format_insert(b_chunk)
assert tag == 'equal'
return a_chunk
def _format_insert(chunk):
return '<span class="add">%s</span>' % chunk
def _format_delete(chunk):
return '<span class="del">%s</span>' % chunk
| 740 | 0 | 92 |
78d590937fe6d9686ecf625134b085c5f1e14ae5 | 6,373 | py | Python | examples/cifar10/train.py | fidelity/stoke | 7c993c4d1dcdfeae2a838c6396b88452dfa1b7ad | [
"Apache-2.0"
] | 42 | 2021-06-23T19:36:54.000Z | 2022-01-25T08:21:21.000Z | examples/cifar10/train.py | fidelity/stoke | 7c993c4d1dcdfeae2a838c6396b88452dfa1b7ad | [
"Apache-2.0"
] | 22 | 2021-06-23T20:31:49.000Z | 2022-03-28T05:11:48.000Z | examples/cifar10/train.py | fidelity/stoke | 7c993c4d1dcdfeae2a838c6396b88452dfa1b7ad | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright FMR LLC <opensource@fidelity.com>
# SPDX-License-Identifier: Apache-2.0
"""CIFAR10 training script demonstrating a few different stoke options
Based loosely on: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
import torch
import torchvision.datasets as tv_datasets
import torchvision.transforms as tv_transforms
from configs import *
from model import resnet152
from spock.builder import ConfigArgBuilder
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data.distributed import DistributedSampler
from stoke import DeepspeedConfig, DeepspeedZeROConfig, Stoke, StokeOptimizer
if __name__ == "__main__":
main()
| 34.080214 | 118 | 0.661855 | # -*- coding: utf-8 -*-
# Copyright FMR LLC <opensource@fidelity.com>
# SPDX-License-Identifier: Apache-2.0
"""CIFAR10 training script demonstrating a few different stoke options
Based loosely on: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html
"""
import torch
import torchvision.datasets as tv_datasets
import torchvision.transforms as tv_transforms
from configs import *
from model import resnet152
from spock.builder import ConfigArgBuilder
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data.distributed import DistributedSampler
from stoke import DeepspeedConfig, DeepspeedZeROConfig, Stoke, StokeOptimizer
def train(train_dataloader, cifar_stoke: Stoke, epoch: int):
cifar_stoke.print_on_devices(f"Starting Epoch {epoch + 1}")
cifar_stoke.model_access.train()
for idx, (x, y) in enumerate(train_dataloader):
# Call the model through the stoke object interface
outputs = cifar_stoke.model(x)
# Call the loss through the stoke object interface
loss = cifar_stoke.loss(outputs, y)
# Print some loss info
cifar_stoke.print_ema_loss(prepend_msg=f"Step {idx+1} -- EMA Loss")
# Call backward through the stoke object interface
cifar_stoke.backward(loss=loss)
# Call step through the stoke object interface
cifar_stoke.step()
return epoch + 1
def predict(test_dataloader, cifar_stoke: Stoke):
# Switch to eval mode
cifar_stoke.model_access.eval()
total_y = 0
total_correct = 0
# Wrap with no grads context just to be safe
with torch.no_grad():
for x, y in test_dataloader:
outputs = cifar_stoke.model(x)
_, preds = torch.max(outputs, dim=1)
total_y += y.size(0)
total_correct += torch.sum(preds == y).item()
cifar_stoke.print_on_devices(
msg=f"Current Test Accuracy: {((total_correct/total_y) * 100):.3f}"
)
def main():
# Use spock to grab all the configs
configs = ConfigArgBuilder(
DataConfig, OSSConfig, RunConfig, SDDPConfig, SGDConfig, ZeROConfig
).generate()
# Create the resnet-152 model
model = resnet152()
# Define the loss function
loss = CrossEntropyLoss()
# Make the StokeOptimizer object
optimizer = StokeOptimizer(
optimizer=SGD,
optimizer_kwargs={
"lr": configs.SGDConfig.lr,
"momentum": configs.SGDConfig.momentum,
"weight_decay": configs.SGDConfig.weight_decay,
},
)
# Handle some extra config objects so we can easily switch between different stoke options from the config yaml(s)
extra_configs = [
DeepspeedConfig(
zero_optimization=DeepspeedZeROConfig(
stage=configs.ZeROConfig.zero,
contiguous_gradients=configs.ZeROConfig.contiguous_gradients,
overlap_comm=configs.ZeROConfig.overlap_comm,
),
dump_state=True,
)
]
# Build the base stoke object
cifar_stoke = Stoke(
model=model,
optimizer=optimizer,
loss=loss,
batch_size_per_device=configs.DataConfig.batch_size,
gpu=configs.RunConfig.gpu,
fp16=configs.RunConfig.fp16,
distributed=configs.RunConfig.distributed,
fairscale_oss=configs.RunConfig.oss,
fairscale_sddp=configs.RunConfig.sddp,
configs=extra_configs,
grad_accum_steps=configs.RunConfig.grad_accum,
verbose=True,
)
# Set up a transform pipeline for CIFAR10 training data -- do some simple augmentation for illustration
transform_train = tv_transforms.Compose(
[
tv_transforms.RandomHorizontalFlip(),
tv_transforms.ToTensor(),
tv_transforms.Normalize(
mean=configs.DataConfig.normalize_mean,
std=configs.DataConfig.normalize_std,
),
]
)
# Set up a transform pipeline for CIFAR10 test data
transform_test = tv_transforms.Compose(
[
tv_transforms.ToTensor(),
tv_transforms.Normalize(
mean=configs.DataConfig.normalize_mean,
std=configs.DataConfig.normalize_std,
),
]
)
# Get CIFAR10 training data from torchvision
training_dataset = tv_datasets.CIFAR10(
root=configs.DataConfig.root_dir,
train=True,
download=True,
transform=transform_train,
)
# Get CIFAR10 test data from torchvision
test_dataset = tv_datasets.CIFAR10(
root=configs.DataConfig.root_dir,
train=False,
download=True,
transform=transform_test,
)
# If distributed then roll a sampler else None
train_sampler = (
DistributedSampler(
dataset=training_dataset,
num_replicas=cifar_stoke.world_size,
rank=cifar_stoke.rank,
)
if configs.RunConfig.distributed is not None
else None
)
# Construct the DataLoader
train_loader = cifar_stoke.DataLoader(
dataset=training_dataset,
sampler=train_sampler,
num_workers=configs.DataConfig.n_workers
if configs.DataConfig.n_workers is not None
else 0,
)
# If distributed then roll a sampler else None
test_sampler = (
DistributedSampler(
dataset=test_dataset,
num_replicas=cifar_stoke.world_size,
rank=cifar_stoke.rank,
)
if configs.RunConfig.distributed is not None
else None
)
test_loader = cifar_stoke.DataLoader(
dataset=test_dataset,
sampler=test_sampler,
num_workers=configs.DataConfig.n_workers
if configs.DataConfig.n_workers is not None
else 0,
)
# Initial overall acc which should be ~10% given the 10 CIFAR10 classes
predict(test_dataloader=test_loader, cifar_stoke=cifar_stoke)
n_epochs = 0
while n_epochs < configs.RunConfig.num_epoch:
n_epochs = train(
train_dataloader=train_loader, cifar_stoke=cifar_stoke, epoch=n_epochs
)
# Reset the ema stats after each epoch
cifar_stoke.reset_ema()
# Check test loss every epoch
predict(test_dataloader=test_loader, cifar_stoke=cifar_stoke)
if __name__ == "__main__":
main()
| 5,588 | 0 | 69 |
1db5cbca273aa598c3db5c705adde5e13cc1fd2a | 997 | py | Python | module/standard-library/ftplib/app.py | treedbox/python-3-basic-exercises | 631e86f8ad97bca7ab06e1f3ebd9d104fc5c43b8 | [
"MIT"
] | null | null | null | module/standard-library/ftplib/app.py | treedbox/python-3-basic-exercises | 631e86f8ad97bca7ab06e1f3ebd9d104fc5c43b8 | [
"MIT"
] | null | null | null | module/standard-library/ftplib/app.py | treedbox/python-3-basic-exercises | 631e86f8ad97bca7ab06e1f3ebd9d104fc5c43b8 | [
"MIT"
] | null | null | null | from ftplib import FTP
from os.path import abspath, join, dirname
host = 'your.com'
user = 'youlogin'
password = 'yourpass'
remotedir = '/folder/on/server/'
filename = 'file.txt'
localdir = abspath(dirname(__file__))
absFilePathAutoSlash = abspath(join(dirname(__file__), filename))
ftp = FTP(host)
ftp.login(user=user, passwd=password)
ftp.cwd(remotedir)
# Download
# RETR Retrieve a copy of the file
# wb = write + binary mode
# 1024 = buffer size
# Upload
# STOR Accept the data and to store the data as a file at the server site
# wb = read + binary mode
# 1024 = buffer size
# Upload
# storFile(absFilePathAutoSlash)
# Download
retrFile(absFilePathAutoSlash)
"""
ftplib.error_perm: 530 Login authentication failed
"""
| 20.770833 | 73 | 0.705115 | from ftplib import FTP
from os.path import abspath, join, dirname
host = 'your.com'
user = 'youlogin'
password = 'yourpass'
remotedir = '/folder/on/server/'
filename = 'file.txt'
localdir = abspath(dirname(__file__))
absFilePathAutoSlash = abspath(join(dirname(__file__), filename))
ftp = FTP(host)
ftp.login(user=user, passwd=password)
ftp.cwd(remotedir)
# Download
# RETR Retrieve a copy of the file
# wb = write + binary mode
# 1024 = buffer size
def retrFile(file):
with open(file, 'wb') as local_file:
ftp.retrbinary('RETR ' + filename, local_file.write)
ftp.quit()
# Upload
# STOR Accept the data and to store the data as a file at the server site
# wb = read + binary mode
# 1024 = buffer size
def storFile(file):
with open(file, 'rb') as fobj:
ftp.storbinary('STOR ' + filename, fobj, 1024)
ftp.quit()
# Upload
# storFile(absFilePathAutoSlash)
# Download
retrFile(absFilePathAutoSlash)
"""
ftplib.error_perm: 530 Login authentication failed
"""
| 218 | 0 | 44 |
b47d40a7073d5fdf1b8015c7eacb2aaf529a0e2f | 1,649 | py | Python | main.py | agranium/pydl | bdd076f5c21dbabb35b77ef48b68cc1a3fe713f2 | [
"MIT"
] | null | null | null | main.py | agranium/pydl | bdd076f5c21dbabb35b77ef48b68cc1a3fe713f2 | [
"MIT"
] | null | null | null | main.py | agranium/pydl | bdd076f5c21dbabb35b77ef48b68cc1a3fe713f2 | [
"MIT"
] | null | null | null | """
pyDL
Python と Selenium を使用したCLIダウンローダー。
Chromeのプロファイルを指定するとキャッシュを流用できる。
"""
import click
import json
import dl
@click.group()
@click.option('--incomplete', '-i', type=str, default='./incomplete', help='Incomplete Files Directory.')
@click.option('--download', '-d', type=str, default='./download', help='Downloaded Files Directory.')
@click.option('--profile', '-p', type=str, default='./profile', help='Profile Directory.')
@click.option('--headless', '-h', type=bool, is_flag=True, help='Running with Headless Browzer.')
@click.pass_context
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
@cmd.command()
@click.pass_context
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
if __name__ == "__main__":
cmd(obj={})
| 23.898551 | 105 | 0.606428 | """
pyDL
Python と Selenium を使用したCLIダウンローダー。
Chromeのプロファイルを指定するとキャッシュを流用できる。
"""
import click
import json
import dl
@click.group()
@click.option('--incomplete', '-i', type=str, default='./incomplete', help='Incomplete Files Directory.')
@click.option('--download', '-d', type=str, default='./download', help='Downloaded Files Directory.')
@click.option('--profile', '-p', type=str, default='./profile', help='Profile Directory.')
@click.option('--headless', '-h', type=bool, is_flag=True, help='Running with Headless Browzer.')
@click.pass_context
def cmd(ctx, incomplete, download, profile, headless):
ctx.obj["incomplete"] = incomplete
ctx.obj["download"] = download
ctx.obj["profile"] = profile
ctx.obj["headless"] = headless
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
def down(ctx, queue_file):
down = dl.Downloader(
ctx.obj["incomplete"],
ctx.obj["download"],
ctx.obj["profile"],
ctx.obj["headless"]
)
down.run(queue_file)
@cmd.command()
@click.pass_context
def idle(ctx):
down = dl.Downloader(
ctx.obj["incomplete"],
ctx.obj["download"],
ctx.obj["profile"],
ctx.obj["headless"]
)
down.idle()
@cmd.command()
@click.argument('queue_file', nargs=1)
@click.pass_context
def init(ctx, queue_file):
with open(queue_file, "w", encoding="utf-8") as f:
json.dump(
{
"queue": [],
"downloading": [],
"finished": []
},
f, indent=2, ensure_ascii=False
)
if __name__ == "__main__":
cmd(obj={})
| 775 | 0 | 88 |
46023abe514a170bb1a44fa4af80bd684ee3edc3 | 700 | py | Python | examples/Graph_Neural_Networks/Common/Node2vec.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 300 | 2020-08-09T04:27:41.000Z | 2022-03-30T07:43:41.000Z | examples/Graph_Neural_Networks/Common/Node2vec.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 5 | 2020-11-05T06:16:50.000Z | 2021-12-11T05:05:22.000Z | examples/Graph_Neural_Networks/Common/Node2vec.py | EdisonLeeeee/GraphGallery | 4eec9c5136bda14809bd22584b26cc346cdb633b | [
"MIT"
] | 51 | 2020-09-23T15:37:12.000Z | 2022-03-05T01:28:56.000Z | #!/usr/bin/env python
# coding: utf-8
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
from graphgallery.gallery.embedding import Node2Vec
trainer = Node2Vec()
trainer.fit(graph.adj_matrix)
# embedding = trainer.get_embedding()
accuracy = trainer.evaluate_nodeclas(graph.node_label,
splits.train_nodes,
splits.test_nodes)
print(f'Test accuracy {accuracy:.2%}')
| 26.923077 | 70 | 0.665714 | #!/usr/bin/env python
# coding: utf-8
import graphgallery
print("GraphGallery version: ", graphgallery.__version__)
'''
Load Datasets
- cora/citeseer/pubmed
'''
from graphgallery.datasets import Planetoid
data = Planetoid('cora', root="~/GraphData/datasets/", verbose=False)
graph = data.graph
splits = data.split_nodes()
from graphgallery.gallery.embedding import Node2Vec
trainer = Node2Vec()
trainer.fit(graph.adj_matrix)
# embedding = trainer.get_embedding()
accuracy = trainer.evaluate_nodeclas(graph.node_label,
splits.train_nodes,
splits.test_nodes)
print(f'Test accuracy {accuracy:.2%}')
| 0 | 0 | 0 |
64d8298fd402b2365fa4d7ef66a2738ba51bdd1e | 3,080 | py | Python | embedding/tag_generation/gen_phraser_tags.py | akkefa/Islam-360 | 2fa49872f92e1abcb9a31a893b4654f7485711ae | [
"MIT"
] | null | null | null | embedding/tag_generation/gen_phraser_tags.py | akkefa/Islam-360 | 2fa49872f92e1abcb9a31a893b4654f7485711ae | [
"MIT"
] | null | null | null | embedding/tag_generation/gen_phraser_tags.py | akkefa/Islam-360 | 2fa49872f92e1abcb9a31a893b4654f7485711ae | [
"MIT"
] | null | null | null | """gensim phrases tags"""
from urduhack.preprocessing import remove_punctuation, replace_numbers
from urduhack.tokenization.words import fix_join_words
from urduhack.tokenization import sentence_tokenizer
from sklearn.model_selection import ParameterGrid
from urduhack import normalize
from urduhack.stop_words import STOP_WORDS
from pathlib import Path
import pandas as pd
import json
from gensim.models.phrases import Phraser, Phrases
import re
# SENTENCES = "/home/ikram/workplace/projects/Islam-360/embedding/w2v/translation_sentences.txt"
# DOCUMENTS = []
# with open(SENTENCES, "r") as file:
# for line in file:
# line = line.strip().split()
# line = [token for token in line if token not in STOP_WORDS]
# DOCUMENTS.append(line)
DOCUMENTS = []
paths = Path('/home/ikram/workplace/datasets/translation_and_tafaseer/csvs').glob('*.csv')
for path in paths:
path_in_str = str(path)
print(path_in_str)
df = pd.read_csv(path_in_str)
for index, row in df.iterrows():
if isinstance(row['translation'], str):
translation = row['translation'].strip()
translation = replace_numbers(remove_punctuation(fix_join_words(normalize(translation))))
translation = re.sub(" +", " ", translation)
DOCUMENTS.append(translation.split())
# DOCUMENTS = []
# df = pd.read_csv("/Users/muhammadfahid/PycharmProjects/data_preprocess/islam-360/ahsanulbayan.db.csv")
# for index, row in df.iterrows():
# if isinstance(row['translation'], str):
# translation = normalize(row['translation'])
# translation = translation.strip()
# translation = fix_join_words(translation)
# trans = remove_punctuation(translation)
# trans = re.sub(" +", " ", trans)
# trans = trans.split()
# DOCUMENTS.append(trans)
# if isinstance(row['tafseer'], str):
# sents = remove_punctuation(fix_join_words(normalize(row['tafseer']).strip())).split()
# DOCUMENTS.append(sents)
# Gensim Phrases
# {(30, 50), (25, 40), (40, 20)}
PARAMS = {
"min_count": [2, 5, 10, 15, 20, 25, 30],
"threshold": [10, 30, 40, 50, 100, 200, 300]
}
for index, param in enumerate(ParameterGrid(PARAMS)):
print(f"Model Training: {index}")
all_phrases = {}
phrases = Phrases(DOCUMENTS, **param)
bi_gram = Phraser(phrases)
Bi_PHRASES = []
for doc in DOCUMENTS:
bi_grams = bi_gram[doc]
Bi_PHRASES.append(bi_grams)
# {(10, 10), (15, 20), (20, 10)}
tri_phrases = Phrases(Bi_PHRASES)
TRI_PHRASES = {}
for phrase, score in tri_phrases.export_phrases(Bi_PHRASES):
phrase = phrase.decode("utf-8").replace("_", " ")
if len(phrase.split()) > 2:
all_phrases[phrase] = score
results = {k: v for k, v in sorted(all_phrases.items(), key=lambda item: item[1], reverse=True)}
print(f"Model Dumping {index}")
with open(f"models/phrases_ahsan_{param['min_count']}_{param['threshold']}.json", "w") as out_json:
json.dump(results, out_json, ensure_ascii=False, indent=4)
| 35 | 104 | 0.666883 | """gensim phrases tags"""
from urduhack.preprocessing import remove_punctuation, replace_numbers
from urduhack.tokenization.words import fix_join_words
from urduhack.tokenization import sentence_tokenizer
from sklearn.model_selection import ParameterGrid
from urduhack import normalize
from urduhack.stop_words import STOP_WORDS
from pathlib import Path
import pandas as pd
import json
from gensim.models.phrases import Phraser, Phrases
import re
# SENTENCES = "/home/ikram/workplace/projects/Islam-360/embedding/w2v/translation_sentences.txt"
# DOCUMENTS = []
# with open(SENTENCES, "r") as file:
# for line in file:
# line = line.strip().split()
# line = [token for token in line if token not in STOP_WORDS]
# DOCUMENTS.append(line)
DOCUMENTS = []
paths = Path('/home/ikram/workplace/datasets/translation_and_tafaseer/csvs').glob('*.csv')
for path in paths:
path_in_str = str(path)
print(path_in_str)
df = pd.read_csv(path_in_str)
for index, row in df.iterrows():
if isinstance(row['translation'], str):
translation = row['translation'].strip()
translation = replace_numbers(remove_punctuation(fix_join_words(normalize(translation))))
translation = re.sub(" +", " ", translation)
DOCUMENTS.append(translation.split())
# DOCUMENTS = []
# df = pd.read_csv("/Users/muhammadfahid/PycharmProjects/data_preprocess/islam-360/ahsanulbayan.db.csv")
# for index, row in df.iterrows():
# if isinstance(row['translation'], str):
# translation = normalize(row['translation'])
# translation = translation.strip()
# translation = fix_join_words(translation)
# trans = remove_punctuation(translation)
# trans = re.sub(" +", " ", trans)
# trans = trans.split()
# DOCUMENTS.append(trans)
# if isinstance(row['tafseer'], str):
# sents = remove_punctuation(fix_join_words(normalize(row['tafseer']).strip())).split()
# DOCUMENTS.append(sents)
# Gensim Phrases
# {(30, 50), (25, 40), (40, 20)}
PARAMS = {
"min_count": [2, 5, 10, 15, 20, 25, 30],
"threshold": [10, 30, 40, 50, 100, 200, 300]
}
for index, param in enumerate(ParameterGrid(PARAMS)):
print(f"Model Training: {index}")
all_phrases = {}
phrases = Phrases(DOCUMENTS, **param)
bi_gram = Phraser(phrases)
Bi_PHRASES = []
for doc in DOCUMENTS:
bi_grams = bi_gram[doc]
Bi_PHRASES.append(bi_grams)
# {(10, 10), (15, 20), (20, 10)}
tri_phrases = Phrases(Bi_PHRASES)
TRI_PHRASES = {}
for phrase, score in tri_phrases.export_phrases(Bi_PHRASES):
phrase = phrase.decode("utf-8").replace("_", " ")
if len(phrase.split()) > 2:
all_phrases[phrase] = score
results = {k: v for k, v in sorted(all_phrases.items(), key=lambda item: item[1], reverse=True)}
print(f"Model Dumping {index}")
with open(f"models/phrases_ahsan_{param['min_count']}_{param['threshold']}.json", "w") as out_json:
json.dump(results, out_json, ensure_ascii=False, indent=4)
| 0 | 0 | 0 |
b09d3db0223f486b3224ce57b43da9812196989d | 1,042 | py | Python | tomef/tokenizer/pt_tokenizer.py | unlikelymaths/tomef | 57b629a3ee932486c55afcf62ef9d8224488ae65 | [
"MIT"
] | null | null | null | tomef/tokenizer/pt_tokenizer.py | unlikelymaths/tomef | 57b629a3ee932486c55afcf62ef9d8224488ae65 | [
"MIT"
] | 14 | 2020-01-28T22:36:41.000Z | 2022-03-11T23:44:22.000Z | tomef/tokenizer/pt_tokenizer.py | unlikelymaths/tomef | 57b629a3ee932486c55afcf62ef9d8224488ae65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # PT Tokenizer
# <div style="position: absolute; right:0;top:0"><a href="./tokenizer.ipynb" style="text-decoration: none"> <font size="5">←</font></a>
# <a href="../evaluation.py.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
#
# This is a wrapper around the Penn Treebank tokenizer provided by the NLTK.
# For more information see https://www.nltk.org/api/nltk.tokenize.html
#
# ---
# ## Setup and Settings
# ---
# In[5]:
from __init__ import init_vars
init_vars(vars())
import nltk
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
from nltk.tokenize import word_tokenize
import tokenizer.common
from tokenizer.token_util import TokenizerBase
# ---
# ## Build PTTokenizer class
# ---
# In[3]:
| 22.652174 | 135 | 0.691939 | #!/usr/bin/env python
# coding: utf-8
# # PT Tokenizer
# <div style="position: absolute; right:0;top:0"><a href="./tokenizer.ipynb" style="text-decoration: none"> <font size="5">←</font></a>
# <a href="../evaluation.py.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
#
# This is a wrapper around the Penn Treebank tokenizer provided by the NLTK.
# For more information see https://www.nltk.org/api/nltk.tokenize.html
#
# ---
# ## Setup and Settings
# ---
# In[5]:
from __init__ import init_vars
init_vars(vars())
import nltk
try:
nltk.data.find('tokenizers/punkt')
except LookupError:
nltk.download('punkt')
from nltk.tokenize import word_tokenize
import tokenizer.common
from tokenizer.token_util import TokenizerBase
# ---
# ## Build PTTokenizer class
# ---
# In[3]:
class PTTokenizer(TokenizerBase):
def tokenize(self, text, *args):
text = text.replace(tokenizer.common.separator_token,tokenizer.common.separator_token_replacement)
return word_tokenize(text)
| 153 | 12 | 54 |
9e8b67848ea7acba12e81e06298ef6bed073818f | 656 | py | Python | benchmarks/xcrypto-ref/flow/benchmarks/mpn_plot_perf.py | gokhankici/iodine | 7b5d00eb37bf31b9d1c4e69e176271244e86b26f | [
"MIT"
] | 9 | 2019-05-31T08:52:38.000Z | 2021-12-12T15:31:00.000Z | benchmarks/xcrypto-ref/flow/benchmarks/mpn_plot_perf.py | gokhankici/xenon | d749abd865f2017cda323cf63cf38b585de9e7af | [
"MIT"
] | 6 | 2019-02-06T09:43:53.000Z | 2019-06-04T11:10:19.000Z | benchmarks/xcrypto-ref/flow/benchmarks/mpn_plot_perf.py | gokhankici/iodine | 7b5d00eb37bf31b9d1c4e69e176271244e86b26f | [
"MIT"
] | 1 | 2021-01-30T00:03:25.000Z | 2021-01-30T00:03:25.000Z |
print("Running performance analysis...")
print("Out dir : %s" % out_dir)
print("Architecture: %s" % arch)
import os
import sys
import csv
csv_records = []
csv_path = os.path.join(out_dir,"mpn-performance-%s.csv" % arch)
for record in performance:
func, lx,ly,instr_s,instr_e,cycle_s,cycle_e = record
cycles = cycle_e - cycle_s
instrs = instr_e - instr_s
csv_records.append (
[arch, func, lx, ly, cycles, instrs]
)
with open(csv_path, 'w') as fh:
writer = csv.writer(fh, delimiter = ',',quotechar="\"")
for row in csv_records:
writer.writerow(row)
print("Written results to %s" % csv_path)
| 19.878788 | 67 | 0.644817 |
print("Running performance analysis...")
print("Out dir : %s" % out_dir)
print("Architecture: %s" % arch)
import os
import sys
import csv
csv_records = []
csv_path = os.path.join(out_dir,"mpn-performance-%s.csv" % arch)
for record in performance:
func, lx,ly,instr_s,instr_e,cycle_s,cycle_e = record
cycles = cycle_e - cycle_s
instrs = instr_e - instr_s
csv_records.append (
[arch, func, lx, ly, cycles, instrs]
)
with open(csv_path, 'w') as fh:
writer = csv.writer(fh, delimiter = ',',quotechar="\"")
for row in csv_records:
writer.writerow(row)
print("Written results to %s" % csv_path)
| 0 | 0 | 0 |
fe63b72ed28da50f68feedc662b5048cfcfa23e7 | 3,039 | py | Python | test/integration/ggrc/services/test_audits.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-04T10:55:14.000Z | 2019-01-04T10:55:14.000Z | test/integration/ggrc/services/test_audits.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/services/test_audits.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for actions available on audits handle."""
from ggrc.models import Audit
from ggrc.models import Program
from ggrc.models import all_models
from integration.ggrc import generator
from integration.ggrc import TestCase
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc.models import factories
class TestAuditActions(TestCase, WithQueryApi):
"""Test Audit related actions"""
def test_filter_by_evidence_url(self):
"""Filter by = operator."""
evidence_url = "http://i.imgur.com/Lppr247.jpg"
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", evidence_url]),
"Audit",
)
self.assertEqual(audits["count"], 1)
self.assertEqual(len(audits["values"]), audits["count"])
def test_audit_post_put(self):
"""Test create document and map it to audit"""
data = {
"link": "test_link",
}
evidence_kind = all_models.Evidence.URL
data["kind"] = evidence_kind
resp, evidence = self.gen.generate_object(
all_models.Evidence,
data
)
self.assertEqual(resp.status_code, 201)
self.assertTrue(
all_models.Evidence.query.filter(
all_models.Evidence.id == resp.json["evidence"]['id'],
all_models.Evidence.kind == evidence_kind,
).all()
)
evidence = all_models.Evidence.query.get(evidence.id)
self.assertEqual(evidence.link, "test_link")
audit = Audit.query.filter(Audit.slug == "Aud-1").first()
data = {
"source": self.gen.create_stub(audit),
"destination": self.gen.create_stub(evidence),
"context": self.gen.create_stub(audit.context)
}
resp, _ = self.gen.generate_object(
all_models.Relationship, add_fields=False, data=data)
self.assertEqual(resp.status_code, 201)
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", "test_link"]),
"Audit",
)
self.assertEqual(audits["count"], 1)
def test_evidence_create_an_map(self):
"""Test document is created and mapped to audit"""
audit = factories.AuditFactory(slug="Audit")
evidence = factories.EvidenceFileFactory(
title="evidence",
)
factories.RelationshipFactory(
source=audit,
destination=evidence,
)
self.assertEqual(audit.evidences_file[0].title, "evidence")
| 33.395604 | 78 | 0.666667 | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for actions available on audits handle."""
from ggrc.models import Audit
from ggrc.models import Program
from ggrc.models import all_models
from integration.ggrc import generator
from integration.ggrc import TestCase
from integration.ggrc.query_helper import WithQueryApi
from integration.ggrc.models import factories
class TestAuditActions(TestCase, WithQueryApi):
"""Test Audit related actions"""
def setUp(self):
self.clear_data()
super(TestAuditActions, self).setUp()
self.client.get("/login")
self.gen = generator.ObjectGenerator()
filename = "program_audit.csv"
self.import_file(filename)
self.assertEqual(2, Audit.query.count())
audit = Audit.query.filter(Audit.slug == "Aud-1").first()
program = Program.query.filter(Program.slug == "prog-1").first()
self.assertEqual(audit.program_id, program.id)
def test_filter_by_evidence_url(self):
"""Filter by = operator."""
evidence_url = "http://i.imgur.com/Lppr247.jpg"
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", evidence_url]),
"Audit",
)
self.assertEqual(audits["count"], 1)
self.assertEqual(len(audits["values"]), audits["count"])
def test_audit_post_put(self):
"""Test create document and map it to audit"""
data = {
"link": "test_link",
}
evidence_kind = all_models.Evidence.URL
data["kind"] = evidence_kind
resp, evidence = self.gen.generate_object(
all_models.Evidence,
data
)
self.assertEqual(resp.status_code, 201)
self.assertTrue(
all_models.Evidence.query.filter(
all_models.Evidence.id == resp.json["evidence"]['id'],
all_models.Evidence.kind == evidence_kind,
).all()
)
evidence = all_models.Evidence.query.get(evidence.id)
self.assertEqual(evidence.link, "test_link")
audit = Audit.query.filter(Audit.slug == "Aud-1").first()
data = {
"source": self.gen.create_stub(audit),
"destination": self.gen.create_stub(evidence),
"context": self.gen.create_stub(audit.context)
}
resp, _ = self.gen.generate_object(
all_models.Relationship, add_fields=False, data=data)
self.assertEqual(resp.status_code, 201)
audits = self._get_first_result_set(
self._make_query_dict("Audit",
expression=["evidence url", "=", "test_link"]),
"Audit",
)
self.assertEqual(audits["count"], 1)
def test_evidence_create_an_map(self):
"""Test document is created and mapped to audit"""
audit = factories.AuditFactory(slug="Audit")
evidence = factories.EvidenceFileFactory(
title="evidence",
)
factories.RelationshipFactory(
source=audit,
destination=evidence,
)
self.assertEqual(audit.evidences_file[0].title, "evidence")
| 425 | 0 | 25 |
13944a47e76c26127157df1c29f339f9da38e2a0 | 1,110 | py | Python | old_tests/test_stats_describe.py | acequia-package/Acequia | 330b409cbb8faf8180608814669bf0efe27e5edd | [
"MIT"
] | null | null | null | old_tests/test_stats_describe.py | acequia-package/Acequia | 330b409cbb8faf8180608814669bf0efe27e5edd | [
"MIT"
] | 1 | 2020-02-14T13:57:48.000Z | 2020-03-22T13:38:07.000Z | old_tests/test_stats_describe.py | acequia-package/acequia | 330b409cbb8faf8180608814669bf0efe27e5edd | [
"MIT"
] | null | null | null |
"""
Test stats.Describe object
"""
import acequia as aq
if __name__ == '__main__':
srcdir = r'.\testdata\dinogws_smalltest\\'
outdir = r'.\output\tables\\'
fpath = f'{srcdir}B29A0016001_1.csv'
gw = aq.GwSeries.from_dinogws(fpath)
hdr('test self._create_list()')
ds = aq.GwListStats(srcdir)
gws = ds._create_list()
hdr('test self._table_series()')
ds = aq.GwListStats(srcdir)
tbl1 = ds._table_series()
hdr('# test self.timestatstable(gxg=False) ')
ds = aq.GwListStats(srcdir)
tbl2 = ds.timestatstable(gxg=False)
hdr('# test self.timestatstable(gxg=True) ')
ds = aq.GwListStats(srcdir)
tbl3 = ds.timestatstable(gxg=True)
hdr('# test custom function aq.gwliststats(gxg=False)')
tbl4 = aq.gwliststats(srcdir, gxg=False)
hdr('# test custom function aq.gwliststats(gxg=True)')
tbl5 = aq.gwliststats(srcdir, gxg=True, ref='surface')
hdr('# test custom function aq.gwlocstats() ')
tbl6 = aq.gwlocstats(tbl4)
| 20.555556 | 59 | 0.631532 |
"""
Test stats.Describe object
"""
import acequia as aq
def hdr(msg):
print()
print('#','-'*50)
print(msg)
print('#','-'*50)
print()
if __name__ == '__main__':
srcdir = r'.\testdata\dinogws_smalltest\\'
outdir = r'.\output\tables\\'
fpath = f'{srcdir}B29A0016001_1.csv'
gw = aq.GwSeries.from_dinogws(fpath)
hdr('test self._create_list()')
ds = aq.GwListStats(srcdir)
gws = ds._create_list()
hdr('test self._table_series()')
ds = aq.GwListStats(srcdir)
tbl1 = ds._table_series()
hdr('# test self.timestatstable(gxg=False) ')
ds = aq.GwListStats(srcdir)
tbl2 = ds.timestatstable(gxg=False)
hdr('# test self.timestatstable(gxg=True) ')
ds = aq.GwListStats(srcdir)
tbl3 = ds.timestatstable(gxg=True)
hdr('# test custom function aq.gwliststats(gxg=False)')
tbl4 = aq.gwliststats(srcdir, gxg=False)
hdr('# test custom function aq.gwliststats(gxg=True)')
tbl5 = aq.gwliststats(srcdir, gxg=True, ref='surface')
hdr('# test custom function aq.gwlocstats() ')
tbl6 = aq.gwlocstats(tbl4)
| 75 | 0 | 23 |
4d465e72fafc64cbd88dcb0a112954375940cf86 | 1,001 | py | Python | setup.py | bhnwmc/txWS | f531030fc6eb005d6d48b865e490a8361017618d | [
"Apache-2.0"
] | 30 | 2015-03-05T06:41:34.000Z | 2020-07-24T15:52:15.000Z | setup.py | bhnwmc/txWS | f531030fc6eb005d6d48b865e490a8361017618d | [
"Apache-2.0"
] | 7 | 2015-02-05T07:04:52.000Z | 2019-06-14T03:08:58.000Z | setup.py | bhnwmc/txWS | f531030fc6eb005d6d48b865e490a8361017618d | [
"Apache-2.0"
] | 11 | 2015-01-28T17:46:34.000Z | 2021-01-13T14:49:12.000Z | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup
setup(
name="txWS",
py_modules=["txws"],
setup_requires=["vcversioner", "six"],
vcversioner={},
author="Corbin Simpson",
author_email="simpsoco@osuosl.org",
description="Twisted WebSockets wrapper",
long_description=open("README.rst").read(),
license="MIT/X11",
url="http://github.com/MostAwesomeDude/txWS",
)
| 33.366667 | 79 | 0.724276 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup

# Read the long description up front with a context manager so the file
# handle is closed promptly, instead of leaking it via an inline
# open("README.rst").read() in the setup() call.
with open("README.rst") as readme:
    long_description = readme.read()

setup(
    name="txWS",
    py_modules=["txws"],
    setup_requires=["vcversioner", "six"],
    vcversioner={},
    author="Corbin Simpson",
    author_email="simpsoco@osuosl.org",
    description="Twisted WebSockets wrapper",
    long_description=long_description,
    license="MIT/X11",
    url="http://github.com/MostAwesomeDude/txWS",
)
| 0 | 0 | 0 |
b4ca760ef959bba41593c27920745d0def66d16f | 1,322 | py | Python | Course/Data structures and algorithms/3.Basic algorithm/3.Faster divide and conquer algorithms/1.DivideAndConquer.py | IulianOctavianPreda/Udacity | 4349f4c12c838bcf3e53409f943ca8aacd58c94b | [
"MIT"
] | null | null | null | Course/Data structures and algorithms/3.Basic algorithm/3.Faster divide and conquer algorithms/1.DivideAndConquer.py | IulianOctavianPreda/Udacity | 4349f4c12c838bcf3e53409f943ca8aacd58c94b | [
"MIT"
] | null | null | null | Course/Data structures and algorithms/3.Basic algorithm/3.Faster divide and conquer algorithms/1.DivideAndConquer.py | IulianOctavianPreda/Udacity | 4349f4c12c838bcf3e53409f943ca8aacd58c94b | [
"MIT"
] | null | null | null | '''
Divide and conquer
In this section, we'll discuss algorithms that use a strategy called divide and conquer to solve problems more efficiently.
The name comes from the idea that the algorithm will break (or divide) the problem down into sub-problems that can be more easily solved (or conquered).
Then, the solutions to these sub-problems are combined to yield an answer to the original problem.
Over the next several videos, Eric will walk through an example—specifically, he'll demonstrate how to use a divide and conquer algorithm to efficiently find the median element out of a collection of unsorted numbers.
Following this, you'll have the opportunity to do some hands-on work and solve several different problems using a divide-and-conquer approach.
https://youtu.be/x9LzFLGgH88
2:
https://youtu.be/rhCx4vVJOwc
Additional Read - The problem of finding kth smallest element from an unsorted array is generally called as kth order statistic.
The BFPRT (1973) solution mentioned above is widely known as Median of medians(https://en.wikipedia.org/wiki/Median_of_medians), that we will discuss in the next video.
3: https://youtu.be/7DEYao1bEnE
4: https://youtu.be/UCs8HY6-FB0
5: https://youtu.be/bw_bGIWQUII
6: https://youtu.be/fjR5Y8iuMfI
7: https://youtu.be/Wk5hEuBMvQc
8: https://youtu.be/7tUR8nHKpXs
'''
| 57.478261 | 218 | 0.791225 | '''
Divide and conquer
In this section, we'll discuss algorithms that use a strategy called divide and conquer to solve problems more efficiently.
The name comes from the idea that the algorithm will break (or divide) the problem down into sub-problems that can be more easily solved (or conquered).
Then, the solutions to these sub-problems are combined to yield an answer to the original problem.
Over the next several videos, Eric will walk through an example—specifically, he'll demonstrate how to use a divide and conquer algorithm to efficiently find the median element out of a collection of unsorted numbers.
Following this, you'll have the opportunity to do some hands-on work and solve several different problems using a divide-and-conquer approach.
https://youtu.be/x9LzFLGgH88
2:
https://youtu.be/rhCx4vVJOwc
Additional Read - The problem of finding kth smallest element from an unsorted array is generally called as kth order statistic.
The BFPRT (1973) solution mentioned above is widely known as Median of medians(https://en.wikipedia.org/wiki/Median_of_medians), that we will discuss in the next video.
3: https://youtu.be/7DEYao1bEnE
4: https://youtu.be/UCs8HY6-FB0
5: https://youtu.be/bw_bGIWQUII
6: https://youtu.be/fjR5Y8iuMfI
7: https://youtu.be/Wk5hEuBMvQc
8: https://youtu.be/7tUR8nHKpXs
'''
| 0 | 0 | 0 |
5e8c61986b2ee3ee2113d9b2bcebc7b147650893 | 4,034 | py | Python | surreal/components/distribution_adapters/adapter_utils.py | rosea-tf/surreal | 8abfb18538340d50146c9c44f5ecb8a1e7d89ac3 | [
"Apache-2.0"
] | 6 | 2019-12-17T17:56:26.000Z | 2022-01-13T20:54:06.000Z | surreal/components/distribution_adapters/adapter_utils.py | rosea-tf/surreal | 8abfb18538340d50146c9c44f5ecb8a1e7d89ac3 | [
"Apache-2.0"
] | 4 | 2019-11-04T07:17:27.000Z | 2019-11-04T07:19:25.000Z | surreal/components/distribution_adapters/adapter_utils.py | rosea-tf/surreal | 8abfb18538340d50146c9c44f5ecb8a1e7d89ac3 | [
"Apache-2.0"
] | 2 | 2019-11-29T15:38:54.000Z | 2020-02-24T11:24:04.000Z | # Copyright 2019 ducandu GmbH, All Rights Reserved
# (this is a modified version of the Apache 2.0 licensed RLgraph file of the same name).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from surreal.components.distributions.distribution import Distribution
from surreal.utils.errors import SurrealError
def get_adapter_spec_from_distribution_spec(distribution_spec):
    """
    Args:
        distribution_spec (Union[dict,Distribution]): The spec of the Distribution object, for which to return an
            appropriate DistributionAdapter spec dict.

    Returns:
        dict: The spec-dict to make a DistributionAdapter.
    """
    # Build a dummy distribution so we can inspect its concrete type.
    distribution = Distribution.make(distribution_spec)
    # Normalize e.g. "CategoricalDistribution" -> "categorical".
    type_name = re.sub(r'[\W]|distribution$', "", type(distribution).__name__.lower())

    # Simple one-to-one mappings from distribution type to adapter type.
    adapter_types = {
        "categorical": "categorical-distribution-adapter",
        "gumbelsoftmax": "gumbel-softmax-distribution-adapter",
        "bernoulli": "bernoulli-distribution-adapter",
        "normal": "normal-distribution-adapter",
        "multivariatenormal": "multivariate-normal-distribution-adapter",
        "beta": "beta-distribution-adapter",
        "squashednormal": "squashed-normal-distribution-adapter",
    }
    if type_name in adapter_types:
        return dict(type=adapter_types[type_name])

    if type_name == "mixture":
        # A mixture recursively maps each of its sub-distributions.
        sub_specs = [
            get_adapter_spec_from_distribution_spec(
                re.sub(r'[\W]|distribution$', "", type(s).__name__.lower()))
            for s in distribution.sub_distributions
        ]
        return dict(type="mixture-distribution-adapter", _args=sub_specs)

    raise SurrealError("'{}' is an unknown Distribution type!".format(type_name))
| 49.195122 | 123 | 0.722608 | # Copyright 2019 ducandu GmbH, All Rights Reserved
# (this is a modified version of the Apache 2.0 licensed RLgraph file of the same name).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import re
from surreal.components.distributions.distribution import Distribution
from surreal.utils.errors import SurrealError
def get_adapter_spec_from_distribution_spec(distribution_spec):
    """
    Args:
        distribution_spec (Union[dict,Distribution]): The spec of the Distribution object, for which to return an
            appropriate DistributionAdapter spec dict.

    Returns:
        dict: The spec-dict to make a DistributionAdapter.
    """
    # Build a dummy distribution so we can inspect its concrete type.
    distribution = Distribution.make(distribution_spec)
    # Normalize e.g. "CategoricalDistribution" -> "categorical".
    type_name = re.sub(r'[\W]|distribution$', "", type(distribution).__name__.lower())

    # Simple one-to-one mappings from distribution type to adapter type.
    adapter_types = {
        "categorical": "categorical-distribution-adapter",
        "gumbelsoftmax": "gumbel-softmax-distribution-adapter",
        "bernoulli": "bernoulli-distribution-adapter",
        "normal": "normal-distribution-adapter",
        "multivariatenormal": "multivariate-normal-distribution-adapter",
        "beta": "beta-distribution-adapter",
        "squashednormal": "squashed-normal-distribution-adapter",
    }
    if type_name in adapter_types:
        return dict(type=adapter_types[type_name])

    if type_name == "mixture":
        # A mixture recursively maps each of its sub-distributions.
        sub_specs = [
            get_adapter_spec_from_distribution_spec(
                re.sub(r'[\W]|distribution$', "", type(s).__name__.lower()))
            for s in distribution.sub_distributions
        ]
        return dict(type="mixture-distribution-adapter", _args=sub_specs)

    raise SurrealError("'{}' is an unknown Distribution type!".format(type_name))
def get_distribution_spec_from_adapter(distribution_adapter):
    """Return the spec dict of the Distribution matching a given adapter.

    This is the inverse of `get_adapter_spec_from_distribution_spec`: the
    adapter's concrete class name determines which Distribution it pairs with.

    Args:
        distribution_adapter (DistributionAdapter): The adapter instance for
            which to return a matching Distribution spec dict.

    Returns:
        Optional[dict]: The spec-dict to make the paired Distribution, or
            None for a PlainOutputAdapter (which has no distribution).

    Raises:
        SurrealError: If the adapter's type is not recognized.
    """
    adapter_type = type(distribution_adapter).__name__
    # Simple one-to-one mappings from adapter class name to distribution type.
    # TODO: What about multi-variate normal with non-trivial co-var matrices?
    simple_types = {
        "CategoricalDistributionAdapter": "categorical",
        "GumbelSoftmaxDistributionAdapter": "gumbel-softmax",
        "BernoulliDistributionAdapter": "bernoulli",
        "NormalDistributionAdapter": "normal",
        "BetaDistributionAdapter": "beta",
        "SquashedNormalDistributionAdapter": "squashed-normal",
    }
    if adapter_type in simple_types:
        return dict(type=simple_types[adapter_type])
    if adapter_type == "MixtureDistributionAdapter":
        # TODO: MixtureDistribution is generic (any sub-distributions, but its
        # AA is not (only supports mixture-Normal)).
        return dict(type="mixture",
                    _args=["multivariate-normal"
                           for _ in range(distribution_adapter.num_mixtures)])
    if adapter_type == "PlainOutputAdapter":
        return None
    raise SurrealError(
        "'{}' is an unknown DistributionAdapter type!".format(adapter_type))
65641f137b8c739a193b1d0ca888e64d71a032f4 | 499 | py | Python | src/dressup/exceptions.py | paw-lu/dressup | d6b7971c1d1dd2e365974dda62e06eb5c65b85d2 | [
"MIT"
] | 15 | 2020-05-23T20:47:47.000Z | 2022-01-02T18:57:47.000Z | src/dressup/exceptions.py | paw-lu/dressup | d6b7971c1d1dd2e365974dda62e06eb5c65b85d2 | [
"MIT"
] | 154 | 2020-05-23T03:19:15.000Z | 2021-09-10T03:21:21.000Z | src/dressup/exceptions.py | pscosta5/dressup | d6b7971c1d1dd2e365974dda62e06eb5c65b85d2 | [
"MIT"
] | 1 | 2021-04-13T16:11:13.000Z | 2021-04-13T16:11:13.000Z | """Exceptions for library."""
class DressUpException(Exception):
    """Base exception for all exceptions raised by the library."""

    def __repr__(self) -> str:
        """Representation of the exception.

        Uses the runtime class name so subclasses that do not override
        ``__repr__`` still report themselves accurately instead of
        claiming to be ``DressUpException``.
        """
        return f"{type(self).__name__}()"
class InvalidUnicodeTypeError(DressUpException, ValueError):
    """The provided unicode type does not exist."""

    def __repr__(self) -> str:
        """Representation of the exception.

        Uses the runtime class name so potential subclasses report
        themselves accurately.
        """
        return f"{type(self).__name__}()"
| 27.722222 | 66 | 0.687375 | """Exceptions for library."""
class DressUpException(Exception):
    """Base exception for all exceptions raised by the library."""

    def __repr__(self) -> str:
        """Representation of the exception.

        Uses the runtime class name so subclasses that do not override
        ``__repr__`` still report themselves accurately instead of
        claiming to be ``DressUpException``.
        """
        return f"{type(self).__name__}()"
class InvalidUnicodeTypeError(DressUpException, ValueError):
    """The provided unicode type does not exist."""

    def __repr__(self) -> str:
        """Representation of the exception.

        Uses the runtime class name so potential subclasses report
        themselves accurately.
        """
        return f"{type(self).__name__}()"
| 0 | 0 | 0 |
3542a929fefdc83f5ff2de121cb95c8f14d2cd3b | 50 | py | Python | nlptasks/task_classification_tf_serving.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | 4 | 2021-06-16T02:26:18.000Z | 2021-09-24T11:06:51.000Z | nlptasks/task_classification_tf_serving.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | null | null | null | nlptasks/task_classification_tf_serving.py | allenwind/tf2bert | 9820223559543529d4dcc703e2742ab8fd14d58e | [
"Apache-2.0"
] | null | null | null | # https://tf.wiki/zh_hans/deployment/serving.html
| 25 | 49 | 0.78 | # https://tf.wiki/zh_hans/deployment/serving.html
| 0 | 0 | 0 |
4d667a256e662f4c1605b4cc20159387ea62d5a2 | 1,891 | py | Python | 00_Code/01_LeetCode/937_ReorderLogFiles.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | 1 | 2017-06-11T04:57:07.000Z | 2017-06-11T04:57:07.000Z | 00_Code/01_LeetCode/937_ReorderLogFiles.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | 00_Code/01_LeetCode/937_ReorderLogFiles.py | KartikKannapur/Data_Structures_and_Algorithms_Python | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | [
"MIT"
] | null | null | null | """
You have an array of logs. Each log is a space delimited string of words.
For each log, the first word in each log is an alphanumeric identifier. Then, either:
Each word after the identifier will consist only of lowercase letters, or;
Each word after the identifier will consist only of digits.
We will call these two varieties of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.
Reorder the logs so that all of the letter-logs come before any digit-log. The letter-logs are ordered lexicographically ignoring identifier, with the identifier used in case of ties. The digit-logs should be put in their original order.
Return the final order of the logs.
Example 1:
Input: ["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo"]
Output: ["g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]
Note:
0 <= logs.length <= 100
3 <= logs[i].length <= 100
logs[i] is guaranteed to have an identifier, and a word after the identifier.
"""
| 31.516667 | 239 | 0.645161 | """
You have an array of logs. Each log is a space delimited string of words.
For each log, the first word in each log is an alphanumeric identifier. Then, either:
Each word after the identifier will consist only of lowercase letters, or;
Each word after the identifier will consist only of digits.
We will call these two varieties of logs letter-logs and digit-logs. It is guaranteed that each log has at least one word after its identifier.
Reorder the logs so that all of the letter-logs come before any digit-log. The letter-logs are ordered lexicographically ignoring identifier, with the identifier used in case of ties. The digit-logs should be put in their original order.
Return the final order of the logs.
Example 1:
Input: ["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo"]
Output: ["g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]
Note:
0 <= logs.length <= 100
3 <= logs[i].length <= 100
logs[i] is guaranteed to have an identifier, and a word after the identifier.
"""
class Solution:
    def reorderLogFiles(self, logs):
        """Return the logs with all letter-logs first, followed by digit-logs.

        Letter-logs are ordered lexicographically by their content words,
        breaking ties on the identifier; digit-logs keep their original
        relative order.

        :type logs: List[str]
        :rtype: List[str]
        """
        def is_digit_log(log):
            # A digit-log's first word after the identifier is numeric.
            return log.split()[1].isnumeric()

        digit_logs = [log for log in logs if is_digit_log(log)]
        letter_logs = [log for log in logs if not is_digit_log(log)]

        def letter_key(log):
            words = log.split()
            # Sort by the content words, then by the identifier as tiebreaker.
            return (words[1:], words[0])

        return sorted(letter_logs, key=letter_key) + digit_logs
eb2e5538d2cce2417090de871132eac6854e10c8 | 14,637 | py | Python | fluiddb/security/permission.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 3 | 2021-05-10T14:41:30.000Z | 2021-12-16T05:53:30.000Z | fluiddb/security/permission.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | null | null | null | fluiddb/security/permission.py | fluidinfo/fluiddb | b5a8c8349f3eaf3364cc4efba4736c3e33b30d96 | [
"Apache-2.0"
] | 2 | 2018-01-24T09:03:21.000Z | 2021-06-25T08:34:54.000Z | from fluiddb.cache.permission import (
CachingPermissionAPI, CachingPermissionCheckerAPI)
from fluiddb.data.path import getParentPath
from fluiddb.data.permission import Operation
from fluiddb.data.user import Role
from fluiddb.model.exceptions import UnknownPathError
from fluiddb.security.exceptions import PermissionDeniedError
class SecurePermissionAPI(object):
    """The public API to secure permission-related functionality.

    @param user: The L{User} to perform operations on behalf of.
    """

    def __init__(self, user):
        # Bug fix: get, set and _checkPermissions all read self._user and
        # self._permissions, but no constructor ever initialized them, so
        # every method would fail with an AttributeError.
        self._user = user
        self._permissions = CachingPermissionAPI(user)

    def get(self, values):
        """See L{PermissionAPI.get}.

        @raise PermissionDeniedError: Raised if the user is not authorized to
            see the specified permissions.
        """
        self._checkPermissions(values)
        return self._permissions.get(values)

    def set(self, values):
        """See L{PermissionAPI.set}.

        @raise PermissionDeniedError: Raised if the user is not authorized to
            change the specified permissions.
        """
        # Only (path, operation) is relevant for the CONTROL check; the
        # policy and exceptions parts of each 4-tuple are passed through.
        self._checkPermissions([(path, operation)
                                for path, operation, _, _ in values])
        return self._permissions.set(values)

    def _checkPermissions(self, values):
        """Check C{CONTROL} permissions for a set of path-operation pairs.

        @param values: A sequence of C{(path, Operation)} 2-tuples that
            should be checked.
        @raise PermissionDeniedError: Raised if the user doesn't have
            C{CONTROL} permissions for a given path-L{Operation} pair.
        @raise RuntimeError: Raised if an invalid L{Operation} is provided.
        """
        pathsAndOperations = set()
        for path, operation in values:
            # Map each concrete operation to the CONTROL operation that
            # governs whether its permissions may be inspected or changed.
            if operation in [Operation.WRITE_TAG_VALUE,
                             Operation.READ_TAG_VALUE,
                             Operation.DELETE_TAG_VALUE,
                             Operation.CONTROL_TAG_VALUE]:
                pathsAndOperations.add((path, Operation.CONTROL_TAG_VALUE))
            elif operation in [Operation.UPDATE_TAG, Operation.DELETE_TAG,
                               Operation.CONTROL_TAG]:
                pathsAndOperations.add((path, Operation.CONTROL_TAG))
            elif operation in Operation.NAMESPACE_OPERATIONS:
                pathsAndOperations.add((path, Operation.CONTROL_NAMESPACE))
            else:
                raise RuntimeError('Invalid operation %r.' % operation)
        deniedOperations = checkPermissions(self._user, pathsAndOperations)
        if deniedOperations:
            raise PermissionDeniedError(self._user.username, deniedOperations)
def checkPermissions(user, values):
    """Check permissions for a list of path-operation pairs.

    Note that the special C{fluiddb/id} virtual tag is handled as a
    special case. Specifically, the path presence checking logic doesn't
    raise an L{UnknownPathError} and all tag related permission are always
    granted (because permissions for C{fluiddb/id} are never checked).
    This isn't ideal, but for now it's the behaviour in place.

    @param user: The user to check the permissions for.
    @param values: A sequence of C{(path, Operation)} 2-tuples
        representing the actions to check.
    @raise FeatureError: Raised if the given C{list} of values is empty or
        if one of the given actions is invalid.
    @raise UnknownUserError: Raised if a user don't exist for user
        operations.
    @raise UnknownPathError: Raised if any of the given paths doesn't
        exist.
    @return: A C{list} of C{(path, Operation)} 2-tuples that represent
        denied actions.
    """
    if not values:
        return []
    api = CachingPermissionCheckerAPI()
    # Dispatch to the checker matching the user's role.
    if user.isSuperuser():
        return SuperuserPermissionChecker(api).check(values)
    if user.isAnonymous():
        return AnonymousPermissionChecker(api, user).check(values)
    return UserPermissionChecker(api, user).check(values)
class PermissionCheckerBase(object):
    """Base class for permission checkers.

    Concrete subclasses are expected to provide C{self._api} (the
    L{PermissionCheckerAPI} used for lookups) and C{self._user} (the L{User}
    the checks run on behalf of) -- the helpers below read both.
    """

    # Operations that may be allowed on a not-yet-existing path, because the
    # model layer creates the missing namespace/tag implicitly.
    PASSTHROUGH_OPERATIONS = [Operation.WRITE_TAG_VALUE,
                              Operation.CREATE_NAMESPACE]

    def _getDeniedOperations(self, values):
        """Get information about denied permissions.

        All operations are assumed to be denied to begin with. Each requested
        L{Operation} is checked against a permission to determine if access
        should be granted. Operations that are not explicitly granted access
        by a permission are denied.

        The following rules are used to determine whether access should be
        granted or denied:

        - Access is always granted for L{Operation}s on the special
          C{fluiddb/id} virtual tag.
        - C{Operation.CREATE_NAMESPACE} and C{Operation.WRITE_TAG_VALUE}
          operations on unknown L{Tag.path}s and L{Namespace.path}s are
          allowed if the L{User} has the related permission on the nearest
          parent L{Namespace}. The model layer automatically creates missing
          L{Namespace}s and L{Tag}s, so we need to make sure that the parents
          of implicit paths provide access for the user to create children.
        - Finally, access is only given if a L{NamespacePermission} or
          L{TagPermission} explicitly grant the L{User} access to perform the
          L{Operation} on the L{Tag} or L{Namespace}.

        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownPathError: Raised if any of the given paths doesn't
            exist (and the L{User} doesn't have permission to create them).
        @return: A C{list} of C{(path, Operation)} 2-tuples that represent
            denied actions.
        """
        deniedTagOperations = set()
        deniedNamespaceOperations = set()
        unknownPaths = self._api.getUnknownPaths(values)
        parentPaths = self._api.getUnknownParentPaths(unknownPaths)
        remainingUnknownPaths = set(unknownPaths)
        for path, operation in values:
            if path == u'fluiddb/id':
                # The virtual fluiddb/id tag is always accessible.
                continue
            if (operation in self.PASSTHROUGH_OPERATIONS
                    and path in unknownPaths):
                parentPath = parentPaths.get(path)
                if parentPath:
                    # The missing path can be created implicitly; check the
                    # CREATE_NAMESPACE permission on its closest existing
                    # parent instead.
                    remainingUnknownPaths.remove(path)
                    deniedNamespaceOperations.add(
                        (parentPath, Operation.CREATE_NAMESPACE))
            elif operation in Operation.NAMESPACE_OPERATIONS:
                deniedNamespaceOperations.add((path, operation))
            elif path not in unknownPaths:
                deniedTagOperations.add((path, operation))
        if remainingUnknownPaths:
            # Unknown paths with no creatable parent are a hard error.
            raise UnknownPathError(remainingUnknownPaths)
        deniedTagOperations = self._getDeniedTagOperations(deniedTagOperations)
        deniedTagOperations.update(
            self._getDeniedNamespaceOperations(deniedNamespaceOperations))
        return list(deniedTagOperations)

    def _getDeniedNamespaceOperations(self, values):
        """Determine whether L{Namespace} L{Operation}s are allowed.

        @param values: A C{set} of C{(Namespace.path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{set} of C{(Namespace.path, Operation)} 2-tuples that
            represent denied actions.
        """
        if not values:
            return set()
        paths = set(path for path, operation in values)
        permissions = self._api.getNamespacePermissions(paths)
        # Denied = requested minus explicitly granted.
        return values - self._getGrantedOperations(permissions, values)

    def _getDeniedTagOperations(self, values):
        """Determine whether L{Tag} L{Operation}s are allowed.

        @param values: A C{set} of C{(Tag.path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{set} of C{(Tag.path, Operation)} 2-tuples that represent
            denied actions.
        """
        if not values:
            return set()
        paths = set(path for path, operation in values)
        permissions = self._api.getTagPermissions(paths)
        # Denied = requested minus explicitly granted.
        return values - self._getGrantedOperations(permissions, values)

    def _getGrantedOperations(self, permissions, values):
        """Determine which operations are granted given a set of permissions.

        @param permissions: A C{dict} mapping paths to L{PermissionBase}
            instances.
        @param values: A C{set} of C{(path, Operation)} 2-tuples representing
            actions that should be checked.
        @return: A C{set} of C{(path, Operation)} 2-tuples that represent
            granted actions.
        """
        allowedOperations = set()
        for path, operation in values:
            permission = permissions.get(path)
            # A path with no stored permission object grants nothing.
            if permission and permission.allow(operation, self._user.id):
                allowedOperations.add((path, operation))
        return allowedOperations
class SuperuserPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.SUPERUSER} role.

    Permission for all actions is always granted to L{User}s with the
    L{Role.SUPERUSER}.

    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    """

    def __init__(self, api):
        # Bug fix: check() reads self._api, but no constructor stored it;
        # callers instantiate this class as SuperuserPermissionChecker(api).
        self._api = api

    def check(self, values):
        """Check permissions for a L{User} with the L{Role.SUPERUSER} role.

        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownUserError: Raised if a user don't exist for user
            operations.
        @return: A C{list} of C{(path, Operation)} 2-tuples representing
            actions that are denied.
        """
        # Check paths for tag or namespace related operations.
        pathsAndOperations = [(path, operation) for path, operation in values
                              if operation in Operation.PATH_OPERATIONS]
        unknownPaths = self._api.getUnknownPaths(pathsAndOperations)
        if unknownPaths:
            raise UnknownPathError(unknownPaths)
        # Superusers are never denied anything on known paths.
        return []
class AnonymousPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.ANONYMOUS} role.

    Anonymous users have read-only access to (some) data in Fluidinfo and may
    never perform operations that create new objects. In particular,
    anonymous users may only perform actions that match an operation in the
    L{Operation.ALLOWED_ANONYMOUS_OPERATIONS} list.

    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    @param user: The anonymous L{User} to perform checks on behalf of.
    """

    def __init__(self, api, user):
        # Bug fix: check() and the inherited PermissionCheckerBase helpers
        # read self._api and self._user, but no constructor stored them;
        # callers instantiate this class as AnonymousPermissionChecker(api,
        # user).
        self._api = api
        self._user = user

    def check(self, values):
        """Check permissions for a L{User} with the L{Role.ANONYMOUS} role.

        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{list} of C{(path, Operation)} 2-tuples representing
            actions that are denied.
        """
        deniedOperations = []
        storedOperations = set()
        for path, operation in values:
            if operation not in Operation.ALLOWED_ANONYMOUS_OPERATIONS:
                # Anonymous users may never perform this operation at all.
                # (The original `continue` before the `else` was redundant.)
                deniedOperations.append((path, operation))
            else:
                # Permissions for this operation are stored in the database.
                storedOperations.add((path, operation))
        if not storedOperations:
            return deniedOperations
        return deniedOperations + self._getDeniedOperations(storedOperations)
class UserPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.USER} role.

    Normal users have read/write access to data in Fluidinfo as granted by
    L{NamespacePermission}s and L{TagPermission}s. L{Operation}s in the
    L{Operation.USER_OPERATIONS} list are always denied, as is the ability to
    create or delete root L{Namespace}s.

    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    @param user: The L{User} to perform checks on behalf of.
    """

    def __init__(self, api, user):
        # Bug fix: check() and the inherited PermissionCheckerBase helpers
        # read self._api and self._user, but no constructor stored them;
        # callers instantiate this class as UserPermissionChecker(api, user).
        self._api = api
        self._user = user

    def check(self, values):
        """Check permissions for a L{User} with the L{Role.USER} role.

        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownUserError: Raised if a user don't exist for user
            operations.
        @return: A C{list} of C{(path, Operation)} 2-tuples representing
            actions that are denied.
        """
        deniedOperations = []
        storedOperations = set()
        for path, operation in values:
            # Create object is always allowed for normal users.
            if operation == Operation.CREATE_OBJECT:
                continue
            # Create root namespaces is always denied for normal users.
            elif path is None and operation == Operation.CREATE_NAMESPACE:
                deniedOperations.append((path, operation))
                continue
            # Delete root namespaces is always denied for normal users.
            elif (path is not None and getParentPath(path) is None
                    and operation == Operation.DELETE_NAMESPACE):
                deniedOperations.append((path, operation))
                continue
            # User managers are always allowed to perform user operations.
            elif (self._user.role == Role.USER_MANAGER
                    and operation in Operation.USER_OPERATIONS):
                continue
            # Updating user data is only allowed for the own user.
            elif (operation == Operation.UPDATE_USER
                    and self._user.username == path):
                continue
            # All other user operations are always denied for normal users.
            elif operation in Operation.USER_OPERATIONS:
                deniedOperations.append((path, operation))
                continue
            else:
                # Operations that have to be checked in the database.
                storedOperations.add((path, operation))
        if not storedOperations:
            return deniedOperations
        return deniedOperations + self._getDeniedOperations(storedOperations)
| 41.939828 | 79 | 0.651295 | from fluiddb.cache.permission import (
CachingPermissionAPI, CachingPermissionCheckerAPI)
from fluiddb.data.path import getParentPath
from fluiddb.data.permission import Operation
from fluiddb.data.user import Role
from fluiddb.model.exceptions import UnknownPathError
from fluiddb.security.exceptions import PermissionDeniedError
class SecurePermissionAPI(object):
    """The public API to secure permission-related functionality.

    @param user: The L{User} to perform operations on behalf of.
    """

    def __init__(self, user):
        self._user = user
        self._permissions = CachingPermissionAPI(user)

    def get(self, values):
        """See L{PermissionAPI.get}.

        @raise PermissionDeniedError: Raised if the user is not authorized to
            see the specified permissions.
        """
        self._checkPermissions(values)
        return self._permissions.get(values)

    def set(self, values):
        """See L{PermissionAPI.set}.

        @raise PermissionDeniedError: Raised if the user is not authorized to
            change the specified permissions.
        """
        # Only (path, operation) matters for the CONTROL check; the policy
        # and exceptions parts of each 4-tuple are passed straight through.
        pathsAndOperations = [(path, operation)
                              for path, operation, _, _ in values]
        self._checkPermissions(pathsAndOperations)
        return self._permissions.set(values)

    def _checkPermissions(self, values):
        """Check C{CONTROL} permissions for a set of path-operation pairs.

        @param values: A sequence of C{(path, Operation)} 2-tuples that
            should be checked.
        @raise PermissionDeniedError: Raised if the user doesn't have
            C{CONTROL} permissions for a given path-L{Operation} pair.
        @raise RuntimeError: Raised if an invalid L{Operation} is provided.
        """
        tagValueOperations = (Operation.WRITE_TAG_VALUE,
                              Operation.READ_TAG_VALUE,
                              Operation.DELETE_TAG_VALUE,
                              Operation.CONTROL_TAG_VALUE)
        tagOperations = (Operation.UPDATE_TAG, Operation.DELETE_TAG,
                         Operation.CONTROL_TAG)
        pathsAndOperations = set()
        for path, operation in values:
            # Map each concrete operation to the CONTROL operation that
            # governs whether its permissions may be inspected or changed.
            if operation in tagValueOperations:
                controlOperation = Operation.CONTROL_TAG_VALUE
            elif operation in tagOperations:
                controlOperation = Operation.CONTROL_TAG
            elif operation in Operation.NAMESPACE_OPERATIONS:
                controlOperation = Operation.CONTROL_NAMESPACE
            else:
                raise RuntimeError('Invalid operation %r.' % operation)
            pathsAndOperations.add((path, controlOperation))
        deniedOperations = checkPermissions(self._user, pathsAndOperations)
        if deniedOperations:
            raise PermissionDeniedError(self._user.username, deniedOperations)
def checkPermissions(user, values):
    """Check permissions for a list of path-operation pairs.

    Note that the special C{fluiddb/id} virtual tag is handled as a
    special case. Specifically, the path presence checking logic doesn't
    raise an L{UnknownPathError} and all tag related permission are always
    granted (because permissions for C{fluiddb/id} are never checked).
    This isn't ideal, but for now it's the behaviour in place.

    @param user: The user to check the permissions for.
    @param values: A sequence of C{(path, Operation)} 2-tuples
        representing the actions to check.
    @raise FeatureError: Raised if the given C{list} of values is empty or
        if one of the given actions is invalid.
    @raise UnknownUserError: Raised if a user don't exist for user
        operations.
    @raise UnknownPathError: Raised if any of the given paths doesn't
        exist.
    @return: A C{list} of C{(path, Operation)} 2-tuples that represent
        denied actions.
    """
    if not values:
        return []
    api = CachingPermissionCheckerAPI()
    # Dispatch to the checker matching the user's role.
    if user.isSuperuser():
        return SuperuserPermissionChecker(api).check(values)
    if user.isAnonymous():
        return AnonymousPermissionChecker(api, user).check(values)
    return UserPermissionChecker(api, user).check(values)
class PermissionCheckerBase(object):
    """Base class for permission checkers."""
    # Operations that may legitimately target paths that don't exist yet.
    # Such operations are re-checked as CREATE_NAMESPACE on the nearest
    # existing parent namespace (see _getDeniedOperations below).
    PASSTHROUGH_OPERATIONS = [Operation.WRITE_TAG_VALUE,
                              Operation.CREATE_NAMESPACE]
    def _getDeniedOperations(self, values):
        """Get information about denied permissions.
        All operations are assumed to be denied to begin with. Each requested
        L{Operation} is checked against a permission to determine if access
        should be granted. Operations that are not explicitly granted access
        by a permission are denied.
        The following rules are used to determine whether access should be
        granted or denied:
        - Access is always granted for L{Operation}s on the special
          C{fluiddb/id} virtual tag.
        - C{Operation.CREATE_NAMESPACE} and C{Operation.WRITE_TAG_VALUE}
          operations on unknown L{Tag.path}s and L{Namespace.path}s are
          allowed if the L{User} has the related permission on the nearest
          parent L{Namespace}. The model layer automatically creates missing
          L{Namespace}s and L{Tag}s, so we need to make sure that the parents
          of implicit paths provide access for the user to create children.
        - Finally, access is only given if a L{NamespacePermission} or
          L{TagPermission} explicitly grant the L{User} access to perform the
          L{Operation} on the L{Tag} or L{Namespace}.
        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownPathError: Raised if any of the given paths doesn't
            exist (and the L{User} doesn't have permission to create them).
        @return: A C{list} of C{(path, Operation)} 2-tuples that represent
            denied actions.
        """
        # NOTE: despite their names, these two sets first accumulate
        # *candidate* operations; the _getDenied*Operations calls at the
        # bottom reduce them to the operations that are actually denied.
        deniedTagOperations = set()
        deniedNamespaceOperations = set()
        unknownPaths = self._api.getUnknownPaths(values)
        parentPaths = self._api.getUnknownParentPaths(unknownPaths)
        remainingUnknownPaths = set(unknownPaths)
        for path, operation in values:
            if path == u'fluiddb/id':
                # Access to the virtual fluiddb/id tag is always granted;
                # its permissions are never checked.
                continue
            if (operation in self.PASSTHROUGH_OPERATIONS
                    and path in unknownPaths):
                # The path doesn't exist yet: check CREATE_NAMESPACE on the
                # nearest existing parent namespace instead.
                parentPath = parentPaths.get(path)
                if parentPath:
                    # Only paths with a known parent stop being "unknown";
                    # others remain and trigger UnknownPathError below.
                    remainingUnknownPaths.remove(path)
                deniedNamespaceOperations.add(
                    (parentPath, Operation.CREATE_NAMESPACE))
            elif operation in Operation.NAMESPACE_OPERATIONS:
                deniedNamespaceOperations.add((path, operation))
            elif path not in unknownPaths:
                deniedTagOperations.add((path, operation))
        if remainingUnknownPaths:
            # Unknown paths that couldn't be mapped to an implicit-creation
            # check on a parent namespace are hard errors.
            raise UnknownPathError(remainingUnknownPaths)
        # Resolve the candidate operations against stored permissions.
        deniedTagOperations = self._getDeniedTagOperations(deniedTagOperations)
        deniedTagOperations.update(
            self._getDeniedNamespaceOperations(deniedNamespaceOperations))
        return list(deniedTagOperations)
    def _getDeniedNamespaceOperations(self, values):
        """Determine whether L{Namespace} L{Operation}s are allowed.
        @param values: A C{set} of C{(Namespace.path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{set} of C{(Namespace.path, Operation)} 2-tuples that
            represent denied actions.
        """
        if not values:
            return set()
        paths = set(path for path, operation in values)
        permissions = self._api.getNamespacePermissions(paths)
        return values - self._getGrantedOperations(permissions, values)
    def _getDeniedTagOperations(self, values):
        """Determine whether L{Tag} L{Operation}s are allowed.
        @param values: A C{set} of C{(Tag.path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{set} of C{(Tag.path, Operation)} 2-tuples that represent
            denied actions.
        """
        if not values:
            return set()
        paths = set(path for path, operation in values)
        permissions = self._api.getTagPermissions(paths)
        return values - self._getGrantedOperations(permissions, values)
    def _getGrantedOperations(self, permissions, values):
        """Determine which operations are granted given a set of permissions.
        @param permissions: A C{dict} mapping paths to L{PermissionBase}
            instances.
        @param values: A C{set} of C{(path, Operation)} 2-tuples representing
            actions that should be checked.
        @return: A C{set} of C{(path, Operation)} 2-tuples that represent
            granted actions.
        """
        allowedOperations = set()
        for path, operation in values:
            permission = permissions.get(path)
            # A missing permission record means the operation is not granted.
            if permission and permission.allow(operation, self._user.id):
                allowedOperations.add((path, operation))
        return allowedOperations
class SuperuserPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.SUPERUSER} role.
    A superuser is granted every action unconditionally; the only check
    performed is that tag and namespace paths actually exist.
    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    """
    def __init__(self, api):
        self._api = api
    def check(self, values):
        """Check permissions for a L{User} with the L{Role.SUPERUSER} role.
        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownPathError: Raised if a path used by a tag or
            namespace related action doesn't exist.
        @return: An empty C{list}, since superusers are never denied.
        """
        # Only tag and namespace operations reference paths whose
        # existence must be validated.
        pathOperations = []
        for path, operation in values:
            if operation in Operation.PATH_OPERATIONS:
                pathOperations.append((path, operation))
        missingPaths = self._api.getUnknownPaths(pathOperations)
        if missingPaths:
            raise UnknownPathError(missingPaths)
        return []
class AnonymousPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.ANONYMOUS} role.
    Anonymous users have read-only access to (some) data in Fluidinfo and may
    never perform operations that create new objects. In particular,
    anonymous users may only perform actions that match an operation in the
    L{Operation.ALLOWED_ANONYMOUS_OPERATIONS} list.
    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    @param user: The anonymous L{User} to perform checks on behalf of.
    """
    def __init__(self, api, user):
        self._api = api
        self._user = user
    def check(self, values):
        """Check permissions for a L{User} with the L{Role.ANONYMOUS} role.
        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @return: A C{list} of C{(path, Operation)} 2-tuples representing
            actions that are denied.
        """
        deniedOperations = []
        storedOperations = set()
        for path, operation in values:
            # NOTE: the original code had a redundant `continue` directly
            # before the `else:` branch; it has been removed, behaviour is
            # unchanged.
            if operation not in Operation.ALLOWED_ANONYMOUS_OPERATIONS:
                # Anything outside the anonymous whitelist is denied
                # outright, without consulting stored permissions.
                deniedOperations.append((path, operation))
            else:
                # Whitelisted operations are still subject to the stored
                # namespace/tag permissions.
                storedOperations.add((path, operation))
        if not storedOperations:
            return deniedOperations
        return deniedOperations + self._getDeniedOperations(storedOperations)
class UserPermissionChecker(PermissionCheckerBase):
    """Permission checker for L{User}s with the L{Role.USER} role.
    Normal users have read/write access to data in Fluidinfo as granted by
    L{NamespacePermission}s and L{TagPermission}s. L{Operation}s in the
    L{Operation.USER_OPERATIONS} list are always denied, as is the ability to
    create or delete root L{Namespace}s.
    @param api: The L{PermissionCheckerAPI} instance to use when performing
        permission checks.
    @param user: The L{User} to perform checks on behalf of.
    """
    def __init__(self, api, user):
        self._api = api
        self._user = user
    def check(self, values):
        """Check permissions for a L{User} with the L{Role.USER} role.
        @param values: A sequence of C{(path, Operation)} 2-tuples
            representing actions that should be checked.
        @raise UnknownUserError: Raised if a user don't exist for user
            operations.
        @return: A C{list} of C{(path, Operation)} 2-tuples representing
            actions that are denied.
        """
        deniedOperations = []
        storedOperations = set()
        # NOTE: the elif chain below is order sensitive.  In particular the
        # USER_MANAGER exemption and the own-user UPDATE_USER exemption must
        # be tested before the blanket denial of Operation.USER_OPERATIONS.
        for path, operation in values:
            # Create object is always allowed for normal users.
            if operation == Operation.CREATE_OBJECT:
                continue
            # Create root namespaces is always denied for normal users.
            elif path is None and operation == Operation.CREATE_NAMESPACE:
                deniedOperations.append((path, operation))
                continue
            # Delete root namespaces is always denied for normal users.
            elif (path is not None and getParentPath(path) is None
                  and operation == Operation.DELETE_NAMESPACE):
                deniedOperations.append((path, operation))
                continue
            # User managers are always allowed to perform user operations.
            elif (self._user.role == Role.USER_MANAGER
                  and operation in Operation.USER_OPERATIONS):
                continue
            # Updating user data is only allowed for the own user.
            elif (operation == Operation.UPDATE_USER
                  and self._user.username == path):
                continue
            # All other user operations are always denied for normal users.
            elif operation in Operation.USER_OPERATIONS:
                deniedOperations.append((path, operation))
                continue
            else:
                # Operations that have to be checked in the database.
                storedOperations.add((path, operation))
        if not storedOperations:
            return deniedOperations
        return deniedOperations + self._getDeniedOperations(storedOperations)
| 230 | 0 | 108 |
c2ee47ae3fcb12ce56ad46f2199c6b246c81eab5 | 210 | py | Python | matchzoo/models/__init__.py | ZeonTrevor/MatchZoo | e5865cedde1868d2e00d5d5446f2c21a4cf11b44 | [
"Apache-2.0"
] | null | null | null | matchzoo/models/__init__.py | ZeonTrevor/MatchZoo | e5865cedde1868d2e00d5d5446f2c21a4cf11b44 | [
"Apache-2.0"
] | null | null | null | matchzoo/models/__init__.py | ZeonTrevor/MatchZoo | e5865cedde1868d2e00d5d5446f2c21a4cf11b44 | [
"Apache-2.0"
] | null | null | null | from .anmm import *
from .arci import *
from .arcii import *
from .cdssm import *
from .drmm import *
from .dssm import *
from .duet import *
from .knrm import *
from .model import *
from .matchpyramid import * | 21 | 27 | 0.719048 | from .anmm import *
from .arci import *
from .arcii import *
from .cdssm import *
from .drmm import *
from .dssm import *
from .duet import *
from .knrm import *
from .model import *
from .matchpyramid import * | 0 | 0 | 0 |
72aa92c823dc0961a51c54c84e22f66b923f6a2d | 790 | py | Python | legacy/python-har-daemon/harchiverd/settings.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 1 | 2021-05-18T21:47:29.000Z | 2021-05-18T21:47:29.000Z | legacy/python-har-daemon/harchiverd/settings.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 67 | 2017-11-22T11:13:18.000Z | 2022-03-25T09:48:49.000Z | legacy/python-har-daemon/harchiverd/settings.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 4 | 2020-01-17T17:23:54.000Z | 2021-04-11T09:46:09.000Z | import os
import logging
# Settings that can be overridden via environment variables.
# Every setting below reads an environment variable of the same name and
# falls back to the default given as the second os.getenv() argument.
LOG_FILE = os.getenv("LOG_FILE", "/logs/harchiverd.log")
LOG_LEVEL = os.getenv("LOG_LEVEL", 'DEBUG')
OUTPUT_DIRECTORY = os.getenv("OUTPUT_DIRECTORY", "/images")
WEBSERVICE = os.getenv("WEBSERVICE", "http://webrender:8000/webtools/domimage")
# URL scheme names; no env-var override for this one.
PROTOCOLS = ["http", "https"]
# AMQP broker URL (amqp:// scheme) and routing names.
AMQP_URL = os.getenv("AMQP_URL", "amqp://guest:guest@rabbitmq:5672/%2f")
AMQP_EXCHANGE = os.getenv("AMQP_EXCHANGE", "heritrix")
AMQP_QUEUE = os.getenv("AMQP_QUEUE", "to-webrender")
AMQP_KEY = os.getenv("AMQP_KEY", "to-webrender")
AMQP_OUTLINK_QUEUE = os.getenv("AMQP_OUTLINK_QUEUE", "heritrix-outlinks")
#AMQP_URL="amqp://guest:guest@192.168.45.26:5672/%2f"
| 41.578947 | 87 | 0.675949 | import os
import logging
# Settings that can be overridden via environment variables.
LOG_FILE = os.getenv("LOG_FILE", "/logs/harchiverd.log")
LOG_LEVEL = os.getenv("LOG_LEVEL", 'DEBUG')
OUTPUT_DIRECTORY = os.getenv("OUTPUT_DIRECTORY", "/images")
WEBSERVICE = os.getenv("WEBSERVICE", "http://webrender:8000/webtools/domimage")
PROTOCOLS = ["http", "https"]
AMQP_URL = os.getenv("AMQP_URL", "amqp://guest:guest@rabbitmq:5672/%2f")
AMQP_EXCHANGE = os.getenv("AMQP_EXCHANGE", "heritrix")
AMQP_QUEUE = os.getenv("AMQP_QUEUE", "to-webrender")
AMQP_KEY = os.getenv("AMQP_KEY", "to-webrender")
AMQP_OUTLINK_QUEUE = os.getenv("AMQP_OUTLINK_QUEUE", "heritrix-outlinks")
#AMQP_URL="amqp://guest:guest@192.168.45.26:5672/%2f"
| 0 | 0 | 0 |
1810fa978d8c0e2c3c0d8729c38fd7f7e9fe5672 | 890 | py | Python | demo4.py | KomaBeyond/docker-ImageManager | e94db1178ed396c56497ab77921eaf6db497dc67 | [
"Apache-2.0"
] | null | null | null | demo4.py | KomaBeyond/docker-ImageManager | e94db1178ed396c56497ab77921eaf6db497dc67 | [
"Apache-2.0"
] | null | null | null | demo4.py | KomaBeyond/docker-ImageManager | e94db1178ed396c56497ab77921eaf6db497dc67 | [
"Apache-2.0"
] | null | null | null | #-*- coding:utf-8 -*-
import wx
from wx.py.shell import ShellFrame
if __name__ == '__main__':
app = App(False)
app.MainLoop()
| 24.722222 | 74 | 0.604494 | #-*- coding:utf-8 -*-
import wx
from wx.py.shell import ShellFrame
class Frame(wx.Frame):
    """Top-level window with an "Open" menu that can launch a Python shell."""
    def __init__(self, title, size, pos=wx.DefaultPosition):
        super(Frame, self).__init__(None, title=title, size=size, pos=pos)
        # White background panel filling the client area.
        background = wx.Panel(self, -1)
        background.SetBackgroundColour('#FFF')
        # Build the menu bar: an "Open" menu with a single "Shell" item.
        menu_bar = wx.MenuBar()
        open_menu = wx.Menu()
        shell_item = open_menu.Append(-1, "Shell", "Open python shell")
        menu_bar.Append(open_menu, "Open")
        self.Bind(wx.EVT_MENU, self.onShell, shell_item)
        self.SetMenuBar(menu_bar)
    def onShell(self, event):
        """Menu handler: open a PyShell frame parented to this window."""
        ShellFrame(self).Show()
class App(wx.App):
    """Application object that creates and shows the main Frame."""
    def OnPreInit(self):
        """Create, centre and show the main application window."""
        window = Frame(title="MyApp", size=(550, 400))
        window.Centre(True)
        window.Show(True)
        self.frame = window
        return True
if __name__ == '__main__':
    # Create the application and enter the wx main loop.
    # NOTE(review): False is passed positionally to wx.App -- presumably the
    # `redirect` argument; confirm against the wx version in use.
    app = App(False)
    app.MainLoop()
| 631 | -2 | 125 |
162200112afc07095c82d163ecdcb944a828cc3b | 6,617 | py | Python | src/python/grpcio_tests/commands.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 117 | 2017-10-02T21:34:35.000Z | 2022-03-02T01:49:03.000Z | src/python/grpcio_tests/commands.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 4 | 2017-10-03T22:45:30.000Z | 2018-09-27T07:31:00.000Z | src/python/grpcio_tests/commands.py | txl0591/grpc | 8b732dc466fb8a567c1bca9dbb84554d29087395 | [
"Apache-2.0"
] | 24 | 2017-10-31T12:14:15.000Z | 2021-12-11T10:07:46.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the gRPC Python setup process."""
from distutils import errors as _errors
import glob
import os
import os.path
import platform
import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../')
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class BuildProtoModules(setuptools.Command):
"""Command to generate project *_pb2.py modules from proto files."""
description = 'build protobuf modules'
user_options = [
('include=', None, 'path patterns to include in protobuf generation'),
('exclude=', None, 'path patterns to exclude from protobuf generation')
]
class BuildPy(build_py.build_py):
"""Custom project build command."""
class TestLite(setuptools.Command):
"""Command to run tests without fetching or building anything."""
description = 'run tests without fetching or building anything.'
user_options = []
def _add_eggs_to_path(self):
"""Fetch install and test requirements"""
self.distribution.fetch_build_eggs(self.distribution.install_requires)
self.distribution.fetch_build_eggs(self.distribution.tests_require)
| 32.920398 | 82 | 0.646063 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides distutils command classes for the gRPC Python setup process."""
from distutils import errors as _errors
import glob
import os
import os.path
import platform
import re
import shutil
import subprocess
import sys
import traceback
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
from setuptools.command import easy_install
from setuptools.command import install
from setuptools.command import test
PYTHON_STEM = os.path.dirname(os.path.abspath(__file__))
GRPC_STEM = os.path.abspath(PYTHON_STEM + '../../../../')
GRPC_PROTO_STEM = os.path.join(GRPC_STEM, 'src', 'proto')
PROTO_STEM = os.path.join(PYTHON_STEM, 'src', 'proto')
PYTHON_PROTO_TOP_LEVEL = os.path.join(PYTHON_STEM, 'src')
class CommandError(Exception):
    """Simple exception raised by the custom build commands.
    The original class derived from ``object``, which cannot be raised on
    Python 3 (exceptions must derive from BaseException).  Deriving from
    Exception fixes that, and the explicit ``message`` attribute keeps the
    ``error.message`` access used by callers working on Python 3 as well.
    @param message: A human readable description of the problem.
    """
    def __init__(self, message=''):
        super(CommandError, self).__init__(message)
        self.message = message
class GatherProto(setuptools.Command):
    """Copy the shared .proto tree into the Python package source tree and
    make every directory under it an importable package."""
    description = 'gather proto dependencies'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # TODO(atash) ensure that we're running from the repository directory when
        # this command is used
        # Drop any stale copy first; a missing tree is not an error.
        try:
            shutil.rmtree(PROTO_STEM)
        except Exception:
            # We don't care if this command fails
            pass
        shutil.copytree(GRPC_PROTO_STEM, PROTO_STEM)
        # Touch an __init__.py in every directory so the copied tree can be
        # resolved as Python packages.
        for directory, _, _ in os.walk(PYTHON_PROTO_TOP_LEVEL):
            init_path = os.path.join(directory, '__init__.py')
            open(init_path, 'a').close()
class BuildProtoModules(setuptools.Command):
    """Command to generate project *_pb2.py modules from proto files."""
    description = 'build protobuf modules'
    user_options = [
        ('include=', None, 'path patterns to include in protobuf generation'),
        ('exclude=', None, 'path patterns to exclude from protobuf generation')
    ]
    def initialize_options(self):
        # By default every .proto file is included and nothing is excluded.
        self.exclude = None
        self.include = r'.*\.proto$'
    def finalize_options(self):
        pass
    def run(self):
        # Imported here rather than at module scope so that merely importing
        # this file doesn't require grpc_tools to be installed.
        import grpc_tools.protoc as protoc
        include_regex = re.compile(self.include)
        exclude_regex = re.compile(self.exclude) if self.exclude else None
        # Collect every file under PROTO_STEM matching include but not
        # exclude.
        paths = []
        for walk_root, directories, filenames in os.walk(PROTO_STEM):
            for filename in filenames:
                path = os.path.join(walk_root, filename)
                if include_regex.match(path) and not (
                        exclude_regex and exclude_regex.match(path)):
                    paths.append(path)
        # TODO(kpayson): It would be nice to do this in a batch command,
        # but we currently have name conflicts in src/proto
        for path in paths:
            command = [
                'grpc_tools.protoc',
                '-I {}'.format(PROTO_STEM),
                '--python_out={}'.format(PROTO_STEM),
                '--grpc_python_out={}'.format(PROTO_STEM),
            ] + [path]
            # A failed protoc run is reported as a warning; the command
            # keeps processing the remaining files instead of aborting.
            if protoc.main(command) != 0:
                sys.stderr.write(
                    'warning: Command:\n{}\nFailed'.format(command))
        # Generated proto directories dont include __init__.py, but
        # these are needed for python package resolution
        for walk_root, _, _ in os.walk(PROTO_STEM):
            path = os.path.join(walk_root, '__init__.py')
            open(path, 'a').close()
class BuildPy(build_py.build_py):
    """Custom project build command.
    Runs proto generation (the 'build_package_protos' command) before the
    standard build_py step; a CommandError from proto generation is
    reported as a warning instead of aborting the build.
    """
    def run(self):
        try:
            self.run_command('build_package_protos')
        except CommandError as error:
            # Format the exception object itself: Exception instances have
            # no `message` attribute on Python 3, so the previous
            # `error.message` raised AttributeError inside this handler.
            sys.stderr.write('warning: %s\n' % error)
        build_py.build_py.run(self)
class TestLite(setuptools.Command):
    """Command to run tests without fetching or building anything."""
    description = 'run tests without fetching or building anything.'
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        # distutils requires this override.
        pass
    def run(self):
        """Load and run the project test suite; exit non-zero on failure."""
        self._add_eggs_to_path()
        # Imported here, after the requirement eggs have been fetched above.
        import tests
        test_loader = tests.Loader()
        test_loader.loadTestsFromNames(['tests'])
        outcome = tests.Runner().run(test_loader.suite)
        if not outcome.wasSuccessful():
            sys.exit('Test failure')
    def _add_eggs_to_path(self):
        """Fetch install and test requirements"""
        self.distribution.fetch_build_eggs(self.distribution.install_requires)
        self.distribution.fetch_build_eggs(self.distribution.tests_require)
class RunInterop(test.test):
    """setuptools command that runs the interop test client or server."""
    description = 'run interop test client/server'
    user_options = [('args=', 'a', 'pass-thru arguments for the client/server'),
                    ('client', 'c', 'flag indicating to run the client'),
                    ('server', 's', 'flag indicating to run the server')]
    def initialize_options(self):
        # Raw pass-through argument string; split into argv in run_*().
        self.args = ''
        self.client = False
        self.server = False
    def finalize_options(self):
        # The client and server flags are mutually exclusive.
        if self.client and self.server:
            raise _errors.DistutilsOptionError(
                'you may only specify one of client or server')
    def run(self):
        # Fetch install/test requirement eggs before importing the tests.
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        # Note: if neither flag was given, nothing is run.
        if self.client:
            self.run_client()
        elif self.server:
            self.run_server()
    def run_server(self):
        """Run the interop server with self.args as its command line."""
        # We import here to ensure that our setuptools parent has had a chance to
        # edit the Python system path.
        from tests.interop import server
        sys.argv[1:] = self.args.split()
        server.serve()
    def run_client(self):
        """Run the interop client with self.args as its command line."""
        # We import here to ensure that our setuptools parent has had a chance to
        # edit the Python system path.
        from tests.interop import client
        sys.argv[1:] = self.args.split()
        client.test_interoperability()
| 3,563 | 605 | 258 |
37e953ae2d41d39e1d47b0ae463e5e09d78db822 | 1,242 | py | Python | bluebottle/payments/migrations/0007_auto_20210302_1417.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/payments/migrations/0007_auto_20210302_1417.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/payments/migrations/0007_auto_20210302_1417.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-03-02 13:17
from __future__ import unicode_literals
from django.db import migrations
| 24.84 | 49 | 0.544283 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-03-02 13:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('payments', '0006_auto_20181115_1321'),
]
operations = [
migrations.RemoveField(
model_name='orderpayment',
name='authorization_action',
),
migrations.RemoveField(
model_name='orderpayment',
name='order',
),
migrations.RemoveField(
model_name='orderpayment',
name='user',
),
migrations.RemoveField(
model_name='payment',
name='order_payment',
),
migrations.RemoveField(
model_name='payment',
name='polymorphic_ctype',
),
migrations.RemoveField(
model_name='transaction',
name='payment',
),
migrations.RemoveField(
model_name='transaction',
name='polymorphic_ctype',
),
migrations.DeleteModel(
name='OrderPayment',
),
migrations.DeleteModel(
name='OrderPaymentAction',
),
]
| 0 | 1,070 | 23 |
7cc6ac075442c20c1dc60a1d477e25e3ee36bd5e | 240 | py | Python | poppurri/common/tests.py | ariel17/poppurri | 56c2f7ec96402e96776b928cd8be5454f7c2ab67 | [
"MIT"
] | null | null | null | poppurri/common/tests.py | ariel17/poppurri | 56c2f7ec96402e96776b928cd8be5454f7c2ab67 | [
"MIT"
] | 12 | 2017-06-01T13:15:33.000Z | 2017-06-01T13:15:35.000Z | poppurri/common/tests.py | ariel17/poppurri | 56c2f7ec96402e96776b928cd8be5454f7c2ab67 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: TODO
"""
__author__ = "Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)"
from django.test import TestCase
# Create your tests here.
# vim: ai ts=4 sts=4 et sw=4 ft=python
| 16 | 64 | 0.675 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: TODO
"""
__author__ = "Ariel Gerardo Rios (ariel.gerardo.rios@gmail.com)"
from django.test import TestCase
# Create your tests here.
# vim: ai ts=4 sts=4 et sw=4 ft=python
| 0 | 0 | 0 |
985de59aac4786ddd76c2cf63fc1fdf14bcaeafe | 2,290 | py | Python | app.py | joostrijneveld/eetvoorkeur | ca3887b6934992cb94f848f2cd9766d168a27af8 | [
"CC0-1.0"
] | 2 | 2016-04-03T21:53:52.000Z | 2016-04-05T09:39:14.000Z | app.py | joostrijneveld/eetvoorkeur | ca3887b6934992cb94f848f2cd9766d168a27af8 | [
"CC0-1.0"
] | null | null | null | app.py | joostrijneveld/eetvoorkeur | ca3887b6934992cb94f848f2cd9766d168a27af8 | [
"CC0-1.0"
] | null | null | null | from flask import Flask
from flask import render_template
from flask.ext.socketio import SocketIO, emit
from hashlib import sha256
import sys
app = Flask(__name__)
app.config['SECRET_KEY'] = 'replaceme'
app.config['ADMIN_URL'] = '/admin'
app.config['DEBUG'] = True
# Replace the above secrets and specify other overrides here, or alternatively,
# create a config.py file that has a configure(app) function that adds these.
try:
import config
config.configure(app)
except ImportError:
pass
socketio = SocketIO(app)
admin_secret = app.config['SECRET_KEY'] + "ADMIN_SECRET"
app.config['ADMIN_SECRET'] = sha256(admin_secret.encode('utf-8')).hexdigest()
# eetvoorkeur relies completely on a run-time state. This means that the state
# is reset whenever the app is restarted. Future versions might rely on a
# database of some kind, but for now, this was the easiest prototype.
state = {"step": 1,
"options": [{'name': 'Albert Heijn', 'votes': 0},
{'name': 'De Fest', 'votes': 0},
{'name': 'Lotus', 'votes': 0},
],
"deadlines": ["16:00", "17:00", "18:15"],
}
@app.route('/')
@app.route(app.config['ADMIN_URL'])
@socketio.on('state update')
@socketio.on('vote')
@socketio.on('new option')
app.run(debug=True, threaded=True)
| 31.369863 | 79 | 0.649782 | from flask import Flask
from flask import render_template
from flask.ext.socketio import SocketIO, emit
from hashlib import sha256
import sys
app = Flask(__name__)
app.config['SECRET_KEY'] = 'replaceme'
app.config['ADMIN_URL'] = '/admin'
app.config['DEBUG'] = True
# Replace the above secrets and specify other overrides here, or alternatively,
# create a config.py file that has a configure(app) function that adds these.
try:
import config
config.configure(app)
except ImportError:
pass
socketio = SocketIO(app)
admin_secret = app.config['SECRET_KEY'] + "ADMIN_SECRET"
app.config['ADMIN_SECRET'] = sha256(admin_secret.encode('utf-8')).hexdigest()
# eetvoorkeur relies completely on a run-time state. This means that the state
# is reset whenever the app is restarted. Future versions might rely on a
# database of some kind, but for now, this was the easiest prototype.
state = {"step": 1,
"options": [{'name': 'Albert Heijn', 'votes': 0},
{'name': 'De Fest', 'votes': 0},
{'name': 'Lotus', 'votes': 0},
],
"deadlines": ["16:00", "17:00", "18:15"],
}
@app.route('/')
def index(admin=False):
return render_template('index.html', admin=admin, state=state)
@app.route(app.config['ADMIN_URL'])
def admin():
return index(admin=app.config['ADMIN_SECRET'])
@socketio.on('state update')
def update_state(message):
if ('admin_secret' not in message or
message['admin_secret'] != app.config['ADMIN_SECRET']):
return
if state['step'] == 0 and 'deadlines' in message:
state['step'] = 1
state['deadlines'] = message['deadlines']
emit('state change', state, broadcast=True)
@socketio.on('vote')
def vote(message):
if 'option' in message and message['option'] < len(state['options']):
state['options'][message['option']]['votes'] += 1
emit('state change', state, broadcast=True)
@socketio.on('new option')
def new_option(message):
if ('newoption' in message and
message['newoption'] not in [x['name'] for x in state['options']]):
state['options'].append({'name': message['newoption'], 'votes': 0})
emit('state change', state, broadcast=True)
app.run(debug=True, threaded=True)
| 845 | 0 | 110 |
893a15394e6fee261373c36aece687225e36317e | 1,956 | py | Python | src/anyvar/utils/formats.py | theferrit32/anyvar | 44c30a808155d1510590a9d72a1570307cad6a3a | [
"Apache-2.0"
] | 5 | 2020-09-28T14:25:17.000Z | 2022-02-10T21:12:30.000Z | src/anyvar/utils/formats.py | theferrit32/anyvar | 44c30a808155d1510590a9d72a1570307cad6a3a | [
"Apache-2.0"
] | 5 | 2020-09-02T15:31:32.000Z | 2021-12-01T05:30:53.000Z | src/anyvar/utils/formats.py | theferrit32/anyvar | 44c30a808155d1510590a9d72a1570307cad6a3a | [
"Apache-2.0"
] | 3 | 2020-09-02T15:30:04.000Z | 2021-05-20T21:08:55.000Z | import re
format_regexps = {
"hgvs": [
# just the accession and variant type
r"^[^:]+:[cgnopr]",
],
"spdi": [
# SequenceId:Position:DeletionLength:InsertedSequence
r"^[^:]+:\d+:(\d+|\w*):\w*"
],
"gnomad": [
# 1-55516888-G-GA
r"^\d+-\d+-\w*-\w*$",
],
"beacon": [
# 13 : 32936732 G > C
r"\d+\s*:\s*\d+\s*\w+\s*>\s*\w+",
],
"text": [
r"\w",
]
}
format_regexps = {
t: [re.compile(e) for e in exprs]
for t, exprs in format_regexps.items()}
def infer_plausible_formats(o):
"""Returns a *set* of plausible formats of the given variation
definition. Format inference is permissive: that is, all
well-formed variation of a particular syntax should be correctly
recognized, but some invalid variation may be incorrectly
recognized. This function will typically return a set with 0 or 1
item.
Recognized string formats:
* "hgvs": NM_000551.3:c.456A>T
* "spdi": e.g., Seq1:4:AT:CCC
* "beacon": e.g., 13 : 32936732 G > C
* "gnomad": 1-55516888-G-GA
If the input is a list, then the resulting set is the
*intersection* of this function applied to all members of the
list. A list of lists (i.e., a list of list of haplotypes that
forms a genotype) is supported. Because the intersection of
inferred types is returned, the data are expected to be
homogeneously typed. That is, this function is not intended to
handle cases of a haplotype defined by alleles in different
formats.
"""
if o is None:
return []
if isinstance(o, list):
return(set.intersection(infer_plausible_formats(elem) for elem in o))
if isinstance(o, str):
return set(t
for t, exprs in format_regexps.items()
if any(e.match(o) for e in exprs))
raise RuntimeError("Cannot infer format of a " + type(o))
| 27.942857 | 77 | 0.598671 | import re
format_regexps = {
"hgvs": [
# just the accession and variant type
r"^[^:]+:[cgnopr]",
],
"spdi": [
# SequenceId:Position:DeletionLength:InsertedSequence
r"^[^:]+:\d+:(\d+|\w*):\w*"
],
"gnomad": [
# 1-55516888-G-GA
r"^\d+-\d+-\w*-\w*$",
],
"beacon": [
# 13 : 32936732 G > C
r"\d+\s*:\s*\d+\s*\w+\s*>\s*\w+",
],
"text": [
r"\w",
]
}
format_regexps = {
t: [re.compile(e) for e in exprs]
for t, exprs in format_regexps.items()}
def infer_plausible_formats(o):
"""Returns a *set* of plausible formats of the given variation
definition. Format inference is permissive: that is, all
well-formed variation of a particular syntax should be correctly
recognized, but some invalid variation may be incorrectly
recognized. This function will typically return a set with 0 or 1
item.
Recognized string formats:
* "hgvs": NM_000551.3:c.456A>T
* "spdi": e.g., Seq1:4:AT:CCC
* "beacon": e.g., 13 : 32936732 G > C
* "gnomad": 1-55516888-G-GA
If the input is a list, then the resulting set is the
*intersection* of this function applied to all members of the
list. A list of lists (i.e., a list of list of haplotypes that
forms a genotype) is supported. Because the intersection of
inferred types is returned, the data are expected to be
homogeneously typed. That is, this function is not intended to
handle cases of a haplotype defined by alleles in different
formats.
"""
if o is None:
return []
if isinstance(o, list):
return(set.intersection(infer_plausible_formats(elem) for elem in o))
if isinstance(o, str):
return set(t
for t, exprs in format_regexps.items()
if any(e.match(o) for e in exprs))
raise RuntimeError("Cannot infer format of a " + type(o))
| 0 | 0 | 0 |
f0a58bb0b7b87f01c85d672d7ea6856595df2840 | 1,183 | py | Python | examples/comm-broadcast-gather/gather.py | dapatil211/Jacinle | 7638a46dc06223a1871d88f92aade644883f40a0 | [
"MIT"
] | 114 | 2018-01-25T04:44:07.000Z | 2022-03-09T14:33:42.000Z | third_party/Jacinle/examples/comm-broadcast-gather/gather.py | dair-iitd/1oML_workdir | 37117de4abf1774548786e9534c90977d67091d8 | [
"Apache-2.0"
] | 7 | 2018-05-08T17:02:24.000Z | 2022-02-09T23:44:06.000Z | third_party/Jacinle/examples/comm-broadcast-gather/gather.py | dair-iitd/1oML_workdir | 37117de4abf1774548786e9534c90977d67091d8 | [
"Apache-2.0"
] | 268 | 2018-04-08T10:54:35.000Z | 2022-03-01T07:10:02.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gather.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 02/16/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import time
import multiprocessing as mp
from jacinle.comm.gather import make_gather_pair
from jacinle.utils.meta import map_exec_method
if __name__ == '__main__':
main()
| 26.288889 | 96 | 0.640744 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gather.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 02/16/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import time
import multiprocessing as mp
from jacinle.comm.gather import make_gather_pair
from jacinle.utils.meta import map_exec_method
def mainloop_pull(pipe):
    """Consumer loop: keep receiving messages from the gather pipe and log them.

    Blocks forever; intended to run in the parent process.
    """
    with pipe.activate():
        while True:
            payload = pipe.recv()
            print('Received: worker_id=#{}, msg={}.'.format(payload['worker_id'], payload))
def mainloop_push(worker_id, pipe):
    """Producer loop: announce startup, then push one timestamped message per second.

    Blocks forever; intended to run in a worker process.
    """
    print('Initialized: worker_id=#{}.'.format(worker_id))
    with pipe.activate():
        while True:
            payload = {
                'text': 'Hello world!',
                'time': time.strftime('%H:%M:%S'),
                'worker_id': worker_id,
            }
            pipe.send(payload)
            print('Sent: msg={}.'.format(payload))
            time.sleep(1)
def main():
    """Wire one pull endpoint to four push workers and consume messages forever."""
    pull, pushs = make_gather_pair('jaincle-test', nr_workers=4, mode='ipc')
    workers = []
    for index, push_pipe in enumerate(pushs):
        workers.append(mp.Process(target=mainloop_push, args=(index, push_pipe)))
    map_exec_method('start', workers)
    mainloop_pull(pull)
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 706 | 0 | 69 |
d9e6de0f0279e1d093c59e51ba89d56cb207b7f2 | 12,247 | py | Python | scripts/study_case/ID_4/torch_geometric/nn/models/autoencoder.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 8 | 2021-06-30T06:55:14.000Z | 2022-03-18T01:57:14.000Z | scripts/study_case/ID_4/torch_geometric/nn/models/autoencoder.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 1 | 2021-06-30T03:08:15.000Z | 2021-06-30T03:08:15.000Z | scripts/study_case/ID_4/torch_geometric/nn/models/autoencoder.py | kzbnb/numerical_bugs | bc22e72bcc06df6ce7889a25e0aeed027bde910b | [
"Apache-2.0"
] | 2 | 2021-11-17T11:19:48.000Z | 2021-11-18T03:05:58.000Z | import math
import random
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
from scripts.study_case.ID_4.torch_geometric.utils import to_undirected
from ..inits import reset
EPS = 1e-15
MAX_LOGVAR = 10
class InnerProductDecoder(torch.nn.Module):
r"""The inner product decoder from the `"Variational Graph Auto-Encoders"
<https://arxiv.org/abs/1611.07308>`_ paper
.. math::
\sigma(\mathbf{Z}\mathbf{Z}^{\top})
where :math:`\mathbf{Z} \in \mathbb{R}^{N \times d}` denotes the latent
space produced by the encoder."""
def forward(self, z, edge_index, sigmoid=True):
r"""Decodes the latent variables :obj:`z` into edge probabilties for
the given node-pairs :obj:`edge_index`.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
sigmoid (bool, optional): If set to :obj:`False`, does not apply
the logistic sigmoid function to the output.
(default: :obj:`True`)
"""
value = (z[edge_index[0]] * z[edge_index[1]]).sum(dim=1)
return torch.sigmoid(value) if sigmoid else value
def forward_all(self, z, sigmoid=True):
r"""Decodes the latent variables :obj:`z` into a probabilistic dense
adjacency matrix.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
sigmoid (bool, optional): If set to :obj:`False`, does not apply
the logistic sigmoid function to the output.
(default: :obj:`True`)
"""
adj = torch.matmul(z, z.t())
return torch.sigmoid(adj) if sigmoid else adj
class GAE(torch.nn.Module):
r"""The Graph Auto-Encoder model from the
`"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
paper based on user-defined encoder and decoder models.
Args:
encoder (Module): The encoder module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def encode(self, *args, **kwargs):
r"""Runs the encoder and computes node-wise latent variables."""
return self.encoder(*args, **kwargs)
def decode(self, *args, **kwargs):
r"""Runs the decoder and computes edge probabilties."""
return self.decoder(*args, **kwargs)
def split_edges(self, data, val_ratio=0.05, test_ratio=0.1):
r"""Splits the edges of a :obj:`torch_geometric.data.Data` object
into positve and negative train/val/test edges.
Args:
data (Data): The data object.
val_ratio (float, optional): The ratio of positive validation
edges. (default: :obj:`0.05`)
test_ratio (float, optional): The ratio of positive test
edges. (default: :obj:`0.1`)
"""
assert 'batch' not in data # No batch-mode.
row, col = data.edge_index
data.edge_index = None
# Return upper triangular portion.
mask = row < col
row, col = row[mask], col[mask]
n_v = int(math.floor(val_ratio * row.size(0)))
n_t = int(math.floor(test_ratio * row.size(0)))
# Positive edges.
perm = torch.randperm(row.size(0))
row, col = row[perm], col[perm]
r, c = row[:n_v], col[:n_v]
data.val_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v:n_v + n_t], col[n_v:n_v + n_t]
data.test_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_v + n_t:], col[n_v + n_t:]
data.train_pos_edge_index = torch.stack([r, c], dim=0)
data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
# Negative edges.
num_nodes = data.num_nodes
neg_adj_mask = torch.ones(num_nodes, num_nodes, dtype=torch.uint8)
neg_adj_mask = neg_adj_mask.triu(diagonal=1)
neg_adj_mask[row, col] = 0
neg_row, neg_col = neg_adj_mask.nonzero().t()
perm = random.sample(
range(neg_row.size(0)), min(n_v + n_t, neg_row.size(0)))
perm = torch.tensor(perm)
perm = perm.to(torch.long)
neg_row, neg_col = neg_row[perm], neg_col[perm]
neg_adj_mask[neg_row, neg_col] = 0
data.train_neg_adj_mask = neg_adj_mask
row, col = neg_row[:n_v], neg_col[:n_v]
data.val_neg_edge_index = torch.stack([row, col], dim=0)
row, col = neg_row[n_v:n_v + n_t], neg_col[n_v:n_v + n_t]
data.test_neg_edge_index = torch.stack([row, col], dim=0)
return data
def recon_loss(self, z, pos_edge_index):
r"""Given latent variables :obj:`z`, computes the binary cross
entropy loss for positive edges :obj:`pos_edge_index` and negative
sampled edges.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
pos_edge_index (LongTensor): The positive edges to train against.
"""
pos_loss = -torch.log(
self.decoder(z, pos_edge_index, sigmoid=True) + EPS).mean()
neg_edge_index = negative_sampling(pos_edge_index, z.size(0))
neg_loss = -torch.log(
1 - self.decoder(z, neg_edge_index, sigmoid=True) + EPS).mean()
return pos_loss + neg_loss
def test(self, z, pos_edge_index, neg_edge_index):
r"""Given latent variables :obj:`z`, positive edges
:obj:`pos_edge_index` and negative edges :obj:`neg_edge_index`,
computes area under the ROC curve (AUC) and average precision (AP)
scores.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
pos_edge_index (LongTensor): The positive edges to evaluate
against.
neg_edge_index (LongTensor): The negative edges to evaluate
against.
"""
pos_y = z.new_ones(pos_edge_index.size(1))
neg_y = z.new_zeros(neg_edge_index.size(1))
y = torch.cat([pos_y, neg_y], dim=0)
pos_pred = self.decoder(z, pos_edge_index, sigmoid=True)
neg_pred = self.decoder(z, neg_edge_index, sigmoid=True)
pred = torch.cat([pos_pred, neg_pred], dim=0)
y, pred = y.detach().cpu().numpy(), pred.detach().cpu().numpy()
return roc_auc_score(y, pred), average_precision_score(y, pred)
class VGAE(GAE):
r"""The Variational Graph Auto-Encoder model from the
`"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
paper.
Args:
encoder (Module): The encoder module to compute :math:`\mu` and
:math:`\log\sigma^2`.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def encode(self, *args, **kwargs):
""""""
self.__mu__, self.__logvar__ = self.encoder(*args, **kwargs)
# self.__logvar__ = self.__logvar__.clamp(max=MAX_LOGVAR)
z = self.reparametrize(self.__mu__, self.__logvar__)
return z
def kl_loss(self, mu=None, logvar=None):
r"""Computes the KL loss, either for the passed arguments :obj:`mu`
and :obj:`logvar`, or based on latent variables from last encoding.
Args:
mu (Tensor, optional): The latent space for :math:`\mu`. If set to
:obj:`None`, uses the last computation of :math:`mu`.
(default: :obj:`None`)
logvar (Tensor, optional): The latent space for
:math:`\log\sigma^2`. If set to :obj:`None`, uses the last
computation of :math:`\log\sigma^2`.(default: :obj:`None`)
"""
mu = self.__mu__ if mu is None else mu
# logvar = self.__logvar__ if logvar is None else logvar.clamp(
# max=MAX_LOGVAR)
logvar = self.__logvar__
return -0.5 * torch.mean(
torch.sum(1 + logvar - mu**2 - logvar.log(), dim=1))
class ARGA(GAE):
r"""The Adversarially Regularized Graph Auto-Encoder model from the
`"Adversarially Regularized Graph Autoencoder for Graph Embedding"
<https://arxiv.org/abs/1802.04407>`_ paper.
paper.
Args:
encoder (Module): The encoder module.
discriminator (Module): The discriminator module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
def reg_loss(self, z):
r"""Computes the regularization loss of the encoder.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
"""
real = torch.sigmoid(self.discriminator(z))
real_loss = -torch.log(real + EPS).mean()
return real_loss
def discriminator_loss(self, z):
r"""Computes the loss of the discriminator.
Args:
z (Tensor): The latent space :math:`\mathbf{Z}`.
"""
real = torch.sigmoid(self.discriminator(torch.randn_like(z)))
fake = torch.sigmoid(self.discriminator(z.detach()))
real_loss = -torch.log(real + EPS).mean()
fake_loss = -torch.log(1 - fake + EPS).mean()
return real_loss + fake_loss
class ARGVA(ARGA):
r"""The Adversarially Regularized Variational Graph Auto-Encoder model from
the `"Adversarially Regularized Graph Autoencoder for Graph Embedding"
<https://arxiv.org/abs/1802.04407>`_ paper.
paper.
Args:
encoder (Module): The encoder module to compute :math:`\mu` and
:math:`\log\sigma^2`.
discriminator (Module): The discriminator module.
decoder (Module, optional): The decoder module. If set to :obj:`None`,
will default to the
:class:`torch_geometric.nn.models.InnerProductDecoder`.
(default: :obj:`None`)
"""
@property
@property
def encode(self, *args, **kwargs):
""""""
return self.VGAE.encode(*args, **kwargs)
| 35.914956 | 79 | 0.614191 | import math
import random
import torch
import numpy as np
from sklearn.metrics import roc_auc_score, average_precision_score
from scripts.study_case.ID_4.torch_geometric.utils import to_undirected
from ..inits import reset
EPS = 1e-15  # epsilon added inside log() calls to avoid log(0)
MAX_LOGVAR = 10  # upper clamp for encoder log-variance outputs (see VGAE)
def negative_sampling(pos_edge_index, num_nodes):
    """Sample one random negative (non-existing) edge per positive edge.

    Args:
        pos_edge_index (LongTensor): Positive edges of shape ``[2, E]``.
        num_nodes (int): Number of nodes in the graph.

    Returns:
        LongTensor: Negative edges of shape ``[2, E]`` on the same device
        as :obj:`pos_edge_index`.
    """
    # Encode each positive edge (r, c) as the single integer r * N + c so
    # collision checks against sampled candidates are 1-D membership tests.
    idx = (pos_edge_index[0] * num_nodes + pos_edge_index[1])
    idx = idx.to(torch.device('cpu'))
    rng = range(num_nodes**2)
    perm = torch.tensor(random.sample(rng, idx.size(0)))
    mask = torch.from_numpy(np.isin(perm, idx).astype(np.uint8))
    rest = mask.nonzero().view(-1)  # positions in `perm` colliding with a positive edge
    while rest.numel() > 0:  # pragma: no cover
        tmp = torch.tensor(random.sample(rng, rest.size(0)))
        mask = torch.from_numpy(np.isin(tmp, idx).astype(np.uint8))
        perm[rest] = tmp
        # Bug fix: keep the *perm* positions whose replacement still collides.
        # The old code assigned `mask.nonzero()` directly, i.e. indices into
        # `tmp`, which corrupted already-valid entries on the next iteration.
        rest = rest[mask.nonzero().view(-1)]
    # Bug fix: use floor division; on modern PyTorch `/` performs true
    # division and would yield a float tensor here.
    row, col = perm // num_nodes, perm % num_nodes
    return torch.stack([row, col], dim=0).to(pos_edge_index.device)
class InnerProductDecoder(torch.nn.Module):
    r"""Inner product decoder from the `"Variational Graph Auto-Encoders"
    <https://arxiv.org/abs/1611.07308>`_ paper,
    .. math::
        \sigma(\mathbf{Z}\mathbf{Z}^{\top})
    where :math:`\mathbf{Z} \in \mathbb{R}^{N \times d}` is the latent
    matrix produced by the encoder."""
    def forward(self, z, edge_index, sigmoid=True):
        r"""Decode latent variables :obj:`z` into probabilities for the
        node pairs listed in :obj:`edge_index`.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
            sigmoid (bool, optional): When :obj:`False`, return the raw
                inner products instead of applying the logistic sigmoid.
                (default: :obj:`True`)
        """
        src, dst = edge_index[0], edge_index[1]
        logits = (z[src] * z[dst]).sum(dim=1)
        if sigmoid:
            return torch.sigmoid(logits)
        return logits
    def forward_all(self, z, sigmoid=True):
        r"""Decode latent variables :obj:`z` into a dense probabilistic
        adjacency matrix over all node pairs.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
            sigmoid (bool, optional): When :obj:`False`, return the raw
                inner products instead of applying the logistic sigmoid.
                (default: :obj:`True`)
        """
        logits = torch.matmul(z, z.t())
        if sigmoid:
            return torch.sigmoid(logits)
        return logits
class GAE(torch.nn.Module):
    r"""The Graph Auto-Encoder model from the
    `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
    paper based on user-defined encoder and decoder models.
    Args:
        encoder (Module): The encoder module.
        decoder (Module, optional): The decoder module. If set to :obj:`None`,
            will default to the
            :class:`torch_geometric.nn.models.InnerProductDecoder`.
            (default: :obj:`None`)
    """
    def __init__(self, encoder, decoder=None):
        super(GAE, self).__init__()
        self.encoder = encoder
        self.decoder = InnerProductDecoder() if decoder is None else decoder
        # Call GAE's own reset explicitly so a subclass override (which may
        # rely on attributes not yet assigned) is not invoked mid-construction.
        GAE.reset_parameters(self)
    def reset_parameters(self):
        # `reset` (from ..inits) recursively re-initializes sub-modules.
        reset(self.encoder)
        reset(self.decoder)
    def encode(self, *args, **kwargs):
        r"""Runs the encoder and computes node-wise latent variables."""
        return self.encoder(*args, **kwargs)
    def decode(self, *args, **kwargs):
        r"""Runs the decoder and computes edge probabilities."""
        return self.decoder(*args, **kwargs)
    def split_edges(self, data, val_ratio=0.05, test_ratio=0.1):
        r"""Splits the edges of a :obj:`torch_geometric.data.Data` object
        into positve and negative train/val/test edges.
        Mutates and returns ``data``: clears ``data.edge_index`` and attaches
        ``{train,val,test}_{pos,neg}_edge_index`` plus ``train_neg_adj_mask``.
        Args:
            data (Data): The data object.
            val_ratio (float, optional): The ratio of positive validation
                edges. (default: :obj:`0.05`)
            test_ratio (float, optional): The ratio of positive test
                edges. (default: :obj:`0.1`)
        """
        assert 'batch' not in data # No batch-mode.
        row, col = data.edge_index
        data.edge_index = None
        # Return upper triangular portion.
        mask = row < col
        row, col = row[mask], col[mask]
        # Number of validation / test positives.
        n_v = int(math.floor(val_ratio * row.size(0)))
        n_t = int(math.floor(test_ratio * row.size(0)))
        # Positive edges.
        # Shuffle once, then slice into val / test / train partitions.
        perm = torch.randperm(row.size(0))
        row, col = row[perm], col[perm]
        r, c = row[:n_v], col[:n_v]
        data.val_pos_edge_index = torch.stack([r, c], dim=0)
        r, c = row[n_v:n_v + n_t], col[n_v:n_v + n_t]
        data.test_pos_edge_index = torch.stack([r, c], dim=0)
        r, c = row[n_v + n_t:], col[n_v + n_t:]
        data.train_pos_edge_index = torch.stack([r, c], dim=0)
        # Training positives are made symmetric (both directions).
        data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
        # Negative edges.
        # Start from all upper-triangular non-edges, then remove positives.
        num_nodes = data.num_nodes
        neg_adj_mask = torch.ones(num_nodes, num_nodes, dtype=torch.uint8)
        neg_adj_mask = neg_adj_mask.triu(diagonal=1)
        neg_adj_mask[row, col] = 0
        neg_row, neg_col = neg_adj_mask.nonzero().t()
        # Sample n_v + n_t negatives (capped by availability) for val/test.
        perm = random.sample(
            range(neg_row.size(0)), min(n_v + n_t, neg_row.size(0)))
        perm = torch.tensor(perm)
        perm = perm.to(torch.long)
        neg_row, neg_col = neg_row[perm], neg_col[perm]
        # Sampled val/test negatives are excluded from the training mask.
        neg_adj_mask[neg_row, neg_col] = 0
        data.train_neg_adj_mask = neg_adj_mask
        row, col = neg_row[:n_v], neg_col[:n_v]
        data.val_neg_edge_index = torch.stack([row, col], dim=0)
        row, col = neg_row[n_v:n_v + n_t], neg_col[n_v:n_v + n_t]
        data.test_neg_edge_index = torch.stack([row, col], dim=0)
        return data
    def recon_loss(self, z, pos_edge_index):
        r"""Given latent variables :obj:`z`, computes the binary cross
        entropy loss for positive edges :obj:`pos_edge_index` and negative
        sampled edges.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
            pos_edge_index (LongTensor): The positive edges to train against.
        """
        # EPS keeps log() finite when the decoder saturates at 0 or 1.
        pos_loss = -torch.log(
            self.decoder(z, pos_edge_index, sigmoid=True) + EPS).mean()
        neg_edge_index = negative_sampling(pos_edge_index, z.size(0))
        neg_loss = -torch.log(
            1 - self.decoder(z, neg_edge_index, sigmoid=True) + EPS).mean()
        return pos_loss + neg_loss
    def test(self, z, pos_edge_index, neg_edge_index):
        r"""Given latent variables :obj:`z`, positive edges
        :obj:`pos_edge_index` and negative edges :obj:`neg_edge_index`,
        computes area under the ROC curve (AUC) and average precision (AP)
        scores.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
            pos_edge_index (LongTensor): The positive edges to evaluate
                against.
            neg_edge_index (LongTensor): The negative edges to evaluate
                against.
        """
        # Labels: 1 for positive pairs, 0 for negative pairs.
        pos_y = z.new_ones(pos_edge_index.size(1))
        neg_y = z.new_zeros(neg_edge_index.size(1))
        y = torch.cat([pos_y, neg_y], dim=0)
        pos_pred = self.decoder(z, pos_edge_index, sigmoid=True)
        neg_pred = self.decoder(z, neg_edge_index, sigmoid=True)
        pred = torch.cat([pos_pred, neg_pred], dim=0)
        y, pred = y.detach().cpu().numpy(), pred.detach().cpu().numpy()
        return roc_auc_score(y, pred), average_precision_score(y, pred)
class VGAE(GAE):
    r"""The Variational Graph Auto-Encoder model from the
    `"Variational Graph Auto-Encoders" <https://arxiv.org/abs/1611.07308>`_
    paper.
    Args:
        encoder (Module): The encoder module to compute :math:`\mu` and
            :math:`\log\sigma^2`.
        decoder (Module, optional): The decoder module. If set to :obj:`None`,
            will default to the
            :class:`torch_geometric.nn.models.InnerProductDecoder`.
            (default: :obj:`None`)
    """
    def __init__(self, encoder, decoder=None):
        super(VGAE, self).__init__(encoder, decoder)
    def reparametrize(self, mu, logvar):
        # NOTE(review): the noise scale is exp(logvar), which treats `logvar`
        # as a log *standard deviation*; confirm the encoder's convention.
        if self.training:
            return mu + torch.randn_like(logvar) * torch.exp(logvar)
        else:
            # At evaluation time, return the deterministic mean.
            return mu
    def encode(self, *args, **kwargs):
        """Runs the encoder and samples latent variables via reparametrization."""
        self.__mu__, self.__logvar__ = self.encoder(*args, **kwargs)
        # Clamp to avoid overflow in exp() for extreme log-variance values.
        self.__logvar__ = self.__logvar__.clamp(max=MAX_LOGVAR)
        z = self.reparametrize(self.__mu__, self.__logvar__)
        return z
    def kl_loss(self, mu=None, logvar=None):
        r"""Computes the KL loss, either for the passed arguments :obj:`mu`
        and :obj:`logvar`, or based on latent variables from last encoding.
        Args:
            mu (Tensor, optional): The latent space for :math:`\mu`. If set to
                :obj:`None`, uses the last computation of :math:`mu`.
                (default: :obj:`None`)
            logvar (Tensor, optional): The latent space for
                :math:`\log\sigma^2`. If set to :obj:`None`, uses the last
                computation of :math:`\log\sigma^2`.(default: :obj:`None`)
        """
        mu = self.__mu__ if mu is None else mu
        # Bug fix: honor the `logvar` argument (it was silently ignored),
        # clamping it for numerical stability as documented.
        logvar = self.__logvar__ if logvar is None else logvar.clamp(
            max=MAX_LOGVAR)
        # Bug fix: the analytic KL(N(mu, sigma^2) || N(0, 1)) term is
        # exp(logvar) (= sigma^2), not log(logvar), which is mathematically
        # wrong and produces NaNs whenever logvar <= 0.
        return -0.5 * torch.mean(
            torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1))
class ARGA(GAE):
    r"""The Adversarially Regularized Graph Auto-Encoder model from the
    `"Adversarially Regularized Graph Autoencoder for Graph Embedding"
    <https://arxiv.org/abs/1802.04407>`_ paper.
    Args:
        encoder (Module): The encoder module.
        discriminator (Module): The discriminator module.
        decoder (Module, optional): The decoder module. If set to :obj:`None`,
            will default to the
            :class:`torch_geometric.nn.models.InnerProductDecoder`.
            (default: :obj:`None`)
    """
    def __init__(self, encoder, discriminator, decoder=None):
        # Bug fix: run the parent Module initialization *before* assigning
        # the discriminator. PyTorch raises "cannot assign module before
        # Module.__init__() call" when a sub-module is set on an
        # uninitialized nn.Module.
        super(ARGA, self).__init__(encoder, decoder)
        self.discriminator = discriminator
        reset(self.discriminator)
    def reset_parameters(self):
        # Reset encoder/decoder via the base class, then the discriminator.
        super(ARGA, self).reset_parameters()
        reset(self.discriminator)
    def reg_loss(self, z):
        r"""Computes the regularization loss of the encoder.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
        """
        # Encourage embeddings that the discriminator classifies as "real";
        # EPS keeps log() finite when the sigmoid saturates.
        real = torch.sigmoid(self.discriminator(z))
        real_loss = -torch.log(real + EPS).mean()
        return real_loss
    def discriminator_loss(self, z):
        r"""Computes the loss of the discriminator.
        Args:
            z (Tensor): The latent space :math:`\mathbf{Z}`.
        """
        # Standard GAN discriminator objective: Gaussian noise plays the
        # "real" samples, the detached encoder output plays the "fake" ones.
        real = torch.sigmoid(self.discriminator(torch.randn_like(z)))
        fake = torch.sigmoid(self.discriminator(z.detach()))
        real_loss = -torch.log(real + EPS).mean()
        fake_loss = -torch.log(1 - fake + EPS).mean()
        return real_loss + fake_loss
class ARGVA(ARGA):
    r"""The Adversarially Regularized Variational Graph Auto-Encoder model from
    the `"Adversarially Regularized Graph Autoencoder for Graph Embedding"
    <https://arxiv.org/abs/1802.04407>`_ paper.
    Args:
        encoder (Module): The encoder module to compute :math:`\mu` and
            :math:`\log\sigma^2`.
        discriminator (Module): The discriminator module.
        decoder (Module, optional): The decoder module. If set to :obj:`None`,
            will default to the
            :class:`torch_geometric.nn.models.InnerProductDecoder`.
            (default: :obj:`None`)
    """
    def __init__(self, encoder, discriminator, decoder=None):
        super(ARGVA, self).__init__(encoder, discriminator, decoder)
        # Internal VGAE instance that owns the variational machinery; all
        # mu/logvar bookkeeping below is delegated to it.
        self.VGAE = VGAE(encoder, decoder)
    @property
    def __mu__(self):
        # Delegate the latent mean to the wrapped VGAE.
        return self.VGAE.__mu__
    @property
    def __logvar__(self):
        # Delegate the latent log-variance to the wrapped VGAE.
        return self.VGAE.__logvar__
    def reparametrize(self, mu, logvar):
        # Delegate sampling to the wrapped VGAE.
        return self.VGAE.reparametrize(mu, logvar)
    def encode(self, *args, **kwargs):
        """Runs the wrapped VGAE encoder (variational encoding)."""
        return self.VGAE.encode(*args, **kwargs)
    def kl_loss(self, mu=None, logvar=None):
        # Delegate the KL computation to the wrapped VGAE.
        return self.VGAE.kl_loss(mu, logvar)
| 1,751 | 0 | 318 |
439124cfc7c2672a2b3d34d8a8a39bceb022736b | 2,731 | py | Python | module1-introduction-to-sql/rpg_queries.py | rsskga/DS-Unit-3-Sprint-2-SQL-and-Databases | f4ad78d9bc7ac8296f951959c05b8a963cdc0e3b | [
"MIT"
] | null | null | null | module1-introduction-to-sql/rpg_queries.py | rsskga/DS-Unit-3-Sprint-2-SQL-and-Databases | f4ad78d9bc7ac8296f951959c05b8a963cdc0e3b | [
"MIT"
] | null | null | null | module1-introduction-to-sql/rpg_queries.py | rsskga/DS-Unit-3-Sprint-2-SQL-and-Databases | f4ad78d9bc7ac8296f951959c05b8a963cdc0e3b | [
"MIT"
] | null | null | null | # pylint: disable=invalid-name
"""Lambda module for learning sqlite3"""
import os
import sqlite3
###############################################################################
print(f"\n\n" + "#" * 79)
print(f"Assignment - Part 1, Querying a Database")
print(f"#" * 79 + "\n")
###############################################################################
path = os.path.join("file:",
os.path.abspath("."),
"module1-introduction-to-sql/",
"rpg_db.sqlite3")
db = sqlite3.connect(path)
c = db.cursor()
query1 = "SELECT COUNT() FROM charactercreator_character"
chars = c.execute(query1).fetchone()[0]
print(f"There are {chars} total characters.")
print(f"Subclasses:")
query2 = "SELECT COUNT() FROM charactercreator_cleric"
clerics = c.execute(query2).fetchone()[0]
print(f" Cleric: {clerics}")
query3 = "SELECT COUNT() FROM charactercreator_fighter"
fighters = c.execute(query3).fetchone()[0]
print(f" Fighter: {fighters}")
query4 = "SELECT COUNT() FROM charactercreator_thief"
thieves = c.execute(query4).fetchone()[0]
print(f" Thief: {thieves}")
query5 = "SELECT COUNT() FROM charactercreator_necromancer"
necros = c.execute(query5).fetchone()[0]
print(f" Necromancer: {necros}")
query6 = "SELECT COUNT() FROM charactercreator_mage"
magi = c.execute(query6).fetchone()[0] - necros
print(f" Mage: {magi}")
query7 = "SELECT COUNT() FROM armory_item"
items = c.execute(query7).fetchone()[0]
print(f"There are {items} total items.")
query8 = "SELECT COUNT() FROM armory_weapon"
weapons = c.execute(query8).fetchone()[0]
print(f"{weapons} items are weapons.")
non_weapons = items - weapons
print(f"{non_weapons} items are non-weapons.")
characters = range(1, 21)
total_items = 0
total_weapons = 0
for character in characters:
query1 = "SELECT COUNT() " \
"FROM charactercreator_character_inventory " \
"WHERE character_id = " + str(character)
items = c.execute(query1).fetchone()[0]
total_items += items
print(f"Character {character} has {items} items.")
query2 = "SELECT character_id " \
"FROM charactercreator_character_inventory c " \
"WHERE character_id = " + str(character) + " AND " \
"EXISTS(SELECT item_ptr_id " \
"FROM armory_weapon " \
"WHERE item_ptr_id = c.item_id)"
weapons = len(c.execute(query2).fetchall())
total_weapons += weapons
print(f"Character {character} has {weapons} weapons.")
ave_items = total_items / 20
ave_weapons = total_weapons / 20
print(f"On average, characters 1-20 have {ave_items} items.")
print(f"On average, characters 1-20 have {ave_weapons} weapons.")
# save and close
db.close()
| 32.903614 | 79 | 0.628341 | # pylint: disable=invalid-name
"""Lambda module for learning sqlite3"""
import os
import sqlite3
###############################################################################
print(f"\n\n" + "#" * 79)
print(f"Assignment - Part 1, Querying a Database")
print(f"#" * 79 + "\n")
###############################################################################
path = os.path.join("file:",
os.path.abspath("."),
"module1-introduction-to-sql/",
"rpg_db.sqlite3")
db = sqlite3.connect(path)
c = db.cursor()
query1 = "SELECT COUNT() FROM charactercreator_character"
chars = c.execute(query1).fetchone()[0]
print(f"There are {chars} total characters.")
print(f"Subclasses:")
query2 = "SELECT COUNT() FROM charactercreator_cleric"
clerics = c.execute(query2).fetchone()[0]
print(f" Cleric: {clerics}")
query3 = "SELECT COUNT() FROM charactercreator_fighter"
fighters = c.execute(query3).fetchone()[0]
print(f" Fighter: {fighters}")
query4 = "SELECT COUNT() FROM charactercreator_thief"
thieves = c.execute(query4).fetchone()[0]
print(f" Thief: {thieves}")
query5 = "SELECT COUNT() FROM charactercreator_necromancer"
necros = c.execute(query5).fetchone()[0]
print(f" Necromancer: {necros}")
query6 = "SELECT COUNT() FROM charactercreator_mage"
magi = c.execute(query6).fetchone()[0] - necros
print(f" Mage: {magi}")
query7 = "SELECT COUNT() FROM armory_item"
items = c.execute(query7).fetchone()[0]
print(f"There are {items} total items.")
query8 = "SELECT COUNT() FROM armory_weapon"
weapons = c.execute(query8).fetchone()[0]
print(f"{weapons} items are weapons.")
non_weapons = items - weapons
print(f"{non_weapons} items are non-weapons.")
characters = range(1, 21)
total_items = 0
total_weapons = 0
for character in characters:
query1 = "SELECT COUNT() " \
"FROM charactercreator_character_inventory " \
"WHERE character_id = " + str(character)
items = c.execute(query1).fetchone()[0]
total_items += items
print(f"Character {character} has {items} items.")
query2 = "SELECT character_id " \
"FROM charactercreator_character_inventory c " \
"WHERE character_id = " + str(character) + " AND " \
"EXISTS(SELECT item_ptr_id " \
"FROM armory_weapon " \
"WHERE item_ptr_id = c.item_id)"
weapons = len(c.execute(query2).fetchall())
total_weapons += weapons
print(f"Character {character} has {weapons} weapons.")
ave_items = total_items / 20
ave_weapons = total_weapons / 20
print(f"On average, characters 1-20 have {ave_items} items.")
print(f"On average, characters 1-20 have {ave_weapons} weapons.")
# save and close
db.close()
| 0 | 0 | 0 |
1582d64083623b0b15243db5fce85912d6b67556 | 974 | py | Python | views.py | Nithin/django-event-app | 09a11fd607e0654e08f6c33b2c49211eee4db5bc | [
"MIT"
] | 1 | 2016-05-09T07:30:40.000Z | 2016-05-09T07:30:40.000Z | views.py | Nithin/django-event-app | 09a11fd607e0654e08f6c33b2c49211eee4db5bc | [
"MIT"
] | null | null | null | views.py | Nithin/django-event-app | 09a11fd607e0654e08f6c33b2c49211eee4db5bc | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from eventapp.models import Event, Category
| 36.074074 | 103 | 0.735113 | from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from eventapp.models import Event, Category
def upcoming_events(request):
    """Render the base calendar template with an empty context."""
    context = {}
    return render_to_response('cal_base.html', context, context_instance=RequestContext(request))
def upcoming_events_by_category(request, slug):
    """Stub view: log the requested category slug and return an empty response.

    NOTE(review): filtering by category is not implemented yet.
    """
    # Bug fix: use the function form of print; the statement form
    # (`print slug`) is Python 2-only syntax.
    print(slug)
    return HttpResponse('')
def view_event(request, year, month, day, slug=None):
    """Render all events starting in (year, month) that span the given day."""
    context = {}
    event_list = []
    # Only events whose *start* falls in the requested year/month are fetched.
    events = Event.objects.filter(start__year=year,start__month=month)
    for event in events:
        # NOTE(review): an event matches when `day` lies in
        # [start.day, start.day + duration_days); a zero-length event
        # (start == end) never matches, and events crossing a month boundary
        # are not handled -- confirm this is intended.
        if int(day) in range(int(event.start.day),int(event.start.day+(event.end-event.start).days),1):
            event_list.append(event)
    context['events'] = event_list
    context['year'] = year
    context['month'] = month
    context['day'] = day
    return render_to_response('display_events.html',context,context_instance=RequestContext(request))
| 713 | 0 | 69 |
ed48939fbec6c881ba34620cf55b6fb7622b6353 | 120 | py | Python | mimid/__init__.py | konradhalas/mimid | fca4b61c5ee9e5bbb2e60f9a3fcc3593a30333d2 | [
"MIT"
] | 11 | 2019-06-12T19:33:13.000Z | 2021-07-12T01:20:55.000Z | mimid/__init__.py | konradhalas/mimid | fca4b61c5ee9e5bbb2e60f9a3fcc3593a30333d2 | [
"MIT"
] | 1 | 2021-08-29T15:27:17.000Z | 2021-08-29T15:27:17.000Z | mimid/__init__.py | konradhalas/mimid | fca4b61c5ee9e5bbb2e60f9a3fcc3593a30333d2 | [
"MIT"
] | null | null | null | from mimid.api import mock, every, verify, slot, prop
from mimid.matchers.value import *
from mimid.exceptions import *
| 30 | 53 | 0.783333 | from mimid.api import mock, every, verify, slot, prop
from mimid.matchers.value import *
from mimid.exceptions import *
| 0 | 0 | 0 |
a8f1c2f92a2a87a59db49f43ef8386117f292c81 | 5,706 | py | Python | line_fit_video.py | WalesPeng/CarND-Advanced-Lane-Lines-P4 | 045caf4f8d47300533861301dd4d168b7d50926c | [
"MIT"
] | null | null | null | line_fit_video.py | WalesPeng/CarND-Advanced-Lane-Lines-P4 | 045caf4f8d47300533861301dd4d168b7d50926c | [
"MIT"
] | null | null | null | line_fit_video.py | WalesPeng/CarND-Advanced-Lane-Lines-P4 | 045caf4f8d47300533861301dd4d168b7d50926c | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
from Line import Line
from line_fit import line_fit, tune_fit, final_viz, calc_curve, calc_vehicle_offset, viz2
from moviepy.editor import VideoFileClip
# Global variables (just to make the moviepy video annotation work)
# Load the camera matrix and distortion coefficients produced by the
# calibration step; these are applied to every incoming frame.
with open('calibrate_camera.p', 'rb') as f:
    save_dict = pickle.load(f)
    mtx = save_dict['mtx']
    dist = save_dict['dist']
window_size = 5 # how many frames for line smoothing
left_line = Line(n=window_size)
right_line = Line(n=window_size)
detected = False # did the fast line fit detect the lines?
left_curve, right_curve = 0., 0. # radius of curvature for left and right lanes
left_lane_inds, right_lane_inds = None, None # for calculating curvature
frameCount = 0  # number of frames processed so far
retLast = {}  # last successful line-fit result, reused when detection fails
# MoviePy video annotation will call this function
def annotate_image(img_in):
    """
    Annotate the input image with lane line markings
    Returns annotated image
    """
    global mtx, dist, left_line, right_line, detected, frameCount, retLast
    global left_curve, right_curve, left_lane_inds, right_lane_inds
    frameCount += 1
    # Trapezoid in image coordinates; only used below to draw the x/y
    # outline on the debug figure.
    src = np.float32(
        [[200, 720],
        [1100, 720],
        [520, 500],
        [760, 500]])
    x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]]
    y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]]
    # Undistort, threshold, perspective transform
    undist = cv2.undistort(img_in, mtx, dist, None, mtx)
    img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
    binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)
    # Perform polynomial fit
    if not detected:
        # Slow line fit
        ret = line_fit(binary_warped)
        # if detect no lanes, use last result instead.
        if len(ret) == 0:
            ret = retLast
        left_fit = ret['left_fit']
        right_fit = ret['right_fit']
        nonzerox = ret['nonzerox']
        nonzeroy = ret['nonzeroy']
        out_img = ret['out_img']
        left_lane_inds = ret['left_lane_inds']
        right_lane_inds = ret['right_lane_inds']
        histogram = ret['histo']
        # Get moving average of line fit coefficients
        left_fit = left_line.add_fit(left_fit)
        right_fit = right_line.add_fit(right_fit)
        # Calculate curvature
        left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
        detected = True # slow line fit always detects the line
    else: # implies detected == True
        # Fast line fit
        left_fit = left_line.get_fit()
        right_fit = right_line.get_fit()
        ret = tune_fit(binary_warped, left_fit, right_fit)
        left_fit = ret['left_fit']
        right_fit = ret['right_fit']
        nonzerox = ret['nonzerox']
        nonzeroy = ret['nonzeroy']
        left_lane_inds = ret['left_lane_inds']
        right_lane_inds = ret['right_lane_inds']
        # Only make updates if we detected lines in current frame
        # NOTE(review): `ret` was already indexed above, so if tune_fit can
        # return None this crashes before reaching the check; the None test
        # here can never trigger -- revisit.
        if ret is not None:
            left_fit = ret['left_fit']
            right_fit = ret['right_fit']
            nonzerox = ret['nonzerox']
            nonzeroy = ret['nonzeroy']
            left_lane_inds = ret['left_lane_inds']
            right_lane_inds = ret['right_lane_inds']
            left_fit = left_line.add_fit(left_fit)
            right_fit = right_line.add_fit(right_fit)
            left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
        else:
            detected = False
    vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)
    # Perform final visualization on top of original undistorted image
    result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)
    # Remember this frame's fit so an empty detection next frame can reuse it.
    retLast = ret
    # --- Debug outputs: save intermediate stages to ./output_images ---
    save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount)
    viz2(binary_warped, ret, save_viz2)
    save_warped = './output_images/warped_test%d.jpg' % (frameCount)
    plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1)
    if save_warped is None:
        plt.show()
    else:
        plt.savefig(save_warped)
    plt.gcf().clear()
    save_binary = './output_images/binary_test%d.jpg' % (frameCount)
    plt.imshow(img, cmap='gray', vmin=0, vmax=1)
    if save_binary is None:
        plt.show()
    else:
        plt.savefig(save_binary)
    plt.gcf().clear()
    # NOTE(review): frameCount is incremented at the top, so this condition
    # is true for every frame.
    if frameCount > 0:
        fig = plt.gcf()
        fig.set_size_inches(16.5, 8.5)
        plt.subplot(2, 3, 1)
        plt.imshow(undist)
        # plt.plot(undist)
        plt.plot(x, y)
        plt.title('undist')
        plt.subplot(2, 3, 2)
        plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
        plt.title('hls_bin')
        plt.subplot(2, 3, 3)
        plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
        plt.title('abs_bin')
        plt.subplot(2, 3, 4)
        plt.imshow(img, cmap='gray', vmin=0, vmax=1)
        plt.title('img')
        plt.subplot(2, 3, 5)
        # NOTE(review): `out_img` is only assigned on the slow-fit branch;
        # on fast-fit frames this line raises NameError -- confirm and guard.
        plt.imshow(out_img)
        plt.title('out_img')
        plt.subplot(2, 3, 6)
        plt.imshow(result, cmap='gray', vmin=0, vmax=1)
        plt.title('result')
        # NOTE(review): hard-coded absolute Windows path; prefer a relative
        # or configurable output directory.
        save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount)
        if save_result is None:
            plt.show()
        else:
            plt.savefig(save_result)
        plt.gcf().clear()
    return result
def annotate_video(input_file, output_file):
""" Given input_file video, save annotated video to output_file """
video = VideoFileClip(input_file)
annotated_video = video.fl_image(annotate_image)
annotated_video.write_videofile(output_file, audio=False)
if __name__ == '__main__':
# Annotate the video
# annotate_video('challenge_video.mp4', 'challenge_video_out.mp4')
# Show example annotated image on screen for sanity check
for i in range (1, 7):
img_file = 'test_images/test%d.jpg' % (i)
img = mpimg.imread(img_file)
result = annotate_image(img)
plt.imshow(result)
save_file = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/test%d.jpg' % (i)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
| 29.874346 | 113 | 0.720995 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
from Line import Line
from line_fit import line_fit, tune_fit, final_viz, calc_curve, calc_vehicle_offset, viz2
from moviepy.editor import VideoFileClip
# Global variables (just to make the moviepy video annotation work)
with open('calibrate_camera.p', 'rb') as f:
save_dict = pickle.load(f)
mtx = save_dict['mtx']
dist = save_dict['dist']
window_size = 5 # how many frames for line smoothing
left_line = Line(n=window_size)
right_line = Line(n=window_size)
detected = False # did the fast line fit detect the lines?
left_curve, right_curve = 0., 0. # radius of curvature for left and right lanes
left_lane_inds, right_lane_inds = None, None # for calculating curvature
frameCount = 0
retLast = {}
# MoviePy video annotation will call this function
def annotate_image(img_in):
"""
Annotate the input image with lane line markings
Returns annotated image
"""
global mtx, dist, left_line, right_line, detected, frameCount, retLast
global left_curve, right_curve, left_lane_inds, right_lane_inds
frameCount += 1
src = np.float32(
[[200, 720],
[1100, 720],
[520, 500],
[760, 500]])
x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]]
y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]]
# Undistort, threshold, perspective transform
undist = cv2.undistort(img_in, mtx, dist, None, mtx)
img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)
# Perform polynomial fit
if not detected:
# Slow line fit
ret = line_fit(binary_warped)
# if detect no lanes, use last result instead.
if len(ret) == 0:
ret = retLast
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
out_img = ret['out_img']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
histogram = ret['histo']
# Get moving average of line fit coefficients
left_fit = left_line.add_fit(left_fit)
right_fit = right_line.add_fit(right_fit)
# Calculate curvature
left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
detected = True # slow line fit always detects the line
else: # implies detected == True
# Fast line fit
left_fit = left_line.get_fit()
right_fit = right_line.get_fit()
ret = tune_fit(binary_warped, left_fit, right_fit)
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Only make updates if we detected lines in current frame
if ret is not None:
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
left_fit = left_line.add_fit(left_fit)
right_fit = right_line.add_fit(right_fit)
left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
else:
detected = False
vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)
# Perform final visualization on top of original undistorted image
result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)
retLast = ret
save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount)
viz2(binary_warped, ret, save_viz2)
save_warped = './output_images/warped_test%d.jpg' % (frameCount)
plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1)
if save_warped is None:
plt.show()
else:
plt.savefig(save_warped)
plt.gcf().clear()
save_binary = './output_images/binary_test%d.jpg' % (frameCount)
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
if save_binary is None:
plt.show()
else:
plt.savefig(save_binary)
plt.gcf().clear()
if frameCount > 0:
fig = plt.gcf()
fig.set_size_inches(16.5, 8.5)
plt.subplot(2, 3, 1)
plt.imshow(undist)
# plt.plot(undist)
plt.plot(x, y)
plt.title('undist')
plt.subplot(2, 3, 2)
plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
plt.title('hls_bin')
plt.subplot(2, 3, 3)
plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
plt.title('abs_bin')
plt.subplot(2, 3, 4)
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
plt.title('img')
plt.subplot(2, 3, 5)
plt.imshow(out_img)
plt.title('out_img')
plt.subplot(2, 3, 6)
plt.imshow(result, cmap='gray', vmin=0, vmax=1)
plt.title('result')
save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount)
if save_result is None:
plt.show()
else:
plt.savefig(save_result)
plt.gcf().clear()
return result
def annotate_video(input_file, output_file):
""" Given input_file video, save annotated video to output_file """
video = VideoFileClip(input_file)
annotated_video = video.fl_image(annotate_image)
annotated_video.write_videofile(output_file, audio=False)
if __name__ == '__main__':
# Annotate the video
# annotate_video('challenge_video.mp4', 'challenge_video_out.mp4')
# Show example annotated image on screen for sanity check
for i in range (1, 7):
img_file = 'test_images/test%d.jpg' % (i)
img = mpimg.imread(img_file)
result = annotate_image(img)
plt.imshow(result)
save_file = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/test%d.jpg' % (i)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
| 0 | 0 | 0 |
e0d42a5e60b795a64dd17931142ea5a066480ae6 | 1,829 | py | Python | neurofonix/markov.py | Nyaatan/Neurofonix | f59d1586bd4ac712e4e87ebb8f212d2979ff1060 | [
"MIT"
] | null | null | null | neurofonix/markov.py | Nyaatan/Neurofonix | f59d1586bd4ac712e4e87ebb8f212d2979ff1060 | [
"MIT"
] | null | null | null | neurofonix/markov.py | Nyaatan/Neurofonix | f59d1586bd4ac712e4e87ebb8f212d2979ff1060 | [
"MIT"
] | null | null | null | from random import randint
| 31 | 83 | 0.477857 | from random import randint
class Model:
def __init__(self, text):
self.text = text
self.model = {}
self.last = None
def train(self):
if self.text is str:
split_text = self.text.split(' ')
else:
split_text = self.text
i = 0
raw_model = {}
for word in split_text:
if word not in raw_model.keys():
raw_model[word] = []
try:
raw_model[word].append(split_text[i + 1])
except IndexError:
pass
i += 1
# print(raw_model)
for word in raw_model.keys():
counts = {}
for occ in raw_model[word]:
if occ not in counts.keys():
counts[occ] = raw_model[word].count(occ)
counts['__len__'] = len(raw_model[word])
probs = {}
for key in counts.keys():
if key != '__len__':
probs[key] = counts[key] / counts['__len__']
print(word, probs)
self.model[word] = probs
def get_next(self, start=None):
if start is not None:
if '-play' not in start:
start = '-play %s' % start
self.last = start
return start
if self.last is None:
self.last = list(self.model.keys())[randint(0, len(self.model.keys()))]
return self.last
rand = randint(0, 100)
rsum = 0
try:
for next_word in self.model[self.last]:
rsum += self.model[self.last][next_word] * 100
if rsum >= rand:
return next_word
except:
self.last = list(self.model.keys())[randint(0, len(self.model.keys()))]
return self.last
| 1,707 | -9 | 103 |
ad8de01e77855c8ebc55b3a4f85712d69afc8ece | 1,203 | py | Python | api/medication/migrations/0001_initial.py | SaitoBP/PetCard-API | 48be97d019890f7022b48c4f4b00cec32c83b5ad | [
"MIT"
] | null | null | null | api/medication/migrations/0001_initial.py | SaitoBP/PetCard-API | 48be97d019890f7022b48c4f4b00cec32c83b5ad | [
"MIT"
] | null | null | null | api/medication/migrations/0001_initial.py | SaitoBP/PetCard-API | 48be97d019890f7022b48c4f4b00cec32c83b5ad | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-18 13:27
from django.db import migrations, models
import django.db.models.deletion
| 35.382353 | 136 | 0.575229 | # Generated by Django 3.1.2 on 2020-10-18 13:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Medication',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('medicine', models.CharField(max_length=255)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='MedicationSchedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('schedule', models.DateTimeField()),
('cycle', models.CharField(choices=[('D', 'DAILY'), ('W', 'WEEKLY'), ('M', 'MONTHLY'), ('Y', 'YEARLY')], max_length=1)),
('medication_status', models.BooleanField(default=False)),
('medication', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='medication.medication')),
],
),
]
| 0 | 1,056 | 23 |
0d2bc9adb70264bb8fb1a7d109a00bf8f97db3a7 | 177 | py | Python | app/codeprogress/urls.py | xXHachimanXx/fc-observability-elastic | c38b317371e96c118e2f17f99f62af75ac1634d7 | [
"MIT"
] | null | null | null | app/codeprogress/urls.py | xXHachimanXx/fc-observability-elastic | c38b317371e96c118e2f17f99f62af75ac1634d7 | [
"MIT"
] | 3 | 2021-07-07T01:51:58.000Z | 2021-07-13T19:31:57.000Z | app/codeprogress/urls.py | xXHachimanXx/fc-observability-elastic | c38b317371e96c118e2f17f99f62af75ac1634d7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('exemplo/', include('exemplo.urls')),
] | 25.285714 | 46 | 0.683616 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('exemplo/', include('exemplo.urls')),
] | 0 | 0 | 0 |
52972f2e0ab5b92482162cce466332e563073968 | 2,046 | py | Python | kyu_8/keep_hydrated/test_keep_hydrated.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 1 | 2022-02-12T05:56:04.000Z | 2022-02-12T05:56:04.000Z | kyu_8/keep_hydrated/test_keep_hydrated.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 182 | 2020-04-30T00:51:36.000Z | 2021-09-07T04:15:05.000Z | kyu_8/keep_hydrated/test_keep_hydrated.py | pedrocodacyorg2/codewars | ba3ea81125b6082d867f0ae34c6c9be15e153966 | [
"Unlicense"
] | 4 | 2020-04-29T22:04:20.000Z | 2021-07-13T20:04:14.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ALGORITHMS MATHEMATICS NUMBERS
import unittest
import allure
from utils.log_func import print_log
from kyu_8.keep_hydrated.keep_hydrated import litres
@allure.epic('8 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Math")
@allure.sub_suite("Unit Tests")
@allure.feature("Calculation")
@allure.story('Keep Hydrated!')
@allure.tag('FUNDAMENTALS',
'ALGORITHMS',
'MATHEMATICS',
'NUMBERS')
@allure.link(url='https://www.codewars.com/kata/582cb0224e56e068d800003c/train/python',
name='Source/Kata')
class KeepHydratedTestCase(unittest.TestCase):
"""
Testing litres function
"""
def test_keep_hydrated(self):
"""
Testing litres function with various test inputs
:return:
"""
allure.dynamic.title("Testing litres function with various test inputs")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Enter hours and verify the output"):
test_data = [
(2, 1, 'should return 1 litre'),
(1.4, 0, 'should return 0 litres'),
(12.3, 6, 'should return 6 litres'),
(0.82, 0, 'should return 0 litres'),
(11.8, 5, 'should return 5 litres'),
(1787, 893, 'should return 893 litres'),
(0, 0, 'should return 0 litres')
]
for hours, expected, message in test_data:
print_log(hours=hours, expected=expected)
self.assertEqual(expected, litres(hours), message)
| 34.677966 | 94 | 0.572825 | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ALGORITHMS MATHEMATICS NUMBERS
import unittest
import allure
from utils.log_func import print_log
from kyu_8.keep_hydrated.keep_hydrated import litres
@allure.epic('8 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Math")
@allure.sub_suite("Unit Tests")
@allure.feature("Calculation")
@allure.story('Keep Hydrated!')
@allure.tag('FUNDAMENTALS',
'ALGORITHMS',
'MATHEMATICS',
'NUMBERS')
@allure.link(url='https://www.codewars.com/kata/582cb0224e56e068d800003c/train/python',
name='Source/Kata')
class KeepHydratedTestCase(unittest.TestCase):
"""
Testing litres function
"""
def test_keep_hydrated(self):
"""
Testing litres function with various test inputs
:return:
"""
allure.dynamic.title("Testing litres function with various test inputs")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Enter hours and verify the output"):
test_data = [
(2, 1, 'should return 1 litre'),
(1.4, 0, 'should return 0 litres'),
(12.3, 6, 'should return 6 litres'),
(0.82, 0, 'should return 0 litres'),
(11.8, 5, 'should return 5 litres'),
(1787, 893, 'should return 893 litres'),
(0, 0, 'should return 0 litres')
]
for hours, expected, message in test_data:
print_log(hours=hours, expected=expected)
self.assertEqual(expected, litres(hours), message)
| 0 | 0 | 0 |
248d78344810e6b338422210a38378f7689101cf | 6,041 | py | Python | kontrasto/dominantcolors.py | nimasmi/kontrasto | 08fc0279e2b3e1da1a5cec769874572455fd0527 | [
"MIT"
] | 13 | 2021-05-13T14:47:49.000Z | 2022-03-23T08:11:02.000Z | kontrasto/dominantcolors.py | nimasmi/kontrasto | 08fc0279e2b3e1da1a5cec769874572455fd0527 | [
"MIT"
] | 3 | 2021-05-08T21:53:53.000Z | 2022-03-12T03:07:58.000Z | kontrasto/dominantcolors.py | nimasmi/kontrasto | 08fc0279e2b3e1da1a5cec769874572455fd0527 | [
"MIT"
] | 2 | 2021-05-23T10:28:35.000Z | 2021-11-28T18:59:54.000Z | # -*- coding: utf-8 -*-
# https://github.com/wenmin-wu/dominant-colors-py
__author__ = "wuwenmin1991@gmail.com"
import numpy as np # lgtm [py/import-and-import-from]
from numpy import linalg as LA
from PIL import Image
from collections import deque
class ColorNode(object):
""""""
@property
@mean.setter
@property
@cov.setter
@property
@class_id.setter
@property
@left.setter
@property
@right.setter
@property
@num_pixel.setter
def rgba2rgb(rgba):
"""
:param self:
:param rgba:
:return:
"""
background = (255, 255, 255)
alpha = rgba[..., -1]
channels = rgba[..., :-1]
out = np.empty_like(channels)
for ichan in range(channels.shape[-1]):
w = alpha / 255.0
out[..., ichan] = np.clip(
w * channels[..., ichan] + (1 - w) * background[ichan],
a_min=0,
a_max=255,
)
out.astype(np.uint8)
return out
def find_dominant_colors(img_colors, count):
"""
find dominant colors according to given image colors
:param img_colors: image colors can either in shape M*N*3 or N*3, the last axis is RGB color
:param count: number of dominant colors to return
:return: dominant colors in given number
"""
colors = img_colors / 255.0
if len(colors.shape) == 3 and colors.shape[-1] == 3:
colors = colors.reshape((-1, 3))
# map each color to the first class id
classes = np.ones(colors.shape[0], np.int8)
root = ColorNode()
root.class_id = 1
get_class_mean_cov(colors, classes, root)
for _ in range(count - 1):
next_node = get_max_eigenvalue_node(root)
next_class_id = get_next_class_id(root)
partition_class(colors, classes, next_class_id, next_node)
get_class_mean_cov(colors, classes, next_node.left)
get_class_mean_cov(colors, classes, next_node.right)
return get_dominant_colors(root)
def get_class_mean_cov(colors, classes, node):
"""
Calculate mean and cov of colors in this class
"""
curr_node_colors = colors[np.where(classes == node.class_id)]
node.mean = curr_node_colors.mean(axis=0)
node.cov = np.cov(curr_node_colors.T)
node.num_pixel = curr_node_colors.shape[0]
def get_max_eigenvalue_node(curr_node):
"""
Get the node which has the maximum eigen value of the colors cov
"""
queue = deque()
max_eigen = -1
queue.append(curr_node)
if not (curr_node.left or curr_node.right):
return curr_node
while len(queue):
node = queue.popleft()
if node.left and node.right:
queue.append(node.left)
queue.append(node.right)
continue
eigen_vals, eigen_vecs = LA.eig(node.cov)
eigen_val = eigen_vals.max()
if eigen_val > max_eigen:
max_eigen = eigen_val
ret = node
return ret
def get_dominant_colors_for(image, num_colors):
"""Get dominant colors from a given pillow Image instance"""
im_arr = np.asarray(image)
if image.mode == "RGBA":
im_arr = rgba2rgb(im_arr)
return find_dominant_colors(im_arr, num_colors)
| 28.097674 | 96 | 0.639298 | # -*- coding: utf-8 -*-
# https://github.com/wenmin-wu/dominant-colors-py
__author__ = "wuwenmin1991@gmail.com"
import numpy as np # lgtm [py/import-and-import-from]
from numpy import linalg as LA
from PIL import Image
from collections import deque
class ColorNode(object):
""""""
def __init__(self):
self.__mean = None # the mean of this node
self.__cov = None # the covariance of this node
self.__class_id = None
self.__left = None
self.__right = None
self.__num_pixel = None
@property
def mean(self):
return self.__mean
@mean.setter
def mean(self, mean):
self.__mean = mean
@property
def cov(self):
return self.__cov
@cov.setter
def cov(self, cov):
self.__cov = cov
@property
def class_id(self):
return self.__class_id
@class_id.setter
def class_id(self, class_id):
self.__class_id = class_id
@property
def left(self):
return self.__left
@left.setter
def left(self, left):
self.__left = left
@property
def right(self):
return self.__right
@right.setter
def right(self, right):
self.__right = right
@property
def num_pixel(self):
return self.__num_pixel
@num_pixel.setter
def num_pixel(self, num_pixel):
self.__num_pixel = num_pixel
def rgba2rgb(rgba):
"""
:param self:
:param rgba:
:return:
"""
background = (255, 255, 255)
alpha = rgba[..., -1]
channels = rgba[..., :-1]
out = np.empty_like(channels)
for ichan in range(channels.shape[-1]):
w = alpha / 255.0
out[..., ichan] = np.clip(
w * channels[..., ichan] + (1 - w) * background[ichan],
a_min=0,
a_max=255,
)
out.astype(np.uint8)
return out
def find_dominant_colors(img_colors, count):
"""
find dominant colors according to given image colors
:param img_colors: image colors can either in shape M*N*3 or N*3, the last axis is RGB color
:param count: number of dominant colors to return
:return: dominant colors in given number
"""
colors = img_colors / 255.0
if len(colors.shape) == 3 and colors.shape[-1] == 3:
colors = colors.reshape((-1, 3))
# map each color to the first class id
classes = np.ones(colors.shape[0], np.int8)
root = ColorNode()
root.class_id = 1
get_class_mean_cov(colors, classes, root)
for _ in range(count - 1):
next_node = get_max_eigenvalue_node(root)
next_class_id = get_next_class_id(root)
partition_class(colors, classes, next_class_id, next_node)
get_class_mean_cov(colors, classes, next_node.left)
get_class_mean_cov(colors, classes, next_node.right)
return get_dominant_colors(root)
def get_class_mean_cov(colors, classes, node):
"""
Calculate mean and cov of colors in this class
"""
curr_node_colors = colors[np.where(classes == node.class_id)]
node.mean = curr_node_colors.mean(axis=0)
node.cov = np.cov(curr_node_colors.T)
node.num_pixel = curr_node_colors.shape[0]
def get_max_eigenvalue_node(curr_node):
"""
Get the node which has the maximum eigen value of the colors cov
"""
queue = deque()
max_eigen = -1
queue.append(curr_node)
if not (curr_node.left or curr_node.right):
return curr_node
while len(queue):
node = queue.popleft()
if node.left and node.right:
queue.append(node.left)
queue.append(node.right)
continue
eigen_vals, eigen_vecs = LA.eig(node.cov)
eigen_val = eigen_vals.max()
if eigen_val > max_eigen:
max_eigen = eigen_val
ret = node
return ret
def get_next_class_id(root):
max_id = 0
queue = deque()
queue.append(root)
while len(queue):
curr_node = queue.popleft()
if curr_node.class_id > max_id:
max_id = curr_node.class_id
if curr_node.left:
queue.append(curr_node.left)
if curr_node.right:
queue.append(curr_node.right)
return max_id + 1
def partition_class(colors, classes, next_id, node):
class_id = node.class_id
left_id = next_id
right_id = next_id + 1
eigen_vals, eigen_vecs = LA.eig(node.cov)
eigen_vec = eigen_vecs[eigen_vals.argmax()]
threshold = np.dot(node.mean, eigen_vec)
color_indices = np.where(classes == class_id)[0]
curr_colors = colors[color_indices]
products = np.dot(curr_colors, eigen_vec)
left_indices = color_indices[np.where(products <= threshold)[0]]
right_indices = color_indices[np.where(products > threshold)[0]]
classes[left_indices] = left_id
classes[right_indices] = right_id
node.left = ColorNode()
node.left.class_id = left_id
node.right = ColorNode()
node.right.class_id = right_id
def get_dominant_colors(root):
dominant_colors = []
queue = deque()
queue.append(root)
while len(queue):
curr_node = queue.popleft()
if curr_node.left and curr_node.right:
queue.append(curr_node.left)
queue.append(curr_node.right)
continue
color = curr_node.mean * 255
color = np.clip(color, 0, 255)
color = color.astype(np.uint8)
dominant_colors.append([curr_node.num_pixel, color.tolist()])
# it is necessary to sort according to number of pixels in the nodes
dominant_colors.sort(key=lambda x: x[0], reverse=True)
return [color[1] for color in dominant_colors]
def get_image_dominant_colors(image_path, num_colors):
image = Image.open(image_path)
return get_dominant_colors_for(image, num_colors)
def get_dominant_colors_for(image, num_colors):
"""Get dominant colors from a given pillow Image instance"""
im_arr = np.asarray(image)
if image.mode == "RGBA":
im_arr = rgba2rgb(im_arr)
return find_dominant_colors(im_arr, num_colors)
| 2,430 | 0 | 431 |
98c77a54e9634f86794afb330c103e82685e8c5e | 628 | py | Python | users.py | mattgerstman/DMAssassins-2.0 | 89379274abb8d3084046764f92ae20ab548edc84 | [
"MIT"
] | null | null | null | users.py | mattgerstman/DMAssassins-2.0 | 89379274abb8d3084046764f92ae20ab548edc84 | [
"MIT"
] | null | null | null | users.py | mattgerstman/DMAssassins-2.0 | 89379274abb8d3084046764f92ae20ab548edc84 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import time
import json
outputList = []
if __name__ == "__main__":
filename = sys.argv[1]
namesList = open(filename,'r')
for line in namesList:
processLine(line)
output = open('output.json','w')
print json.dumps(outputList)
output.write(json.dumps(outputList))
# json.dumps(outputList) | 19.030303 | 37 | 0.700637 | #!/usr/bin/env python
import sys
import time
import json
outputList = []
def processLine(line):
line = line.strip()
tokens = line.split(',')
currentName = {}
currentName['name']=tokens[0];
currentName['username']=tokens[1];
currentName['email']=tokens[2];
currentName['team']=tokens[3]
# print currentName;
global outputList;
outputList += [currentName];
if __name__ == "__main__":
filename = sys.argv[1]
namesList = open(filename,'r')
for line in namesList:
processLine(line)
output = open('output.json','w')
print json.dumps(outputList)
output.write(json.dumps(outputList))
# json.dumps(outputList) | 270 | 0 | 23 |
53992764035d7ea7c6ff67656401817eb45590b1 | 883 | py | Python | ttk/data/generation.py | tacticsiege/TacticToolkit | 74f86180776c77507f096adb0aacf5d23840b341 | [
"MIT"
] | null | null | null | ttk/data/generation.py | tacticsiege/TacticToolkit | 74f86180776c77507f096adb0aacf5d23840b341 | [
"MIT"
] | null | null | null | ttk/data/generation.py | tacticsiege/TacticToolkit | 74f86180776c77507f096adb0aacf5d23840b341 | [
"MIT"
] | null | null | null | import numpy as np | 27.59375 | 88 | 0.491506 | import numpy as np
def all_parity_pairs(nbit):
N = 2**nbit
remainder = 100 - (N % 100)
Ntotal = N + remainder
X = np.zeros((Ntotal, nbit))
Y = np.zeros(Ntotal)
for ii in range(Ntotal):
i = ii % N
# generate the ith sample
for j in range(nbit):
if i % (2**(j+1)) != 0:
i -= 2**j
X[ii, j] = 1
Y[ii] = X[ii].sum() % 2
return X, Y
def parity_sequence_bits(X, y):
# figure out each y(t) in a sequence of bits
# y(t) is a series of bits in this example, would be a series of words in a sentence
N, t = X.shape
Y_t = np.zeros(X.shape, dtype=np.int32)
for n in range(N):
ones_count = 0
for i in range(t):
if X[n,i] == 1:
ones_count += 1
if ones_count % 2 == 1:
Y_t[n,i] = 1
return Y_t | 819 | 0 | 46 |
f957a9c68089ad99d3c73dc926e3a51f220c190f | 1,473 | py | Python | wordfreq.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | 1 | 2019-12-28T12:31:28.000Z | 2019-12-28T12:31:28.000Z | wordfreq.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | null | null | null | wordfreq.py | mas250/Python3 | 6ac6f0ffe7869cd7520b2ae0debf3650116a97b1 | [
"MIT"
] | null | null | null | # Print the frequencies with which words occur in a file. The results are
# printed with the words in the order that they occur in the file, but a
# dictionary is used to keep a count of the words. Counting does not start
# until a line starting with '***' has been seen and stops when another
# line with '***' is found, making the program suitable for Project
# Gutenberg files.
import string
def delete_punctuation(str):
"""
Remove punctuation from a string, replacing it with a space
"""
for p in string.punctuation:
str = str.replace(p, ' ')
return str
filename = 'dracula-full.txt'
f = open(filename, 'r')
words = []
count = {}
seenstars = False # We haven't seen the first '***' line
while True:
line = f.readline()
if not line:
print('EOF before second "***" line encountered')
break
if line[:3] == '***':
if seenstars:
break # Second '***' so finish
else:
seenstars = True
continue # Don't process the first *** line
if not seenstars: # Still in the preamble
continue
line = delete_punctuation(line).lower()
for w in line.split():
try:
count[w] += 1
except:
count[w] = 1
words.append(w)
for w in count:
print('%20s%6d' % (w, count[w]))
sorted(count.keys())
for w in range(50):
print (count[w])
| 27.277778 | 74 | 0.573659 | # Print the frequencies with which words occur in a file. The results are
# printed with the words in the order that they occur in the file, but a
# dictionary is used to keep a count of the words. Counting does not start
# until a line starting with '***' has been seen and stops when another
# line with '***' is found, making the program suitable for Project
# Gutenberg files.
import string
def delete_punctuation(str):
"""
Remove punctuation from a string, replacing it with a space
"""
for p in string.punctuation:
str = str.replace(p, ' ')
return str
filename = 'dracula-full.txt'
f = open(filename, 'r')
words = []
count = {}
seenstars = False # We haven't seen the first '***' line
while True:
line = f.readline()
if not line:
print('EOF before second "***" line encountered')
break
if line[:3] == '***':
if seenstars:
break # Second '***' so finish
else:
seenstars = True
continue # Don't process the first *** line
if not seenstars: # Still in the preamble
continue
line = delete_punctuation(line).lower()
for w in line.split():
try:
count[w] += 1
except:
count[w] = 1
words.append(w)
for w in count:
print('%20s%6d' % (w, count[w]))
sorted(count.keys())
for w in range(50):
print (count[w])
| 0 | 0 | 0 |
86a27767ddd3117fb570a0e5fd9ffcf5e7f820ec | 43 | py | Python | workflow/scripts/script1.py | kopardev/CCBR_ATACseq | be3f43213b67199ffd82db20eae3e7254d3d06cd | [
"MIT"
] | 3 | 2021-02-28T23:10:52.000Z | 2021-03-05T14:10:41.000Z | workflow/scripts/script1.py | CCBR/CCBR_cookiecutter | 5bb3d2461fd717193b47f85b62909a9cfae06d24 | [
"MIT"
] | 5 | 2021-07-16T21:18:04.000Z | 2021-11-30T16:22:39.000Z | workflow/scripts/script1.py | kopardev/CCBR_ATACseq | be3f43213b67199ffd82db20eae3e7254d3d06cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# some python script
| 14.333333 | 21 | 0.72093 | #!/usr/bin/env python
# some python script
| 0 | 0 | 0 |
ea4d7044f29cf6da8ac335089ace95bf89a971ae | 2,063 | py | Python | pytest_splunk_addon/helmut/exceptions/command_execution.py | harshshah2-crest/pytest-splunk-addon | fb6ad828edf90f14086380a18711eb831ee47349 | [
"Apache-2.0"
] | 39 | 2020-06-09T17:37:21.000Z | 2022-02-08T01:57:35.000Z | pytest_splunk_addon/helmut/exceptions/command_execution.py | harshshah2-crest/pytest-splunk-addon | fb6ad828edf90f14086380a18711eb831ee47349 | [
"Apache-2.0"
] | 372 | 2020-04-15T13:55:09.000Z | 2022-03-31T17:14:56.000Z | pytest_splunk_addon/helmut/exceptions/command_execution.py | isabella232/pytest-splunk-addon | 5e6ae2b47df7a1feb6f358bbbd1f02197b5024f6 | [
"Apache-2.0"
] | 22 | 2020-05-06T10:43:45.000Z | 2022-03-16T15:50:08.000Z | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class CommandExecutionFailure(RuntimeError):
"""
Generic exception for when a Splunk command fails to execute.
@ivar command: The command that failed.
@type command: str
@ivar code: The exit code.
@type code: int
@param stdout: The standard output.
@type stdout: str
@ivar stderr: The standard error output.
@type stderr: str
"""
def __init__(self, command="", code="", stdout="", stderr=""):
# FAST-8061 Custom exceptions are not raised properly when used in Multiprocessing Pool
"""
Creates a new exception.
@param command: The command that failed.
@type command: str
@param code: The exit code.
@type code: int
@param stderr: The stderr output.
@type stderr: str
"""
self.command = command
self.code = code
self.stderr = stderr
self.stdout = stdout
super(CommandExecutionFailure, self).__init__(self._error_message)
@property
def _error_message(self):
"""
The error message for this exception.
Is built using L{command}, L{code}, L{stdout} and L{stderr}.
@rtype: str
"""
message = "Command {cmd} returned code {code}.\n"
message += "############\nstdout: {stdout}\n"
message += "############\nstderr: {stderr}"
return message.format(
cmd=self.command, code=self.code, stdout=self.stdout, stderr=self.stderr
)
| 31.738462 | 95 | 0.63936 | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class CommandExecutionFailure(RuntimeError):
    """Raised when a Splunk command fails to execute.

    @ivar command: The command that failed.
    @type command: str
    @ivar code: The exit code.
    @type code: int
    @ivar stdout: The standard output.
    @type stdout: str
    @ivar stderr: The standard error output.
    @type stderr: str
    """

    def __init__(self, command="", code="", stdout="", stderr=""):
        # FAST-8061: custom exceptions are not raised properly when used in a
        # multiprocessing Pool, so the message is built eagerly up-front.
        """Create a new exception.

        @param command: The command that failed.
        @type command: str
        @param code: The exit code.
        @type code: int
        @param stdout: The stdout output.
        @type stdout: str
        @param stderr: The stderr output.
        @type stderr: str
        """
        self.command = command
        self.code = code
        self.stdout = stdout
        self.stderr = stderr
        super(CommandExecutionFailure, self).__init__(self._error_message)

    @property
    def _error_message(self):
        """Error message assembled from command, code, stdout and stderr.

        @rtype: str
        """
        template = (
            "Command {cmd} returned code {code}.\n"
            "############\nstdout: {stdout}\n"
            "############\nstderr: {stderr}"
        )
        return template.format(
            cmd=self.command,
            code=self.code,
            stdout=self.stdout,
            stderr=self.stderr,
        )
| 0 | 0 | 0 |
068ccc95099d081a9bfb7a27667dd7bc23e4e19d | 28,617 | py | Python | models/movies_ae.py | sneakyPad/decoding-latent-space-rs | bc7bfba5d6cf5a9d72f5c5393f394dee1025441a | [
"MIT"
] | null | null | null | models/movies_ae.py | sneakyPad/decoding-latent-space-rs | bc7bfba5d6cf5a9d72f5c5393f394dee1025441a | [
"MIT"
] | 3 | 2021-06-08T22:42:59.000Z | 2022-01-13T03:25:54.000Z | models/movies_ae.py | sneakyPad/decoding-latent-space-rs | bc7bfba5d6cf5a9d72f5c5393f394dee1025441a | [
"MIT"
] | null | null | null | # pip install pytorch-lightning
# pip install neptune-client
# %%
from __future__ import print_function
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.progress import ProgressBar
from sklearn.model_selection import train_test_split
import ast
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from collections import defaultdict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
import math
import pytorch_lightning as pl
# import utils.plot_utils as utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import wandb
import time
import os
from utils import run_utils, plot_utils, data_utils, utils, metric_utils, settings, latent_space_utils, \
disentangle_utils
# ToDo EDA:
# - Long Tail graphics
# - Remove user who had less than a threshold of seen items
# - Create Markdown with EDA results
# ToDo input_params:
# Parameter that should be tweakable by invoking the routine:
# - epochs
# - learning_rate
# - batch_size
# - simplified_rating
# - hidden_layer number
# - Algorithm: VAE, AE or SVD
# ToDo metrics:
# Add https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093
seed = 42
torch.manual_seed(seed)
if __name__ == '__main__':
#Architecture Parameters
torch.manual_seed(100)
args = run_utils.create_training_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu if available
settings.init()
# General Parameters
train = True
mixup = False
is_hessian_penalty_activated = False
base_path = 'results/models/vae/'
used_data = 'syn'
full_test_routine = False
#Synthetic Data Parameters
synthetic_data = True
expanded_user_item = False
ls_normalvariate = [False]
ls_continous = [True]
noise = False
no_generative_factors = 3
# used_data ='vae'
used_data = 'ae'
ls_epochs = [21] # -->7 #5,10,15,20,25,30,40,50,60,70,80,90,100,120,150,200,270,350,500
# Note: Mit steigender Epoche wird das disentanglement verstärkt
#
ls_latent_factors = [10]
beta_normalized = 10 / (20 * no_generative_factors)
ls_betas = [] # disentangle_factors .0003
for epoch in ls_epochs:
for normalvariate in ls_normalvariate:
for continous_data in ls_continous:
for lf in ls_latent_factors:
if (len(ls_betas) == 0):
if (expanded_user_item):
beta_normalized = lf / (800)
else:
beta_normalized = lf / (
20 * no_generative_factors) # lf/input_size, e.g. 2/10000 = 0.0002
ls_betas.append(beta_normalized)
for beta in ls_betas:
train_tag = "train"
if (not train):
train_tag = "test"
print(
"Processing model with: {} epochs, {} latent factors, {} beta".format(epoch, lf, beta))
# exp_name = "{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}_noise_{}".format(beta, epoch, lf, synthetic_data, normalvariate, continous_data, is_hessian_penalty_activated, noise)
exp_name = "ae-{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}".format(beta,
epoch,
lf,
synthetic_data,
normalvariate,
continous_data,
is_hessian_penalty_activated)
wandb_name = exp_name + "_" + train_tag
model_name = exp_name + ".ckpt"
attribute_name = exp_name + "_attributes.pickle"
model_path = base_path + model_name
attribute_path = base_path + attribute_name
experiment_path = utils.create_experiment_directory()
model_params = run_utils.create_model_params(experiment_path, epoch, lf, beta,
int(epoch / 100), expanded_user_item,
mixup,
no_generative_factors, epoch,
is_hessian_penalty_activated, used_data)
args.max_epochs = epoch
wandb_logger = WandbLogger(project='recommender-xai', tags=['vae', train_tag],
name=wandb_name)
trainer = pl.Trainer.from_argparse_args(args,
# limit_test_batches=0.1,
# precision =16,
logger=wandb_logger, # False
gradient_clip_val=0.5,
# accumulate_grad_batches=0,
gpus=0,
weights_summary='full',
checkpoint_callback=False,
callbacks=[ProgressBar(),
EarlyStopping(monitor='train_loss')]
)
if (train):
print(
'<---------------------------------- VAE Training ---------------------------------->')
print("Running with the following configuration: \n{}".format(args))
if (synthetic_data):
model_params['synthetic_data'], model_params[
'syn_y'] = data_utils.create_synthetic_data(no_generative_factors,
experiment_path,
expanded_user_item,
continous_data,
normalvariate,
noise)
generate_distribution_df()
model = VAE(model_params)
wandb_logger.watch(model, log='gradients', log_freq=100)
# utils.print_nn_summary(model, size =200)
print('------ Start Training ------')
trainer.fit(model)
kld_matrix = model.KLD
print('------ Saving model ------')
trainer.save_checkpoint(model_path)
model.save_attributes(attribute_path)
print('------ Load model -------')
test_model = VAE.load_from_checkpoint(
model_path) # , load_saved_attributes=True, saved_attributes_path='attributes.pickle'
# test_model.test_size = model_params['test_size']
test_model.load_attributes_and_files(attribute_path)
test_model.experiment_path_test = experiment_path
# print("show np_z_train mean:{}, min:{}, max:{}".format(z_mean_train, z_min_train, z_max_train ))
print('------ Start Test ------')
start = time.time()
dct_param = {'epochs': epoch, 'lf': lf, 'beta': beta, 'normal': normalvariate,
'continous': continous_data, 'hessian': is_hessian_penalty_activated,
'noise': noise}
# plot_utils.plot_samples(test_model, experiment_path, dct_param)
# z = torch.randn(1, test_model.no_latent_factors)
#
#
# Here we create a figure instance, and two subplots
latent_space_utils.traverse(test_model, experiment_path, dct_param)
trainer.test(test_model) # The test loop will not be used until you call.
print('Test time in seconds: {}'.format(time.time() - start))
# print('% altering has provided information gain:{}'.format( int(settings.ig_m_hat_cnt)/(int(settings.ig_m_cnt)+int(settings.ig_m_hat_cnt) )))
# print(results)
disentangle_utils.run_disentanglement_eval(test_model, experiment_path, dct_param)
plot_utils.plot_results(test_model,
test_model.experiment_path_test,
test_model.experiment_path_train,
dct_param)
artifact = wandb.Artifact('Plots', type='result')
artifact.add_dir(experiment_path) # , name='images'
wandb_logger.experiment.log_artifact(artifact)
working_directory = os.path.abspath(os.getcwd())
absolute_path = working_directory + "/" + experiment_path + "images/"
ls_path_images = [absolute_path + file_name for file_name in os.listdir(absolute_path)]
# wandb.log({"images": [wandb.Image(plt.imread(img_path)) for img_path in ls_path_images]})
dct_images = {
img_path.split(sep='_')[2].split(sep='/')[-1]: wandb.Image(plt.imread(img_path)) for
img_path in ls_path_images}
wandb.log(dct_images)
print('Test done')
exit()
| 46.836334 | 221 | 0.553517 | # pip install pytorch-lightning
# pip install neptune-client
# %%
from __future__ import print_function
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.progress import ProgressBar
from sklearn.model_selection import train_test_split
import ast
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from collections import defaultdict
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
import math
import pytorch_lightning as pl
# import utils.plot_utils as utils
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import wandb
import time
import os
from utils import run_utils, plot_utils, data_utils, utils, metric_utils, settings, latent_space_utils, \
disentangle_utils
# ToDo EDA:
# - Long Tail graphics
# - Remove user who had less than a threshold of seen items
# - Create Markdown with EDA results
# ToDo input_params:
# Parameter that should be tweakable by invoking the routine:
# - epochs
# - learning_rate
# - batch_size
# - simplified_rating
# - hidden_layer number
# - Algorithm: VAE, AE or SVD
# ToDo metrics:
# Add https://towardsdatascience.com/evaluation-metrics-for-recommender-systems-df56c6611093
seed = 42
torch.manual_seed(seed)
class VAE(pl.LightningModule):
    """Autoencoder LightningModule over a user-item rating matrix.

    NOTE(review): despite the class name, with ``used_data == 'ae'`` this
    variant trains as a plain autoencoder — ``loss_function`` returns only the
    reconstruction MSE and ``self.KLD`` is kept as a placeholder ``[0]``.
    The mu/logvar/KLD buffers remain for compatibility with surrounding
    plotting/disentanglement tooling.
    """
    def __init__(self, conf: dict, *args, **kwargs):
        """Build encoder/decoder and load either synthetic or MovieLens data.

        :param conf: hyperparameter dict produced by run_utils.create_model_params.
        """
        super().__init__()
        # self.kwargs = kwargs
        self.save_hyperparameters(conf)
        self.ls_predicted_movies = []
        # --- hyperparameters copied out of self.hparams for convenience ---
        self.is_hessian_penalty_activated = self.hparams["is_hessian_penalty_activated"]
        self.expanded_user_item = self.hparams["expanded_user_item"]
        self.used_data = self.hparams["used_data"]
        self.generative_factors = self.hparams["generative_factors"]
        self.mixup = self.hparams["mixup"]
        self.np_synthetic_data = self.hparams["synthetic_data"]
        self.ls_syn_y = self.hparams["syn_y"]
        self.experiment_path_train = conf["experiment_path"]
        self.experiment_path_test = self.experiment_path_train
        self.beta = self.hparams["beta"]
        self.avg_mce = 0.0
        self.train_dataset = None
        self.test_dataset = None
        self.test_size = self.hparams["test_size"]
        self.no_latent_factors = self.hparams["latent_dim"]
        self.max_unique_movies = 0
        self.unique_movies = 0
        self.np_user_item = None
        self.small_dataset = self.hparams["small_dataset"]
        self.simplified_rating = self.hparams["simplified_rating"]
        self.max_epochs = self.hparams["max_epochs"]
        self.dct_index2itemId = None
        self.test_y_bin = None
        self.df_movies_z_combined = None
        # --- data loading: MovieLens when no synthetic matrix was supplied ---
        if (self.np_synthetic_data is None):
            self.load_dataset()  # additionally assigns self.unique_movies and self.np_user_item
            self.df_movies = pd.read_csv('../data/generated/df_movies_cleaned3.csv')
            self.dct_attribute_distribution = utils.load_json_as_dict(
                'attribute_distribution.json')  # relative frequency distribution loaded from JSON
        else:
            self.train_dataset, self.test_dataset = train_test_split(self.np_synthetic_data, test_size=self.test_size,
                                                                     random_state=42)
            self.train_y, self.test_y = train_test_split(self.ls_syn_y, test_size=self.test_size, random_state=42)
            self.test_y_bin = np.asarray(pd.get_dummies(pd.DataFrame(data=self.test_y)))
            self.unique_movies = self.np_synthetic_data.shape[1]
            self.df_movies = pd.read_csv('../data/generated/syn.csv')
            self.dct_attribute_distribution = utils.load_json_as_dict(
                'syn_attribute_distribution.json')  # relative frequency distribution loaded from JSON
        # nn.Linear layer creates a linear function (θx + b), with its parameters initialized
        self.input_dimension = int(self.unique_movies * math.pow(4,
                                                                 self.generative_factors)) if self.expanded_user_item == True else self.unique_movies
        # --- encoder: input_dimension -> 40 -> no_latent_factors ---
        self.fc11 = nn.Linear(in_features=self.input_dimension, out_features=40)  # input
        self.fc12 = nn.Linear(in_features=40, out_features=self.no_latent_factors)  # input
        # self.fc13 = nn.Linear(in_features=1000, out_features=600) # input
        self.encoder = nn.Sequential(self.fc11 ,nn.LeakyReLU(),# nn.ReLU(),
                                     self.fc12#, nn.LeakyReLU()
                                     # self.fc13, nn.LeakyReLU()
                                     )
        # --- decoder: no_latent_factors -> 40 -> input_dimension (mirror) ---
        self.fc31 = nn.Linear(in_features=self.no_latent_factors, out_features=40)
        # self.fc32 = nn.Linear(in_features=600, out_features=1000)
        # self.fc33 = nn.Linear(in_features=1000, out_features=1200)
        self.fc34 = nn.Linear(in_features=40, out_features=self.input_dimension)
        self.decoder = nn.Sequential(self.fc31, nn.LeakyReLU(),
                                     # self.fc32, nn.LeakyReLU(),
                                     # self.fc33, nn.ReLU(),
                                     self.fc34)
        # --- latent-space bookkeeping buffers (filled during train/test) ---
        self.KLD = None
        self.ls_kld = []
        self.dis_KLD = None
        self.z = None
        self.kld_matrix = np.empty((0, self.no_latent_factors))
        self.np_z_test = np.empty((0, self.no_latent_factors))  # grows by one row per test sample
        self.np_mu_test = np.empty((0, self.no_latent_factors))
        self.np_logvar_test = np.empty((0, self.no_latent_factors))
        self.np_z_train = np.empty((0, self.no_latent_factors))  # grows by one row per train sample
        self.np_mu_train = np.empty((0, self.no_latent_factors))
        self.np_logvar_train = np.empty((0, self.no_latent_factors))
        # self.dct_attribute_distribution = None # load relative frequency distribution from dictionary (pickle it)
        # self.df_links = pd.read_csv('../data/movielens/small/links.csv')
        self.sigmoid_annealing_threshold = self.hparams['sigmoid_annealing_threshold']
        self.mce_batch_train = None
        self.mce_batch_test = None
        self.z_mean_train = []
        self.z_min_train = []
        self.z_max_train = []
        # Initialize weights
        self.encoder.apply(run_utils.weight_init)
        self.decoder.apply(run_utils.weight_init)
    def encode(self, x):
        """Map a batch of user-feature vectors to latent codes (ReLU-clamped)."""
        return F.relu(self.encoder(x))
    def decode(self, z):
        """Map latent codes back to item-space; sigmoid keeps outputs in (0, 1)."""
        return torch.sigmoid(self.decoder(z))
    def forward(self, x, **kwargs):
        # NOTE(review): when kwargs (z/mu/logvar) are supplied, this branch
        # binds locals and falls through — the method implicitly returns None.
        # Only the plain encode->decode path returns a reconstruction.
        if (kwargs):
            z = kwargs['z']
            mu = kwargs['mu']
            logvar = kwargs['logvar']
            p = None
            q = None
        else:
            self.z = self.encode(x)
            return self.decode(self.z)
    def load_dataset(self):
        """Load MovieLens ratings and build the train/test user-item matrices."""
        if (self.small_dataset):
            print("Load small dataset of ratings.csv")
            df_ratings = pd.read_csv("../data/movielens/small/ratings.csv")
        else:
            print("Load large dataset of ratings.csv")
            df_ratings = pd.read_csv("../data/movielens/large/ratings.csv")
        print('Shape of dataset:{}'.format(df_ratings.shape))
        self.np_user_item, self.unique_movies, self.max_unique_movies, self.dct_index2itemId = data_utils.pivot_create_user_item_matrix(
            df_ratings, True)  # manual_create_user_item_matrix(df_ratings, simplified_rating=self.simplified_rating)
        # self.np_user_item, self.max_unique_movies = manual_create_user_item_matrix(df_ratings, simplified_rating=self.simplified_rating)
        self.train_dataset, self.test_dataset = train_test_split(self.np_user_item, test_size=self.test_size,
                                                                 random_state=42)
    def train_dataloader(self):
        """Lightning hook: training DataLoader (batch 100, shuffled)."""
        # TODO Change shuffle to True, just for dev purpose switched on
        train_loader = torch.utils.data.DataLoader(
            self.train_dataset, batch_size=100, shuffle=True, num_workers=0, pin_memory=True
        )
        return train_loader
    def test_dataloader(self):
        """Lightning hook: test DataLoader (batch 100, deterministic order)."""
        test_loader = torch.utils.data.DataLoader(
            self.test_dataset, batch_size=100, shuffle=False, num_workers=0
        )
        return test_loader
    def configure_optimizers(self):
        """Lightning hook: plain Adam, lr=1e-3, no scheduler."""
        optimizer = optim.Adam(self.parameters(), lr=1e-3)  # , weight_decay = 0.00001
        # criterion = nn.Binar()#MSELoss() # mean-squared error loss
        # scheduler = StepLR(optimizer, step_size=1)
        return optimizer  # , scheduler
    def collect_z_values(self, ts_mu_chunk, ts_logvar_chunk):  # , ls_y
        """Accumulate sampled z rows and running mean/min/max of the latent space.

        NOTE(review): ``self.compute_z`` is not defined in this class as shown —
        presumably provided elsewhere (e.g. a mixin or removed code); verify.
        """
        start = time.time()
        ls_grad_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
        self.np_z_train = np.append(self.np_z_train, np.asarray(ls_grad_z.tolist()),
                                    axis=0)  # TODO Describe in thesis: a grad-tracking tensor comes back here (training), unlike the test path
        # print('Shape np_z_train: {}'.format(self.np_z_train.shape))
        z_mean = self.np_z_train.mean(axis=0)
        z_min = self.np_z_train.min(axis=0)
        z_max = self.np_z_train.max(axis=0)
        if (len(self.z_mean_train) == 0):
            # first call: seed the running statistics
            self.z_mean_train = z_mean
            self.z_min_train = z_min
            self.z_max_train = z_max
        else:
            # NOTE(review): running mean of means, not a true global mean
            self.z_mean_train = (z_mean + self.z_mean_train) / 2
            self.z_max_train = np.amax(np.vstack((self.z_max_train, z_max)),
                                       axis=0)  # Stack old and new together and find the max
            self.z_min_train = np.amin(np.vstack((self.z_min_train, z_min)), axis=0)
        # if (z_min < self.z_min_train):
        #     self.z_min_train = z_min
        #
        # if (z_max > self.z_max_train):
        #     self.z_max_train = z_max
        # print('collect_z_values in seconds: {}'.format(time.time() - start))
    def training_step(self, batch, batch_idx):
        """Lightning hook: one training step — reconstruction MSE only ('ae' mode)."""
        mce_minibatch = None
        batch_len = batch.shape[0]
        ts_batch_user_features = batch  # .view(-1, self.input_dimension)
        # ts_batch_user_features = ts_batch_user_features * random.uniform(0.4,0.9)
        recon_batch = self.forward(ts_batch_user_features)  # sample data
        if (np.isnan(np.sum(recon_batch.detach().numpy()))):
            print('s')  # NOTE(review): leftover debug marker for NaN reconstructions
        batch_mse = self.loss_function(recon_batch,
                                       ts_batch_user_features,
                                       self.beta,
                                       self.unique_movies)
        hp_loss = 0
        # normalizing reconstruction loss
        batch_mse = batch_mse / len(ts_batch_user_features)
        if (self.is_hessian_penalty_activated and self.current_epoch > int(3 / 4 * self.max_epochs - 1)):  #
            print('<---- Applying Hessian Penalty ---->')
            # np_z = self.compute_z(ts_mu_chunk, ts_logvar_chunk)
            # hp_loss = hessian_penalty(G=self.decode, z=np_z)
            # print('Hessian Penalty:{}'.format(hp_loss))
        # batch_loss = batch_mse + hp_loss + batch_kld
        # NOTE(review): batch_loss is only bound when used_data == 'ae';
        # any other value raises NameError below — confirm intended.
        if (self.used_data == 'ae'):
            batch_loss = batch_mse
        # Additional logs go into tensorboard_logs
        tensorboard_logs = {'train_loss': batch_loss,
                            'MSE-Train': batch_mse,
                            }  #
        return {'loss': batch_loss, 'log': tensorboard_logs}
    def training_epoch_end(self, outputs):
        """Lightning hook: end of a training epoch (currently only logs)."""
        print("Saving MCE before KLD is applied...")
        return {}
    # def validation_step(self, batch, batch_idx):
    #     return 0
    def test_step(self, batch, batch_idx):
        """Lightning hook: one test step — collects z codes, top-1 predictions and metrics."""
        print('test step')
        batch_mce = 0
        test_loss = 0
        # self.eval()
        ts_batch_user_features = batch.view(-1, self.input_dimension)
        recon_batch = self.forward(ts_batch_user_features)
        # top-ranked item index per user (argsort of negated scores => descending)
        self.ls_predicted_movies.extend((-recon_batch).argsort()[:, 0].tolist())
        ls_z = self.encode(ts_batch_user_features).tolist()
        self.np_z_test = np.append(self.np_z_test, np.asarray(ls_z),
                                   axis=0)  # TODO get rid of np_z_chunk and use np.asarray(mu_chunk)
        batch_rmse_w_zeros, batch_mse_w_zeros, batch_rmse, batch_mse = self.calculate_batch_metrics(
            recon_batch=recon_batch, ts_batch_user_features=ts_batch_user_features)
        # NOTE(review): batch_mse from calculate_batch_metrics is immediately
        # overwritten by the summed loss below — confirm intended.
        batch_mse = self.loss_function(recon_batch,
                                       ts_batch_user_features,
                                       self.beta,
                                       self.unique_movies)
        # normalizing reconstruction loss
        batch_mse = batch_mse / len(ts_batch_user_features)
        batch_loss = batch_mse
        # to be rermoved mean_mce = { for single_mce in batch_mce}
        loss = batch_loss.item() / len(ts_batch_user_features)
        # bce = batch_bce/len(ts_batch_user_features)
        tensorboard_logs = {
            'MSE-test': batch_mse}
        return {'test_loss': loss,
                'rmse': batch_rmse,
                'mse': batch_mse,
                'rmse_w_zeros': batch_rmse_w_zeros,
                'mse_w_zeros': batch_mse_w_zeros,
                'log': tensorboard_logs,
                'MSE-Test': batch_mse
                }
    # test_loss /= len(test_loader.dataset)
    # print('====> Test set loss: {:.4f}'.format(test_loss))
    def test_epoch_end(self, outputs):
        """Lightning hook: aggregate per-batch test metrics and push them to wandb.

        NOTE(review): relies on the module-level ``wandb_logger`` created in the
        __main__ script block, not on an attribute of this class.
        """
        # avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        avg_loss = np.array([x['test_loss'] for x in outputs]).mean()
        mse_test = np.array([x['MSE-Test'] for x in outputs])
        avg_rmse = np.array([x['rmse'] for x in outputs]).mean()
        avg_rmse_w_zeros = np.array([x['rmse_w_zeros'] for x in outputs]).mean()
        avg_mse = np.array([x['mse'] for x in outputs]).mean()
        avg_mse_w_zeros = np.array([x['mse_w_zeros'] for x in outputs]).mean()
        tensorboard_logs = {'test_loss': avg_loss, 'MSE-Test': mse_test}
        wandb_logger.log_metrics({'rmse': avg_rmse,
                                  'rmse_w_zeros': avg_rmse_w_zeros,
                                  'mse': avg_mse,
                                  'mse_w_zeros': avg_mse_w_zeros})  # , 'kld_matrix':self.kld_matrix
        return {'test_loss': avg_loss, 'log': tensorboard_logs, 'rmse': avg_rmse, 'MSE-Test': mse_test
                }  # , , 'mce':avg_mce
    # Reconstruction + KL divergence losses summed over all elements and batch
    def loss_function(self, recon_x, x, beta, unique_movies):
        """Summed reconstruction MSE; KLD is a placeholder in this 'ae' variant.

        NOTE(review): if F.mse_loss raises RuntimeError, the except branch only
        prints and MSE is then unbound at the return — NameError; confirm.
        ``beta`` and ``unique_movies`` are currently unused here.
        """
        try:
            # MSE = F.binary_cross_entropy(recon_x, x, reduction='sum')# MSE is bad for this
            MSE = F.mse_loss(x, recon_x, reduction='sum')  # MSE is bad for this
            self.KLD = [0]
            if (np.isnan(np.sum(MSE.detach().numpy()))):
                print('s')  # NOTE(review): leftover debug marker for NaN loss
        except RuntimeError as e:
            print('fo', e)
        return MSE
    def calculate_batch_metrics(self, recon_batch, ts_batch_user_features):
        """Per-batch average RMSE/MSE, with and without the zero (unseen) entries.

        Returns (avg_rmse, avg_mse, avg_rmse_wo_zeros, avg_mse_wo_zeros).
        NOTE(review): caller in test_step unpacks these into
        (rmse_w_zeros, mse_w_zeros, rmse, mse) — the order looks swapped; verify.
        """
        # Compute MSE
        # TODO MOre generic ...
        # mask = training_utils.generate_mask(ts_batch_user_features, tsls_yhat_user, user_based_items_filter=loss_user_items_only)
        # tsls_yhat_user_filtered = tsls_yhat_user[~mask] # Predicted: Filter out unseen+unrecommended items
        # ts_user_features_seen = ts_batch_user_features[~mask] # Ground Truth: Filter out unseen+unrecommended items
        # TODO ...than this approach
        batch_rmse = 0
        batch_mse = 0
        batch_rmse_wo_zeros = 0
        batch_mse_wo_zeros = 0
        ls_yhat_user = recon_batch * ts_batch_user_features  # Set all items to zero that are of no interest and haven't been seen
        for idx, tensor in enumerate(ls_yhat_user):
            np_y = ts_batch_user_features[idx].data.numpy()
            np_y_wo_zeros = np_y[np.nonzero(np_y)]  # inner returns the index
            np_yhat = tensor.data.numpy()
            np_yhat_wo_zeros = np_yhat[np.nonzero(np_y)]  # This must be np_y
            rmse, mse = metric_utils.calculate_metrics(np_y, np_yhat)
            batch_mse += mse
            batch_rmse += rmse
            if (len(np_yhat_wo_zeros) > 0):
                rmse_wo_zeros, mse_wo_zeros = metric_utils.calculate_metrics(np_y_wo_zeros, np_yhat_wo_zeros)
                batch_rmse_wo_zeros += rmse_wo_zeros
                batch_mse_wo_zeros += mse_wo_zeros
        # batch_rmse, batch_mse = utils.calculate_metrics(ts_batch_user_features,ls_yhat_user)
        avg_rmse = batch_rmse / ls_yhat_user.shape[0]
        avg_rmse_wo_zeros = batch_rmse_wo_zeros / ls_yhat_user.shape[0]
        avg_mse = batch_mse / ls_yhat_user.shape[0]
        avg_mse_wo_zeros = batch_mse_wo_zeros / ls_yhat_user.shape[0]
        return avg_rmse, avg_mse, avg_rmse_wo_zeros, avg_mse_wo_zeros
    def load_attributes_and_files(self, path):  # 'filename.pickle'
        """Restore latent-space buffers and labels from a pickle written by save_attributes."""
        with open(path, 'rb') as handle:
            dct_attributes = pickle.load(handle)
        self.np_z_train = dct_attributes['np_z_train']
        self.np_logvar_train = dct_attributes['np_logvar_train']
        self.np_mu_train = dct_attributes['np_mu_train']
        self.train_y = dct_attributes['train_y']
        self.test_y = dct_attributes['test_y']
        self.ls_kld = dct_attributes['ls_kld']
        # self.dct_attribute_distribution = utils.load_json_as_dict(
        #     'attribute_distribution.json') # load relative frequency distributioon from dictionary (pickle it)
        self.z_max_train = dct_attributes['z_max_train']
        self.z_min_train = dct_attributes['z_min_train']
        self.z_mean_train = dct_attributes['z_mean_train']
        print('Attributes loaded')
    def save_attributes(self, path):
        """Persist latent-space buffers and labels to a pickle (counterpart of load_attributes_and_files)."""
        dct_attributes = {'np_z_train': self.np_z_train,
                          'np_logvar_train': self.np_logvar_train,
                          'np_mu_train': self.np_mu_train,
                          'train_y': self.train_y,
                          'test_y': self.test_y,
                          'ls_kld': self.ls_kld,
                          'z_max_train': self.z_max_train,
                          'z_min_train': self.z_min_train,
                          'z_mean_train': self.z_mean_train}
        with open(path, 'wb') as handle:
            pickle.dump(dct_attributes, handle)
        print('Attributes saved')
def generate_distribution_df():
    """Compute relative attribute frequencies of the synthetic dataset and persist them as JSON.

    Reads ../data/generated/syn.csv and writes syn_attribute_distribution.json,
    which VAE.__init__ later reloads via utils.load_json_as_dict.
    """
    dct_attribute_distribution = utils.compute_relative_frequency(
        pd.read_csv('../data/generated/syn.csv'))
    utils.save_dict_as_json(dct_attribute_distribution, 'syn_attribute_distribution.json')
if __name__ == '__main__':
    #Architecture Parameters
    torch.manual_seed(100)  # fixed seed for reproducible weight initialization
    args = run_utils.create_training_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use gpu if available
    settings.init()
    # General Parameters
    train = True
    mixup = False
    is_hessian_penalty_activated = False
    base_path = 'results/models/vae/'
    used_data = 'syn'
    full_test_routine = False
    #Synthetic Data Parameters
    synthetic_data = True
    expanded_user_item = False
    ls_normalvariate = [False]
    ls_continous = [True]
    noise = False
    no_generative_factors = 3
    # used_data ='vae'
    used_data = 'ae'
    ls_epochs = [21]  # -->7 #5,10,15,20,25,30,40,50,60,70,80,90,100,120,150,200,270,350,500
    # Note: disentanglement gets stronger as the number of epochs increases
    #
    ls_latent_factors = [10]
    beta_normalized = 10 / (20 * no_generative_factors)
    ls_betas = []  # disentangle_factors .0003
    # Grid search: epochs x sampling variants x latent factors x beta.
    for epoch in ls_epochs:
        for normalvariate in ls_normalvariate:
            for continous_data in ls_continous:
                for lf in ls_latent_factors:
                    # Derive a default beta from the latent/input size ratio
                    # only when no explicit betas were configured.
                    if (len(ls_betas) == 0):
                        if (expanded_user_item):
                            beta_normalized = lf / (800)
                        else:
                            beta_normalized = lf / (
                                    20 * no_generative_factors)  # lf/input_size, e.g. 2/10000 = 0.0002
                        ls_betas.append(beta_normalized)
                    for beta in ls_betas:
                        train_tag = "train"
                        if (not train):
                            train_tag = "test"
                        print(
                            "Processing model with: {} epochs, {} latent factors, {} beta".format(epoch, lf, beta))
                        # exp_name = "{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}_noise_{}".format(beta, epoch, lf, synthetic_data, normalvariate, continous_data, is_hessian_penalty_activated, noise)
                        exp_name = "ae-{}_beta_{}_epochs_{}_lf_synt_{}_normal_{}_continous_{}_hessian_{}".format(beta,
                                                                                                                 epoch,
                                                                                                                 lf,
                                                                                                                 synthetic_data,
                                                                                                                 normalvariate,
                                                                                                                 continous_data,
                                                                                                                 is_hessian_penalty_activated)
                        wandb_name = exp_name + "_" + train_tag
                        model_name = exp_name + ".ckpt"
                        attribute_name = exp_name + "_attributes.pickle"
                        model_path = base_path + model_name
                        attribute_path = base_path + attribute_name
                        experiment_path = utils.create_experiment_directory()
                        model_params = run_utils.create_model_params(experiment_path, epoch, lf, beta,
                                                                     int(epoch / 100), expanded_user_item,
                                                                     mixup,
                                                                     no_generative_factors, epoch,
                                                                     is_hessian_penalty_activated, used_data)
                        args.max_epochs = epoch
                        wandb_logger = WandbLogger(project='recommender-xai', tags=['vae', train_tag],
                                                   name=wandb_name)
                        trainer = pl.Trainer.from_argparse_args(args,
                                                                # limit_test_batches=0.1,
                                                                # precision =16,
                                                                logger=wandb_logger,  # False
                                                                gradient_clip_val=0.5,
                                                                # accumulate_grad_batches=0,
                                                                gpus=0,
                                                                weights_summary='full',
                                                                checkpoint_callback=False,
                                                                callbacks=[ProgressBar(),
                                                                           EarlyStopping(monitor='train_loss')]
                                                                )
                        # NOTE(review): the whole train+evaluate pipeline below is
                        # inside `if (train):` — nothing runs when train=False; confirm intended.
                        if (train):
                            print(
                                '<---------------------------------- VAE Training ---------------------------------->')
                            print("Running with the following configuration: \n{}".format(args))
                            if (synthetic_data):
                                model_params['synthetic_data'], model_params[
                                    'syn_y'] = data_utils.create_synthetic_data(no_generative_factors,
                                                                                experiment_path,
                                                                                expanded_user_item,
                                                                                continous_data,
                                                                                normalvariate,
                                                                                noise)
                                generate_distribution_df()
                            model = VAE(model_params)
                            wandb_logger.watch(model, log='gradients', log_freq=100)
                            # utils.print_nn_summary(model, size =200)
                            print('------ Start Training ------')
                            trainer.fit(model)
                            kld_matrix = model.KLD
                            print('------ Saving model ------')
                            trainer.save_checkpoint(model_path)
                            model.save_attributes(attribute_path)
                            print('------ Load model -------')
                            # Reload from checkpoint so the test run mirrors a cold start.
                            test_model = VAE.load_from_checkpoint(
                                model_path)  # , load_saved_attributes=True, saved_attributes_path='attributes.pickle'
                            # test_model.test_size = model_params['test_size']
                            test_model.load_attributes_and_files(attribute_path)
                            test_model.experiment_path_test = experiment_path
                            # print("show np_z_train mean:{}, min:{}, max:{}".format(z_mean_train, z_min_train, z_max_train ))
                            print('------ Start Test ------')
                            start = time.time()
                            dct_param = {'epochs': epoch, 'lf': lf, 'beta': beta, 'normal': normalvariate,
                                         'continous': continous_data, 'hessian': is_hessian_penalty_activated,
                                         'noise': noise}
                            # plot_utils.plot_samples(test_model, experiment_path, dct_param)
                            # z = torch.randn(1, test_model.no_latent_factors)
                            #
                            #
                            # Latent-space traversal plots, then the Lightning test loop.
                            latent_space_utils.traverse(test_model, experiment_path, dct_param)
                            trainer.test(test_model)  # The test loop will not be used until you call.
                            print('Test time in seconds: {}'.format(time.time() - start))
                            # print('% altering has provided information gain:{}'.format( int(settings.ig_m_hat_cnt)/(int(settings.ig_m_cnt)+int(settings.ig_m_hat_cnt) )))
                            # print(results)
                            disentangle_utils.run_disentanglement_eval(test_model, experiment_path, dct_param)
                            plot_utils.plot_results(test_model,
                                                    test_model.experiment_path_test,
                                                    test_model.experiment_path_train,
                                                    dct_param)
                            # Upload all generated plots to wandb as an artifact and as images.
                            artifact = wandb.Artifact('Plots', type='result')
                            artifact.add_dir(experiment_path)  # , name='images'
                            wandb_logger.experiment.log_artifact(artifact)
                            working_directory = os.path.abspath(os.getcwd())
                            absolute_path = working_directory + "/" + experiment_path + "images/"
                            ls_path_images = [absolute_path + file_name for file_name in os.listdir(absolute_path)]
                            # wandb.log({"images": [wandb.Image(plt.imread(img_path)) for img_path in ls_path_images]})
                            dct_images = {
                                img_path.split(sep='_')[2].split(sep='/')[-1]: wandb.Image(plt.imread(img_path)) for
                                img_path in ls_path_images}
                            wandb.log(dct_images)
                            print('Test done')
    exit()
| 17,015 | 9 | 503 |
dcd8bc8d995ff6a1bcabbed355c5cd615e0ffd11 | 791 | py | Python | src/guid/settings.py | nwcell/guid_tracker | b8599d53748554870875b9477cd6b229c88915ac | [
"MIT"
] | 3 | 2019-06-04T01:36:17.000Z | 2021-05-16T18:52:21.000Z | src/guid/settings.py | nwcell/guid_tracker | b8599d53748554870875b9477cd6b229c88915ac | [
"MIT"
] | null | null | null | src/guid/settings.py | nwcell/guid_tracker | b8599d53748554870875b9477cd6b229c88915ac | [
"MIT"
] | null | null | null | """Gathers environment settings and loads them into global attributes."""
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings, Secret
# Starlette's Config reads from the process environment first, falling back to .env.
config = Config('.env')
# Main Configs
DEBUG = config('DEBUG', cast=bool, default=False)
TESTING = config('TESTING', cast=bool, default=False)
SECRET_KEY = config('SECRET_KEY', cast=Secret)  # Secret hides the value in reprs/logs; required (no default)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=CommaSeparatedStrings)  # required (no default)
# Redis
REDIS_ENDPOINT = config('REDIS_ENDPOINT', default='127.0.0.1')
REDIS_PORT = config('REDIS_PORT', default=6379, cast=int)
REDIS_DB = config('REDIS_DB', default=0, cast=int)
REDIS_PASSWORD = config('REDIS_PASSWORD', default=None, cast=Secret)
# DB
DATABASE_URL = config('DATABASE_URL')  # required: raises KeyError at import time if unset
# Testing
TEST_DATABASE_URL = config('TEST_DATABASE_URL')  # required: raises KeyError at import time if unset
| 31.64 | 73 | 0.77244 | """Gathers environment settings and loads them into global attributes."""
from starlette.config import Config
from starlette.datastructures import CommaSeparatedStrings, Secret
# Starlette's Config reads from the process environment first, falling back to .env.
config = Config('.env')
# Main Configs
DEBUG = config('DEBUG', cast=bool, default=False)
TESTING = config('TESTING', cast=bool, default=False)
SECRET_KEY = config('SECRET_KEY', cast=Secret)  # Secret hides the value in reprs/logs; required (no default)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=CommaSeparatedStrings)  # required (no default)
# Redis
REDIS_ENDPOINT = config('REDIS_ENDPOINT', default='127.0.0.1')
REDIS_PORT = config('REDIS_PORT', default=6379, cast=int)
REDIS_DB = config('REDIS_DB', default=0, cast=int)
REDIS_PASSWORD = config('REDIS_PASSWORD', default=None, cast=Secret)
# DB
DATABASE_URL = config('DATABASE_URL')  # required: raises KeyError at import time if unset
# Testing
TEST_DATABASE_URL = config('TEST_DATABASE_URL')  # required: raises KeyError at import time if unset
| 0 | 0 | 0 |
ed96143a07b46d6b3d29a8deca9956452da96eac | 3,896 | py | Python | specklepy/objects/encoding.py | mortenengen/speckle-py | f7ae62ade25a2eff4005cf34db57f63076a134e8 | [
"Apache-2.0"
] | 26 | 2020-12-01T10:00:13.000Z | 2021-08-04T02:12:32.000Z | specklepy/objects/encoding.py | mortenengen/speckle-py | f7ae62ade25a2eff4005cf34db57f63076a134e8 | [
"Apache-2.0"
] | 51 | 2021-08-06T15:54:54.000Z | 2022-03-24T10:36:30.000Z | specklepy/objects/encoding.py | mortenengen/speckle-py | f7ae62ade25a2eff4005cf34db57f63076a134e8 | [
"Apache-2.0"
] | 7 | 2020-12-22T15:37:17.000Z | 2021-07-29T14:44:09.000Z | from enum import Enum
from typing import Any, Callable, List, Type
from specklepy.logging.exceptions import SpeckleException
from specklepy.objects.base import Base
| 28.437956 | 78 | 0.597536 | from enum import Enum
from typing import Any, Callable, List, Type
from specklepy.logging.exceptions import SpeckleException
from specklepy.objects.base import Base
class CurveTypeEncoding(int, Enum):
    """Integer tag stored at the head of a flat-encoded curve list.

    Identifies which geometry class the remainder of the list decodes into.
    """

    Arc = 0
    Circle = 1
    Curve = 2
    Ellipse = 3
    Line = 4
    Polyline = 5
    Polycurve = 6

    @property
    def object_class(self) -> Type:
        """Return the geometry class that corresponds to this encoding tag."""
        from . import geometry

        # Built inside the property: a dict at class scope would be swallowed
        # by Enum member machinery, and `geometry` is only importable lazily.
        dispatch = {
            CurveTypeEncoding.Arc: geometry.Arc,
            CurveTypeEncoding.Circle: geometry.Circle,
            CurveTypeEncoding.Curve: geometry.Curve,
            CurveTypeEncoding.Ellipse: geometry.Ellipse,
            CurveTypeEncoding.Line: geometry.Line,
            CurveTypeEncoding.Polyline: geometry.Polyline,
            CurveTypeEncoding.Polycurve: geometry.Polycurve,
        }
        target = dispatch.get(self)
        if target is None:
            raise SpeckleException(
                f"No corresponding object class for CurveTypeEncoding: {self}"
            )
        return target
def curve_from_list(args: List[float]):
    """Decode a flat curve list; ``args[0]`` carries the curve-type tag."""
    return CurveTypeEncoding(args[0]).object_class.from_list(args)
class ObjectArray:
    """Length-prefixed flat encoding for a homogeneous list of Base objects.

    Every encoded object contributes its flat-list form, preceded by that
    list's length, all concatenated into the single ``data`` list.
    """

    def __init__(self) -> None:
        self.data = []

    @classmethod
    def from_objects(cls, objects: List[Base]) -> "ObjectArray":
        """Encode ``objects`` (which must share one speckle_type) into a new array."""
        encoded = cls()
        if not objects:
            return encoded
        expected = objects[0].speckle_type
        for item in objects:
            if expected != item.speckle_type:
                raise SpeckleException(
                    "All objects in chunk should have the same speckle_type. "
                    f"Found {expected} and {item.speckle_type}"
                )
            encoded.encode_object(object=item)
        return encoded

    @staticmethod
    def decode_data(
        data: List[Any], decoder: Callable[[List[Any]], Base]
    ) -> List[Base]:
        """Split ``data`` into its length-prefixed chunks and decode each one."""
        decoded = []
        if not data:
            return decoded
        cursor = 0
        while cursor < len(data):
            chunk_len = data[cursor]
            chunk_start = cursor + 1
            chunk_end = chunk_start + chunk_len
            chunk = data[chunk_start:chunk_end]
            cursor = chunk_end
            # TODO: investigate what's going on w this fail
            try:
                decoded.append(decoder(chunk))
            except ValueError:
                continue
        return decoded

    def decode(self, decoder: Callable[[List[Any]], Any]):
        """Decode this array's own ``data`` with ``decoder``."""
        return self.decode_data(data=self.data, decoder=decoder)

    def encode_object(self, object: Base):
        """Append ``object``'s flat form, prefixed with its length, to ``data``."""
        flat = object.to_list()
        flat.insert(0, len(flat))
        self.data.extend(flat)
class CurveArray(ObjectArray):
    """ObjectArray specialisation that knows how to (de)serialize curves."""

    @classmethod
    def from_curve(cls, curve: Base) -> "CurveArray":
        """Wrap a single curve's flat-list form (no length prefix)."""
        wrapper = cls()
        wrapper.data = curve.to_list()
        return wrapper

    @classmethod
    def from_curves(cls, curves: List[Base]) -> "CurveArray":
        """Encode many curves, each chunk prefixed with its own length."""
        encoded = []
        for crv in curves:
            flat = crv.to_list()
            flat.insert(0, len(flat))
            encoded.extend(flat)
        wrapper = cls()
        wrapper.data = encoded
        return wrapper

    @staticmethod
    def curve_from_list(args: List[float]) -> Base:
        """Decode a flat curve list; ``args[0]`` carries the curve-type tag."""
        return CurveTypeEncoding(args[0]).object_class.from_list(args)

    @property
    def type(self) -> CurveTypeEncoding:
        """Type tag of the single curve held in ``data``."""
        return CurveTypeEncoding(self.data[0])

    def to_curve(self) -> Base:
        """Decode ``data`` as one curve (the ``from_curve`` layout)."""
        return self.type.object_class.from_list(self.data)

    @classmethod
    def _curve_decoder(cls, data: List[float]) -> Base:
        # Helper passed to ObjectArray.decode: wraps one chunk and decodes it.
        wrapper = cls()
        wrapper.data = data
        return wrapper.to_curve()

    def to_curves(self) -> List[Base]:
        """Decode ``data`` as many length-prefixed curves (``from_curves`` layout)."""
        return self.decode(decoder=self._curve_decoder)
| 3,028 | 606 | 92 |
82fd730d2cc3c81e960e4539984731b1faf8fe70 | 2,492 | py | Python | src/c_dataValidation/dataCrossValid.py | JacobSal/Generalized-Sklearn-ML-Pipeline | fafb9e577445c0b33afe4e4fd69fd60d28fb3eb2 | [
"MIT"
] | null | null | null | src/c_dataValidation/dataCrossValid.py | JacobSal/Generalized-Sklearn-ML-Pipeline | fafb9e577445c0b33afe4e4fd69fd60d28fb3eb2 | [
"MIT"
] | null | null | null | src/c_dataValidation/dataCrossValid.py | JacobSal/Generalized-Sklearn-ML-Pipeline | fafb9e577445c0b33afe4e4fd69fd60d28fb3eb2 | [
"MIT"
] | 1 | 2021-10-11T21:48:45.000Z | 2021-10-11T21:48:45.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 19:02:19 2021
@author: Jacob Salminen
@version: 1.0.20
"""
#%% IMPORTS
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import time
import multiprocessing as mp
import numpy as np
from os.path import dirname, join, abspath
from datetime import date
from sklearn.model_selection import train_test_split
from localPkg.datmgmt import DataManager
#%% PATHS
print("Number of processors: ", mp.cpu_count())
# Path to file
cfpath = dirname(__file__)
# Path to images to be processed
folderName = abspath(join(cfpath,"..","a_dataGeneration","rawData"))
# Path to save bin : saves basic information
saveBin = join(cfpath,"saveBin")
# Path to training files
trainDatDir = abspath(join(cfpath,"..","b_dataAggregation","processedData","EL-11122021"))
# Path to Aggregate data
aggDatDir = abspath(join(cfpath,"..", "b_dataAggregation","aggregateData"))
#%% Script Params
# PARMS
channel = 2
ff_width = 121
wiener_size = (5,5)
med_size = 10
start = 0
count = 42
dTime = date.today().strftime('%d%m%Y')
#%% Load Data
print('Loading Data...')
tmpLoadDir = join(aggDatDir, 'train-data-ALL.pkl') #join(aggDatDir, ('joined_data_'+dTime+'.pkl'))
tmpDat = DataManager.load_obj(tmpLoadDir)
X = tmpDat[0]
y = tmpDat[1]
# del tmpDat
#%% BASIC PADDING
# print('Padding Data...')
# X = ProcessPipe.padPreProcessed(X)
#%% Train-Test Split
print('Splitting Data...')
#stack X and y
X = np.vstack(X)
y = np.vstack(y)
#Typing for memory constraints
X = np.float64(X)
# y = np.int16(y)
#adding in some refence numbers for later
# idx = np.array([[i for i in range(0,len(y))]]).T
# y = np.hstack((y,idx))
#split dataset
X_train, X_test, y_train, y_test = train_test_split(X,y,
test_size=0.3,
shuffle=True,
random_state=count)
ind_train = y_train[:,1]
ind_test = y_test[:,1]
y_train = y_train[:,0]
y_test = y_test[:,0]
# Print train-test characteristics
print(' '+"Training Data (N): " + str(len(y_train)))
print(' '+"Testing Data (N): " + str(len(y_test)))
print(' '+"y_train: " + str(np.unique(y_train)))
print(' '+"y_test: " + str(np.unique(y_test)))
tmpDat = [X_train,X_test,y_train,y_test,ind_train,ind_test]
tmpSaveDir = join(saveBin, ('CVjoined_data_'+dTime+'.pkl'))
DataManager.save_obj(tmpSaveDir,tmpDat)
| 30.024096 | 98 | 0.660514 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 19:02:19 2021
@author: Jacob Salminen
@version: 1.0.20
"""
#%% IMPORTS
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import time
import multiprocessing as mp
import numpy as np
from os.path import dirname, join, abspath
from datetime import date
from sklearn.model_selection import train_test_split
from localPkg.datmgmt import DataManager
#%% PATHS
print("Number of processors: ", mp.cpu_count())
# Path to file
cfpath = dirname(__file__)
# Path to images to be processed
folderName = abspath(join(cfpath,"..","a_dataGeneration","rawData"))
# Path to save bin : saves basic information
saveBin = join(cfpath,"saveBin")
# Path to training files
trainDatDir = abspath(join(cfpath,"..","b_dataAggregation","processedData","EL-11122021"))
# Path to Aggregate data
aggDatDir = abspath(join(cfpath,"..", "b_dataAggregation","aggregateData"))
#%% Script Params
# PARMS
channel = 2
ff_width = 121
wiener_size = (5,5)
med_size = 10
start = 0
count = 42
dTime = date.today().strftime('%d%m%Y')
#%% Load Data
print('Loading Data...')
tmpLoadDir = join(aggDatDir, 'train-data-ALL.pkl') #join(aggDatDir, ('joined_data_'+dTime+'.pkl'))
tmpDat = DataManager.load_obj(tmpLoadDir)
X = tmpDat[0]
y = tmpDat[1]
# del tmpDat
#%% BASIC PADDING
# print('Padding Data...')
# X = ProcessPipe.padPreProcessed(X)
#%% Train-Test Split
print('Splitting Data...')
#stack X and y
X = np.vstack(X)
y = np.vstack(y)
#Typing for memory constraints
X = np.float64(X)
# y = np.int16(y)
#adding in some refence numbers for later
# idx = np.array([[i for i in range(0,len(y))]]).T
# y = np.hstack((y,idx))
#split dataset
X_train, X_test, y_train, y_test = train_test_split(X,y,
test_size=0.3,
shuffle=True,
random_state=count)
ind_train = y_train[:,1]
ind_test = y_test[:,1]
y_train = y_train[:,0]
y_test = y_test[:,0]
# Print train-test characteristics
print(' '+"Training Data (N): " + str(len(y_train)))
print(' '+"Testing Data (N): " + str(len(y_test)))
print(' '+"y_train: " + str(np.unique(y_train)))
print(' '+"y_test: " + str(np.unique(y_test)))
tmpDat = [X_train,X_test,y_train,y_test,ind_train,ind_test]
tmpSaveDir = join(saveBin, ('CVjoined_data_'+dTime+'.pkl'))
DataManager.save_obj(tmpSaveDir,tmpDat)
| 0 | 0 | 0 |
5cd4e25d6dd10ebe3a0b932449919e7e3b4bc73d | 6,571 | py | Python | filters/crop.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | 4 | 2016-06-03T18:41:43.000Z | 2020-04-17T20:28:58.000Z | filters/crop.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | filters/crop.py | voxie-viewer/voxie | d2b5e6760519782e9ef2e51f5322a3baa0cb1198 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import numpy as np
import voxie
import dbus
import sys
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()
if args.voxie_action != 'RunFilter':
raise Exception('Invalid operation: ' + args.voxie_action)
with context.makeObject(context.bus, context.busName, args.voxie_operation, ['de.uni_stuttgart.Voxie.ExternalOperationRunFilter']).ClaimOperationAndCatch() as op:
inputData = op.GetInputData('de.uni_stuttgart.Voxie.Input').CastTo(
'de.uni_stuttgart.Voxie.VolumeData')
inputProperties = op.ParametersCached[op.Properties['de.uni_stuttgart.Voxie.Input'].getValue(
'o')]['Properties'].getValue('a{sv}')
bbData = op.GetInputData('de.uni_stuttgart.Voxie.BoundingBoxData').CastTo(
'de.uni_stuttgart.Voxie.GeometricPrimitiveData')
outputPath = op.Properties['de.uni_stuttgart.Voxie.Output'].getValue('o')
setMissingDataToNaN = op.Properties['de.uni_stuttgart.Voxie.Filter.Crop.SetMissingDataToNaN'].getValue(
'b')
sizeRoundingMode = op.Properties['de.uni_stuttgart.Voxie.SizeRoundingMode'].getValue(
's')
if sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Floor':
sizeRounding = np.floor
elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Round':
sizeRounding = np.round
elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Ceil':
sizeRounding = np.ceil
else:
raise Exception('Unknown SizeRoundingMode: ' + repr(sizeRoundingMode))
inputDataVoxel = inputData.CastTo('de.uni_stuttgart.Voxie.VolumeDataVoxel')
# TODO: Use this (and probably set it on the output)
# translationVolume = np.array(inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Translation"].getValue("(ddd)"))
# rotationVolume = voxie.Rotation (inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Rotation"].getValue("(dddd)"))
# TODO: Move bounding box code somewhere else
pointType = instance.Components.GetComponent(
'de.uni_stuttgart.Voxie.ComponentType.GeometricPrimitiveType', 'de.uni_stuttgart.Voxie.GeometricPrimitive.Point').CastTo('de.uni_stuttgart.Voxie.GeometricPrimitiveType')
points = []
for primitive in bbData.GetPrimitives(0, 2**64 - 1):
ptype = primitive[1]
primitiveValues = primitive[3]
if ptype != pointType._objectPath:
print('Warning: Unknown primitive:', ptype, file=sys.stderr)
continue
position = primitiveValues['Position'].getValue('(ddd)')
points.append(np.array(position))
# print(points)
posmin = posmax = None
if len(points) == 0:
raise Exception('Got a bounding box input but no points in it')
for cpos in points:
if posmin is None:
posmin = cpos
if posmax is None:
posmax = cpos
posmin = np.minimum(posmin, cpos)
posmax = np.maximum(posmax, cpos)
# print (posmin)
# print (posmax)
origin = inputData.VolumeOrigin
sizeOrig = np.int64(inputDataVoxel.ArrayShape)
voxelSize = np.array(inputDataVoxel.GridSpacing)
# print (origin, sizeOrig, spacingOrig)
# Position of new volume relative to old volume, in voxels
posminVoxel = -np.int64(sizeRounding(-(posmin - origin) / voxelSize))
posmaxVoxel = np.int64(sizeRounding((posmax - origin) / voxelSize))
sizeOutput = posmaxVoxel - posminVoxel
# print (voxelSize, sizeOutput)
newOrigin = posminVoxel * voxelSize + origin
with instance.CreateVolumeDataVoxel(sizeOutput, inputData.DataType, newOrigin, voxelSize) as data:
with data.CreateUpdate() as update, data.GetBufferWritable(update) as outputBuffer:
# TODO: do this with better performance?
zCount = data[:].shape[2]
for z in range(0, zCount):
op.ThrowIfCancelled()
if setMissingDataToNaN:
outputBuffer.array[:, :, z] = np.nan
else:
outputBuffer.array[:, :, z] = 0
op.SetProgress((z + 1) / zCount / 2)
xMinOld = np.clip(posminVoxel[0], 0, sizeOrig[0])
xMaxOld = np.clip(
posminVoxel[0] + data[:].shape[0], 0, sizeOrig[0])
yMinOld = np.clip(posminVoxel[1], 0, sizeOrig[1])
yMaxOld = np.clip(
posminVoxel[1] + data[:].shape[1], 0, sizeOrig[1])
xMinNew = xMinOld - posminVoxel[0]
xMaxNew = xMaxOld - posminVoxel[0]
yMinNew = yMinOld - posminVoxel[1]
yMaxNew = yMaxOld - posminVoxel[1]
for z in range(0, zCount):
op.ThrowIfCancelled()
zOld = z + posminVoxel[2]
if zOld < 0 or zOld >= sizeOrig[2]:
continue
# print (xMinOld, xMaxOld, yMinOld, yMaxOld, zOld, posminVoxel, posmaxVoxel, sizeOrig)
outputBuffer.array[xMinNew:xMaxNew, yMinNew:yMaxNew,
z] = inputDataVoxel[xMinOld:xMaxOld, yMinOld:yMaxOld, zOld]
op.SetProgress((z + 1) / zCount / 2 + 0.5)
version = update.Finish()
result = {}
result[outputPath] = {
'Data': voxie.Variant('o', data._objectPath),
'DataVersion': voxie.Variant('o', version._objectPath),
}
op.Finish(result)
version._referenceCountingObject.destroy()
| 45.006849 | 177 | 0.66687 | #!/usr/bin/python3
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import numpy as np
import voxie
import dbus
import sys
args = voxie.parser.parse_args()
context = voxie.VoxieContext(args)
instance = context.createInstance()
if args.voxie_action != 'RunFilter':
raise Exception('Invalid operation: ' + args.voxie_action)
with context.makeObject(context.bus, context.busName, args.voxie_operation, ['de.uni_stuttgart.Voxie.ExternalOperationRunFilter']).ClaimOperationAndCatch() as op:
inputData = op.GetInputData('de.uni_stuttgart.Voxie.Input').CastTo(
'de.uni_stuttgart.Voxie.VolumeData')
inputProperties = op.ParametersCached[op.Properties['de.uni_stuttgart.Voxie.Input'].getValue(
'o')]['Properties'].getValue('a{sv}')
bbData = op.GetInputData('de.uni_stuttgart.Voxie.BoundingBoxData').CastTo(
'de.uni_stuttgart.Voxie.GeometricPrimitiveData')
outputPath = op.Properties['de.uni_stuttgart.Voxie.Output'].getValue('o')
setMissingDataToNaN = op.Properties['de.uni_stuttgart.Voxie.Filter.Crop.SetMissingDataToNaN'].getValue(
'b')
sizeRoundingMode = op.Properties['de.uni_stuttgart.Voxie.SizeRoundingMode'].getValue(
's')
if sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Floor':
sizeRounding = np.floor
elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Round':
sizeRounding = np.round
elif sizeRoundingMode == 'de.uni_stuttgart.Voxie.SizeRoundingMode.Ceil':
sizeRounding = np.ceil
else:
raise Exception('Unknown SizeRoundingMode: ' + repr(sizeRoundingMode))
inputDataVoxel = inputData.CastTo('de.uni_stuttgart.Voxie.VolumeDataVoxel')
# TODO: Use this (and probably set it on the output)
# translationVolume = np.array(inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Translation"].getValue("(ddd)"))
# rotationVolume = voxie.Rotation (inputProperties["de.uni_stuttgart.Voxie.MovableDataNode.Rotation"].getValue("(dddd)"))
# TODO: Move bounding box code somewhere else
pointType = instance.Components.GetComponent(
'de.uni_stuttgart.Voxie.ComponentType.GeometricPrimitiveType', 'de.uni_stuttgart.Voxie.GeometricPrimitive.Point').CastTo('de.uni_stuttgart.Voxie.GeometricPrimitiveType')
points = []
for primitive in bbData.GetPrimitives(0, 2**64 - 1):
ptype = primitive[1]
primitiveValues = primitive[3]
if ptype != pointType._objectPath:
print('Warning: Unknown primitive:', ptype, file=sys.stderr)
continue
position = primitiveValues['Position'].getValue('(ddd)')
points.append(np.array(position))
# print(points)
posmin = posmax = None
if len(points) == 0:
raise Exception('Got a bounding box input but no points in it')
for cpos in points:
if posmin is None:
posmin = cpos
if posmax is None:
posmax = cpos
posmin = np.minimum(posmin, cpos)
posmax = np.maximum(posmax, cpos)
# print (posmin)
# print (posmax)
origin = inputData.VolumeOrigin
sizeOrig = np.int64(inputDataVoxel.ArrayShape)
voxelSize = np.array(inputDataVoxel.GridSpacing)
# print (origin, sizeOrig, spacingOrig)
# Position of new volume relative to old volume, in voxels
posminVoxel = -np.int64(sizeRounding(-(posmin - origin) / voxelSize))
posmaxVoxel = np.int64(sizeRounding((posmax - origin) / voxelSize))
sizeOutput = posmaxVoxel - posminVoxel
# print (voxelSize, sizeOutput)
newOrigin = posminVoxel * voxelSize + origin
with instance.CreateVolumeDataVoxel(sizeOutput, inputData.DataType, newOrigin, voxelSize) as data:
with data.CreateUpdate() as update, data.GetBufferWritable(update) as outputBuffer:
# TODO: do this with better performance?
zCount = data[:].shape[2]
for z in range(0, zCount):
op.ThrowIfCancelled()
if setMissingDataToNaN:
outputBuffer.array[:, :, z] = np.nan
else:
outputBuffer.array[:, :, z] = 0
op.SetProgress((z + 1) / zCount / 2)
xMinOld = np.clip(posminVoxel[0], 0, sizeOrig[0])
xMaxOld = np.clip(
posminVoxel[0] + data[:].shape[0], 0, sizeOrig[0])
yMinOld = np.clip(posminVoxel[1], 0, sizeOrig[1])
yMaxOld = np.clip(
posminVoxel[1] + data[:].shape[1], 0, sizeOrig[1])
xMinNew = xMinOld - posminVoxel[0]
xMaxNew = xMaxOld - posminVoxel[0]
yMinNew = yMinOld - posminVoxel[1]
yMaxNew = yMaxOld - posminVoxel[1]
for z in range(0, zCount):
op.ThrowIfCancelled()
zOld = z + posminVoxel[2]
if zOld < 0 or zOld >= sizeOrig[2]:
continue
# print (xMinOld, xMaxOld, yMinOld, yMaxOld, zOld, posminVoxel, posmaxVoxel, sizeOrig)
outputBuffer.array[xMinNew:xMaxNew, yMinNew:yMaxNew,
z] = inputDataVoxel[xMinOld:xMaxOld, yMinOld:yMaxOld, zOld]
op.SetProgress((z + 1) / zCount / 2 + 0.5)
version = update.Finish()
result = {}
result[outputPath] = {
'Data': voxie.Variant('o', data._objectPath),
'DataVersion': voxie.Variant('o', version._objectPath),
}
op.Finish(result)
version._referenceCountingObject.destroy()
| 0 | 0 | 0 |
7e68bfb1f8a41591e36ec42e6b35cd48e95152fc | 15,917 | py | Python | buscador-ui.py | Maes95/BuscadorGitHubRepos | a8e0b7cae3b17d2e56f407c96bc074198f246a73 | [
"Apache-2.0"
] | null | null | null | buscador-ui.py | Maes95/BuscadorGitHubRepos | a8e0b7cae3b17d2e56f407c96bc074198f246a73 | [
"Apache-2.0"
] | null | null | null | buscador-ui.py | Maes95/BuscadorGitHubRepos | a8e0b7cae3b17d2e56f407c96bc074198f246a73 | [
"Apache-2.0"
] | 1 | 2021-05-23T15:24:34.000Z | 2021-05-23T15:24:34.000Z | import main
import configuracion as conf
import filtrosQuery as fq
import tkinter as tk
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
import pruebas
import repoBD
import executeQuery
import datetime
app = tk.Tk()
app.title("BuscadorGitHubRepos")
width = '650'
height = '700'
app.geometry(width + 'x' + height)
app.resizable(False, False)
nb = ttk.Notebook(app)
nb.pack(fill='both', expand='yes')
backgroudLblColor = "gray92"
p1 = ttk.Frame(nb)
p2 = ttk.Frame(nb)
p3 = ttk.Frame(nb)
# STATE (Credenciales)
user_state = tk.StringVar()
token_state = tk.StringVar()
# STATE (Filtros Query)
lenguaje_state = tk.StringVar()
stars_state = tk.StringVar()
forks_state = tk.StringVar()
created_state = tk.StringVar()
pushed_state = tk.StringVar()
archivedCheck_state = tk.BooleanVar()
publicCheck_state = tk.BooleanVar()
sizeLimit_state = tk.IntVar()
# STATE (Variables de configuración)
nRandomRepos_state = tk.IntVar()
nLapseRepos_state = tk.IntVar()
actualizarBDCheck_state = tk.BooleanVar()
buscarEnLocalCheck_state = tk.BooleanVar()
generarListaReposCheck_state = tk.BooleanVar()
randomizarReposCheck_state = tk.BooleanVar()
clonarReposCheck_state = tk.BooleanVar()
doExcelCheck_state = tk.BooleanVar()
doCsvCheck_state = tk.BooleanVar()
escribirEnLogCheck_state = tk.BooleanVar()
scriptLapseExeCheck_state = tk.BooleanVar()
# STATE (Base de datos)
nombreRepoBD_state = tk.StringVar()
organizacionBD_state = tk.StringVar()
lenguajeBD_state = tk.StringVar()
commitIdBD_state = tk.StringVar()
sizeBD_state = tk.IntVar()
boE2eCheck_state = tk.BooleanVar()
# STATE (Pruebas)
organizacion_state = tk.StringVar()
nombreRepo_state = tk.StringVar()
# PESTAÑA 1
row = 0
# LOGO URJC
logoUrjcWidth = 120
logoUrjcHeight = 60
img = Image.open("imgs/logo_urjc2.png")
img = img.resize((logoUrjcWidth,logoUrjcHeight), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(img)
widget = tk.Label(p1, image=photoImg, bg=backgroudLblColor)
widget.grid(column=0, row=row)
titleAppLbl = tk.Label(p1, text="BuscadorGitHubRepos", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
titleAppLbl.grid(column=1, row=row)
f = font.Font(titleAppLbl, titleAppLbl.cget("font"))
f.configure(underline=True)
titleAppLbl.configure(font=f)
row+=1
# CREDENCIALES
credencialesLbl = tk.Label(p1, text="CREDENCIALES", bg=backgroudLblColor)
credencialesLbl.grid(column=0, row=row)
f = font.Font(credencialesLbl, credencialesLbl.cget("font"))
f.configure(underline=True)
credencialesLbl.configure(font=f)
row+=1
userLbl = tk.Label(p1, text="Usuario: ", bg=backgroudLblColor)
userLbl.grid(column=0, row=row)
user_state.set(conf.config.user)
user = tk.Entry(p1,width=15, textvariable=user_state)
user.grid(column=1, row=row)
row+=1
tokenLbl = tk.Label(p1, text="Token: ", bg=backgroudLblColor)
tokenLbl.grid(column=0, row=row)
token_state.set(conf.config.token)
token = tk.Entry(p1,width=34, textvariable=token_state)
token.grid(column=1, row=row)
row+=1
# FILTROS QUERY
filtrosQueryLbl = tk.Label(p1, text="FILTROS QUERY", bg=backgroudLblColor)
filtrosQueryLbl.grid(column=0, row=row)
f = font.Font(filtrosQueryLbl, filtrosQueryLbl.cget("font"))
f.configure(underline=True)
filtrosQueryLbl.configure(font=f)
row+=1
# LENGUAJE
lenguajeLbl = tk.Label(p1, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeLbl.grid(column=0, row=row)
lenguaje_state.set(fq.filtrosQuery.language)
lenguaje = tk.Entry(p1, width=15, textvariable=lenguaje_state)
lenguaje.grid(column=1, row=row)
row+=1
# STARS
starsLbl = tk.Label(p1, text="Stars: ", bg=backgroudLblColor)
starsLbl.grid(column=0, row=row)
stars_state.set(fq.filtrosQuery.stars)
stars = tk.Entry(p1, width=15, textvariable=stars_state)
stars.grid(column=1, row=row)
row+=1
# FORKS
forksLbl = tk.Label(p1, text="Forks: ", bg=backgroudLblColor)
forksLbl.grid(column=0, row=row)
forks_state.set(fq.filtrosQuery.forks)
forks = tk.Entry(p1, width=15, textvariable=forks_state)
forks.grid(column=1, row=row)
row+=1
# CREATED
createdLbl = tk.Label(p1, text="Created: ", bg=backgroudLblColor)
createdLbl.grid(column=0, row=row)
created_state.set(fq.filtrosQuery.created)
created = tk.Entry(p1, width=15, textvariable=created_state)
created.grid(column=1, row=row)
row+=1
# PUSHED
pushedLbl = tk.Label(p1, text="Pushed: ", bg=backgroudLblColor)
pushedLbl.grid(column=0, row=row)
pushed_state.set(fq.filtrosQuery.pushed)
pushed = tk.Entry(p1, width=15, textvariable=pushed_state)
pushed.grid(column=1, row=row)
row+=1
# ARCHIVED
archivedLbl = tk.Label(p1, text="Archived", bg=backgroudLblColor)
archivedLbl.grid(column=0, row=row)
archivedCheck_state.set(False)
archivedCheck = tk.Checkbutton(p1, var=archivedCheck_state, bg=backgroudLblColor)
archivedCheck.grid(column=1, row=row)
archivedCheck.config(state=tk.DISABLED)
row+=1
# PUBLIC
publicLbl = tk.Label(p1, text="Public", bg=backgroudLblColor)
publicLbl.grid(column=0, row=row)
publicCheck_state.set(True)
publicCheck = tk.Checkbutton(p1, var=publicCheck_state, bg=backgroudLblColor)
publicCheck.grid(column=1, row=row)
publicCheck.config(state=tk.DISABLED)
row+=1
# SIZE LIMIT
sizeLimitLbl = tk.Label(p1, text="Size Limit (kilobytes): ", bg=backgroudLblColor)
sizeLimitLbl.grid(column=0, row=row)
sizeLimit_state.set(conf.config.REPO_SIZE_LIMIT)
sizeLimit = tk.Entry(p1, width=7, textvariable=sizeLimit_state)
sizeLimit.grid(column=1, row=row)
sizeLimit.config(state=tk.DISABLED)
row+=1
# VARIABLES DE CONFIGURACIÓN
configuracionLbl = tk.Label(p1, text="VARIABLES DE CONFIGURACIÓN", bg=backgroudLblColor)
configuracionLbl.grid(column=0, row=row)
f = font.Font(configuracionLbl, configuracionLbl.cget("font"))
f.configure(underline=True)
configuracionLbl.configure(font=f)
row+=1
# ACTUALIZAR BD
actualizarBDLbl = tk.Label(p1, text="Actualizar BD", bg=backgroudLblColor)
actualizarBDLbl.grid(column=0, row=row)
actualizarBDCheck_state.set(conf.config.actualizarBD)
actualizarBDCheck = tk.Checkbutton(p1, var=actualizarBDCheck_state, bg=backgroudLblColor)
actualizarBDCheck.grid(column=1, row=row)
row+=1
# BUSCAR REPOS EN LOCAL
buscarEnLocalReposLbl = tk.Label(p1, text="Buscar repos en LOCAL", bg=backgroudLblColor)
buscarEnLocalReposLbl.grid(column=0, row=row)
buscarEnLocalCheck_state.set(conf.config.buscarEnLocal)
buscarEnLocalCheck = tk.Checkbutton(p1, var=buscarEnLocalCheck_state, bg=backgroudLblColor)
buscarEnLocalCheck.grid(column=1, row=row)
row+=1
# GENERAR LISTA REPOS
generarListaReposLbl = tk.Label(p1, text="Generar lista repos ('.pickle')", bg=backgroudLblColor)
generarListaReposLbl.grid(column=0, row=row)
generarListaReposCheck_state.set(conf.config.generarListaRepos)
generarListaReposCheck = tk.Checkbutton(p1, var=generarListaReposCheck_state, bg=backgroudLblColor)
generarListaReposCheck.grid(column=1, row=row)
row+=1
# ScriptLapseExe
scriptLapseExeLbl = tk.Label(p1, text="Ejecutar mediante 'ScriptLapseExe'", bg=backgroudLblColor)
scriptLapseExeCheck_state.set(conf.config.lapseExe)
scriptLapseExeCheck = tk.Checkbutton(p1, var=scriptLapseExeCheck_state, bg=backgroudLblColor)
# Nº LAPSE REPOS
nLapseRepos_state.set(conf.config.N_LAPSE_REPOS)
nLapseRepos = tk.Entry(p1, width=5, textvariable=nLapseRepos_state)
row+=1
# RANDOMIZAR REPOSITORIOS
randomizarReposLbl = tk.Label(p1, text="Randomizar repositorios", bg=backgroudLblColor)
randomizarReposLbl.grid(column=0, row=row)
randomizarReposCheck_state.set(conf.config.randomizarListaRepos)
randomizarReposCheck = tk.Checkbutton(p1, var=randomizarReposCheck_state, command=randomizarReposCheck_clicked, bg=backgroudLblColor)
randomizarReposCheck.grid(column=1, row=row)
# Nº REPOS RANDOM
nRandomRepos_state.set(conf.config.N_RANDOM)
nRandomRepos = tk.Entry(p1, width=5, textvariable=nRandomRepos_state)
nRandomRepos.grid(column=2, row=row)
row+=1
# CLONAR REPOSITORIOS
clonarReposLbl = tk.Label(p1, text="Clonar repositorios resultantes", bg=backgroudLblColor)
clonarReposLbl.grid(column=0, row=row)
clonarReposCheck_state.set(conf.config.clonarRepositorios)
clonarReposCheck = tk.Checkbutton(p1, var=clonarReposCheck_state, bg=backgroudLblColor)
clonarReposCheck.grid(column=1, row=row)
row+=1
# DO EXCEL
doExcelLbl = tk.Label(p1, text="Generar Excel", bg=backgroudLblColor)
doExcelLbl.grid(column=0, row=row)
doExcelCheck_state.set(conf.config.doExcel)
doExcelCheck = tk.Checkbutton(p1, var=doExcelCheck_state, bg=backgroudLblColor)
doExcelCheck.grid(column=1, row=row)
row+=1
# DO CSV
doCsvLbl = tk.Label(p1, text="Generar Csv", bg=backgroudLblColor)
doCsvLbl.grid(column=0, row=row)
doCsvCheck_state.set(conf.config.doCsv)
doCsvCheck = tk.Checkbutton(p1, var=doCsvCheck_state, bg=backgroudLblColor)
doCsvCheck.grid(column=1, row=row)
row+=1
# ESCRIBIR EN LOG
escribirEnLogLbl = tk.Label(p1, text="Escribir en LOG", bg=backgroudLblColor)
escribirEnLogLbl.grid(column=0, row=row)
escribirEnLogCheck_state.set(conf.config.escribirEnLog)
escribirEnLogCheck = tk.Checkbutton(p1, var=escribirEnLogCheck_state, bg=backgroudLblColor)
escribirEnLogCheck.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR
exeButton = tk.Button(p1, text="EJECUTAR", fg="green", command=exe, bg=backgroudLblColor)
exeButton.grid(column=1, row=row)
row+=1
# PESTAÑA 2
row = 0
# CONSULTAR BD
consultarBdLbl = tk.Label(p2, text="CONSULTAR BD", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
consultarBdLbl.grid(column=0, row=row)
f = font.Font(consultarBdLbl, consultarBdLbl.cget("font"))
f.configure(underline=True)
consultarBdLbl.configure(font=f)
row+=1
# NOMBRE REPO BD
nombreRepoBDLbl = tk.Label(p2, text="Nombre repositorio: ", bg=backgroudLblColor)
nombreRepoBDLbl.grid(column=0, row=row)
nombreRepoBD_state.set("")
nombreRepoBD = tk.Entry(p2, width=15, textvariable=nombreRepoBD_state)
nombreRepoBD.grid(column=1, row=row)
row+=1
# ORGANIZACION BD
organizacionBDLbl = tk.Label(p2, text="Organizacion: ", bg=backgroudLblColor)
organizacionBDLbl.grid(column=0, row=row)
organizacionBD_state.set("")
organizacionBD = tk.Entry(p2, width=15, textvariable=organizacionBD_state)
organizacionBD.grid(column=1, row=row)
row+=1
# LENGUAJE BD
lenguajeBDLbl = tk.Label(p2, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeBDLbl.grid(column=0, row=row)
lenguajeBD_state.set("")
lenguajeBD = tk.Entry(p2, width=15, textvariable=lenguajeBD_state)
lenguajeBD.grid(column=1, row=row)
row+=1
# COMMIT ID BD
commitIdBDLbl = tk.Label(p2, text="Commit ID: ", bg=backgroudLblColor)
commitIdBDLbl.grid(column=0, row=row)
commitIdBD_state.set("")
commitIdBD = tk.Entry(p2, width=15, textvariable=commitIdBD_state)
commitIdBD.grid(column=1, row=row)
row+=1
# SIZE BD
sizeBDLbl = tk.Label(p2, text="Tamaño (kilobytes): ", bg=backgroudLblColor)
sizeBDLbl.grid(column=0, row=row)
sizeBD_state.set(0)
sizeBD = tk.Entry(p2, width=15, textvariable=sizeBD_state)
sizeBD.grid(column=1, row=row)
row+=1
# CON E2E
boE2eLbl = tk.Label(p2, text="Con e2e", bg=backgroudLblColor)
boE2eLbl.grid(column=0, row=row)
boE2eCheck_state.set(True)
boE2eCheck = tk.Checkbutton(p2, var=boE2eCheck_state, bg=backgroudLblColor)
boE2eCheck.grid(column=1, row=row)
row+=1
# BOTÓN CONSULTA BBDD
consultaBDButton = tk.Button(p2, text="CONSULTAR BD", fg="green", command=consultarBD, bg=backgroudLblColor)
consultaBDButton.grid(column=1, row=row)
row+=1
# Resultado de la búsqueda
resultadoLbl = tk.Label(p2, text="Resultado de la consulta:", bg=backgroudLblColor)
resultadoLbl.grid(column=1, row=row)
f = font.Font(resultadoLbl, resultadoLbl.cget("font"))
f.configure(underline=True)
resultadoLbl.configure(font=f)
row+=1
scrollbar = ttk.Scrollbar(p2, orient=tk.VERTICAL)
listadoBD = tk.Listbox(p2, borderwidth=1, yscrollcommand=scrollbar.set, width = 40)
listadoBD.grid(column=1, row=row)
row+=1
# BOTÓN LIMPIAR RESULTADOS
limpiarResultadosButton = tk.Button(p2, text="Limpiar", fg="black", command=limpiarResultados, bg=backgroudLblColor)
limpiarResultadosButton.grid(column=1, row=row)
row+=1
# PESTAÑA 3
row = 0
# PRUEBAS
pruebasLbl = tk.Label(p3, text="PRUEBAS", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
pruebasLbl.grid(column=0, row=row)
f = font.Font(pruebasLbl, pruebasLbl.cget("font"))
f.configure(underline=True)
pruebasLbl.configure(font=f)
row+=1
# ORGANIZACION
organizacionLbl = tk.Label(p3, text="Organización: ", bg=backgroudLblColor)
organizacionLbl.grid(column=0, row=row)
organizacion_state.set(pruebas.RepoPruebas.organizacion)
organizacion = tk.Entry(p3, width=15, textvariable=organizacion_state)
organizacion.grid(column=1, row=row)
row+=1
# NOMBRE REPO
nombreRepoLbl = tk.Label(p3, text="Nombre: ", bg=backgroudLblColor)
nombreRepoLbl.grid(column=0, row=row)
nombreRepo_state.set(pruebas.RepoPruebas.organizacion)
nombreRepo = tk.Entry(p3, width=15, textvariable=nombreRepo_state)
nombreRepo.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR PRUEBA
ejecutaPruebaButton = tk.Button(p3, text="REALIZAR PRUEBA", fg="green", command=ejecutaPrueba, bg=backgroudLblColor)
ejecutaPruebaButton.grid(column=1, row=row)
row+=1
nb.add(p1, text='Buscador')
nb.add(p2, text='BBDD')
nb.add(p3, text='PRUEBAS')
randomizarReposCheck_clicked()
app.mainloop() | 33.299163 | 133 | 0.767921 | import main
import configuracion as conf
import filtrosQuery as fq
import tkinter as tk
from tkinter import ttk
from tkinter import font
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
import pruebas
import repoBD
import executeQuery
import datetime
app = tk.Tk()
app.title("BuscadorGitHubRepos")
width = '650'
height = '700'
app.geometry(width + 'x' + height)
app.resizable(False, False)
nb = ttk.Notebook(app)
nb.pack(fill='both', expand='yes')
backgroudLblColor = "gray92"
p1 = ttk.Frame(nb)
p2 = ttk.Frame(nb)
p3 = ttk.Frame(nb)
# STATE (Credenciales)
user_state = tk.StringVar()
token_state = tk.StringVar()
# STATE (Filtros Query)
lenguaje_state = tk.StringVar()
stars_state = tk.StringVar()
forks_state = tk.StringVar()
created_state = tk.StringVar()
pushed_state = tk.StringVar()
archivedCheck_state = tk.BooleanVar()
publicCheck_state = tk.BooleanVar()
sizeLimit_state = tk.IntVar()
# STATE (Variables de configuración)
nRandomRepos_state = tk.IntVar()
nLapseRepos_state = tk.IntVar()
actualizarBDCheck_state = tk.BooleanVar()
buscarEnLocalCheck_state = tk.BooleanVar()
generarListaReposCheck_state = tk.BooleanVar()
randomizarReposCheck_state = tk.BooleanVar()
clonarReposCheck_state = tk.BooleanVar()
doExcelCheck_state = tk.BooleanVar()
doCsvCheck_state = tk.BooleanVar()
escribirEnLogCheck_state = tk.BooleanVar()
scriptLapseExeCheck_state = tk.BooleanVar()
# STATE (Base de datos)
nombreRepoBD_state = tk.StringVar()
organizacionBD_state = tk.StringVar()
lenguajeBD_state = tk.StringVar()
commitIdBD_state = tk.StringVar()
sizeBD_state = tk.IntVar()
boE2eCheck_state = tk.BooleanVar()
# STATE (Pruebas)
organizacion_state = tk.StringVar()
nombreRepo_state = tk.StringVar()
def exe():
    """Copy every UI field into the global query/config state and run the search.

    Reads the tkinter state variables of the 'Buscador' tab, writes them into
    fq.filtrosQuery and conf.config, delegates to main.exe(), then notifies the
    user with a message box.
    """
    print("Ejecutando proceso desde buscador-UI")
    # Filtros Query:
    fq.filtrosQuery.language = lenguaje_state.get().lower()
    fq.filtrosQuery.stars = stars_state.get()
    fq.filtrosQuery.forks = forks_state.get()
    fq.filtrosQuery.created = created_state.get()
    fq.filtrosQuery.pushed = pushed_state.get()
    # NOTE(review): both branches below write the SAME attribute (qIs), so the
    # archived true/false value is always clobbered by public/private.  The
    # archived filter likely needs its own field on filtrosQuery — TODO confirm
    # against the filtrosQuery definition before changing behaviour.
    if archivedCheck_state.get():
        fq.filtrosQuery.qIs = "true"
    else:
        fq.filtrosQuery.qIs = "false"
    if publicCheck_state.get():
        fq.filtrosQuery.qIs = "public"
    else:
        fq.filtrosQuery.qIs = "private"
    # Configuración
    # Run timestamp like "2021-01-31_12h30m05s", used to tag this execution.
    conf.config.fechaEjecucion = str(datetime.datetime.now())[0:19].replace(" ", "_").replace(":", "h", 1).replace(":", "m", 1) + "s"
    conf.config.actualizarBD = actualizarBDCheck_state.get()
    conf.config.buscarEnLocal = buscarEnLocalCheck_state.get()
    conf.config.generarListaRepos = generarListaReposCheck_state.get()
    conf.config.randomizarListaRepos = randomizarReposCheck_state.get()
    # Fix: these two were no-op self-assignments (x = x); read the UI state
    # variables instead.  The state vars are seeded from conf.config at module
    # load and their widgets are never gridded, so behaviour is unchanged until
    # those widgets are exposed in the UI.
    conf.config.lapseExe = scriptLapseExeCheck_state.get()
    conf.config.clonarRepositorios = clonarReposCheck_state.get()
    conf.config.doExcel = doExcelCheck_state.get()
    conf.config.doCsv = doCsvCheck_state.get()
    conf.config.escribirEnLog = escribirEnLogCheck_state.get()
    conf.config.N_RANDOM = nRandomRepos_state.get()
    conf.config.N_LAPSE_REPOS = nLapseRepos_state.get()
    conf.config.REPO_SIZE_LIMIT = sizeLimit_state.get()
    main.exe()
    messagebox.showinfo(message="Proceso finalizado", title="Aviso")
def ejecutaPrueba():
    """Run the standalone test flow for the repo named in the PRUEBAS tab.

    Copies the organisation/name entries into the pruebas.RepoPruebas class
    attributes, runs pruebas.ejecutaPrueba(), then notifies the user.
    """
    pruebas.RepoPruebas.organizacion = organizacion_state.get()
    pruebas.RepoPruebas.nombre = nombreRepo_state.get()
    pruebas.ejecutaPrueba()
    messagebox.showinfo(message="Proceso finalizado", title="Aviso")
def consultarBD():
    """Query the repository database with the BBDD-tab filters and list hits.

    Builds a filter object from the tab's entry fields, runs the resulting SQL
    query and prepends one "[id]org/name: 'url'" row per match to the listbox.
    """
    print("Consultando base de datos...")
    filtro = repoBD.createRepoBD()
    filtro.setNombre(nombreRepoBD_state.get())
    filtro.setOrganizacion(organizacionBD_state.get())
    filtro.setLenguaje(lenguajeBD_state.get())
    filtro.setSize(sizeBD_state.get())
    filtro.setCommitID(commitIdBD_state.get())
    filtro.setBoE2e(boE2eCheck_state.get())
    filtro.setTstbd("")
    query = filtro.getFiltro()
    print(query)
    for fila in executeQuery.execute(query):
        # Insert at the top, so the listbox shows results newest-first.
        etiqueta = f"[{fila['idrepo']}]{fila['organizacion']}/{fila['nombre']}: '{fila['url']}'"
        listadoBD.insert(0, etiqueta)
def randomizarReposCheck_clicked():
    """Sync the random-sample-size entry with the 'randomizar' checkbox.

    Checked: restore the configured N_RANDOM and enable the entry.
    Unchecked: zero the value and disable the entry.
    """
    randomize = randomizarReposCheck_state.get()
    nRandomRepos_state.set(conf.config.N_RANDOM if randomize else 0)
    nRandomRepos.config(state=tk.NORMAL if randomize else tk.DISABLED)
def limpiarResultados():
    """Clear every row from the DB-results listbox."""
    print("Limpiando base de datos...")
    listadoBD.delete(0, tk.END)
# PESTAÑA 1
row = 0
# LOGO URJC
logoUrjcWidth = 120
logoUrjcHeight = 60
img = Image.open("imgs/logo_urjc2.png")
img = img.resize((logoUrjcWidth,logoUrjcHeight), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(img)
widget = tk.Label(p1, image=photoImg, bg=backgroudLblColor)
widget.grid(column=0, row=row)
titleAppLbl = tk.Label(p1, text="BuscadorGitHubRepos", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
titleAppLbl.grid(column=1, row=row)
f = font.Font(titleAppLbl, titleAppLbl.cget("font"))
f.configure(underline=True)
titleAppLbl.configure(font=f)
row+=1
# CREDENCIALES
credencialesLbl = tk.Label(p1, text="CREDENCIALES", bg=backgroudLblColor)
credencialesLbl.grid(column=0, row=row)
f = font.Font(credencialesLbl, credencialesLbl.cget("font"))
f.configure(underline=True)
credencialesLbl.configure(font=f)
row+=1
userLbl = tk.Label(p1, text="Usuario: ", bg=backgroudLblColor)
userLbl.grid(column=0, row=row)
user_state.set(conf.config.user)
user = tk.Entry(p1,width=15, textvariable=user_state)
user.grid(column=1, row=row)
row+=1
tokenLbl = tk.Label(p1, text="Token: ", bg=backgroudLblColor)
tokenLbl.grid(column=0, row=row)
token_state.set(conf.config.token)
token = tk.Entry(p1,width=34, textvariable=token_state)
token.grid(column=1, row=row)
row+=1
# FILTROS QUERY
filtrosQueryLbl = tk.Label(p1, text="FILTROS QUERY", bg=backgroudLblColor)
filtrosQueryLbl.grid(column=0, row=row)
f = font.Font(filtrosQueryLbl, filtrosQueryLbl.cget("font"))
f.configure(underline=True)
filtrosQueryLbl.configure(font=f)
row+=1
# LENGUAJE
lenguajeLbl = tk.Label(p1, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeLbl.grid(column=0, row=row)
lenguaje_state.set(fq.filtrosQuery.language)
lenguaje = tk.Entry(p1, width=15, textvariable=lenguaje_state)
lenguaje.grid(column=1, row=row)
row+=1
# STARS
starsLbl = tk.Label(p1, text="Stars: ", bg=backgroudLblColor)
starsLbl.grid(column=0, row=row)
stars_state.set(fq.filtrosQuery.stars)
stars = tk.Entry(p1, width=15, textvariable=stars_state)
stars.grid(column=1, row=row)
row+=1
# FORKS
forksLbl = tk.Label(p1, text="Forks: ", bg=backgroudLblColor)
forksLbl.grid(column=0, row=row)
forks_state.set(fq.filtrosQuery.forks)
forks = tk.Entry(p1, width=15, textvariable=forks_state)
forks.grid(column=1, row=row)
row+=1
# CREATED
createdLbl = tk.Label(p1, text="Created: ", bg=backgroudLblColor)
createdLbl.grid(column=0, row=row)
created_state.set(fq.filtrosQuery.created)
created = tk.Entry(p1, width=15, textvariable=created_state)
created.grid(column=1, row=row)
row+=1
# PUSHED
pushedLbl = tk.Label(p1, text="Pushed: ", bg=backgroudLblColor)
pushedLbl.grid(column=0, row=row)
pushed_state.set(fq.filtrosQuery.pushed)
pushed = tk.Entry(p1, width=15, textvariable=pushed_state)
pushed.grid(column=1, row=row)
row+=1
# ARCHIVED
archivedLbl = tk.Label(p1, text="Archived", bg=backgroudLblColor)
archivedLbl.grid(column=0, row=row)
archivedCheck_state.set(False)
archivedCheck = tk.Checkbutton(p1, var=archivedCheck_state, bg=backgroudLblColor)
archivedCheck.grid(column=1, row=row)
archivedCheck.config(state=tk.DISABLED)
row+=1
# PUBLIC
publicLbl = tk.Label(p1, text="Public", bg=backgroudLblColor)
publicLbl.grid(column=0, row=row)
publicCheck_state.set(True)
publicCheck = tk.Checkbutton(p1, var=publicCheck_state, bg=backgroudLblColor)
publicCheck.grid(column=1, row=row)
publicCheck.config(state=tk.DISABLED)
row+=1
# SIZE LIMIT
sizeLimitLbl = tk.Label(p1, text="Size Limit (kilobytes): ", bg=backgroudLblColor)
sizeLimitLbl.grid(column=0, row=row)
sizeLimit_state.set(conf.config.REPO_SIZE_LIMIT)
sizeLimit = tk.Entry(p1, width=7, textvariable=sizeLimit_state)
sizeLimit.grid(column=1, row=row)
sizeLimit.config(state=tk.DISABLED)
row+=1
# VARIABLES DE CONFIGURACIÓN
configuracionLbl = tk.Label(p1, text="VARIABLES DE CONFIGURACIÓN", bg=backgroudLblColor)
configuracionLbl.grid(column=0, row=row)
f = font.Font(configuracionLbl, configuracionLbl.cget("font"))
f.configure(underline=True)
configuracionLbl.configure(font=f)
row+=1
# ACTUALIZAR BD
actualizarBDLbl = tk.Label(p1, text="Actualizar BD", bg=backgroudLblColor)
actualizarBDLbl.grid(column=0, row=row)
actualizarBDCheck_state.set(conf.config.actualizarBD)
actualizarBDCheck = tk.Checkbutton(p1, var=actualizarBDCheck_state, bg=backgroudLblColor)
actualizarBDCheck.grid(column=1, row=row)
row+=1
# BUSCAR REPOS EN LOCAL
buscarEnLocalReposLbl = tk.Label(p1, text="Buscar repos en LOCAL", bg=backgroudLblColor)
buscarEnLocalReposLbl.grid(column=0, row=row)
buscarEnLocalCheck_state.set(conf.config.buscarEnLocal)
buscarEnLocalCheck = tk.Checkbutton(p1, var=buscarEnLocalCheck_state, bg=backgroudLblColor)
buscarEnLocalCheck.grid(column=1, row=row)
row+=1
# GENERAR LISTA REPOS
generarListaReposLbl = tk.Label(p1, text="Generar lista repos ('.pickle')", bg=backgroudLblColor)
generarListaReposLbl.grid(column=0, row=row)
generarListaReposCheck_state.set(conf.config.generarListaRepos)
generarListaReposCheck = tk.Checkbutton(p1, var=generarListaReposCheck_state, bg=backgroudLblColor)
generarListaReposCheck.grid(column=1, row=row)
row+=1
# ScriptLapseExe
scriptLapseExeLbl = tk.Label(p1, text="Ejecutar mediante 'ScriptLapseExe'", bg=backgroudLblColor)
scriptLapseExeCheck_state.set(conf.config.lapseExe)
scriptLapseExeCheck = tk.Checkbutton(p1, var=scriptLapseExeCheck_state, bg=backgroudLblColor)
# Nº LAPSE REPOS
nLapseRepos_state.set(conf.config.N_LAPSE_REPOS)
nLapseRepos = tk.Entry(p1, width=5, textvariable=nLapseRepos_state)
row+=1
# RANDOMIZAR REPOSITORIOS
randomizarReposLbl = tk.Label(p1, text="Randomizar repositorios", bg=backgroudLblColor)
randomizarReposLbl.grid(column=0, row=row)
randomizarReposCheck_state.set(conf.config.randomizarListaRepos)
randomizarReposCheck = tk.Checkbutton(p1, var=randomizarReposCheck_state, command=randomizarReposCheck_clicked, bg=backgroudLblColor)
randomizarReposCheck.grid(column=1, row=row)
# Nº REPOS RANDOM
nRandomRepos_state.set(conf.config.N_RANDOM)
nRandomRepos = tk.Entry(p1, width=5, textvariable=nRandomRepos_state)
nRandomRepos.grid(column=2, row=row)
row+=1
# CLONAR REPOSITORIOS
clonarReposLbl = tk.Label(p1, text="Clonar repositorios resultantes", bg=backgroudLblColor)
clonarReposLbl.grid(column=0, row=row)
clonarReposCheck_state.set(conf.config.clonarRepositorios)
clonarReposCheck = tk.Checkbutton(p1, var=clonarReposCheck_state, bg=backgroudLblColor)
clonarReposCheck.grid(column=1, row=row)
row+=1
# DO EXCEL
doExcelLbl = tk.Label(p1, text="Generar Excel", bg=backgroudLblColor)
doExcelLbl.grid(column=0, row=row)
doExcelCheck_state.set(conf.config.doExcel)
doExcelCheck = tk.Checkbutton(p1, var=doExcelCheck_state, bg=backgroudLblColor)
doExcelCheck.grid(column=1, row=row)
row+=1
# DO CSV
doCsvLbl = tk.Label(p1, text="Generar Csv", bg=backgroudLblColor)
doCsvLbl.grid(column=0, row=row)
doCsvCheck_state.set(conf.config.doCsv)
doCsvCheck = tk.Checkbutton(p1, var=doCsvCheck_state, bg=backgroudLblColor)
doCsvCheck.grid(column=1, row=row)
row+=1
# ESCRIBIR EN LOG
escribirEnLogLbl = tk.Label(p1, text="Escribir en LOG", bg=backgroudLblColor)
escribirEnLogLbl.grid(column=0, row=row)
escribirEnLogCheck_state.set(conf.config.escribirEnLog)
escribirEnLogCheck = tk.Checkbutton(p1, var=escribirEnLogCheck_state, bg=backgroudLblColor)
escribirEnLogCheck.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR
exeButton = tk.Button(p1, text="EJECUTAR", fg="green", command=exe, bg=backgroudLblColor)
exeButton.grid(column=1, row=row)
row+=1
# PESTAÑA 2
row = 0
# CONSULTAR BD
consultarBdLbl = tk.Label(p2, text="CONSULTAR BD", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
consultarBdLbl.grid(column=0, row=row)
f = font.Font(consultarBdLbl, consultarBdLbl.cget("font"))
f.configure(underline=True)
consultarBdLbl.configure(font=f)
row+=1
# NOMBRE REPO BD
nombreRepoBDLbl = tk.Label(p2, text="Nombre repositorio: ", bg=backgroudLblColor)
nombreRepoBDLbl.grid(column=0, row=row)
nombreRepoBD_state.set("")
nombreRepoBD = tk.Entry(p2, width=15, textvariable=nombreRepoBD_state)
nombreRepoBD.grid(column=1, row=row)
row+=1
# ORGANIZACION BD
organizacionBDLbl = tk.Label(p2, text="Organizacion: ", bg=backgroudLblColor)
organizacionBDLbl.grid(column=0, row=row)
organizacionBD_state.set("")
organizacionBD = tk.Entry(p2, width=15, textvariable=organizacionBD_state)
organizacionBD.grid(column=1, row=row)
row+=1
# LENGUAJE BD
lenguajeBDLbl = tk.Label(p2, text="Lenguaje: ", bg=backgroudLblColor)
lenguajeBDLbl.grid(column=0, row=row)
lenguajeBD_state.set("")
lenguajeBD = tk.Entry(p2, width=15, textvariable=lenguajeBD_state)
lenguajeBD.grid(column=1, row=row)
row+=1
# COMMIT ID BD
commitIdBDLbl = tk.Label(p2, text="Commit ID: ", bg=backgroudLblColor)
commitIdBDLbl.grid(column=0, row=row)
commitIdBD_state.set("")
commitIdBD = tk.Entry(p2, width=15, textvariable=commitIdBD_state)
commitIdBD.grid(column=1, row=row)
row+=1
# SIZE BD
sizeBDLbl = tk.Label(p2, text="Tamaño (kilobytes): ", bg=backgroudLblColor)
sizeBDLbl.grid(column=0, row=row)
sizeBD_state.set(0)
sizeBD = tk.Entry(p2, width=15, textvariable=sizeBD_state)
sizeBD.grid(column=1, row=row)
row+=1
# CON E2E
boE2eLbl = tk.Label(p2, text="Con e2e", bg=backgroudLblColor)
boE2eLbl.grid(column=0, row=row)
boE2eCheck_state.set(True)
boE2eCheck = tk.Checkbutton(p2, var=boE2eCheck_state, bg=backgroudLblColor)
boE2eCheck.grid(column=1, row=row)
row+=1
# BOTÓN CONSULTA BBDD
consultaBDButton = tk.Button(p2, text="CONSULTAR BD", fg="green", command=consultarBD, bg=backgroudLblColor)
consultaBDButton.grid(column=1, row=row)
row+=1
# Resultado de la búsqueda
resultadoLbl = tk.Label(p2, text="Resultado de la consulta:", bg=backgroudLblColor)
resultadoLbl.grid(column=1, row=row)
f = font.Font(resultadoLbl, resultadoLbl.cget("font"))
f.configure(underline=True)
resultadoLbl.configure(font=f)
row+=1
scrollbar = ttk.Scrollbar(p2, orient=tk.VERTICAL)
listadoBD = tk.Listbox(p2, borderwidth=1, yscrollcommand=scrollbar.set, width = 40)
listadoBD.grid(column=1, row=row)
row+=1
# BOTÓN LIMPIAR RESULTADOS
limpiarResultadosButton = tk.Button(p2, text="Limpiar", fg="black", command=limpiarResultados, bg=backgroudLblColor)
limpiarResultadosButton.grid(column=1, row=row)
row+=1
# PESTAÑA 3
row = 0
# PRUEBAS
pruebasLbl = tk.Label(p3, text="PRUEBAS", font=('Helvetica', 18, 'bold'), bg=backgroudLblColor)
pruebasLbl.grid(column=0, row=row)
f = font.Font(pruebasLbl, pruebasLbl.cget("font"))
f.configure(underline=True)
pruebasLbl.configure(font=f)
row+=1
# ORGANIZACION
organizacionLbl = tk.Label(p3, text="Organización: ", bg=backgroudLblColor)
organizacionLbl.grid(column=0, row=row)
organizacion_state.set(pruebas.RepoPruebas.organizacion)
organizacion = tk.Entry(p3, width=15, textvariable=organizacion_state)
organizacion.grid(column=1, row=row)
row+=1
# NOMBRE REPO
nombreRepoLbl = tk.Label(p3, text="Nombre: ", bg=backgroudLblColor)
nombreRepoLbl.grid(column=0, row=row)
# Fix: seed the "Nombre" field from the repo *name*, not the organisation —
# copy-paste slip from the ORGANIZACION block above.
nombreRepo_state.set(pruebas.RepoPruebas.nombre)
nombreRepo = tk.Entry(p3, width=15, textvariable=nombreRepo_state)
nombreRepo.grid(column=1, row=row)
row+=1
# BOTÓN EJECUTAR PRUEBA
ejecutaPruebaButton = tk.Button(p3, text="REALIZAR PRUEBA", fg="green", command=ejecutaPrueba, bg=backgroudLblColor)
ejecutaPruebaButton.grid(column=1, row=row)
row+=1
nb.add(p1, text='Buscador')
nb.add(p2, text='BBDD')
nb.add(p3, text='PRUEBAS')
randomizarReposCheck_clicked()
app.mainloop() | 2,742 | 0 | 115 |
2ac3e6f54e137617cc6d1e4f38eab93e0c218952 | 2,440 | py | Python | cave_map.py | dylanmc/DungeonHero | 59bcbd3964c8f46c41f2251e221d1fbee75b0d1d | [
"Unlicense"
] | null | null | null | cave_map.py | dylanmc/DungeonHero | 59bcbd3964c8f46c41f2251e221d1fbee75b0d1d | [
"Unlicense"
] | null | null | null | cave_map.py | dylanmc/DungeonHero | 59bcbd3964c8f46c41f2251e221d1fbee75b0d1d | [
"Unlicense"
] | null | null | null | rooms = { 'entrance_r' : {'desc': 'You are in the entrance to a huge cave system. The way you entered has collapsed behind you.', 'passages': {'north': 'passage_r'}, 'occupants': True, 'hostile': False, 'tag': 'entrance_r'},
'passage_r' : {'desc': 'This is a long low north-south passage', 'passages': {'south': 'entrance_r', 'north': 'grand_chamber_r'}, 'occupants': True, 'hostile': True},
'grand_chamber_r' : {'desc': 'You stumble in to a grand chamber, dimly lit by phosphorescent rocks around its perimeter. You can make out a number of passages leading off in various directions.', 'passages': {'south': 'passage_r', 'north': 'crossroads_r', 'northwest': 'goblin_mace', 'northeast': 'goblid_shortsword', 'southwest': 'goblin_musket', 'southeast': 'goblin_greatsword'}, 'occupants': True, 'hostile': True},
'crossroads_r' : {'desc': 'You enter a large, high-ceilinged room. There is a dead knight in one corner.', 'passages': {'south': 'grand_chamber_r', 'west': 'dank_passage', 'east': 'puzzle_passage', 'north': 'high_corridor'}, 'make_occupant': 'ogre'},
'goblin_mace' : {'desc': 'This is an empty room except for the goblin squatting on a mace', 'passages': {'southeast': 'grand_chamber_r'},'make_occupant': 'goblin', 'inventory':['mace']},
'goblid_shortsword' : {'desc': 'This is an empty room except for the goblin squatting on a rusty short sword', 'passages': {'southwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory': ['shortsword']},
'goblin_musket' : {'desc': 'This is an empty room except for the goblin squatting on a musket', 'passages': {'northeast': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['musket']},
'goblin_greatsword' : {'desc': 'This is an empty room except for the goblin squatting on a great sword', 'passages': {'northwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['greatsword']},
'dank_passage' : {'desc': 'This is an empty, dank and dusty east/west passage.', 'passages': {'east': 'crossroads_r'}},
'puzzle_passage' : {'desc': 'This is an empty east/west passage. It smells a bit puzzling.', 'passages': {'west': 'crossroads_r', 'east': 'puzzle_room'}},
'high_corridor' : {'desc': 'You enter a high-ceilinged north/south corridor.', 'passages': {'south': 'crossroads_r'}},
'puzzle_room' : {'desc': 'You enter a room filled with puzzling contraptions and levers.', 'passages': {'west': 'puzzle_passage'}},
}
| 174.285714 | 423 | 0.685246 | rooms = { 'entrance_r' : {'desc': 'You are in the entrance to a huge cave system. The way you entered has collapsed behind you.', 'passages': {'north': 'passage_r'}, 'occupants': True, 'hostile': False, 'tag': 'entrance_r'},
'passage_r' : {'desc': 'This is a long low north-south passage', 'passages': {'south': 'entrance_r', 'north': 'grand_chamber_r'}, 'occupants': True, 'hostile': True},
'grand_chamber_r' : {'desc': 'You stumble in to a grand chamber, dimly lit by phosphorescent rocks around its perimeter. You can make out a number of passages leading off in various directions.', 'passages': {'south': 'passage_r', 'north': 'crossroads_r', 'northwest': 'goblin_mace', 'northeast': 'goblid_shortsword', 'southwest': 'goblin_musket', 'southeast': 'goblin_greatsword'}, 'occupants': True, 'hostile': True},
'crossroads_r' : {'desc': 'You enter a large, high-ceilinged room. There is a dead knight in one corner.', 'passages': {'south': 'grand_chamber_r', 'west': 'dank_passage', 'east': 'puzzle_passage', 'north': 'high_corridor'}, 'make_occupant': 'ogre'},
'goblin_mace' : {'desc': 'This is an empty room except for the goblin squatting on a mace', 'passages': {'southeast': 'grand_chamber_r'},'make_occupant': 'goblin', 'inventory':['mace']},
'goblid_shortsword' : {'desc': 'This is an empty room except for the goblin squatting on a rusty short sword', 'passages': {'southwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory': ['shortsword']},
'goblin_musket' : {'desc': 'This is an empty room except for the goblin squatting on a musket', 'passages': {'northeast': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['musket']},
'goblin_greatsword' : {'desc': 'This is an empty room except for the goblin squatting on a great sword', 'passages': {'northwest': 'grand_chamber_r'}, 'make_occupant': 'goblin', 'inventory':['greatsword']},
'dank_passage' : {'desc': 'This is an empty, dank and dusty east/west passage.', 'passages': {'east': 'crossroads_r'}},
'puzzle_passage' : {'desc': 'This is an empty east/west passage. It smells a bit puzzling.', 'passages': {'west': 'crossroads_r', 'east': 'puzzle_room'}},
'high_corridor' : {'desc': 'You enter a high-ceilinged north/south corridor.', 'passages': {'south': 'crossroads_r'}},
'puzzle_room' : {'desc': 'You enter a room filled with puzzling contraptions and levers.', 'passages': {'west': 'puzzle_passage'}},
}
| 0 | 0 | 0 |
6938b1ee2c87f1aa5d1f283c9b826a49ac986b1f | 2,835 | py | Python | hparams_sets.py | Xtuden-com/proteinfer | e2bc5c88eaae2ef3caca06c23592b1f85c82ca2e | [
"Apache-2.0"
] | 40 | 2019-04-25T15:55:41.000Z | 2022-03-31T19:24:29.000Z | hparams_sets.py | Xtuden-com/proteinfer | e2bc5c88eaae2ef3caca06c23592b1f85c82ca2e | [
"Apache-2.0"
] | 20 | 2020-01-28T22:41:10.000Z | 2022-02-24T01:41:14.000Z | hparams_sets.py | Xtuden-com/proteinfer | e2bc5c88eaae2ef3caca06c23592b1f85c82ca2e | [
"Apache-2.0"
] | 14 | 2019-06-20T19:57:08.000Z | 2022-01-29T17:30:07.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Hyperparameter sets.
These are defined as functions to allow for inheritance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import training as contrib_training
def _starting_hparams():
  """Set of shared starting parameters used in sets below."""
  shared = [
      ('batch_style', 'bucket'),
      ('gradient_clipping_decay', 0.9999),
      ('learning_rate', 0.0005),
      ('lr_decay_rate', .997),
      ('lr_decay_steps', 1000),
      ('lr_warmup_steps', 3000),
      ('model_type', 'cnn'),
      ('resnet_bottleneck_factor', 0.5),
      ('decision_threshold', 0.5),
      ('denominator_power', 1.0),  # Standard mean-pooling.
  ]
  hparams = contrib_training.HParams()
  for name, value in shared:
    hparams.add_hparam(name, value)
  return hparams
def tuned_for_ec():
  """Hyperparameters tuned for EC classification."""
  # TODO(theosanderson): update these to true SOTA values
  tuned = [
      ('gradient_clipping_decay', 0.9999),
      ('batch_style', 'bucket'),
      ('batch_size', 34),
      ('dilation_rate', 5),
      ('filters', 411),
      ('first_dilated_layer', 1),  # This is 0-indexed
      ('kernel_size', 7),
      ('num_layers', 5),
      ('pooling', 'mean'),
      ('resnet_bottleneck_factor', 0.88152),
      ('lr_decay_rate', 0.9977),
      ('learning_rate', 0.00028748),
      ('decision_threshold', 0.3746),
      ('denominator_power', 0.88),
      ('train_steps', 650000),
  ]
  hparams = contrib_training.HParams()
  for name, value in tuned:
    hparams.add_hparam(name, value)
  return hparams
def small_test_model():
  """A small test model that will run on a CPU quickly."""
  hparams = _starting_hparams()
  overrides = [
      ('batch_size', 8),
      ('dilation_rate', 1),
      ('first_dilated_layer', 1),  # This is 0-indexed
      ('filters', 10),
      ('kernel_size', 3),
      ('num_layers', 1),
      ('train_steps', 100),
  ]
  for name, value in overrides:
    hparams.add_hparam(name, value)
  return hparams
| 37.302632 | 74 | 0.762963 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Hyperparameter sets.
These are defined as functions to allow for inheritance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import training as contrib_training
def _starting_hparams():
  """Set of shared starting parameters used in sets below."""
  shared = [
      ('batch_style', 'bucket'),
      ('gradient_clipping_decay', 0.9999),
      ('learning_rate', 0.0005),
      ('lr_decay_rate', .997),
      ('lr_decay_steps', 1000),
      ('lr_warmup_steps', 3000),
      ('model_type', 'cnn'),
      ('resnet_bottleneck_factor', 0.5),
      ('decision_threshold', 0.5),
      ('denominator_power', 1.0),  # Standard mean-pooling.
  ]
  hparams = contrib_training.HParams()
  for name, value in shared:
    hparams.add_hparam(name, value)
  return hparams
def tuned_for_ec():
  """Hyperparameters tuned for EC classification."""
  # TODO(theosanderson): update these to true SOTA values
  tuned = [
      ('gradient_clipping_decay', 0.9999),
      ('batch_style', 'bucket'),
      ('batch_size', 34),
      ('dilation_rate', 5),
      ('filters', 411),
      ('first_dilated_layer', 1),  # This is 0-indexed
      ('kernel_size', 7),
      ('num_layers', 5),
      ('pooling', 'mean'),
      ('resnet_bottleneck_factor', 0.88152),
      ('lr_decay_rate', 0.9977),
      ('learning_rate', 0.00028748),
      ('decision_threshold', 0.3746),
      ('denominator_power', 0.88),
      ('train_steps', 650000),
  ]
  hparams = contrib_training.HParams()
  for name, value in tuned:
    hparams.add_hparam(name, value)
  return hparams
def small_test_model():
  """A small test model that will run on a CPU quickly."""
  hparams = _starting_hparams()
  overrides = [
      ('batch_size', 8),
      ('dilation_rate', 1),
      ('first_dilated_layer', 1),  # This is 0-indexed
      ('filters', 10),
      ('kernel_size', 3),
      ('num_layers', 1),
      ('train_steps', 100),
  ]
  for name, value in overrides:
    hparams.add_hparam(name, value)
  return hparams
| 0 | 0 | 0 |
bb30cf93ee5558a2039fa3fd1d55c73a999f07bd | 3,384 | py | Python | checklist/urls.py | cagandhi/Checklist-Django | c8edf1d8f821900a71f36abd34a76663d8d8f7da | [
"Apache-2.0"
] | 3 | 2021-07-02T07:35:19.000Z | 2022-01-14T11:14:14.000Z | checklist/urls.py | cagandhi/Checklist-Django | c8edf1d8f821900a71f36abd34a76663d8d8f7da | [
"Apache-2.0"
] | 57 | 2021-01-31T23:39:57.000Z | 2022-03-12T00:47:23.000Z | checklist/urls.py | cagandhi/Checklist-Django | c8edf1d8f821900a71f36abd34a76663d8d8f7da | [
"Apache-2.0"
] | 3 | 2021-08-29T21:46:54.000Z | 2022-03-24T13:10:00.000Z | from django.urls import path
from . import views
from .views import (
BookmarkChecklistListView,
CategoryChecklistListView,
ChecklistCreateView,
ChecklistDeleteView,
ChecklistDetailView,
ChecklistListView,
ChecklistUpdateView,
CommentDeleteView,
CommentUpdateView,
ItemCreateView,
ItemDetailView,
ItemUpdateView,
SearchChecklistListView,
UpvoteChecklistListView,
UserChecklistListView,
UserDraftChecklistListView,
)
# URL routes for the checklist app.
# NOTE(review): ordering is load-bearing — Django matches top-down, so the
# literal "checklist/..." routes (drafts, new, per-category actions) must stay
# ABOVE "checklist/<str:category>/", which would otherwise swallow them.
# Do not reorder.
urlpatterns = [
    # Home feed and per-user listings.
    path("", ChecklistListView.as_view(), name="checklist-home"),
    path(
        "user/<str:username>/",
        UserChecklistListView.as_view(),
        name="user-checklists",
    ),
    path("user/<str:username>/follow/", views.follow_user, name="user-follow"),
    # The current user's saved collections.
    path(
        "checklist/drafts/",
        UserDraftChecklistListView.as_view(),
        name="user-drafts",
    ),
    path("bookmarks/", BookmarkChecklistListView.as_view(), name="bookmarks"),
    path("upvotes/", UpvoteChecklistListView.as_view(), name="upvotes"),
    # Checklist CRUD and publishing.
    path(
        "checklist/<int:pk>/",
        ChecklistDetailView.as_view(),
        name="checklist-detail",
    ),
    path(
        "checklist/<int:checklist_id>/publish/",
        views.publish_checklist,
        name="checklist-publish",
    ),
    path(
        "checklist/<int:checklist_id>/save/",
        views.save_and_edit,
        name="checklist-save",
    ),
    path(
        "checklist/new/",
        ChecklistCreateView.as_view(),
        name="checklist-create",
    ),
    path(
        "checklist/<int:pk>/update/",
        ChecklistUpdateView.as_view(),
        name="checklist-update",
    ),
    path(
        "checklist/<int:pk>/delete/",
        ChecklistDeleteView.as_view(),
        name="checklist-delete",
    ),
    path("about/", views.about, name="checklist-about"),
    # Per-checklist social actions.
    path(
        "checklist/<int:checklist_id>/upvote/",
        views.upvote_checklist,
        name="checklist-upvote",
    ),
    path(
        "checklist/<int:checklist_id>/bookmark/",
        views.bookmark_checklist,
        name="checklist-bookmark",
    ),
    path(
        "checklist/<int:checklist_id>/follow/",
        views.follow_checklist,
        name="checklist-follow",
    ),
    # Search and category browsing ("checklist/<str:category>/" is the
    # catch-all for the checklist/ prefix — keep it below the literal routes).
    path("search/", SearchChecklistListView.as_view(), name="search"),
    path(
        "checklist/<str:category>/",
        CategoryChecklistListView.as_view(),
        name="category",
    ),
    # Checklist items.
    path(
        "checklist/<int:checklist_id>/item/new/",
        ItemCreateView.as_view(),
        name="item-create",
    ),
    path(
        "checklist/item/<int:pk>/view/",
        ItemDetailView.as_view(),
        name="item-detail",
    ),
    path(
        "checklist/item/<int:pk>/update/",
        ItemUpdateView.as_view(),
        name="item-update",
    ),
    path(
        "checklist/item/<int:item_id>/<str:action_type>/",
        views.item_action,
        name="item-action",
    ),
    # Notifications.
    path("notif/<int:id>/dismiss/", views.dismiss_notif, name="dismiss-notif"),
    # Comments.
    path(
        "checklist/<int:checklist_id>/comment/",
        views.submit_comment,
        name="comment-submit",
    ),
    path(
        "comment/<int:pk>/update/",
        CommentUpdateView.as_view(),
        name="comment-update",
    ),
    path(
        "comment/<int:pk>/delete/",
        CommentDeleteView.as_view(),
        name="comment-delete",
    ),
]
| 26.645669 | 79 | 0.598109 | from django.urls import path
from . import views
from .views import (
BookmarkChecklistListView,
CategoryChecklistListView,
ChecklistCreateView,
ChecklistDeleteView,
ChecklistDetailView,
ChecklistListView,
ChecklistUpdateView,
CommentDeleteView,
CommentUpdateView,
ItemCreateView,
ItemDetailView,
ItemUpdateView,
SearchChecklistListView,
UpvoteChecklistListView,
UserChecklistListView,
UserDraftChecklistListView,
)
urlpatterns = [
path("", ChecklistListView.as_view(), name="checklist-home"),
path(
"user/<str:username>/",
UserChecklistListView.as_view(),
name="user-checklists",
),
path("user/<str:username>/follow/", views.follow_user, name="user-follow"),
path(
"checklist/drafts/",
UserDraftChecklistListView.as_view(),
name="user-drafts",
),
path("bookmarks/", BookmarkChecklistListView.as_view(), name="bookmarks"),
path("upvotes/", UpvoteChecklistListView.as_view(), name="upvotes"),
path(
"checklist/<int:pk>/",
ChecklistDetailView.as_view(),
name="checklist-detail",
),
path(
"checklist/<int:checklist_id>/publish/",
views.publish_checklist,
name="checklist-publish",
),
path(
"checklist/<int:checklist_id>/save/",
views.save_and_edit,
name="checklist-save",
),
path(
"checklist/new/",
ChecklistCreateView.as_view(),
name="checklist-create",
),
path(
"checklist/<int:pk>/update/",
ChecklistUpdateView.as_view(),
name="checklist-update",
),
path(
"checklist/<int:pk>/delete/",
ChecklistDeleteView.as_view(),
name="checklist-delete",
),
path("about/", views.about, name="checklist-about"),
path(
"checklist/<int:checklist_id>/upvote/",
views.upvote_checklist,
name="checklist-upvote",
),
path(
"checklist/<int:checklist_id>/bookmark/",
views.bookmark_checklist,
name="checklist-bookmark",
),
path(
"checklist/<int:checklist_id>/follow/",
views.follow_checklist,
name="checklist-follow",
),
path("search/", SearchChecklistListView.as_view(), name="search"),
path(
"checklist/<str:category>/",
CategoryChecklistListView.as_view(),
name="category",
),
path(
"checklist/<int:checklist_id>/item/new/",
ItemCreateView.as_view(),
name="item-create",
),
path(
"checklist/item/<int:pk>/view/",
ItemDetailView.as_view(),
name="item-detail",
),
path(
"checklist/item/<int:pk>/update/",
ItemUpdateView.as_view(),
name="item-update",
),
path(
"checklist/item/<int:item_id>/<str:action_type>/",
views.item_action,
name="item-action",
),
path("notif/<int:id>/dismiss/", views.dismiss_notif, name="dismiss-notif"),
path(
"checklist/<int:checklist_id>/comment/",
views.submit_comment,
name="comment-submit",
),
path(
"comment/<int:pk>/update/",
CommentUpdateView.as_view(),
name="comment-update",
),
path(
"comment/<int:pk>/delete/",
CommentDeleteView.as_view(),
name="comment-delete",
),
]
| 0 | 0 | 0 |
4cf1d8277af939a99b6ab67241ea44f42da9f0f0 | 2,616 | py | Python | simpleredial/es/bm25_gray.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | [
"MIT"
] | 36 | 2021-10-13T10:32:08.000Z | 2022-03-20T07:50:05.000Z | simpleredial/es/bm25_gray.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | [
"MIT"
] | 3 | 2021-11-24T10:57:59.000Z | 2022-03-27T15:37:40.000Z | simpleredial/es/bm25_gray.py | gmftbyGMFTBY/SimpleReDial-v1 | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | [
"MIT"
] | 1 | 2022-03-15T07:13:22.000Z | 2022-03-15T07:13:22.000Z | from .es_utils import *
from tqdm import tqdm
from config import *
from dataloader.utils import *
import argparse
import json
import ipdb
'''Generate the BM25 gray candidates:
Make sure the q-q BM25 index has been built
'''
if __name__ == '__main__':
    args = vars(parser_args())
    # load_config() below may overwrite 'batch_size'; remember the CLI value
    # so it can be restored afterwards.
    bsz = args['batch_size']
    args['mode'] = 'test'
    args['model'] = 'dual-bert'    # unused by this script; presumably needed by load_config — TODO confirm
    config = load_config(args)
    args.update(config)
    args['batch_size'] = bsz
    # BM25 searcher over the '<dataset>_<recall_mode>' Elasticsearch index
    # (q-q enables query-to-query retrieval).
    searcher = ESSearcher(
        f'{args["dataset"]}_{args["recall_mode"]}',
        q_q=True if args['recall_mode']=='q-q' else False
    )
    # load train dataset
    read_path = f'{args["root_dir"]}/data/{args["dataset"]}/train.txt'
    write_path = f'{args["root_dir"]}/data/{args["dataset"]}/train_bm25_gray.txt'
    # dataset = read_text_data_utterances_full(read_path, lang=args['lang'], turn_length=5)
    dataset = read_text_data_utterances(read_path, lang=args['lang'])
    # keep only positive samples: context = all but last utterance, response = last
    data = [(utterances[:-1], utterances[-1]) for label, utterances in dataset if label == 1]
    responses = [utterances[-1] for label, utterances in dataset]
    collector = []
    pbar = tqdm(range(0, len(data), args['batch_size']))
    for idx in pbar:
        # random choice the conversation context to search the topic related responses
        context = [i[0] for i in data[idx:idx+args['batch_size']]]
        response = [i[1] for i in data[idx:idx+args['batch_size']]]
        context_str = [' '.join(i[0]) for i in data[idx:idx+args['batch_size']]]
        rest_ = searcher.msearch(context_str, topk=args['pool_size'])
        rest = []
        for gt_ctx, gt_res, i in zip(context, response, rest_):
            # de-duplicate candidates and drop the ground-truth response
            i = list(set(i))
            if gt_res in i:
                i.remove(gt_res)
            # pad with random responses when BM25 returned fewer than 'topk'
            # candidates ('random' comes in via a star import — TODO confirm)
            if len(i) < args['topk']:
                rest.append(i + random.sample(responses, args['topk']-len(i)))
            else:
                rest.append(i[:args['topk']])
        for q, r, nr in zip(context, response, rest):
            collector.append({'q': q, 'r': r, 'nr': nr})
    # one JSON object per line: {'q': context, 'r': response, 'nr': gray candidates}
    with open(write_path, 'w', encoding='utf-8') as f:
        for data in collector:
            string = json.dumps(data)
            f.write(f'{string}\n')
| 36.333333 | 93 | 0.620413 | from .es_utils import *
from tqdm import tqdm
from config import *
from dataloader.utils import *
import argparse
import json
import ipdb
'''Generate the BM25 gray candidates:
Make sure the q-q BM25 index has been built
'''
def parser_args(argv=None):
    """Parse command-line arguments for BM25 gray-candidate generation.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            keeps the previous behaviour of reading ``sys.argv[1:]``;
            passing an explicit list makes the function testable.

    Returns:
        argparse.Namespace with ``dataset``, ``pool_size``, ``batch_size``,
        ``recall_mode`` and ``topk`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='douban', type=str)
    parser.add_argument('--pool_size', default=1000, type=int)
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--recall_mode', default='q-q', type=str)
    parser.add_argument('--topk', default=10, type=int)
    return parser.parse_args(argv)
if __name__ == '__main__':
    args = vars(parser_args())
    # load_config() below may overwrite 'batch_size'; remember the CLI value
    # so it can be restored afterwards.
    bsz = args['batch_size']
    args['mode'] = 'test'
    args['model'] = 'dual-bert'    # unused by this script; presumably needed by load_config — TODO confirm
    config = load_config(args)
    args.update(config)
    args['batch_size'] = bsz
    # BM25 searcher over the '<dataset>_<recall_mode>' Elasticsearch index
    # (q-q enables query-to-query retrieval).
    searcher = ESSearcher(
        f'{args["dataset"]}_{args["recall_mode"]}',
        q_q=True if args['recall_mode']=='q-q' else False
    )
    # load train dataset
    read_path = f'{args["root_dir"]}/data/{args["dataset"]}/train.txt'
    write_path = f'{args["root_dir"]}/data/{args["dataset"]}/train_bm25_gray.txt'
    # dataset = read_text_data_utterances_full(read_path, lang=args['lang'], turn_length=5)
    dataset = read_text_data_utterances(read_path, lang=args['lang'])
    # keep only positive samples: context = all but last utterance, response = last
    data = [(utterances[:-1], utterances[-1]) for label, utterances in dataset if label == 1]
    responses = [utterances[-1] for label, utterances in dataset]
    collector = []
    pbar = tqdm(range(0, len(data), args['batch_size']))
    for idx in pbar:
        # random choice the conversation context to search the topic related responses
        context = [i[0] for i in data[idx:idx+args['batch_size']]]
        response = [i[1] for i in data[idx:idx+args['batch_size']]]
        context_str = [' '.join(i[0]) for i in data[idx:idx+args['batch_size']]]
        rest_ = searcher.msearch(context_str, topk=args['pool_size'])
        rest = []
        for gt_ctx, gt_res, i in zip(context, response, rest_):
            # de-duplicate candidates and drop the ground-truth response
            i = list(set(i))
            if gt_res in i:
                i.remove(gt_res)
            # pad with random responses when BM25 returned fewer than 'topk'
            # candidates ('random' comes in via a star import — TODO confirm)
            if len(i) < args['topk']:
                rest.append(i + random.sample(responses, args['topk']-len(i)))
            else:
                rest.append(i[:args['topk']])
        for q, r, nr in zip(context, response, rest):
            collector.append({'q': q, 'r': r, 'nr': nr})
    # one JSON object per line: {'q': context, 'r': response, 'nr': gray candidates}
    with open(write_path, 'w', encoding='utf-8') as f:
        for data in collector:
            string = json.dumps(data)
            f.write(f'{string}\n')
| 380 | 0 | 23 |
6b175bc8f230cd5d4153dd91885f1fb35efcbc2b | 9,665 | py | Python | notifications/models.py | lyoniionly/django-notifications | 36ad8ea4dd26445b5aa81d2c3e3688f9695d25be | [
"BSD-3-Clause"
] | null | null | null | notifications/models.py | lyoniionly/django-notifications | 36ad8ea4dd26445b5aa81d2c3e3688f9695d25be | [
"BSD-3-Clause"
] | null | null | null | notifications/models.py | lyoniionly/django-notifications | 36ad8ea4dd26445b5aa81d2c3e3688f9695d25be | [
"BSD-3-Clause"
] | null | null | null | import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django import get_version
from distutils.version import StrictVersion
if StrictVersion(get_version()) >= StrictVersion('1.8.0'):
from django.contrib.contenttypes.fields import GenericForeignKey
else:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from six import text_type
from .utils import id2slug
from .signals import notify
from model_utils import managers, Choices
from jsonfield.fields import JSONField
#SOFT_DELETE = getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False)
class Notification(models.Model):
"""
Action model describing the actor acting out a verb (on an optional
target).
Nomenclature based on http://activitystrea.ms/specs/atom/1.0/
Generalized Format::
<actor> <verb> <time>
<actor> <verb> <target> <time>
<actor> <verb> <action_object> <target> <time>
Examples::
<justquick> <reached level 60> <1 minute ago>
<brosner> <commented on> <pinax/pinax> <2 hours ago>
<washingtontimes> <started follow> <justquick> <8 minutes ago>
<mitsuhiko> <closed> <issue 70> on <mitsuhiko/flask> <about 2 hours ago>
Unicode Representation::
justquick reached level 60 1 minute ago
mitsuhiko closed issue 70 on mitsuhiko/flask 3 hours ago
HTML Representation::
<a href="http://oebfare.com/">brosner</a> commented on <a href="http://github.com/pinax/pinax">pinax/pinax</a> 2 hours ago
"""
LEVELS = Choices('success', 'info', 'warning', 'error')
level = models.CharField(choices=LEVELS, default=LEVELS.info, max_length=20)
recipient = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, related_name='notifications')
unread = models.BooleanField(default=True, blank=False)
actor_content_type = models.ForeignKey(ContentType, related_name='notify_actor')
actor_object_id = models.CharField(max_length=255)
actor = GenericForeignKey('actor_content_type', 'actor_object_id')
verb = models.CharField(max_length=255)
description = models.TextField(blank=True, null=True)
target_content_type = models.ForeignKey(ContentType, related_name='notify_target',
blank=True, null=True)
target_object_id = models.CharField(max_length=255, blank=True, null=True)
target = GenericForeignKey('target_content_type',
'target_object_id')
action_object_content_type = models.ForeignKey(ContentType,
related_name='notify_action_object', blank=True, null=True)
action_object_object_id = models.CharField(max_length=255, blank=True,
null=True)
action_object = GenericForeignKey('action_object_content_type',
'action_object_object_id')
timestamp = models.DateTimeField(default=now)
public = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
emailed = models.BooleanField(default=False)
data = JSONField(blank=True, null=True)
notify_type = models.CharField(max_length=50, blank=True, null=True)
objects = managers.PassThroughManager.for_queryset_class(NotificationQuerySet)()
def timesince(self, now=None):
"""
Shortcut for the ``django.utils.timesince.timesince`` function of the
current timestamp.
"""
from django.utils.timesince import timesince as timesince_
return timesince_(self.timestamp, now)
@property
# 'NOTIFY_USE_JSONFIELD' is for backward compatibility
# As app name is 'notifications', let's use 'NOTIFICATIONS' consistently from now
EXTRA_DATA = getattr(settings, 'NOTIFY_USE_JSONFIELD', None)
if EXTRA_DATA is None:
    EXTRA_DATA = getattr(settings, 'NOTIFICATIONS_USE_JSONFIELD', False)
def notify_handler(verb, **kwargs):
    """
    Handler function to create Notification instance upon action signal call.

    Consumed kwargs: recipient and sender (required); target, action_object,
    public, description, notify_type, timestamp and level (optional).
    Any leftover kwargs are stored on ``Notification.data`` when EXTRA_DATA
    is enabled via settings.
    """
    kwargs.pop('signal', None)
    recipient = kwargs.pop('recipient')
    actor = kwargs.pop('sender')
    newnotify = Notification(
        recipient = recipient,
        actor_content_type=ContentType.objects.get_for_model(actor),
        actor_object_id=actor.pk,
        verb=text_type(verb),
        public=bool(kwargs.pop('public', True)),
        description=kwargs.pop('description', None),
        notify_type=kwargs.pop('notify_type', None),
        timestamp=kwargs.pop('timestamp', now()),
        level=kwargs.pop('level', Notification.LEVELS.info),
    )
    # Attach the optional generic relations by pk + content type.
    for opt in ('target', 'action_object'):
        obj = kwargs.pop(opt, None)
        if not obj is None:  # NOTE(review): idiomatic form would be 'obj is not None'
            setattr(newnotify, '%s_object_id' % opt, obj.pk)
            setattr(newnotify, '%s_content_type' % opt,
                    ContentType.objects.get_for_model(obj))
    # Store any leftover kwargs as arbitrary JSON payload (opt-in via settings).
    if len(kwargs) and EXTRA_DATA:
        newnotify.data = kwargs
    newnotify.save()
# connect the signal
notify.connect(notify_handler, dispatch_uid='notifications.models.notification')
| 34.27305 | 130 | 0.662907 | import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django import get_version
from distutils.version import StrictVersion
if StrictVersion(get_version()) >= StrictVersion('1.8.0'):
from django.contrib.contenttypes.fields import GenericForeignKey
else:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from six import text_type
from .utils import id2slug
from .signals import notify
from model_utils import managers, Choices
from jsonfield.fields import JSONField
def now():
    """Return the current datetime.

    Resolved on every call (not at import time) because the ``USE_TZ``
    setting can change, e.g. under test overrides. When ``USE_TZ`` is set
    and ``django.utils.timezone`` is importable, the result is
    timezone-aware; otherwise it falls back to a naive datetime.
    """
    if getattr(settings, 'USE_TZ'):
        try:
            from django.utils import timezone
        except ImportError:
            pass
        else:
            return timezone.now()
    return datetime.datetime.now()
#SOFT_DELETE = getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False)
def is_soft_delete():
    """Report whether soft-delete mode is enabled in settings.

    The setting is re-read on every call (instead of being cached at module
    level) so that ``override_settings`` in unit tests takes effect.
    TODO: find a more elegant approach than re-reading the setting each time.
    """
    soft_delete_enabled = getattr(settings, 'NOTIFICATIONS_SOFT_DELETE', False)
    return soft_delete_enabled
def assert_soft_delete():
    """Raise ImproperlyConfigured unless soft-delete mode is enabled.

    Guard used by queryset helpers that read or write the 'deleted' field.
    """
    if not is_soft_delete():
        msg = """To use 'deleted' field, please set 'NOTIFICATIONS_SOFT_DELETE'=True in settings.
        Otherwise NotificationQuerySet.unread and NotificationQuerySet.read do NOT filter by 'deleted' field.
        """
        raise ImproperlyConfigured(msg)
class NotificationQuerySet(models.query.QuerySet):
    """Queryset with read/unread and soft-delete helpers for Notification.

    The 'deleted' field is only consulted when NOTIFICATIONS_SOFT_DELETE is
    enabled; otherwise it is ignored for query performance.
    """
    def unread(self):
        """Return only unread items in the current queryset"""
        if is_soft_delete():
            return self.filter(unread=True, deleted=False)
        else:
            """ when SOFT_DELETE=False, developers are supposed NOT to touch 'deleted' field.
            In this case, to improve query performance, don't filter by 'deleted' field
            """
            return self.filter(unread=True)
    def read(self):
        """Return only read items in the current queryset"""
        if is_soft_delete():
            return self.filter(unread=False, deleted=False)
        else:
            """ when SOFT_DELETE=False, developers are supposed NOT to touch 'deleted' field.
            In this case, to improve query performance, don't filter by 'deleted' field
            """
            return self.filter(unread=False)
    def mark_all_as_read(self, recipient=None):
        """Mark as read any unread messages in the current queryset.
        Optionally, filter these by recipient first.
        """
        # We want to filter out read ones, as later we will store
        # the time they were marked as read.
        qs = self.unread()
        if recipient:
            qs = qs.filter(recipient=recipient)
        qs.update(unread=False)
    def mark_all_as_unread(self, recipient=None):
        """Mark as unread any read messages in the current queryset.
        Optionally, filter these by recipient first.
        """
        qs = self.read()
        if recipient:
            qs = qs.filter(recipient=recipient)
        qs.update(unread=True)
    def deleted(self):
        """Return only deleted items in the current queryset"""
        # Raises ImproperlyConfigured when soft delete is disabled.
        assert_soft_delete()
        return self.filter(deleted=True)
    def active(self):
        """Return only active(un-deleted) items in the current queryset"""
        assert_soft_delete()
        return self.filter(deleted=False)
    def mark_all_as_deleted(self, recipient=None):
        """Mark current queryset as deleted.
        Optionally, filter by recipient first.
        """
        assert_soft_delete()
        qs = self.active()
        if recipient:
            qs = qs.filter(recipient=recipient)
        qs.update(deleted=True)
    def mark_all_as_active(self, recipient=None):
        """Mark current queryset as active(un-deleted).
        Optionally, filter by recipient first.
        """
        assert_soft_delete()
        qs = self.deleted()
        if recipient:
            qs = qs.filter(recipient=recipient)
        qs.update(deleted=False)
class Notification(models.Model):
    """
    Action model describing the actor acting out a verb (on an optional
    target).
    Nomenclature based on http://activitystrea.ms/specs/atom/1.0/
    Generalized Format::
        <actor> <verb> <time>
        <actor> <verb> <target> <time>
        <actor> <verb> <action_object> <target> <time>
    Examples::
        <justquick> <reached level 60> <1 minute ago>
        <brosner> <commented on> <pinax/pinax> <2 hours ago>
        <washingtontimes> <started follow> <justquick> <8 minutes ago>
        <mitsuhiko> <closed> <issue 70> on <mitsuhiko/flask> <about 2 hours ago>
    Unicode Representation::
        justquick reached level 60 1 minute ago
        mitsuhiko closed issue 70 on mitsuhiko/flask 3 hours ago
    HTML Representation::
        <a href="http://oebfare.com/">brosner</a> commented on <a href="http://github.com/pinax/pinax">pinax/pinax</a> 2 hours ago
    """
    # Severity of the notification (success/info/warning/error).
    LEVELS = Choices('success', 'info', 'warning', 'error')
    level = models.CharField(choices=LEVELS, default=LEVELS.info, max_length=20)
    recipient = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False, related_name='notifications')
    unread = models.BooleanField(default=True, blank=False)
    # Generic FK to the object that performed the activity.
    actor_content_type = models.ForeignKey(ContentType, related_name='notify_actor')
    actor_object_id = models.CharField(max_length=255)
    actor = GenericForeignKey('actor_content_type', 'actor_object_id')
    verb = models.CharField(max_length=255)
    description = models.TextField(blank=True, null=True)
    # Optional generic FK to the object the verb was enacted on.
    target_content_type = models.ForeignKey(ContentType, related_name='notify_target',
                                            blank=True, null=True)
    target_object_id = models.CharField(max_length=255, blank=True, null=True)
    target = GenericForeignKey('target_content_type',
                               'target_object_id')
    # Optional generic FK to the object linked to the action itself.
    action_object_content_type = models.ForeignKey(ContentType,
                                                   related_name='notify_action_object', blank=True, null=True)
    action_object_object_id = models.CharField(max_length=255, blank=True,
                                               null=True)
    action_object = GenericForeignKey('action_object_content_type',
                                      'action_object_object_id')
    # Module-level now() is used so USE_TZ is honoured per save, not at import.
    timestamp = models.DateTimeField(default=now)
    public = models.BooleanField(default=True)
    # Soft-delete flag; only consulted when NOTIFICATIONS_SOFT_DELETE is set.
    deleted = models.BooleanField(default=False)
    emailed = models.BooleanField(default=False)
    # Arbitrary extra payload, populated by notify_handler when EXTRA_DATA is on.
    data = JSONField(blank=True, null=True)
    notify_type = models.CharField(max_length=50, blank=True, null=True)
    objects = managers.PassThroughManager.for_queryset_class(NotificationQuerySet)()
    class Meta:
        # Newest notifications first.
        ordering = ('-timestamp', )
    def __unicode__(self):
        # Render the activity-stream sentence; the shape depends on which of
        # target / action_object are set.
        ctx = {
            'actor': self.actor,
            'verb': self.verb,
            'action_object': self.action_object,
            'target': self.target,
            'timesince': self.timesince()
        }
        if self.target:
            if self.action_object:
                return u'%(actor)s %(verb)s %(action_object)s on %(target)s %(timesince)s ago' % ctx
            return u'%(actor)s %(verb)s %(target)s %(timesince)s ago' % ctx
        if self.action_object:
            return u'%(actor)s %(verb)s %(action_object)s %(timesince)s ago' % ctx
        return u'%(actor)s %(verb)s %(timesince)s ago' % ctx
    def __str__(self):  # Adds support for Python 3
        return self.__unicode__()
    def timesince(self, now=None):
        """
        Shortcut for the ``django.utils.timesince.timesince`` function of the
        current timestamp.
        """
        from django.utils.timesince import timesince as timesince_
        return timesince_(self.timestamp, now)
    @property
    def slug(self):
        """Slug derived from the primary key via ``utils.id2slug``."""
        return id2slug(self.id)
    def mark_as_read(self):
        """Set unread=False and persist, if currently unread."""
        if self.unread:
            self.unread = False
            self.save()
    def mark_as_unread(self):
        """Set unread=True and persist, if currently read."""
        if not self.unread:
            self.unread = True
            self.save()
# 'NOTIFY_USE_JSONFIELD' is for backward compatibility
# As app name is 'notifications', let's use 'NOTIFICATIONS' consistently from now
EXTRA_DATA = getattr(settings, 'NOTIFY_USE_JSONFIELD', None)
if EXTRA_DATA is None:
    EXTRA_DATA = getattr(settings, 'NOTIFICATIONS_USE_JSONFIELD', False)
def notify_handler(verb, **kwargs):
    """
    Handler function to create Notification instance upon action signal call.

    Consumed kwargs:
        recipient: the user receiving the notification (required)
        sender: the actor object (required, supplied by Signal.send)
        target, action_object: optional related objects (stored as generic FKs)
        public, description, notify_type, timestamp, level: optional fields
    Any leftover kwargs are stored on ``Notification.data`` when the
    NOTIFY_USE_JSONFIELD / NOTIFICATIONS_USE_JSONFIELD setting is enabled.
    """
    kwargs.pop('signal', None)
    recipient = kwargs.pop('recipient')
    actor = kwargs.pop('sender')
    newnotify = Notification(
        recipient=recipient,
        actor_content_type=ContentType.objects.get_for_model(actor),
        actor_object_id=actor.pk,
        verb=text_type(verb),
        public=bool(kwargs.pop('public', True)),
        description=kwargs.pop('description', None),
        notify_type=kwargs.pop('notify_type', None),
        timestamp=kwargs.pop('timestamp', now()),
        level=kwargs.pop('level', Notification.LEVELS.info),
    )
    # Attach the optional generic relations by pk + content type.
    for opt in ('target', 'action_object'):
        obj = kwargs.pop(opt, None)
        if obj is not None:
            setattr(newnotify, '%s_object_id' % opt, obj.pk)
            setattr(newnotify, '%s_content_type' % opt,
                    ContentType.objects.get_for_model(obj))
    # Store any leftover kwargs as arbitrary JSON payload (opt-in via settings).
    if kwargs and EXTRA_DATA:
        newnotify.data = kwargs
    newnotify.save()
# connect the signal
notify.connect(notify_handler, dispatch_uid='notifications.models.notification')
| 1,741 | 2,621 | 252 |
4abb12748191f37e3d6a74eb5ff5559ce22dbbd8 | 67 | py | Python | src/flotilla/db/__init__.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 5 | 2016-01-01T15:50:21.000Z | 2018-11-27T17:38:15.000Z | src/flotilla/db/__init__.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 27 | 2015-12-17T07:49:56.000Z | 2018-07-13T15:06:33.000Z | src/flotilla/db/__init__.py | pebble/flotilla | 23d9b3aefd8312879549c50e52ea73f3e3f493be | [
"MIT"
] | 7 | 2015-12-01T22:04:24.000Z | 2021-11-28T13:21:35.000Z | from .tables import DynamoDbTables
from .lock import DynamoDbLocks
| 22.333333 | 34 | 0.850746 | from .tables import DynamoDbTables
from .lock import DynamoDbLocks
| 0 | 0 | 0 |
1df86ffb24e207d9ed6e859edcb0221b6c3fcfdd | 987 | py | Python | tests/mdpd/rest.py | LMNS3d/Mirheo | e710291502eb3d1b4001e3811f7b7d105af82c86 | [
"MIT"
] | null | null | null | tests/mdpd/rest.py | LMNS3d/Mirheo | e710291502eb3d1b4001e3811f7b7d105af82c86 | [
"MIT"
] | null | null | null | tests/mdpd/rest.py | LMNS3d/Mirheo | e710291502eb3d1b4001e3811f7b7d105af82c86 | [
"MIT"
] | 1 | 2021-07-14T13:24:05.000Z | 2021-07-14T13:24:05.000Z | #!/usr/bin/env python
import mirheo as mir
# MDPD rest-state test: a uniform particle fluid in a periodic box, run on a
# single rank, with periodic stats written to stats.txt.
dt = 0.001
ranks = (1, 1, 1)
domain = (16, 16, 16)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log')
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=3)
u.registerParticleVector(pv, ic)
# rc: interaction cutoff; rd: density-kernel cutoff used by both interactions.
rc = 1.0
rd = 0.75
den = mir.Interactions.Pairwise('den', rd, kind="Density", density_kernel="MDPD")
mdpd = mir.Interactions.Pairwise('mdpd', rc, kind="MDPD", rd=rd, a=10.0, b=20.0, gamma=10.0, kBT=1.0, power=0.5)
u.registerInteraction(den)
u.registerInteraction(mdpd)
u.setInteraction(den, pv, pv)
u.setInteraction(mdpd, pv, pv)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
# Dump simulation statistics every 1000 steps.
u.registerPlugins(mir.Plugins.createStats('stats', "stats.txt", 1000))
u.run(5001)
# nTEST: mdpd.rest
# cd mdpd
# rm -rf stats.txt
# mir.run --runargs "-n 2" ./rest.py > /dev/null
# cat stats.txt | awk '{print $1, $2, $3, $4, $5}' > stats.out.txt
| 24.675 | 112 | 0.68997 | #!/usr/bin/env python
import mirheo as mir
# MDPD rest-state test: a uniform particle fluid in a periodic box, run on a
# single rank, with periodic stats written to stats.txt.
dt = 0.001
ranks = (1, 1, 1)
domain = (16, 16, 16)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log')
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=3)
u.registerParticleVector(pv, ic)
# rc: interaction cutoff; rd: density-kernel cutoff used by both interactions.
rc = 1.0
rd = 0.75
den = mir.Interactions.Pairwise('den', rd, kind="Density", density_kernel="MDPD")
mdpd = mir.Interactions.Pairwise('mdpd', rc, kind="MDPD", rd=rd, a=10.0, b=20.0, gamma=10.0, kBT=1.0, power=0.5)
u.registerInteraction(den)
u.registerInteraction(mdpd)
u.setInteraction(den, pv, pv)
u.setInteraction(mdpd, pv, pv)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
# Dump simulation statistics every 1000 steps.
u.registerPlugins(mir.Plugins.createStats('stats', "stats.txt", 1000))
u.run(5001)
# nTEST: mdpd.rest
# cd mdpd
# rm -rf stats.txt
# mir.run --runargs "-n 2" ./rest.py > /dev/null
# cat stats.txt | awk '{print $1, $2, $3, $4, $5}' > stats.out.txt
| 0 | 0 | 0 |
cb6c8c56891034281b9e4e6a2fcf44f9933a1416 | 15,814 | py | Python | tests/test_doc_maker.py | farazkhanfk7/hydra-python-core | fe18d88ceb375be4675bb89cddc5096eab27b675 | [
"MIT"
] | 20 | 2019-01-13T23:54:35.000Z | 2021-11-25T09:04:58.000Z | tests/test_doc_maker.py | farazkhanfk7/hydra-python-core | fe18d88ceb375be4675bb89cddc5096eab27b675 | [
"MIT"
] | 63 | 2019-01-13T15:53:06.000Z | 2021-07-23T08:15:09.000Z | tests/test_doc_maker.py | farazkhanfk7/hydra-python-core | fe18d88ceb375be4675bb89cddc5096eab27b675 | [
"MIT"
] | 29 | 2019-01-13T13:46:06.000Z | 2021-12-16T13:01:56.000Z | import unittest
import re
from pyld import jsonld
import requests
from unittest.mock import patch
from hydra_python_core import doc_maker, doc_writer
from samples import doc_writer_sample_output
class TestCreateClass(unittest.TestCase):
"""
Test Class for create_class method
"""
@patch('hydra_python_core.doc_maker.HydraClass', spec_set=doc_maker.HydraClass)
def test_output(self, mock_class):
"""
Test method to check if HydraClass is instantiated with proper arguments and
properties and operations have been added to it.
"""
class_dict = {
"@id": "https://hydrus.com/api/dummyClass",
"@type": [
"http://www.w3.org/ns/hydra/core#Class"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "A dummyClass for demo"
}
],
"http://www.w3.org/ns/hydra/core#supportedOperation": [
{
"@type": [
"http://schema.org/FindAction"
],
"http://www.w3.org/ns/hydra/core#expects": [
{
"@id": "https://json-ld.org/playground/null"
}
],
"http://www.w3.org/ns/hydra/core#expectsHeader": [
],
"http://www.w3.org/ns/hydra/core#method": [
{
"@value": "GET"
}
],
"http://www.w3.org/ns/hydra/core#possibleStatus": [
{
"@type": [
"http://www.w3.org/ns/hydra/core#Status"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "dummyClass returned."
}
],
"http://www.w3.org/ns/hydra/core#statusCode": [
{
"@value": 200
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": ""
}
]
}
],
"http://www.w3.org/ns/hydra/core#returns": [
{
"@id": "https://hydrus.com/api/dummyClass"
}
],
"http://www.w3.org/ns/hydra/core#returnsHeader": [
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "GetClass"
}
]
}
],
"http://www.w3.org/ns/hydra/core#supportedProperty": [
{
"@type": [
"https://json-ld.org/playground/SupportedProperty"
],
"http://www.w3.org/ns/hydra/core#property": [
{
"@id": "http://props.hydrus.com/prop1"
}
],
"http://www.w3.org/ns/hydra/core#readable": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#required": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "Prop1"
}
],
"http://www.w3.org/ns/hydra/core#writeable": [
{
"@value": "true"
}
]
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "dummyClass"
}
]
}
# run the function and check if HydraClass has been instantiated
class_ = doc_maker.create_class(class_dict, endpoint=False)
mock_class.assert_called_once_with('dummyClass', 'A dummyClass for demo',
endpoint=False)
# check if properties and operations has been added to the hydra class
self.assertEqual(mock_class.return_value.add_supported_op.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedOperation"]))
self.assertEqual(mock_class.return_value.add_supported_prop.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedProperty"]))
self.assertIsInstance(class_, doc_writer.HydraClass)
class TestCreateDoc(unittest.TestCase):
"""
Test Class for create_doc method
"""
@patch('hydra_python_core.doc_maker.re')
def test_validations(self, mock_re):
"""
Test method to check if exceptions are raised if doc has missing keys
or contain syntax errors
"""
# Check if proper error raised when no "@id" key is present
id_ = self.doc.pop("@id", None)
self.assertRaises(SyntaxError, doc_maker.create_doc, self.doc)
self.doc["@id"] = id_
@patch('hydra_python_core.doc_maker.HydraDoc', spec_set=doc_maker.HydraDoc)
def test_output(self, mock_doc):
"""
Test method to check if HydraDoc are instantiated with proper arguments
and all necessary functions are called.
"""
server_url = "http://hydrus.com/"
api_name = "test_api"
doc_name = 'vocab'
class_count = 0
collection_count = 0
# find out the number of classes
for class_ in self.doc["supportedClass"]:
if 'manages' not in class_:
class_count += 1
else:
collection_count += 1
# check if apidoc has been created with proper args
apidoc = doc_maker.create_doc(self.doc, server_url, api_name)
mock_doc.assert_called_once_with(api_name, "Title for the API Documentation",
"Description for the API Documentation",
api_name, server_url, doc_name)
# check if all context keys has been added to apidoc
self.assertEqual(mock_doc.return_value.add_to_context.call_count, len(
self.doc["@context"].keys()))
# check if all classes has been added to apidoc
self.assertEqual(
mock_doc.return_value.add_supported_class.call_count, class_count-3)
self.assertEqual(
mock_doc.return_value.add_supported_collection.call_count, collection_count)
# check if all base resource and classes has been added
self.assertEqual(
mock_doc.return_value.add_baseResource.call_count, 1)
self.assertEqual(
mock_doc.return_value.add_baseCollection.call_count, 1)
self.assertEqual(
mock_doc.return_value.gen_EntryPoint.call_count, 1)
self.assertIsInstance(apidoc, doc_writer.HydraDoc)
class TestCreateProperty(unittest.TestCase):
    """
    Test Class for create_property method
    """
    @patch('hydra_python_core.doc_maker.HydraClassProp', spec_set=doc_maker.HydraClassProp)
    def test_output(self, mock_prop):
        """
        Test method to check if HydraClassProp is instantiated with proper arguments with
        different input
        """
        # JSON-LD expanded form of a hydra:SupportedProperty.
        property_ = {
            "@type": [
                "http://www.w3.org/ns/hydra/core#SupportedProperty"
            ],
            "http://www.w3.org/ns/hydra/core#property": [
                {
                    "@id": "http://props.hydrus.com/prop1"
                }
            ],
            "http://www.w3.org/ns/hydra/core#readable": [
                {
                    "@value": "false"
                }
            ],
            "http://www.w3.org/ns/hydra/core#required": [
                {
                    "@value": "false"
                }
            ],
            "http://www.w3.org/ns/hydra/core#title": [
                {
                    "@value": "Prop1"
                }
            ],
            "http://www.w3.org/ns/hydra/core#writeable": [
                {
                    "@value": "true"
                }
            ]
        }
        doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
                                          required="false", read="false", write="true")
        mock_prop.reset_mock()
        # Flip 'readable' and check the change is propagated.
        property_["http://www.w3.org/ns/hydra/core#readable"] = [
            {
                "@value": "true"
            }
        ]
        doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
                                          required="false", read="true", write="true")
        mock_prop.reset_mock()
        # Change the property IRI and check the change is propagated.
        property_["http://www.w3.org/ns/hydra/core#property"] = [
            {
                "@id": "http://props.hydrus.com/prop2"
            }
        ]
        obj = doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop2", title="Prop1",
                                          required="false", read="true", write="true")
        self.assertIsInstance(obj, doc_writer.HydraClassProp)
class TestCreateOperation(unittest.TestCase):
    """
    Test Class for create_operation method
    """
    @patch('hydra_python_core.doc_maker.HydraClassOp', spec_set=doc_maker.HydraClassOp)
    def test_output(self, mock_op):
        """
        Test method to check if HydraClassOp is instantiated with proper arguments with
        different input
        """
        # JSON-LD expanded form of a hydra:Operation (POST UpdateClass).
        op = {
            "@type": [
                "http://schema.org/UpdateAction"
            ],
            "http://www.w3.org/ns/hydra/core#expects": [
                {
                    "@id": "https://hydrus.com/api/dummyClass"
                }
            ],
            "http://www.w3.org/ns/hydra/core#expectsHeader": [
            ],
            "http://www.w3.org/ns/hydra/core#method": [
                {
                    "@value": "POST"
                }
            ],
            "http://www.w3.org/ns/hydra/core#possibleStatus": [
            ],
            "http://www.w3.org/ns/hydra/core#returns": [
                {
                    "@id": "null"
                }
            ],
            "http://www.w3.org/ns/hydra/core#returnsHeader": [
                {
                    "@value": "Content-Type"
                },
                {
                    "@value": "Content-Length"
                }
            ],
            "http://www.w3.org/ns/hydra/core#title": [
                {
                    "@value": "UpdateClass"
                }
            ]
        }
        doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="https://hydrus.com/api/dummyClass",
            returns="null",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        mock_op.reset_mock()
        # Change 'expects' and check the change is propagated.
        op["http://www.w3.org/ns/hydra/core#expects"] = [
            {
                "@id": "http://hydrus.com/test"
            }
        ]
        doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="http://hydrus.com/test",
            returns="null",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        mock_op.reset_mock()
        # Change 'returns' and check the change is propagated.
        op["http://www.w3.org/ns/hydra/core#returns"] = [
            {
                "@id": "http://hydrus.com/test"
            }
        ]
        obj = doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="http://hydrus.com/test",
            returns="http://hydrus.com/test",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        self.assertIsInstance(obj, doc_writer.HydraClassOp)
class TestCreateStatus(unittest.TestCase):
    """
    Test Class for create_status method
    """
    @patch('hydra_python_core.doc_maker.HydraStatus', spec_set=doc_maker.HydraStatus)
    def test_output(self, mock_status):
        """
        Test method to check if HydraStatus is instantiated with proper arguments with
        different input
        """
        # JSON-LD expanded form of a single hydra:Status entry.
        status = [
            {
                "@type": [
                    "http://www.w3.org/ns/hydra/core#Status"
                ],
                "http://www.w3.org/ns/hydra/core#description": [
                    {
                        "@value": "dummyClass updated."
                    }
                ],
                "http://www.w3.org/ns/hydra/core#statusCode": [
                    {
                        "@value": 200
                    }
                ],
                "http://www.w3.org/ns/hydra/core#title": [
                    {
                        "@value": ""
                    }
                ]
            },
        ]
        obj = doc_maker.create_status(status)
        # Expected positional args: (code, id_, title, desc).
        mock_status.assert_called_once_with(200, None, '', 'dummyClass updated.')
        self.assertIsInstance(obj[0], doc_writer.HydraStatus)
class TestFragments(unittest.TestCase):
    """
    Test Class for checking fragments in id's
    """
    # NOTE(review): no test methods are defined here — presumably they were
    # stripped; confirm against the upstream test suite.
if __name__ == '__main__':
unittest.main()
| 36.187643 | 95 | 0.460857 | import unittest
import re
from pyld import jsonld
import requests
from unittest.mock import patch
from hydra_python_core import doc_maker, doc_writer
from samples import doc_writer_sample_output
class TestCreateClass(unittest.TestCase):
"""
Test Class for create_class method
"""
def setUp(self):
self.doc = doc_writer_sample_output.doc
@patch('hydra_python_core.doc_maker.HydraClass', spec_set=doc_maker.HydraClass)
def test_output(self, mock_class):
"""
Test method to check if HydraClass is instantiated with proper arguments and
properties and operations have been added to it.
"""
class_dict = {
"@id": "https://hydrus.com/api/dummyClass",
"@type": [
"http://www.w3.org/ns/hydra/core#Class"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "A dummyClass for demo"
}
],
"http://www.w3.org/ns/hydra/core#supportedOperation": [
{
"@type": [
"http://schema.org/FindAction"
],
"http://www.w3.org/ns/hydra/core#expects": [
{
"@id": "https://json-ld.org/playground/null"
}
],
"http://www.w3.org/ns/hydra/core#expectsHeader": [
],
"http://www.w3.org/ns/hydra/core#method": [
{
"@value": "GET"
}
],
"http://www.w3.org/ns/hydra/core#possibleStatus": [
{
"@type": [
"http://www.w3.org/ns/hydra/core#Status"
],
"http://www.w3.org/ns/hydra/core#description": [
{
"@value": "dummyClass returned."
}
],
"http://www.w3.org/ns/hydra/core#statusCode": [
{
"@value": 200
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": ""
}
]
}
],
"http://www.w3.org/ns/hydra/core#returns": [
{
"@id": "https://hydrus.com/api/dummyClass"
}
],
"http://www.w3.org/ns/hydra/core#returnsHeader": [
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "GetClass"
}
]
}
],
"http://www.w3.org/ns/hydra/core#supportedProperty": [
{
"@type": [
"https://json-ld.org/playground/SupportedProperty"
],
"http://www.w3.org/ns/hydra/core#property": [
{
"@id": "http://props.hydrus.com/prop1"
}
],
"http://www.w3.org/ns/hydra/core#readable": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#required": [
{
"@value": "false"
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "Prop1"
}
],
"http://www.w3.org/ns/hydra/core#writeable": [
{
"@value": "true"
}
]
}
],
"http://www.w3.org/ns/hydra/core#title": [
{
"@value": "dummyClass"
}
]
}
# run the function and check if HydraClass has been instantiated
class_ = doc_maker.create_class(class_dict, endpoint=False)
mock_class.assert_called_once_with('dummyClass', 'A dummyClass for demo',
endpoint=False)
# check if properties and operations has been added to the hydra class
self.assertEqual(mock_class.return_value.add_supported_op.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedOperation"]))
self.assertEqual(mock_class.return_value.add_supported_prop.call_count,
len(class_dict["http://www.w3.org/ns/hydra/core#supportedProperty"]))
self.assertIsInstance(class_, doc_writer.HydraClass)
class TestCreateDoc(unittest.TestCase):
    """
    Test Class for create_doc method
    """

    def setUp(self):
        # Shared fixture: a complete sample ApiDoc in expanded JSON-LD form.
        self.doc = doc_writer_sample_output.doc

    @patch('hydra_python_core.doc_maker.re')
    def test_validations(self, mock_re):
        """
        Test method to check if exceptions are raised if doc has missing keys
        or contain syntax errors
        """
        # Check if proper error raised when no "@id" key is present
        id_ = self.doc.pop("@id", None)
        self.assertRaises(SyntaxError, doc_maker.create_doc, self.doc)
        # Restore the popped key so later tests see an intact document.
        self.doc["@id"] = id_

    @patch('hydra_python_core.doc_maker.HydraDoc', spec_set=doc_maker.HydraDoc)
    def test_output(self, mock_doc):
        """
        Test method to check if HydraDoc are instantiated with proper arguments
        and all necessary functions are called.
        """
        server_url = "http://hydrus.com/"
        api_name = "test_api"
        doc_name = 'vocab'
        class_count = 0
        collection_count = 0
        # find out the number of classes; entries with "manages" are collections
        for class_ in self.doc["supportedClass"]:
            if 'manages' not in class_:
                class_count += 1
            else:
                collection_count += 1
        # check if apidoc has been created with proper args
        apidoc = doc_maker.create_doc(self.doc, server_url, api_name)
        mock_doc.assert_called_once_with(api_name, "Title for the API Documentation",
                                         "Description for the API Documentation",
                                         api_name, server_url, doc_name)
        # check if all context keys has been added to apidoc
        self.assertEqual(mock_doc.return_value.add_to_context.call_count, len(
            self.doc["@context"].keys()))
        # check if all classes has been added to apidoc
        # NOTE(review): the "-3" presumably accounts for base classes
        # (Resource/Collection/EntryPoint) that create_doc skips -- confirm.
        self.assertEqual(
            mock_doc.return_value.add_supported_class.call_count, class_count-3)
        self.assertEqual(
            mock_doc.return_value.add_supported_collection.call_count, collection_count)
        # check if all base resource and classes has been added
        self.assertEqual(
            mock_doc.return_value.add_baseResource.call_count, 1)
        self.assertEqual(
            mock_doc.return_value.add_baseCollection.call_count, 1)
        self.assertEqual(
            mock_doc.return_value.gen_EntryPoint.call_count, 1)
        self.assertIsInstance(apidoc, doc_writer.HydraDoc)
class TestCreateProperty(unittest.TestCase):
    """
    Test Class for create_property method
    """

    @patch('hydra_python_core.doc_maker.HydraClassProp', spec_set=doc_maker.HydraClassProp)
    def test_output(self, mock_prop):
        """
        Test method to check if HydraClassProp is instantiated with proper agruments with
        different input
        """
        # A supported-property block in expanded JSON-LD form.
        property_ = {
            "@type": [
                "http://www.w3.org/ns/hydra/core#SupportedProperty"
            ],
            "http://www.w3.org/ns/hydra/core#property": [
                {
                    "@id": "http://props.hydrus.com/prop1"
                }
            ],
            "http://www.w3.org/ns/hydra/core#readable": [
                {
                    "@value": "false"
                }
            ],
            "http://www.w3.org/ns/hydra/core#required": [
                {
                    "@value": "false"
                }
            ],
            "http://www.w3.org/ns/hydra/core#title": [
                {
                    "@value": "Prop1"
                }
            ],
            "http://www.w3.org/ns/hydra/core#writeable": [
                {
                    "@value": "true"
                }
            ]
        }
        doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
                                          required="false", read="false", write="true")
        mock_prop.reset_mock()
        # Flipping "readable" must be reflected in the read kwarg.
        property_["http://www.w3.org/ns/hydra/core#readable"] = [
            {
                "@value": "true"
            }
        ]
        doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop1", title="Prop1",
                                          required="false", read="true", write="true")
        mock_prop.reset_mock()
        # Changing the property IRI must be reflected in the prop kwarg.
        property_["http://www.w3.org/ns/hydra/core#property"] = [
            {
                "@id": "http://props.hydrus.com/prop2"
            }
        ]
        obj = doc_maker.create_property(property_)
        mock_prop.assert_called_once_with(prop="http://props.hydrus.com/prop2", title="Prop1",
                                          required="false", read="true", write="true")
        self.assertIsInstance(obj, doc_writer.HydraClassProp)
class TestCreateOperation(unittest.TestCase):
    """
    Test Class for create_operation method
    """

    @patch('hydra_python_core.doc_maker.HydraClassOp', spec_set=doc_maker.HydraClassOp)
    def test_output(self, mock_op):
        """
        Test method to check if HydraClassOp is instantiated with proper arguments with
        different input
        """
        # A supported-operation block in expanded JSON-LD form.
        op = {
            "@type": [
                "http://schema.org/UpdateAction"
            ],
            "http://www.w3.org/ns/hydra/core#expects": [
                {
                    "@id": "https://hydrus.com/api/dummyClass"
                }
            ],
            "http://www.w3.org/ns/hydra/core#expectsHeader": [
            ],
            "http://www.w3.org/ns/hydra/core#method": [
                {
                    "@value": "POST"
                }
            ],
            "http://www.w3.org/ns/hydra/core#possibleStatus": [
            ],
            "http://www.w3.org/ns/hydra/core#returns": [
                {
                    "@id": "null"
                }
            ],
            "http://www.w3.org/ns/hydra/core#returnsHeader": [
                {
                    "@value": "Content-Type"
                },
                {
                    "@value": "Content-Length"
                }
            ],
            "http://www.w3.org/ns/hydra/core#title": [
                {
                    "@value": "UpdateClass"
                }
            ]
        }
        doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="https://hydrus.com/api/dummyClass",
            returns="null",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        mock_op.reset_mock()
        # Changing "expects" must be reflected in the expects kwarg.
        op["http://www.w3.org/ns/hydra/core#expects"] = [
            {
                "@id": "http://hydrus.com/test"
            }
        ]
        doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="http://hydrus.com/test",
            returns="null",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        mock_op.reset_mock()
        # Changing "returns" must be reflected in the returns kwarg.
        op["http://www.w3.org/ns/hydra/core#returns"] = [
            {
                "@id": "http://hydrus.com/test"
            }
        ]
        obj = doc_maker.create_operation(op)
        mock_op.assert_called_once_with(
            title="UpdateClass",
            method="POST",
            expects="http://hydrus.com/test",
            returns="http://hydrus.com/test",
            returns_header=["Content-Type", "Content-Length"],
            possible_status=[],
            expects_header=[])
        self.assertIsInstance(obj, doc_writer.HydraClassOp)
class TestCreateStatus(unittest.TestCase):
    """
    Test Class for create_status method
    """

    @patch('hydra_python_core.doc_maker.HydraStatus', spec_set=doc_maker.HydraStatus)
    def test_output(self, mock_status):
        """
        Test method to check if HydraStatus is instantiated with proper arguments with
        different input
        """
        # A one-element list of status blocks in expanded JSON-LD form.
        status = [
            {
                "@type": [
                    "http://www.w3.org/ns/hydra/core#Status"
                ],
                "http://www.w3.org/ns/hydra/core#description": [
                    {
                        "@value": "dummyClass updated."
                    }
                ],
                "http://www.w3.org/ns/hydra/core#statusCode": [
                    {
                        "@value": 200
                    }
                ],
                "http://www.w3.org/ns/hydra/core#title": [
                    {
                        "@value": ""
                    }
                ]
            },
        ]
        obj = doc_maker.create_status(status)
        # Positional args appear to be (code, ?, title, description).
        # NOTE(review): meaning inferred from values -- confirm against the
        # HydraStatus signature.
        mock_status.assert_called_once_with(200, None, '', 'dummyClass updated.')
        self.assertIsInstance(obj[0], doc_writer.HydraStatus)
class TestFragments(unittest.TestCase):
    """
    Test Class for checking fragments in id's
    """

    # Matches "...?resource=<Name>" and captures the resource name.
    RESOURCE_ID_REGEX = r"(\W*\?resource=\W*)([a-zA-Z]+)"

    def _assert_resource_fragment(self, resource_id, expected_name):
        """Assert that *resource_id* embeds *expected_name* as its
        ``?resource=`` fragment.

        The original code called ``.groups()`` on the (possibly None)
        match, so a missing fragment crashed with AttributeError instead
        of failing cleanly; assert the match itself first.
        """
        match_groups = re.search(self.RESOURCE_ID_REGEX, resource_id)
        self.assertIsNotNone(match_groups, resource_id)
        self.assertIsNotNone(match_groups.groups()[0])
        self.assertEqual(match_groups.groups()[1], expected_name)

    def test_fragments(self):
        """Every parsed class and collection id must carry its own name
        as the ``?resource=`` fragment."""
        server_url = "http://hydrus.com/"
        api_name = "test_api"
        self.doc = doc_writer_sample_output.doc
        apidoc = doc_maker.create_doc(self.doc, server_url, api_name)
        for class_ in apidoc.parsed_classes:
            resource = apidoc.parsed_classes[class_]['class']
            self._assert_resource_fragment(resource.id_, class_)
        for collection in apidoc.collections:
            collection_id = apidoc.collections[collection]['collection'].collection_id
            self._assert_resource_fragment(collection_id, collection)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 1,006 | 0 | 80 |
145ed4c6d5e66f43b9f096486f8f3ef32e02247f | 2,024 | py | Python | Models/utils/UpConcat2d.py | rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | [
"MIT"
] | 1 | 2021-05-13T01:41:38.000Z | 2021-05-13T01:41:38.000Z | Models/utils/UpConcat2d.py | rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | [
"MIT"
] | null | null | null | Models/utils/UpConcat2d.py | rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | [
"MIT"
] | 1 | 2021-03-13T16:55:06.000Z | 2021-03-13T16:55:06.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == '__main__':
x_down = torch.randn((1, 128, 56, 56))
x_enc = torch.randn((1, 64, 111, 111))
upconcat = UpConcat2d(in_channels_conv=128, out_channels_conv=64)
y = upconcat(x_down, x_enc)
print(y.shape)
| 43.06383 | 130 | 0.596838 | import torch
import torch.nn as nn
import torch.nn.functional as F
class UpConcat2d(nn.Module):
    """U-Net style skip connection: transposed-conv upsampling of a decoder
    feature map followed by channel-wise concatenation with a center-cropped
    encoder feature map."""

    def __init__(self, in_channels_conv, out_channels_conv, scale_factor=2):
        super(UpConcat2d, self).__init__()
        self.in_channels_conv = in_channels_conv
        self.out_channels_conv = out_channels_conv
        self.scale_factor = scale_factor
        # One stride-2 transposed conv doubles the spatial size; a second
        # one is chained when a 4x upsampling factor is requested.
        self.up = nn.ConvTranspose2d(in_channels=in_channels_conv,
                                     out_channels=out_channels_conv,
                                     kernel_size=2, stride=2, padding=0)
        if scale_factor == 4:
            self.up2 = nn.ConvTranspose2d(in_channels=out_channels_conv,
                                          out_channels=out_channels_conv,
                                          kernel_size=2, stride=2, padding=0)

    def forward(self, x_down, x_enc):
        """Upsample *x_down*, center-crop *x_enc* to match, concat on dim 1."""
        upsampled = F.relu(self.up(x_down))
        if self.scale_factor == 4:
            upsampled = F.relu(self.up2(upsampled))
        # If the upsampled map outgrew the encoder map, pad the encoder map
        # symmetrically (one extra pixel per side when the difference is odd)
        # so a centered crop of the required size always exists.
        if upsampled.shape[-1] > x_enc.shape[-1]:
            diff = upsampled.shape[-1] - x_enc.shape[-1]
            pad = diff // 2 + (1 if diff % 2 else 0)
            x_enc = F.pad(x_enc, (pad, pad, pad, pad))
        # Center-crop the encoder map to the upsampled spatial size.
        row0 = (x_enc.shape[-2] - upsampled.shape[-2]) // 2
        col0 = (x_enc.shape[-1] - upsampled.shape[-1]) // 2
        rows = torch.narrow(x_enc, dim=2, start=row0, length=upsampled.shape[-2])
        crop = torch.narrow(rows, dim=3, start=col0, length=upsampled.shape[-1])
        return torch.cat(tensors=(upsampled, crop), dim=1)

    def initialize_weights(self):
        """(Re)initialize conv weights from N(0, 0.02) and zero the biases."""
        layers = [self.up] + ([self.up2] if self.scale_factor == 4 else [])
        for layer in layers:
            nn.init.normal_(layer.weight.data, mean=0.0, std=.02)
            nn.init.constant_(layer.bias.data, 0.0)
if __name__ == '__main__':
x_down = torch.randn((1, 128, 56, 56))
x_enc = torch.randn((1, 64, 111, 111))
upconcat = UpConcat2d(in_channels_conv=128, out_channels_conv=64)
y = upconcat(x_down, x_enc)
print(y.shape)
| 1,610 | 7 | 103 |
ad4dfcc231b01d6f7a34b848a1997d16cec5325b | 408 | py | Python | paraVerComoFuncionaAlgumasCoisas/tkinter-coisas/font/2.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/tkinter-coisas/font/2.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | paraVerComoFuncionaAlgumasCoisas/tkinter-coisas/font/2.py | jonasht/pythonEstudos | 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import ttk
from tkinter.font import Font
root = tk.Tk()
# definindo a fonte
grandeFonte = Font(
family='Helvetica',
size=42,
weight='bold',
slant='roman',
underline=0,
overstrike=0
)
bt1 = ttk.Button(root, text='grande botao', font=grandeFonte)
bt1.pack(pady=20)
lb1 = ttk.Label(root, text='um texxto', font=grandeFonte)
lb1.pack()
root.mainloop() | 19.428571 | 61 | 0.691176 | import tkinter as tk
from tkinter import ttk
from tkinter.font import Font
root = tk.Tk()
# definindo a fonte
grandeFonte = Font(
family='Helvetica',
size=42,
weight='bold',
slant='roman',
underline=0,
overstrike=0
)
bt1 = ttk.Button(root, text='grande botao', font=grandeFonte)
bt1.pack(pady=20)
lb1 = ttk.Label(root, text='um texxto', font=grandeFonte)
lb1.pack()
root.mainloop() | 0 | 0 | 0 |
b15a9c14ec4a19dc1719ff4eba55a5a8bc412736 | 3,647 | py | Python | deepcode-ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | deepcode-ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | deepcode-ignore.py | TheSecEng/sublime-plugin | 8e43a175c7a282eb280e25ce73ddf2350a9adce7 | [
"MIT"
] | null | null | null | import re
import sublime
import sublime_plugin
from Default.comment import build_comment_data
from .consts import PANEL_NAME
from .persist import HIGHLIGHTED_REGIONS
| 31.439655 | 88 | 0.599671 | import re
import sublime
import sublime_plugin
from Default.comment import build_comment_data
from .consts import PANEL_NAME
from .persist import HIGHLIGHTED_REGIONS
def get_ignore_text(type, id, append=False):
ignoretext = "deepcode ignore" if type == "line" else "file deepcode ignore"
if append:
ignoretext = ", " + ignoretext
return "{} {}: <please specify a reason of ignoring this> \n".format(ignoretext, id)
def does_comment_exist(line_text):
return " deepcode ignore" in line_text
def update_highlighted_region(view, point, with_new_line=False):
target = next(
(
error
for error in HIGHLIGHTED_REGIONS[view.file_name()]
if point in error["region"]
),
None,
)
(x1, y1), (x2, y2) = (
view.rowcol(target["region"][0]),
view.rowcol(target["region"][-1]),
)
if with_new_line:
x1 = x1 + 1
x2 = x2 + 1
else:
# for some reason one character goes missing between comments
print("WTF")
y2 = y2 + 1
def update_points():
error_start_point, error_end_point = (
view.text_point(x1, y1),
view.text_point(x2, y2),
)
target["region"] = range(error_start_point, error_end_point)
sublime.set_timeout(update_points, 500)
def insert_new_line_with_comment(view, edit, point, type, id, target):
data = build_comment_data(view, target.begin())
indent = re.findall(r"^\s*", view.substr(view.full_line(point)))[0]
if data[0]:
snip = "{0}{1}{2}".format(
indent, data[0][0][0], get_ignore_text(type, id))
elif data[1]:
snip = "{0}{1} {2} {3}".format(
indent, data[1][0][0], get_ignore_text(type, id), data[1][0][1]
)
view.insert(edit, target.begin(), snip)
view.sel().clear()
view.sel().add(
view.find(
"<please specify a reason of ignoring this>",
target.begin(),
sublime.IGNORECASE,
)
)
update_highlighted_region(view, point, with_new_line=True)
def append_to_existing_comment(view, edit, point, type, id, target):
(x, y) = view.rowcol(point)
indent = re.findall(r"^\s*", view.substr(view.full_line(point)))[0]
target_text = view.substr(target)
snip = "{0}{1}{2}".format(
indent, target_text.strip(), get_ignore_text(type, id, append=True)
)
update_highlighted_region(view, point)
view.replace(edit, target, snip)
view.sel().clear()
view.sel().add(
view.find(
"<please specify a reason of ignoring this>",
target.end(),
sublime.IGNORECASE,
)
)
target_updated = view.line(view.text_point(x - 1, y))
sublime.set_timeout(lambda: view.show(target_updated.end()), 300)
class DeepCodeIgnoreCommand(sublime_plugin.TextCommand):
def run(self, edit, point, type, id):
if type == "panel":
self.view.window().run_command("deepcode_show_results_panel")
else:
current_line = self.view.line(point)
(x, y) = self.view.rowcol(point)
previous_line = self.view.full_line(self.view.text_point(x - 1, y))
previous_line_text = self.view.substr(previous_line)
if does_comment_exist(previous_line_text):
append_to_existing_comment(
self.view, edit, point, type, id, previous_line
)
else:
insert_new_line_with_comment(
self.view, edit, point, type, id, current_line
)
self.view.hide_popup()
| 3,274 | 35 | 164 |
0f8550b9e27e981890d5090279c213056c4463b9 | 1,066 | py | Python | filefind/submodules.py | agateau/filefind | de6d743f210e224f264e6ba84f38d6bbb40ab067 | [
"Apache-2.0"
] | null | null | null | filefind/submodules.py | agateau/filefind | de6d743f210e224f264e6ba84f38d6bbb40ab067 | [
"Apache-2.0"
] | null | null | null | filefind/submodules.py | agateau/filefind | de6d743f210e224f264e6ba84f38d6bbb40ab067 | [
"Apache-2.0"
] | null | null | null | import os
from configparser import ConfigParser
def list_submodules(source_dir):
"""Looks for a .gitmodules in `source_dir` or its parents. If it finds one,
it reads it and returns a set of dirs to submodules. dirs are absolutes"""
gitmodules_path = _find_gitmodules(source_dir)
if not gitmodules_path:
return []
gitmodules_dir = os.path.dirname(gitmodules_path)
cfg = ConfigParser()
cfg.read(gitmodules_path)
for section in cfg.sections():
if not section.startswith('submodule "'):
continue
path = cfg.get(section, 'path')
path = os.path.join(gitmodules_dir, path)
if path.startswith(source_dir):
# Only yields paths inside source_dir
yield path
| 29.611111 | 79 | 0.675422 | import os
from configparser import ConfigParser
def _find_gitmodules(source_dir):
path = os.path.join(source_dir, '.gitmodules')
if os.path.exists(path):
return path
parent_dir = os.path.normpath(os.path.join(source_dir, os.path.pardir))
if source_dir == parent_dir:
return None
return _find_gitmodules(parent_dir)
def list_submodules(source_dir):
"""Looks for a .gitmodules in `source_dir` or its parents. If it finds one,
it reads it and returns a set of dirs to submodules. dirs are absolutes"""
gitmodules_path = _find_gitmodules(source_dir)
if not gitmodules_path:
return []
gitmodules_dir = os.path.dirname(gitmodules_path)
cfg = ConfigParser()
cfg.read(gitmodules_path)
for section in cfg.sections():
if not section.startswith('submodule "'):
continue
path = cfg.get(section, 'path')
path = os.path.join(gitmodules_dir, path)
if path.startswith(source_dir):
# Only yields paths inside source_dir
yield path
| 281 | 0 | 23 |
52a31f986b71f2a702e0fd6dd7a88bdeba486e9c | 4,448 | py | Python | services/api/tests/integration/keystone_authenticator_tests.py | ohsu-computational-biology/dms-aa | 4aabae8b5ada539fa010a79970093c93fbbddb01 | [
"MIT"
] | null | null | null | services/api/tests/integration/keystone_authenticator_tests.py | ohsu-computational-biology/dms-aa | 4aabae8b5ada539fa010a79970093c93fbbddb01 | [
"MIT"
] | 10 | 2016-12-07T01:37:41.000Z | 2017-01-20T22:20:52.000Z | services/api/tests/integration/keystone_authenticator_tests.py | ohsu-computational-biology/euler | 4aabae8b5ada539fa010a79970093c93fbbddb01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Test authenticator endpoints, depends on keystone
"""
from keystone_authenticator import BearerAuth
from json import dumps
import os
def test_login(client, app):
"""
should respond with ok and user
"""
_development_login(client, app)
def test_logout(client):
"""
should respond with ok and user
"""
r = client.post('/v0/logout')
assert r
def test_bad_login(client, app):
"""
should respond with ok and user
"""
r = client.post('/api/v1/ohsulogin',
headers={'content-type': 'application/json'},
data=dumps({'user': 'FOO', 'password': 'password'}))
assert r.status_code == 401
| 28.33121 | 76 | 0.653103 | #!/usr/bin/env python
"""
Test authenticator endpoints, depends on keystone
"""
from keystone_authenticator import BearerAuth
from json import dumps
import os
def test_encode_decode_token():
    """A profile dict must survive a make_token/parse_token round trip."""
    payload = {'foo': 'bar'}
    auth = BearerAuth()
    encoded = auth.make_token(payload)
    assert auth.parse_token(encoded) == payload
def test_authenticate(client):
    """Authenticating with the OS_* env credentials must yield a token whose
    parsed profile carries identity fields and at least one scoped role."""
    auth = BearerAuth()
    token = auth.authenticate_user(
        username=os.environ.get('OS_USERNAME'),
        user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME'),
        password=os.environ.get('OS_PASSWORD')
    )
    profile = auth.parse_token(token)
    assert profile
    assert profile['name']
    assert profile['domain_name']
    assert profile['mail']
    assert profile['token']
    assert len(profile['roles']) > 0
    # Every role entry must be scoped to a project and a domain.
    for role in profile['roles']:
        assert role['role']
        assert role['scope']
        assert role['scope']['project']
        assert role['scope']['domain']
def test_login(client, app):
    """
    should respond with ok and user
    """
    _development_login(client, app)


def test_logout(client):
    """
    should respond with ok and user
    """
    r = client.post('/v0/logout')
    assert r


def _development_login(client, app):
    # NOTE(review): global_id_token is never assigned in this module as
    # shown -- presumably populated by an external fixture; confirm, as a
    # plain call would raise NameError.
    global global_id_token
    return global_id_token


def test_project_lookup(client, app):
    """_find_projects must yield one entry per scoped role in the token."""
    auth = BearerAuth()
    # Minimal parsed-token payload carrying two scoped roles.
    token = {u'mail': u'None', u'token': u'foo', u'domain_name': u'Default',
             u'roles': [
                 {u'scope': {u'project': u'admin', u'domain': u'Default'},
                  u'role': u'admin'},  # NOQA
                 {u'scope': {u'project': u'user', u'domain': u'Default'},
                  u'role': u'member'},  # NOQA
             ], u'name': u'admin'}
    assert len(auth._find_projects(token)) == 2
def test_should_retrieve_projects(client, app):
    """A development-login token must expose exactly one project."""
    auth = BearerAuth()
    # Bug fix: the original line lacked "assert", so the comparison result
    # was silently discarded and the test could never fail.
    assert len(auth.projects(_development_login(client, app))) == 1
def test_check_auth(client, app):
    """check_auth is currently a stub that accepts everything."""
    auth = BearerAuth()
    # for now, only returns true
    assert auth.check_auth(None, None, None, None)


def test_check_default_projects(client, app):
    """Anonymous requests expose no projects."""
    auth = BearerAuth()
    # for now, no public projects
    assert len(auth.projects(None, None)) == 0


def test_bad_login(client, app):
    """
    should reject invalid credentials with HTTP 401
    """
    r = client.post('/api/v1/ohsulogin',
                    headers={'content-type': 'application/json'},
                    data=dumps({'user': 'FOO', 'password': 'password'}))
    assert r.status_code == 401
def test_projects_from_token(client, app):
    """A bearer token in the Authorization header must resolve projects
    and a user via BearerAuth."""
    id_token = _development_login(client, app)
    # save current auth, and ensure test_authenticator used for this test
    old_auth = app.auth
    app.auth = BearerAuth()
    # NOTE(review): dead assignment -- immediately overwritten by
    # MockRequest() below; kept for fidelity.
    request = {'headers': []}
    request = MockRequest()
    request.headers['authorization'] = 'Bearer {}'.format(id_token)
    projects = app.auth.projects(request=request)
    user = app.auth.get_user(request=request)
    # Restore the original authenticator before asserting.
    app.auth = old_auth
    assert len(projects) > 0
    assert user


def test_projects_from_cookie(client, app):
    """An id_token cookie must resolve projects and a user as well."""
    id_token = _development_login(client, app)
    # save current auth, and ensure test_authenticator used for this test
    old_auth = app.auth
    app.auth = BearerAuth()
    request = MockRequest()
    request.cookies['id_token'] = id_token
    projects = app.auth.projects(request=request)
    user = app.auth.get_user(request=request)
    app.auth = old_auth
    assert len(projects) > 0
    assert user


def test_authenticate_with_openstack_header(client, app):
    """An OpenStack X-Auth-Token header must resolve projects, user, token."""
    # save current auth, and ensure test_authenticator used for this test
    old_auth = app.auth
    app.auth = BearerAuth()
    id_token = _development_login(client, app)
    profile = app.auth.parse_token(id_token)
    assert profile['token']
    # NOTE(review): dead assignment, immediately replaced by MockRequest().
    request = {'headers': []}
    request = MockRequest()
    request.headers['X-Auth-Token'] = profile['token']
    projects = app.auth.projects(request=request)
    user = app.auth.get_user(request=request)
    token = app.auth.token(request=request)
    app.auth = old_auth
    assert len(projects) > 0
    assert user
    assert token


def test_projects_from_unauthorized_token(client, app):
    # no auth should have no projects
    request = MockRequest()
    projects = app.auth.projects(request=request)
    assert len(projects) == 0
class MockRequest:
    """Minimal stand-in for a web request: just mutable ``headers`` and
    ``cookies`` mappings that tests populate directly."""

    def __init__(self):
        self.headers = dict()
        self.cookies = dict()
| 3,434 | -3 | 302 |
03759f7ef9485cef2fe1d36eb23d05d79baaf451 | 177 | py | Python | gtfs/apps.py | 559Labs/GTFS-Builder | 48ee90ecaa43eaf6e80a088c6a22153213118f13 | [
"Apache-2.0"
] | 2 | 2017-07-05T06:52:35.000Z | 2021-04-15T08:53:23.000Z | gtfs/apps.py | MarconiMediaGroup/GTFS-Builder | 48ee90ecaa43eaf6e80a088c6a22153213118f13 | [
"Apache-2.0"
] | null | null | null | gtfs/apps.py | MarconiMediaGroup/GTFS-Builder | 48ee90ecaa43eaf6e80a088c6a22153213118f13 | [
"Apache-2.0"
] | 2 | 2017-07-05T06:52:39.000Z | 2018-02-11T01:54:43.000Z | #!/env/bin/python
# -*- coding: utf-8 -*-
from django.apps import AppConfig | 25.285714 | 53 | 0.683616 | #!/env/bin/python
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class GTFSConfig(AppConfig):
    """Django application configuration for the ``gtfs`` app."""
    name = 'gtfs'
    # Human-readable name shown in the Django admin.
    verbose_name = 'General Transit Feed Spec (GTFS)'
20cdadc43f7f0fbddcc40c1c7b51c46475d844b7 | 2,384 | py | Python | app/ecommerce/admin.py | nguyenanhtuan21/ecommerce-django | df29b31073b8e24ae9fbb294ca0c3ba8c81c09ae | [
"MIT"
] | 1 | 2019-12-16T16:45:11.000Z | 2019-12-16T16:45:11.000Z | app/ecommerce/admin.py | nguyenanhtuan21/ecommerce-django | df29b31073b8e24ae9fbb294ca0c3ba8c81c09ae | [
"MIT"
] | 3 | 2021-06-04T22:05:06.000Z | 2021-09-22T18:05:05.000Z | app/ecommerce/admin.py | nguyenanhtuan21/ecommerce-django | df29b31073b8e24ae9fbb294ca0c3ba8c81c09ae | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Brand, Product, Profile, Instagram, OrderItem,Order
from django.utils.html import format_html
# Register your models here.
make_published.short_description = "Mark selected stories as published"
admin.site.register(Brand,BrandAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Instagram,InstaAdmin)
admin.site.register(OrderItem,OrderItemAdmin)
admin.site.register(Order,OrderAdmin)
| 40.40678 | 149 | 0.707215 | from django.contrib import admin
from .models import Brand, Product, Profile, Instagram, OrderItem,Order
from django.utils.html import format_html
# Register your models here.
def make_published(modeladmin, request, queryset):
    # NOTE(review): despite its name and description, this bulk action
    # renames every selected record to 'Váy' -- confirm intentional.
    queryset.update(name='Váy')
# Label shown in the admin "Actions" dropdown.
make_published.short_description = "Mark selected stories as published"
class BrandAdmin(admin.ModelAdmin):
    """Admin options for Brand records."""
    list_display = ['id','name','descriptions','address','phone','created_at','updated_at']
    list_filter = ['id','name','created_at','updated_at']
    search_fields = ['name']
    actions = [make_published]


class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product records."""
    list_display = ['id','name','category','brand','price','size','color','quantity_in_stock','introduction','description','created_at','updated_at']
    list_filter = ['id','created_at','updated_at']
    search_fields = ['name']
    actions = [make_published]


class ProfileAdmin(admin.ModelAdmin):
    """Admin options for user Profile records, with an image preview helper."""

    def image_tag(self, obj):
        # Render the stored image as an inline <img> preview.
        return format_html('<img src="{}" width="150px" height="150px" />'.format(obj.image.url))
    # Column header for the preview.
    # NOTE(review): 'image_tag' is not listed in list_display below, so the
    # preview column never appears -- confirm whether that is intended.
    image_tag.short_description = 'Image'
    list_display = ['id','user','image','address','birthday','created_at','updated_at']
    search_fields = ['user']
    list_filter = ['id','user','created_at','updated_at']
    actions = [make_published]


class InstaAdmin(admin.ModelAdmin):
    """Admin options for Instagram records, showing an inline image preview."""

    def image_tag(self, obj):
        # Render the stored image as an inline <img> preview.
        return format_html('<img src="{}" width="150px" height="150px" />'.format(obj.image.url))
    image_tag.short_description = 'Image'
    list_display = ['id','name','image','image_tag']
    list_filter = ['id','name','created_at','updated_at']
    search_fields = ['name']
    actions = [make_published]


class OrderItemAdmin(admin.ModelAdmin):
    """Admin options for OrderItem records."""
    list_display = ['user','is_order','product','quantity','created_at','updated_at']
    list_filter = ['id','user','created_at','updated_at']
    search_fields = ['user']
    actions = [make_published]


class OrderAdmin(admin.ModelAdmin):
    """Admin options for Order records."""
    list_display = ['user','created_at','updated_at']
    list_filter = ['user','created_at','updated_at']
    search_fields = ['user']
    actions = [make_published]


# Register each model with its customized ModelAdmin.
admin.site.register(Brand,BrandAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Instagram,InstaAdmin)
admin.site.register(OrderItem,OrderItemAdmin)
admin.site.register(Order,OrderAdmin)
| 266 | 1,447 | 162 |
b9e502d8b076c558257d6bd137f2d54d711a28a6 | 7,084 | py | Python | server/main/components/base_config_components.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | server/main/components/base_config_components.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | server/main/components/base_config_components.py | INRIM/forms-theme-italia | 45415a16e32c8c93ee8d234262149ed0635cf212 | [
"MIT"
] | null | null | null | # 2020 Alessio Gerace @ Inrim
import os
import importlib
import importlib.util
import json
from datetime import datetime
import logging
import base64
from typing import Union
import ipaddress as ipaddr
import requests
import re
logger = logging.getLogger()
alert_base = {
"succcess": {
"alert_type": "success",
"message": "Dati aggiornati con successo",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True
},
"error": {
"alert_type": "danger",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
"warning": {
"alert_type": "warning",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
}
chips_base = {
"base": {
"alert_type": "primary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"secondary": {
"alert_type": "secondary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"success": {
"alert_type": "success",
"label": "Ok",
"icon": "it-check-circle"
},
"error": {
"alert_type": "danger",
"label": "Attenzione mancano tutti i dati",
"icon": "it-error"
},
"warning": {
"alert_type": "warning",
"label": "Attenzione mancano alcuni dati",
"icon": "it-warning-circle"
},
}
button = {
"submit": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"link": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"button": {
"name": "",
"type": "button",
"btn_class": "False",
"link": ""
}
}
formio_map = {
"textarea": "form_text_area.html",
"address": "",
"component": "",
"componentmodal": "",
"button": "form_button.html",
"checkbox": "form_toggle.html",
"columns": "form_row.html",
"column": "form_col.html",
"container": "block_container.html",
"content": "",
"currency": "",
"datagrid": "datagrid/datagrid.html",
"datagridRow": "datagrid/datagrid_row.html",
"datamap": "",
"datetime": "form_date_time.html",
"day": "",
"editgrid": "",
"email": "form_input.html",
"input": "form_input.html",
"field": "",
"multivalue": "",
"fieldset": "",
"file": "form_upload_file.html",
"form": "page_form/form.html",
"hidden": "",
"htmlelement": "",
"nested": "",
"nesteddata": "",
"nestedarray": "",
"number": "form_number_input.html",
"panel": "block_card_components.html",
"password": "form_password_input.html",
"phoneNumber": "form_input.html",
"radio": "form_radio_container.html",
"recaptcha": "",
"resource": "form_select_search.html",
"select": "form_select_search.html",
"selectboxes": "form_select_multi.html",
"signature": "",
"survey": "survey/survey.html",
"surveyRow": "survey/survey_row.html",
"table": "table.html",
"tabs": "",
"tags": "",
"textfield": "form_input.html",
"time": "",
"tree": "",
"unknown": "UnknownComponent",
"url": "text_link.html",
"well": "",
"info": "info_readonly_block.html",
}
form_io_default_map = {
"key": "key",
"description": "desc",
"customClass": "customClass",
"label": "label",
"title": "label",
"action": "type",
"placeholder": "placeholder",
"data": {"values": "options"},
"defaultValue": "value",
"disabled": "disabled",
"values": "rows",
"validate": {"required": "required"},
"propery": {"onchange": "onchange"},
}
def check_ip_local(ip) -> bool:
"""
check if ip is in rage of setting key APP_SETTINGS - > INTERNAL_IP_NET
ipv4 or ipv6 ready
:param ip:
:return: bool
"""
settings = from_object(os.getenv("APP_SETTINGS"))
if settings.get('INTERNAL_IP_NET') and ip:
# print("IIIIII", ip, ipaddr.ip_address(ip))
if type(ipaddr.ip_address(ip)) is ipaddr.IPv4Address:
res = ipaddr.IPv4Address(ip) in ipaddr.IPv4Network(settings['INTERNAL_IP_NET'])
else:
res = ipaddr.IPv6Address(ip) in ipaddr.IPv6Network(settings['INTERNAL_IP_NET'])
# print(res)
return res
else:
return False
| 24.85614 | 91 | 0.568323 | # 2020 Alessio Gerace @ Inrim
import os
import importlib
import importlib.util
import json
from datetime import datetime
import logging
import base64
from typing import Union
import ipaddress as ipaddr
import requests
import re
logger = logging.getLogger()
alert_base = {
"succcess": {
"alert_type": "success",
"message": "Dati aggiornati con successo",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True
},
"error": {
"alert_type": "danger",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
"warning": {
"alert_type": "warning",
"message": "Errore aggiornamento dati",
"add_class": " mx-auto col-6 ",
"hide_close_btn": True,
},
}
chips_base = {
"base": {
"alert_type": "primary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"secondary": {
"alert_type": "secondary",
"label": "Selezionare",
"icon": "it-info-circle"
},
"success": {
"alert_type": "success",
"label": "Ok",
"icon": "it-check-circle"
},
"error": {
"alert_type": "danger",
"label": "Attenzione mancano tutti i dati",
"icon": "it-error"
},
"warning": {
"alert_type": "warning",
"label": "Attenzione mancano alcuni dati",
"icon": "it-warning-circle"
},
}
button = {
"submit": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"link": {
"name": "",
"type": "submit",
"btn_class": False,
"link": ""
},
"button": {
"name": "",
"type": "button",
"btn_class": "False",
"link": ""
}
}
formio_map = {
"textarea": "form_text_area.html",
"address": "",
"component": "",
"componentmodal": "",
"button": "form_button.html",
"checkbox": "form_toggle.html",
"columns": "form_row.html",
"column": "form_col.html",
"container": "block_container.html",
"content": "",
"currency": "",
"datagrid": "datagrid/datagrid.html",
"datagridRow": "datagrid/datagrid_row.html",
"datamap": "",
"datetime": "form_date_time.html",
"day": "",
"editgrid": "",
"email": "form_input.html",
"input": "form_input.html",
"field": "",
"multivalue": "",
"fieldset": "",
"file": "form_upload_file.html",
"form": "page_form/form.html",
"hidden": "",
"htmlelement": "",
"nested": "",
"nesteddata": "",
"nestedarray": "",
"number": "form_number_input.html",
"panel": "block_card_components.html",
"password": "form_password_input.html",
"phoneNumber": "form_input.html",
"radio": "form_radio_container.html",
"recaptcha": "",
"resource": "form_select_search.html",
"select": "form_select_search.html",
"selectboxes": "form_select_multi.html",
"signature": "",
"survey": "survey/survey.html",
"surveyRow": "survey/survey_row.html",
"table": "table.html",
"tabs": "",
"tags": "",
"textfield": "form_input.html",
"time": "",
"tree": "",
"unknown": "UnknownComponent",
"url": "text_link.html",
"well": "",
"info": "info_readonly_block.html",
}
form_io_default_map = {
"key": "key",
"description": "desc",
"customClass": "customClass",
"label": "label",
"title": "label",
"action": "type",
"placeholder": "placeholder",
"data": {"values": "options"},
"defaultValue": "value",
"disabled": "disabled",
"values": "rows",
"validate": {"required": "required"},
"propery": {"onchange": "onchange"},
}
def from_object(instance: Union[object, str]) -> dict:
    """Collect the UPPERCASE attributes of an object or importable target.

    :param instance: either an object whose upper-case attributes are read,
        or a dotted string such as ``"package.module"`` (a module) or
        ``"package.module.Config"`` (an attribute of a module).
    :return: dict mapping upper-case attribute names to their values.
    """
    # fix: the return annotation was the literal ``{}`` (an empty dict
    # expression), which is meaningless as a type -- it should be ``dict``
    data = {}
    if isinstance(instance, str):
        try:
            path, config = instance.rsplit(".", 1)
        except ValueError:
            # no dot at all: the whole string is a module path
            path = instance
            instance = importlib.import_module(path)
        else:
            # "pkg.mod.Config" -> attribute ``Config`` of module ``pkg.mod``
            module = importlib.import_module(path)
            instance = getattr(module, config)
    for key in dir(instance):
        if key.isupper():
            data[key] = getattr(instance, key)
    return data
def check_ip_local(ip) -> bool:
    """
    Tell whether *ip* falls inside the internal network range.

    The range is read from the INTERNAL_IP_NET key of the settings object
    named by the APP_SETTINGS environment variable; both IPv4 and IPv6
    addresses are handled.
    :param ip:
    :return: bool
    """
    settings = from_object(os.getenv("APP_SETTINGS"))
    internal_net = settings.get('INTERNAL_IP_NET')
    if not internal_net or not ip:
        return False
    if type(ipaddr.ip_address(ip)) is ipaddr.IPv4Address:
        return ipaddr.IPv4Address(ip) in ipaddr.IPv4Network(internal_net)
    return ipaddr.IPv6Address(ip) in ipaddr.IPv6Network(internal_net)
def allowed_file(filename, ALLOWED_EXTENSIONS=('pdf',)):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive; a name without a dot is rejected.
    """
    # fix: the default was a mutable list -- shared between calls and
    # mutable by any caller holding a reference; a tuple is safe.
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def string_to_number(str_data):
    """Best-effort conversion of a string to int or float.

    "1.5" -> 1.5, "42" -> 42; anything unparseable (including negative
    integers, which str.isdigit rejects) is returned unchanged as a string.
    """
    if "." in str_data:
        try:
            return float(str_data)
        except ValueError:
            # fix: the bare ``except:`` also swallowed KeyboardInterrupt /
            # SystemExit; only a parse failure should fall back to the string
            return str_data
    if str_data.isdigit():
        return int(str_data)
    return str_data
def get_remote_avatar(url, key):
    """Fetch an avatar from *url*, authenticating via the ``x-key`` header,
    and return the response body decoded as UTF-8 text."""
    response = requests.get(url, headers={"x-key": key})
    return response.content.decode("utf-8")
def extract_mac_address(text):
    """Return every MAC address (six colon-separated hex pairs) found in *text*."""
    # non-capturing groups make re.findall return the full match directly,
    # instead of the tuples that previously had to be unpacked with map()
    pattern = r'(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}'
    return re.findall(pattern, text)
def get_default_error_alert_cfg():
    """Return the default configuration for an error alert (from alert_base)."""
    return alert_base['error']
def get_default_success_alert_cfg():
    """Return the default configuration for a success alert (from alert_base).

    NOTE(review): the key 'succcess' (triple 'c') looks like a typo, but it
    must match the key actually used in the alert_base dict defined above --
    confirm both sides before renaming either.
    """
    return alert_base['succcess']
def get_default_warning_alert_cfg():
    """Return the default configuration for a warning alert (from alert_base)."""
    return alert_base['warning']
def get_form_alert(values):
    """Merge *values* into a default alert configuration.

    The base config is selected by whichever of the ``success`` / ``error`` /
    ``warning`` flags is truthy in *values* (when several are set the last
    one checked -- warning -- wins); entries from *values* override the
    defaults.
    """
    # fix: ``kkwargs`` was previously unbound (NameError) when none of the
    # three flags was set; fall back to an empty base config instead
    kkwargs = {}
    if values.get("success"):
        kkwargs = get_default_success_alert_cfg()
    if values.get("error"):
        kkwargs = get_default_error_alert_cfg()
    if values.get("warning"):
        kkwargs = get_default_warning_alert_cfg()
    kwargs_def = {**kkwargs, **values}
    return kwargs_def
def get_update_alert_error(selector, message, cls=""):
    """Build an update payload that shows an error alert at *selector*.

    :param selector: element id/class; a bare name is prefixed with ``#``
        (the ``name`` stored in the alert config keeps the unprefixed value)
    :param message: alert text
    :param cls: optional CSS classes overriding the default margin classes
    :return: dict with ``value`` (alert config) and ``selector`` keys
    """
    to_update = {}
    cfg = {
        "error": True,
        "message": message,
        "cls": " mx-auto mt-lg-n3 ",
        "name": selector
    }
    # idiom fix: ``'#' not in selector`` instead of ``not '#' in selector``
    if '#' not in selector and '.' not in selector:
        selector = "#" + selector
    if cls:
        cfg['cls'] = cls
    to_update["value"] = get_form_alert(cfg)
    to_update["selector"] = selector
    return to_update
def get_update_alert_warning(selector, message, cls=""):
    """Build an update payload that shows a warning alert at *selector*.

    :param selector: element id/class; a bare name is prefixed with ``#``
        (the ``name`` stored in the alert config keeps the unprefixed value)
    :param message: alert text
    :param cls: optional CSS classes overriding the default margin classes
    :return: dict with ``value`` (alert config) and ``selector`` keys
    """
    to_update = {}
    cfg = {
        "warning": True,
        "message": message,
        "cls": " mx-auto mt-n5 ",
        "name": selector
    }
    # idiom fix: ``'#' not in selector`` instead of ``not '#' in selector``
    if '#' not in selector and '.' not in selector:
        selector = "#" + selector
    if cls:
        cfg['cls'] = cls
    to_update["value"] = get_form_alert(cfg)
    to_update["selector"] = selector
    return to_update
71fd6f825b0e853244a11e601de1ff17cf9411b9 | 304 | py | Python | Python 3.8/1159 - Soma de Pares Consecutivos.py | JhonatanGuilherme/BeeCrowd | e039f8128399697ad9eb75f48047b83eb7b0201e | [
"MIT"
] | null | null | null | Python 3.8/1159 - Soma de Pares Consecutivos.py | JhonatanGuilherme/BeeCrowd | e039f8128399697ad9eb75f48047b83eb7b0201e | [
"MIT"
] | null | null | null | Python 3.8/1159 - Soma de Pares Consecutivos.py | JhonatanGuilherme/BeeCrowd | e039f8128399697ad9eb75f48047b83eb7b0201e | [
"MIT"
] | null | null | null | X = 1
# beecrowd 1159 - "Soma de Pares Consecutivos" (sum of consecutive evens).
# For each input X (input terminated by 0), print the sum of the five
# consecutive even numbers starting at X (or at X + 1 when X is odd).
CONTADOR = 0
CONTADOR2 = 0
while True:
    X = int(input())
    CONTADOR = 0   # running sum of the even values found
    CONTADOR2 = 0  # how many even values have been summed so far
    if X == 0:
        break
    while CONTADOR2 != 5:
        if X % 2 == 0:
            CONTADOR += X
            CONTADOR2 += 1
            X += 1
        else:
            X += 1
    print(CONTADOR)
| 16.888889 | 26 | 0.417763 | X = 1
CONTADOR = 0
CONTADOR2 = 0
while True:
X = int(input())
CONTADOR = 0
CONTADOR2 = 0
if X == 0:
break
while CONTADOR2 != 5:
if X % 2 == 0:
CONTADOR += X
CONTADOR2 += 1
X += 1
else:
X += 1
print(CONTADOR)
| 0 | 0 | 0 |
d4dd2c22dd4eddd3b4f5117bb1b34652e7415566 | 900 | py | Python | setup.py | Cmiroslaf/protoc-gen-stub | 3e3937ae0420c359437f8afdf0a1a9224a981675 | [
"MIT"
] | 2 | 2019-02-18T00:37:00.000Z | 2019-03-06T13:10:06.000Z | setup.py | Cmiroslaf/protoc-gen-stub | 3e3937ae0420c359437f8afdf0a1a9224a981675 | [
"MIT"
] | null | null | null | setup.py | Cmiroslaf/protoc-gen-stub | 3e3937ae0420c359437f8afdf0a1a9224a981675 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging manifest for the protoc stub-generator plugins.
setup(
    name="ProtoC Python Typing generator plugin",
    version="0.2",
    install_requires=['protobuf'],
    scripts=['protoc-gen-python_grpc_typings', 'protoc-gen-python_typings'],
    packages=['stubs_generator'],
    # metadata for upload to PyPI
    author="Miroslav Cibulka",
    author_email="miroslav.cibulka@flowup.cz",
    description="ProtoC code generator plugin",
    license="MIT",
    keywords="proto3 typing python library script",
    url="https://github.com/Cmiroslaf/protoc-gen-python-typings",  # project home page, if any
    project_urls={
        "Bug Tracker": "https://github.com/Cmiroslaf/protoc-gen-python-typings/issues",
        "Documentation": "https://docs.example.com/HelloWorld/",
        "Source Code": "https://code.example.com/HelloWorld/",
    }
    # could also include long_description, download_url, classifiers, etc.
)
| 33.333333 | 94 | 0.694444 | from setuptools import setup
setup(
name="ProtoC Python Typing generator plugin",
version="0.2",
install_requires=['protobuf'],
scripts=['protoc-gen-python_grpc_typings', 'protoc-gen-python_typings'],
packages=['stubs_generator'],
# metadata for upload to PyPI
author="Miroslav Cibulka",
author_email="miroslav.cibulka@flowup.cz",
description="ProtoC code generator plugin",
license="MIT",
keywords="proto3 typing python library script",
url="https://github.com/Cmiroslaf/protoc-gen-python-typings", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/Cmiroslaf/protoc-gen-python-typings/issues",
"Documentation": "https://docs.example.com/HelloWorld/",
"Source Code": "https://code.example.com/HelloWorld/",
}
# could also include long_description, download_url, classifiers, etc.
)
| 0 | 0 | 0 |
c74ce96c0653aea7fa697eeba2df372f73853100 | 8,831 | py | Python | applications/incompressible_fluid_application/python_scripts/xivas_solver.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 2 | 2019-10-25T09:28:10.000Z | 2019-11-21T12:51:46.000Z | applications/incompressible_fluid_application/python_scripts/xivas_solver.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 13 | 2019-10-07T12:06:51.000Z | 2020-02-18T08:48:33.000Z | applications/incompressible_fluid_application/python_scripts/xivas_solver.py | lcirrott/Kratos | 8406e73e0ad214c4f89df4e75e9b29d0eb4a47ea | [
"BSD-4-Clause"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# -*- coding: utf-8 -*-
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
import edgebased_eulerian_solver
import math
| 35.898374 | 179 | 0.672857 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# -*- coding: utf-8 -*-
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
import edgebased_eulerian_solver
def AddVariables(fluid_model_part, particle_model_part):
    """Register the nodal solution-step variables required by the X-IVAS
    scheme on both the Eulerian fluid mesh and the Lagrangian particles."""
    edgebased_eulerian_solver.AddVariables(fluid_model_part)
    edgebased_eulerian_solver.AddVariables(particle_model_part)
    fluid_model_part.AddNodalSolutionStepVariable(DISPLACEMENT)
    fluid_model_part.AddNodalSolutionStepVariable(VISCOSITY)
    fluid_model_part.AddNodalSolutionStepVariable(DENSITY)
    fluid_model_part.AddNodalSolutionStepVariable(NODAL_H)
    fluid_model_part.AddNodalSolutionStepVariable(NODAL_AREA)
    fluid_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
    fluid_model_part.AddNodalSolutionStepVariable(FORCE)
    fluid_model_part.AddNodalSolutionStepVariable(FLAG_VARIABLE)
    particle_model_part.AddNodalSolutionStepVariable(DISPLACEMENT)
    particle_model_part.AddNodalSolutionStepVariable(NODAL_H)
    particle_model_part.AddNodalSolutionStepVariable(TEMPERATURE)
    particle_model_part.AddNodalSolutionStepVariable(FORCE)
def AddDofs(fluid_model_part, particle_model_part):
    """Add degrees of freedom on the Eulerian fluid part only (the particle
    part is deliberately skipped -- see the commented-out line)."""
    edgebased_eulerian_solver.AddDofs(fluid_model_part)
    # edgebased_eulerian_solver.AddDofs(particle_model_part)
    print("dofs for the xivas solver added correctly")
def ReadRestartFile(FileName, nodes):
    """Replay a restart file by exec-ing it line by line with NODES in scope.

    WARNING: exec() runs arbitrary Python from the restart file -- only use
    with trusted files.  Also note the file handle is never closed here.
    """
    NODES = nodes
    aaa = open(FileName)
    for line in aaa:
        exec(line)
import math
class XIVASSolver:
    """Hybrid Eulerian/Lagrangian (X-IVAS) incompressible flow solver.

    Couples an edge-based Eulerian fluid solver with Lagrangian particles:
    particles are streamed along the velocity field, their state is
    transferred back to the Eulerian mesh, a pressure step is solved on the
    mesh, and the particles are cheaply corrected afterwards.
    """

    def __init__(self, fluid_model_part, particle_model_part,
                 domain_size, body_force, viscosity, density):
        """Store the settings, create helper utilities and assign nodal H."""
        self.fluid_model_part = fluid_model_part
        self.particle_model_part = particle_model_part
        self.domain_size = domain_size
        # neighbour search
        number_of_avg_elems = 10
        number_of_avg_nodes = 10
        self.neighbour_search = FindNodalNeighboursProcess(
            self.fluid_model_part,
            number_of_avg_elems,
            number_of_avg_nodes)
        # assignation of parameters to be used
        self.assume_constant_pressure = False
        self.stabdt_pressure_factor = 1.0
        self.use_mass_correction = False
        self.echo_level = 0
        # definition of the solvers
        pDiagPrecond = DiagonalPreconditioner()
        self.pressure_linear_solver = BICGSTABSolver(1e-3, 5000, pDiagPrecond)
        self.compute_reactions = True
        self.particle_utils = LagrangianUtils2D()
        self.node_locator = BinBasedFastPointLocator2D(self.fluid_model_part)
        self.body_force = body_force
        self.bf = Array3()
        self.bf[0] = body_force[0]
        self.bf[1] = body_force[1]
        self.bf[2] = body_force[2]
        self.viscosity = viscosity
        self.density = density
        self.substeps = 2.0
        self.restart_with_eulerian_vel = False
        self.max_particles_in_element = 7  # 7
        self.min_particles_in_element = 3  # 1
        self.perform_cheap_correction_step = True
        # self.implicit_viscous_correction = True
        # assign nodal H (h = sqrt(2*area) of the nodal area)
        aux = CalculateNodalAreaProcess(fluid_model_part, domain_size)
        aux.Execute()
        for node in fluid_model_part.Nodes:
            a = node.GetSolutionStepValue(NODAL_AREA)
            h = math.sqrt(2.0 * a)
            if(h == 0):
                print("node ", node.Id, " has zero h")
                # NOTE(review): raising a string is a TypeError in Python 3;
                # this should probably be ``raise Exception(...)``.
                raise "error, node found with 0 h"
            node.SetSolutionStepValue(NODAL_H, 0, h)

    def Initialize(self):
        """Create and configure the Eulerian sub-solver, size the point
        locator search database and seed the initial particles."""
        (self.neighbour_search).Execute()
        self.domain_size = int(self.domain_size)
        self.fluid_solver = edgebased_eulerian_solver.EdgeBasedLevelSetSolver(
            self.fluid_model_part,
            self.domain_size,
            self.body_force,
            self.viscosity,
            self.density)
        self.fluid_solver.assume_constant_pressure = True
        self.fluid_solver.stabdt_pressure_factor = 0.01
        self.fluid_solver.use_mass_correction = False
        self.fluid_solver.pressure_linear_solver = self.pressure_linear_solver
        self.fluid_solver.Initialize()
        #(self.fluid_solver).SetEchoLevel(self.echo_level)
        hmin = (self.fluid_solver.fluid_solver).ComputeMinimum_Havg()
        print("minimum nodal havg found on the mesh = ", hmin)
        # self.node_locator.UpdateSearchDatabase()
        self.node_locator.UpdateSearchDatabaseAssignedSize(hmin)
        self.particle_utils.Reseed(
            self.fluid_model_part,
            self.particle_model_part)
        print("finished initialization of the xivas solver")

    def EstimateTimeStep(self, safety_factor, max_Dt):
        """Delegate the CFL-based time-step estimate to the Eulerian solver."""
        return self.fluid_solver.EstimateTimeStep(safety_factor, max_Dt)

    def Solve(self):
        """Advance one time step: move particles, project onto the mesh,
        solve pressure, correct particles and reseed empty elements."""
        Dt = self.particle_model_part.ProcessInfo[DELTA_TIME]
        Dt_check = self.fluid_model_part.ProcessInfo[DELTA_TIME]
        if(Dt != Dt_check):
            # NOTE(review): raising a string is a TypeError in Python 3
            raise "error, time step for particle_model_part is not appropriately cloned (not syncronized with fluid_model_part)"
        (self.fluid_solver.fluid_solver).ComputeViscousForces()
        print("ccc")
        # self.particle_utils.StreamlineMove(self.bf,self.density,Dt,self.substeps,self.fluid_model_part,self.particle_model_part,self.restart_with_eulerian_vel,self.node_locator)
        self.particle_utils.StreamlineMove(
            self.bf,
            self.density,
            Dt,
            self.fluid_model_part,
            self.particle_model_part,
            self.restart_with_eulerian_vel,
            self.node_locator)
        self.particle_utils.TransferToEulerianMeshShapeBased(
            self.fluid_model_part,
            self.particle_model_part,
            self.node_locator)
        # if(self.implicit_viscous_correction == True):
        #(self.fluid_solver.fluid_solver).ViscosityCorrectionStep()
        (self.fluid_solver.fluid_solver).ComputePressureStabilization()
        (self.fluid_solver.fluid_solver).SolveStep2(
            self.pressure_linear_solver)
        (self.fluid_solver.fluid_solver).SolveStep3()
        (self.fluid_solver.fluid_solver).ComputeViscousForces()
        if(self.perform_cheap_correction_step):
            self.particle_utils.StreamlineCorrect(
                self.density,
                Dt,
                self.fluid_model_part,
                self.particle_model_part,
                self.node_locator)
        else:
            # full re-move of the particles instead of the cheap correction
            self.particle_utils.StreamlineMove(
                self.bf,
                self.density,
                Dt,
                self.substeps,
                self.fluid_model_part,
                self.particle_model_part,
                self.restart_with_eulerian_vel,
                self.node_locator)
        self.particle_utils.ReseedEmptyElements(
            self.fluid_model_part,
            self.particle_model_part,
            self.node_locator,
            self.min_particles_in_element,
            self.max_particles_in_element)
        if(self.compute_reactions):
            exclude_convection_terms = True
            (self.fluid_solver.fluid_solver).ComputeReactions(
                exclude_convection_terms)

    def Clear(self):
        """Free internal data held by the Eulerian sub-solver."""
        (self.fluid_solver.fluid_solver).Clear()

    def WriteRestartFile(self, FileName):
        """Dump nodes, elements and nodal fields into ``FileName + '.mdpa'``.

        NOTE(review): this method reads ``self.model_part``, but the class
        only ever defines ``self.fluid_model_part`` -- this looks like a
        copy-paste bug that would raise AttributeError; confirm before use.
        """
        restart_file = open(FileName + ".mdpa", 'w')
        import new_restart_utilities
        new_restart_utilities.PrintProperties(restart_file)
        new_restart_utilities.PrintNodes(self.model_part.Nodes, restart_file)
        new_restart_utilities.PrintElements(
            "Fluid3D",
            self.model_part.Elements,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_X,
            "VELOCITY_X",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_Y,
            "VELOCITY_Y",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VELOCITY_Z,
            "VELOCITY_Z",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            PRESSURE,
            "PRESSURE",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            VISCOSITY,
            "VISCOSITY",
            self.model_part.Nodes,
            restart_file)
        new_restart_utilities.PrintRestart_ScalarVariable(
            DENSITY,
            "DENSITY",
            self.model_part.Nodes,
            restart_file)
        restart_file.close()
| 8,243 | -3 | 254 |
ed8c4eb7ebc03d9f971b6d8c95f021560300ca10 | 1,654 | py | Python | assignments/flask-adopt/forms.py | demohack/nonpub | e53bcec7115bcb2605687bb272d843e1e8d17ae5 | [
"MIT"
] | null | null | null | assignments/flask-adopt/forms.py | demohack/nonpub | e53bcec7115bcb2605687bb272d843e1e8d17ae5 | [
"MIT"
] | 17 | 2021-03-24T14:59:50.000Z | 2022-03-05T23:52:31.000Z | assignments/flask-adopt/forms.py | demohack/nonpub | e53bcec7115bcb2605687bb272d843e1e8d17ae5 | [
"MIT"
] | null | null | null | """Forms for our demo Flask app."""
from flask_wtf import FlaskForm
from wtforms import StringField, FloatField, DateField, IntegerField
from wtforms.validators import InputRequired, Optional, Email, URL, ValidationError
# https://wtforms.readthedocs.io/en/3.0.x/validators/#built-in-validators
class AddPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
class EditPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
adopted_at = DateField("Adopted at", validators=[Optional()])
| 33.755102 | 83 | 0.673519 | """Forms for our demo Flask app."""
from flask_wtf import FlaskForm
from wtforms import StringField, FloatField, DateField, IntegerField
from wtforms.validators import InputRequired, Optional, Email, URL, ValidationError
def valid_species():
message = 'Must be a cat, dog, or porcupine.'
def _species(form, field):
if str(field.data).lower() not in ["cat", "dog", "porcupine"]:
raise ValidationError(message)
return _species
def valid_age(min=0, max=30):
message = 'Must be between %d and %d years old.' % (min, max)
def _age(form, field):
l = field.data or 0
if l < min or l > max:
raise ValidationError(message)
return _age
# https://wtforms.readthedocs.io/en/3.0.x/validators/#built-in-validators
class AddPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
class EditPetForm(FlaskForm):
"""Form for adding pet."""
name = StringField("Name", validators=[InputRequired()])
species = StringField("Species", validators=[InputRequired(), valid_species()])
photo = StringField("Photo", validators=[Optional(), URL()])
age = IntegerField("Age", validators=[Optional(), valid_age(0,30)])
notes = StringField("Notes", validators=[Optional()])
adopted_at = DateField("Adopted at", validators=[Optional()])
| 437 | 0 | 46 |
b6967ce933687a0a88fe4170f3865b2a931b9eb9 | 17,196 | py | Python | subt/artf_node.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | subt/artf_node.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | subt/artf_node.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | """
OSGAR ArtifactDetectorDNN wrapper for DNN detector
"""
import os.path
from io import StringIO
import cv2
import numpy as np
from subt.tf_detector import CvDetector
try:
import torch
import subt.artf_model
from subt.artf_detector import Detector
except ImportError:
print('\nWarning: missing torch!\n')
from osgar.node import Node
from osgar.bus import BusShutdownException
from osgar.lib.depth import decompress as decompress_depth
from osgar.lib.quaternion import rotate_vector, rotation_matrix, transform
from subt.artf_utils import NAME2IGN
def result2report(result, depth, fx, robot_pose, camera_pose, max_depth):
"""return relative XYZ distances to camera"""
if depth is None:
return None # ignore detected artifacts for missing depth data
# typically some glitch on start
width = depth.shape[1]
height = depth.shape[0]
x_arr = [x for x, y, certainty in result[0][1]] # ignore multiple objects
y_arr = [y for x, y, certainty in result[0][1]] # ignore multiple objects
dist = [depth[y][x] for x, y, certainty in result[0][1]] # ignore multiple objects
if any(d == 0 or d > max_depth for d in dist):
return None # out of range
x_min, x_max = min(x_arr), max(x_arr)
y_min, y_max = min(y_arr), max(y_arr)
scale = np.median(dist)
# Coordinate of the artifact relative to the camera.
camera_rel = [scale, # relative X-coordinate in front
scale * (width/2 - (x_min + x_max)/2)/fx, # Y-coordinate is to the left
scale * (height/2 - (y_min + y_max)/2)/fx] # Z-up
# Coordinate of the artifact relative to the robot.
robot_rel = transform(camera_rel, camera_pose)
# Global coordinate of the artifact.
world_xyz = transform(robot_rel, robot_pose)
return [NAME2IGN[result[0][0]], world_xyz]
if __name__ == "__main__":
# run "replay" without calling detections - only XYZ offset check
import argparse
from datetime import timedelta
from osgar.lib.serialize import deserialize
from osgar.logger import LogReader, lookup_stream_id, lookup_stream_names
from ast import literal_eval
parser = argparse.ArgumentParser(description='Test 3D reports')
parser.add_argument('logfile', help='OSGAR logfile')
parser.add_argument('--time-limit-sec', '-t', help='cut time in seconds', type=float)
parser.add_argument('--verbose', '-v', help="verbose mode", action='store_true')
parser.add_argument('--module-name', '-m', help='name of the detector module in the log', default='detector')
args = parser.parse_args()
names = lookup_stream_names(args.logfile)
assert 'detector.localized_artf' in names, names # XYZ world coordinates
assert 'detector.debug_rgbd' in names, names
assert 'detector.debug_result' in names, names
assert 'detector.debug_cv_result' in names, names
artf_stream_id = names.index('detector.localized_artf') + 1
rgbd_stream_id = names.index('detector.debug_rgbd') + 1
result_id = names.index('detector.debug_result') + 1
cv_result_id = names.index('detector.debug_cv_result') + 1
# read config file from log
with LogReader(args.logfile, only_stream_id=0) as log:
print("original args:", next(log)[-1]) # old arguments
config_str = next(log)[-1]
config = literal_eval(config_str.decode('ascii'))
assert 'detector' in config['robot']['modules']
fx = config['robot']['modules'][args.module_name]['init']['fx']
max_depth = config['robot']['modules'][args.module_name]['init'].get('max_depth', 10.0)
last_artf = None # reported before debug_rgbd
last_result = None
last_cv_result = None
with LogReader(args.logfile,
only_stream_id=[artf_stream_id, rgbd_stream_id, result_id, cv_result_id]) as logreader:
for time, stream, msg_data in logreader:
if args.time_limit_sec is not None and time.total_seconds() > args.time_limit_sec:
break
data = deserialize(msg_data)
if stream == rgbd_stream_id:
robot_pose, camera_pose, __rgb, depth = data
# debug_rgbd is stored ONLY when both detectors detect something and it is fused
assert last_result is not None
assert last_cv_result is not None
checked_result = check_results(last_result, last_cv_result)
assert checked_result # the debug rgbd is stored, so there should be a valid report
report = result2report(checked_result, decompress_depth(depth),
fx, robot_pose, camera_pose, max_depth)
if args.verbose:
print(report)
assert last_artf == report, (last_artf, report)
elif stream in [result_id, cv_result_id]:
if args.verbose:
print(time, data)
if stream == result_id:
last_result = data
elif stream == cv_result_id:
last_cv_result = data
else:
assert False, stream
elif stream == artf_stream_id:
if args.verbose:
print(time, 'Original report:', data)
last_artf = data
assert last_artf is not None, time
else:
assert False, stream # unexpected stream
# vim: expandtab sw=4 ts=4
| 45.734043 | 202 | 0.598221 | """
OSGAR ArtifactDetectorDNN wrapper for DNN detector
"""
import os.path
from io import StringIO
import cv2
import numpy as np
from subt.tf_detector import CvDetector
try:
import torch
import subt.artf_model
from subt.artf_detector import Detector
except ImportError:
print('\nWarning: missing torch!\n')
from osgar.node import Node
from osgar.bus import BusShutdownException
from osgar.lib.depth import decompress as decompress_depth
from osgar.lib.quaternion import rotate_vector, rotation_matrix, transform
from subt.artf_utils import NAME2IGN
def check_borders(result, borders):
    """Keep only fused detections whose (mdnet, cv) confidence pair lies on
    or above the per-artifact confidence border (two line segments)."""
    accepted = []
    for row in result:
        name, detected_points, cv_detection = row
        ranked = sorted(detected_points, key=lambda p: p[2], reverse=True)
        mdnet_score = ranked[1][2]  # confidence of the second best point
        cv_score = cv_detection[1]
        a1, b1, a2, b2 = borders[name]
        border = min(a1 * mdnet_score + b1, a2 * mdnet_score + b2)
        if cv_score >= border:
            accepted.append(row)
    return accepted
def check_results(result_mdnet, result_cv):
    """Fuse mdnet and cv detections.

    For every cv bounding box, collect the points of same-named mdnet
    detections that have at least one point inside the box; each mdnet
    detection is consumed by at most one cv detection.
    """
    fused = []
    if not result_mdnet or not result_cv:
        return fused
    remaining = result_mdnet.copy()
    for cv_detection in result_cv:
        cv_name, cv_score, bbox = cv_detection
        x1, y1, x2, y2 = bbox
        matched_points = []
        for candidate in remaining.copy():
            name, points = candidate
            if name != cv_name:
                continue
            xs = np.array([p[0] for p in points])
            ys = np.array([p[1] for p in points])
            inside = (xs > x1) & (xs < x2) & (ys > y1) & (ys < y2)
            if inside.any():  # at least one point is in the bbox
                matched_points.extend(points)
                remaining.remove(candidate)
        if matched_points:
            fused.append((cv_name, matched_points, cv_detection))
    return fused
def as_matrix(translation, rotation):
    """Build a 4x4 homogeneous transform from a translation vector and a
    rotation (converted to a 3x3 matrix via rotation_matrix)."""
    m = np.eye(4)
    m[:3,:3] = rotation_matrix(rotation)
    m[:3,3] = translation
    return m
def result2report(result, depth, fx, robot_pose, camera_pose, max_depth):
    """return relative XYZ distances to camera"""
    # NOTE(review): despite the summary above, the returned coordinate is
    # transformed through the camera and robot poses, i.e. the report is
    # [ign_artifact_name, [x, y, z]] in *global* coordinates.
    if depth is None:
        return None  # ignore detected artifacts for missing depth data
        # typically some glitch on start
    width = depth.shape[1]
    height = depth.shape[0]
    x_arr = [x for x, y, certainty in result[0][1]]  # ignore multiple objects
    y_arr = [y for x, y, certainty in result[0][1]]  # ignore multiple objects
    dist = [depth[y][x] for x, y, certainty in result[0][1]]  # ignore multiple objects
    if any(d == 0 or d > max_depth for d in dist):
        return None  # out of range
    x_min, x_max = min(x_arr), max(x_arr)
    y_min, y_max = min(y_arr), max(y_arr)
    # robust distance estimate: median depth over all detected points
    scale = np.median(dist)
    # Coordinate of the artifact relative to the camera.
    camera_rel = [scale,  # relative X-coordinate in front
                  scale * (width/2 - (x_min + x_max)/2)/fx,  # Y-coordinate is to the left
                  scale * (height/2 - (y_min + y_max)/2)/fx]  # Z-up
    # Coordinate of the artifact relative to the robot.
    robot_rel = transform(camera_rel, camera_pose)
    # Global coordinate of the artifact.
    world_xyz = transform(robot_rel, robot_pose)
    return [NAME2IGN[result[0][0]], world_xyz]
def get_border_lines(border_points):
    """Turn three border points per artifact into line coefficients.

    Each value [A, B, C] describes two segments A-B and B-C; the result maps
    the artifact name to [slope1, intercept1, slope2, intercept2], both
    intercepts being computed at the shared middle point B.
    """
    coefficients = {}
    for name, (A, B, C) in border_points.items():
        a1 = (B[1] - A[1]) / (B[0] - A[0])  # slope of segment A-B
        a2 = (C[1] - B[1]) / (C[0] - B[0])  # slope of segment B-C
        coefficients[name] = [a1, B[1] - a1 * B[0], a2, B[1] - a2 * B[0]]
    return coefficients
def create_detector(confidence_thresholds):
    """Load the mdnet model and wrap it in a Detector instance.

    The model weights are loaded from a hard-coded path three directories
    above this file; CUDA is used when available, CPU otherwise.
    """
    model = os.path.join(os.path.dirname(__file__), '../../../mdnet6.128.128.13.4.elu.pth')
    max_gap = 16
    min_group_size = 2
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Using:', device)
    model, categories = subt.artf_model.load_model(model, device)
    return Detector(model, confidence_thresholds, categories, device,
                    max_gap, min_group_size)
class ArtifactDetectorDNN(Node):
    """OSGAR node fusing an mdnet DNN detector with an OpenCV detector.

    Listens for "rgbd" frames (localized via depth) and "camera*" frames
    (localized via optical-flow triangulation between two robot poses) and
    publishes globally localized artifact reports.
    """

    def __init__(self, config, bus):
        super().__init__(config, bus)
        bus.register("localized_artf", "dropped", "debug_rgbd", "stdout",
                     "debug_result", "debug_cv_result", "debug_camera")
        confidence_thresholds = {  # used for mdnet
            'survivor': 0.5,
            'backpack': 0.74,
            'phone': 0.5,
            'helmet': 0.5,
            'rope': 0.5,
            'fire_extinguisher': 0.5,
            'drill': 0.5,
            'vent': 0.5,
            'cube': 0.5
        }
        # Confidence borders points
        # There are three border points for each artifact, point coordinates: x - mdnet, y - cv_detector
        confidence_borders = {
            'survivor': [[0.5, 1],[0.93, 0.55],[1, 0.1]],
            'backpack': [[0.74, 1],[0.9, 0.77],[0.95, 0.2]],
            'phone': [[0.5, 0.45],[0.84, 0.41],[1, 0.1]],
            'helmet': [[0.5, 0.95],[0.85, 0.6],[1, 0.2]],
            'rope': [[0.5, 0.5],[0.9, 0.35],[1, 0.1]],
            'fire_extinguisher': [[0.5, 0.9],[0.95, 0.85],[1, 0.8]],
            'drill': [[0.5, 0.8],[0.87, 0.75],[1, 0.2]],
            'vent': [[0.5, 0.9],[0.6, 0.6],[0.7, 0.1]],
            'cube': [[0.5, 0.6],[0.8, 0.59],[1, 0.2]]
        }
        self.border_lines = get_border_lines(confidence_borders)
        self.time = None
        self.width = None  # not sure if we will need it
        self.depth = None  # more precise artifact position from depth image
        self.cv_detector = CvDetector().subt_detector
        self.detector = create_detector(confidence_thresholds)
        self.fx = config.get('fx', 554.25469)  # use drone X4 for testing (TODO update all configs!)
        self.max_depth = config.get('max_depth', 10.0)
        self.triangulation_baseline_min = config.get('triangulation_baseline_min', 0.03)
        self.triangulation_baseline_max = config.get('triangulation_baseline_max', 0.20)
        self.batch_size = config.get('batch_size', 1)  # how many images process in one step
        self.prev_camera = {}  # per-camera state of the last detection, for triangulation

    def wait_for_data(self):
        """Block until a message arrives on "rgbd" or any "camera*" channel;
        the payload is stored on self.<channel> and (time, channel) returned."""
        channel = ""
        while channel != "rgbd" and not channel.startswith("camera"):
            self.time, channel, data = self.listen()
            setattr(self, channel, data)
        return self.time, channel

    def stdout(self, *args, **kwargs):
        """print()-compatible logging both to the bus "stdout" channel and
        to the local console (prefixed by the current bus time)."""
        # maybe refactor to Node?
        output = StringIO()
        print(*args, file=output, **kwargs)
        contents = output.getvalue().strip()
        output.close()
        self.publish('stdout', contents)
        print(self.time, contents)

    def run(self):
        """Main loop: skip backlog (counting dropped frames), then run the
        detector on up to batch_size consecutive frames per iteration."""
        try:
            self.stdout(cv2.dnn.getAvailableTargets(cv2.dnn.DNN_BACKEND_CUDA))
            dropped = 0
            while True:
                now = self.publish("dropped", dropped)
                dropped = -1
                timestamp = now
                while timestamp <= now:
                    # consume frames that queued up while we were busy
                    timestamp, channel = self.wait_for_data()
                    dropped += 1
                for count in range(self.batch_size):
                    if channel == 'rgbd':
                        self.detect_from_rgbd(self.rgbd)
                    else:
                        self.detect_from_img(channel, getattr(self, channel))
                    if count + 1 < self.batch_size:
                        # process also immediately following images
                        timestamp, channel = self.wait_for_data()
        except BusShutdownException:
            pass

    def detect(self, img):
        """Run both detectors on *img* and return the border-filtered fusion
        of their results (None/empty when nothing valid was detected)."""
        if self.width is None:
            self.stdout('Image resolution', img.shape)
            self.width = img.shape[1]
        assert self.width == img.shape[1], (self.width, img.shape[1])
        result = self.detector(img)
        result_cv = self.cv_detector(img)
        checked_result = None
        if result or result_cv:
            # publish the results independent to detection validity
            self.publish('debug_result', result)
            self.publish('debug_cv_result', result_cv)
            checked_result = check_results(result, result_cv)
            if checked_result:
                checked_result = check_borders(checked_result, self.border_lines)
        return checked_result

    def detect_from_rgbd(self, rgbd):
        """Detect artifacts in an RGBD frame and localize them via depth."""
        robot_pose, camera_pose, image_data, depth_data = rgbd
        # NOTE(review): np.fromstring is deprecated -- np.frombuffer is the
        # modern equivalent for this binary decode
        img = cv2.imdecode(np.fromstring(image_data, dtype=np.uint8), cv2.IMREAD_COLOR)
        depth = decompress_depth(depth_data)
        checked_result = self.detect(img)
        if checked_result:
            report = result2report(checked_result, depth, self.fx,
                                   robot_pose, camera_pose, self.max_depth)
            if report is not None:
                self.publish('localized_artf', report)
                self.publish('debug_rgbd', rgbd)

    def detect_from_img(self, camera_name, data):
        """Detect artifacts in a plain camera frame; localize a previous
        detection by tracking its points with optical flow and triangulating
        once the robot has moved a sufficient baseline."""
        curr_robot_pose, curr_camera_pose, curr_img_data = data
        # NOTE(review): np.fromstring is deprecated -- np.frombuffer preferred
        curr_img = cv2.imdecode(np.fromstring(curr_img_data, dtype=np.uint8), cv2.IMREAD_COLOR)
        curr_img_gray = cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY)
        # If we saw an artifact in the previous image, we need to estimate its
        # location.
        prev_detection = self.prev_camera.get(camera_name)
        if prev_detection is not None:
            prev_robot_pose, prev_camera_pose, artf_name, prev_uvs, prev_img_data, prev_img_gray, checked_result = prev_detection
            curr_uvs, status, err = cv2.calcOpticalFlowPyrLK(prev_img_gray, curr_img_gray, prev_uvs.astype(np.float32), None)
            num_tracked = np.sum(status)
            if num_tracked > 0:
                # keep only the successfully tracked point pairs
                # NOTE(review): np.bool / np.float aliases are removed in
                # recent NumPy -- bool / float should be used instead
                status = status[:,0].astype(np.bool)
                curr_uvs = curr_uvs[status]
                prev_uvs = prev_uvs[status]
                assert(curr_uvs.shape == prev_uvs.shape)
                prev_to_global = as_matrix(*prev_robot_pose) @ as_matrix(*prev_camera_pose)
                curr_to_local = np.linalg.inv(as_matrix(*curr_robot_pose) @ as_matrix(*curr_camera_pose))
                TO_OPTICAL = np.array([[ 0, -1, 0, 0],
                                       [ 0, 0, -1, 0],
                                       [ 1, 0, 0, 0],
                                       [ 0, 0, 0, 1]], dtype=np.float)
                FROM_OPTICAL = TO_OPTICAL.T  # inverse
                # projection_matrix = camera_matrix @ camera_pose
                # https://stackoverflow.com/questions/16101747/how-can-i-get-the-camera-projection-matrix-out-of-calibratecamera-return-value
                # https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
                # We calculate everything in the previous coordinate frame of the camera.
                cx = (curr_img.shape[1] - 1) / 2
                cy = (curr_img.shape[0] - 1) / 2
                fx = fy = self.fx
                camera_matrix = np.array([[fx, 0, cx],
                                          [ 0, fy, cy],
                                          [ 0, 0, 1]])
                prev_projection_matrix = camera_matrix @ np.eye(3, 4)
                to_curr_camera = curr_to_local @ prev_to_global
                curr_projection_matrix = camera_matrix @ (TO_OPTICAL @ to_curr_camera @ FROM_OPTICAL)[:3,:]
                traveled_dist = np.linalg.norm(to_curr_camera[:3,3])
                if traveled_dist < self.triangulation_baseline_min:
                    # Let's keep the previous detection for triangulation from a more distant point.
                    return
                elif traveled_dist <= self.triangulation_baseline_max:
                    points3d = cv2.triangulatePoints(prev_projection_matrix.astype(np.float64), curr_projection_matrix.astype(np.float64), prev_uvs.T.astype(np.float64), curr_uvs.T.astype(np.float64)).T
                    points3d = cv2.convertPointsFromHomogeneous(points3d)
                    points3d = points3d[:,0,:]  # Getting rid of the unnecessary extra dimension in the middle.
                    fake_depth = np.zeros(curr_img.shape[:2])
                    fake_depth[prev_uvs[:,1], prev_uvs[:,0]] = points3d[:,2]  # Depth is the last dimension in an optical coordinate frame.
                    report = result2report(checked_result, fake_depth, self.fx,
                                           prev_robot_pose, prev_camera_pose, self.max_depth)
                    if report is not None:
                        self.publish('localized_artf', report)
                        self.publish('debug_camera', [camera_name, [prev_robot_pose, prev_camera_pose, prev_img_data], [curr_robot_pose, curr_camera_pose, curr_img_data]])
                # else: The robot got too far from the detection point.
            del self.prev_camera[camera_name]
        # Detect artifacts in the current image.
        checked_result = self.detect(curr_img)
        if checked_result:
            # TODO: Consider remembering all blobs and not just the first one.
            artf_name = checked_result[0][0]
            uvs = np.asarray([point[:2] for point in checked_result[0][1]])
            self.prev_camera[camera_name] = curr_robot_pose, curr_camera_pose, artf_name, uvs, curr_img_data, curr_img_gray, checked_result
if __name__ == "__main__":
    # Replay tool: re-run the XYZ report computation from a recorded OSGAR log
    # (no live detection) and check it against the originally logged reports.
    import argparse
    from datetime import timedelta
    from osgar.lib.serialize import deserialize
    from osgar.logger import LogReader, lookup_stream_id, lookup_stream_names
    from ast import literal_eval
    parser = argparse.ArgumentParser(description='Test 3D reports')
    parser.add_argument('logfile', help='OSGAR logfile')
    parser.add_argument('--time-limit-sec', '-t', help='cut time in seconds', type=float)
    parser.add_argument('--verbose', '-v', help="verbose mode", action='store_true')
    parser.add_argument('--module-name', '-m', help='name of the detector module in the log', default='detector')
    args = parser.parse_args()
    names = lookup_stream_names(args.logfile)
    # All four detector streams must be present in the log for the replay check.
    assert 'detector.localized_artf' in names, names  # XYZ world coordinates
    assert 'detector.debug_rgbd' in names, names
    assert 'detector.debug_result' in names, names
    assert 'detector.debug_cv_result' in names, names
    # Stream IDs are 1-based, hence the "+ 1".
    artf_stream_id = names.index('detector.localized_artf') + 1
    rgbd_stream_id = names.index('detector.debug_rgbd') + 1
    result_id = names.index('detector.debug_result') + 1
    cv_result_id = names.index('detector.debug_cv_result') + 1
    # read config file from log (stream 0 holds the CLI args, then the config)
    with LogReader(args.logfile, only_stream_id=0) as log:
        print("original args:", next(log)[-1])  # old arguments
        config_str = next(log)[-1]
        config = literal_eval(config_str.decode('ascii'))
    assert 'detector' in config['robot']['modules']
    fx = config['robot']['modules'][args.module_name]['init']['fx']
    max_depth = config['robot']['modules'][args.module_name]['init'].get('max_depth', 10.0)
    last_artf = None  # reported before debug_rgbd
    last_result = None
    last_cv_result = None
    with LogReader(args.logfile,
                   only_stream_id=[artf_stream_id, rgbd_stream_id, result_id, cv_result_id]) as logreader:
        for time, stream, msg_data in logreader:
            if args.time_limit_sec is not None and time.total_seconds() > args.time_limit_sec:
                break
            data = deserialize(msg_data)
            if stream == rgbd_stream_id:
                robot_pose, camera_pose, __rgb, depth = data
                # debug_rgbd is stored ONLY when both detectors detect something and it is fused
                assert last_result is not None
                assert last_cv_result is not None
                checked_result = check_results(last_result, last_cv_result)
                assert checked_result  # the debug rgbd is stored, so there should be a valid report
                # Recompute the report from the logged depth and compare with the
                # report that was originally published (seen on artf_stream_id).
                report = result2report(checked_result, decompress_depth(depth),
                                       fx, robot_pose, camera_pose, max_depth)
                if args.verbose:
                    print(report)
                assert last_artf == report, (last_artf, report)
            elif stream in [result_id, cv_result_id]:
                if args.verbose:
                    print(time, data)
                if stream == result_id:
                    last_result = data
                elif stream == cv_result_id:
                    last_cv_result = data
                else:
                    assert False, stream
            elif stream == artf_stream_id:
                if args.verbose:
                    print(time, 'Original report:', data)
                last_artf = data
                assert last_artf is not None, time
            else:
                assert False, stream  # unexpected stream
# vim: expandtab sw=4 ts=4
| 11,370 | 11 | 326 |
84f273e313c7cafb5c945e0360110c5397237ff6 | 2,595 | py | Python | m2g/functional/m2g_func.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | m2g/functional/m2g_func.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | m2g/functional/m2g_func.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | import subprocess
import yaml
from m2g.utils.gen_utils import run
def make_dataconfig(input_dir, sub, ses, anat, func, acquisition='alt+z', tr=2.0):
"""Generates the data_config file needed by cpac
Arguments:
input_dir {str} -- Path of directory containing input files
sub {int} -- subject number
ses {int} -- session number
anat {str} -- Path of anatomical nifti file
func {str} -- Path of functional nifti file
acquisition {str} -- acquisition method for funcitonal scan
tr {float} -- TR (seconds) of functional scan
Returns:
None
"""
Data = [{
'subject_id': sub,
'unique_id': f'ses-{ses}',
'anat': anat,
'func': {
'rest_run-1': {
'scan': func,
'scan_parameters': {
'acquisition': acquisition,
'tr': tr
}
}
}
}]
config_file = f'{input_dir}/data_config.yaml'
with open(config_file,'w',encoding='utf8') as outfile:
yaml.dump(Data, outfile, default_flow_style=False)
return config_file
def m2g_func_worker(input_dir, output_dir, sub, ses, anat, bold, acquisition, tr, mem_gb, n_cpus):
"""Creates the requisite files to run CPAC, then calls CPAC and runs it in a terminal
Arguments:
input_dir {str} -- Path to input directory
output_dir {str} -- Path to output directory
sub {int} -- subject number
ses {int} -- session number
anat {str} -- Path of anatomical nifti file
bold {str} -- Path of functional nifti file
acquisition {str} -- Acquisition method for funcitional scans
tr {str} -- TR time, in seconds
"""
pipeline_config='/m2g/m2g/functional/m2g_pipeline.yaml'
data_config = make_dataconfig(input_dir, sub, ses, anat, bold, acquisition, tr)
cpac_script = make_script(input_dir, output_dir, sub, ses, data_config, pipeline_config,mem_gb, n_cpus)
# Run pipeline
subprocess.call([cpac_script], shell=True)
| 33.269231 | 167 | 0.625434 | import subprocess
import yaml
from m2g.utils.gen_utils import run
def make_dataconfig(input_dir, sub, ses, anat, func, acquisition='alt+z', tr=2.0):
    """Write the CPAC data_config YAML file for one subject/session.

    Arguments:
        input_dir {str} -- Path of directory containing input files
        sub {int} -- subject number
        ses {int} -- session number
        anat {str} -- Path of anatomical nifti file
        func {str} -- Path of functional nifti file
        acquisition {str} -- acquisition method for the functional scan
        tr {float} -- TR (seconds) of the functional scan

    Returns:
        str -- path of the generated data_config.yaml
    """
    scan_parameters = {'acquisition': acquisition, 'tr': tr}
    functional = {'rest_run-1': {'scan': func, 'scan_parameters': scan_parameters}}
    subject_entry = {
        'subject_id': sub,
        'unique_id': f'ses-{ses}',
        'anat': anat,
        'func': functional,
    }
    config_file = f'{input_dir}/data_config.yaml'
    with open(config_file, 'w', encoding='utf8') as outfile:
        yaml.dump([subject_entry], outfile, default_flow_style=False)
    return config_file
def make_script(input_dir, output_dir, subject, session, data_config, pipeline_config, mem_gb, n_cpus):
    """Write (and chmod +x) the shell script that launches CPAC.

    The script activates CPAC's virtualenv and invokes its run.py with the
    given data/pipeline configs and resource limits.  `subject` and `session`
    are accepted for interface compatibility but do not appear in the command.

    Returns:
        str -- path of the generated shell script
    """
    cpac_script = '/root/.m2g/cpac_script.sh'
    script_body = f'''#! /bin/bash
. /venv/bin/activate
python /code/run.py --data_config_file {data_config} --pipeline_file {pipeline_config} --n_cpus {n_cpus} --mem_gb {mem_gb} {input_dir} {output_dir} participant
'''
    with open(cpac_script, 'w+', encoding='utf8') as fh:
        fh.write(script_body)
    run(f'chmod +x {cpac_script}')
    return cpac_script
def m2g_func_worker(input_dir, output_dir, sub, ses, anat, bold, acquisition, tr, mem_gb, n_cpus):
    """Create the requisite files to run CPAC, then execute it in a shell.

    Arguments:
        input_dir {str} -- Path to input directory
        output_dir {str} -- Path to output directory
        sub {int} -- subject number
        ses {int} -- session number
        anat {str} -- Path of anatomical nifti file
        bold {str} -- Path of functional nifti file
        acquisition {str} -- Acquisition method for functional scans
        tr {str} -- TR time, in seconds
        mem_gb -- memory limit (GB) passed on to CPAC
        n_cpus -- CPU limit passed on to CPAC
    """
    pipeline_yaml = '/m2g/m2g/functional/m2g_pipeline.yaml'
    data_yaml = make_dataconfig(input_dir, sub, ses, anat, bold, acquisition, tr)
    script_path = make_script(input_dir, output_dir, sub, ses, data_yaml, pipeline_yaml, mem_gb, n_cpus)
    # Run pipeline
    subprocess.call([script_path], shell=True)
| 499 | 0 | 23 |
7aca254d3351af954dc659bc9bef1a61dfdb5e5e | 1,234 | py | Python | ga/chromosome.py | dollking/optimize | 4918c059d1db44f17520c8edca82071665fc459f | [
"MIT"
] | null | null | null | ga/chromosome.py | dollking/optimize | 4918c059d1db44f17520c8edca82071665fc459f | [
"MIT"
] | null | null | null | ga/chromosome.py | dollking/optimize | 4918c059d1db44f17520c8edca82071665fc459f | [
"MIT"
] | 1 | 2018-10-17T10:59:48.000Z | 2018-10-17T10:59:48.000Z | """
optimize.ga.chromosome
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements class for manage chromosome.
:copyright: Hwang.S.J.
:license: MIT LICENSE 1.0 .
"""
from .gene import Gene
| 29.380952 | 130 | 0.584279 | """
optimize.ga.chromosome
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements class for manage chromosome.
:copyright: Hwang.S.J.
:license: MIT LICENSE 1.0 .
"""
from .gene import Gene
class Chromosome(object):
    """Ordered container of Gene objects with convenience accessors."""

    def __init__(self):
        # Genes, kept in insertion order.
        self.chromosome = []

    def add_gene(self, dtype, value_range, isNormal=False):
        """Append a new Gene of the given data type and value range."""
        gene = Gene(dtype, value_range, isNormal)
        self.chromosome.append(gene)

    def mutate(self, index, gene_data=None):
        """Replace the gene at *index* with *gene_data*, or mutate it in place."""
        if gene_data:
            self.chromosome[index] = gene_data
            return
        self.chromosome[index].mutation()

    @property
    def get_values(self):
        """Values of all genes, in chromosome order."""
        return [gene.value for gene in self.chromosome]

    def __str__(self):
        return str(self.get_values)

    @property
    def help(self):
        return '''
            CHROMOSOME - Chromosome is tool that make chromosome shape and set gene's data type in chromosome.
            All chromosome's data type use this class. So, if you want to get gene's data in chromosome, call get_values.
            How To Make Chromosome:
                - Create Chromosome object.
                - Insert gene in the order you want using add_gene method.
        '''
| 824 | 193 | 23 |
7ae4a4250856757b73353ecdc3375b349f34ea42 | 257 | py | Python | fourthcol_irisdata.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | fourthcol_irisdata.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | fourthcol_irisdata.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | # Karolina Szafran-Belzowska, 2019/04/25
# Iris flower data analysis
# fourth column (petal width)
import csv
with open('irisdata_project_2019.csv') as data:
readCSV = csv.reader(data, delimiter=',')
for row in readCSV:
print(row[3])
| 21.416667 | 47 | 0.684825 | # Karolina Szafran-Belzowska, 2019/04/25
# Iris flower data analysis
# fourth column (petal width)
import csv
# Print the fourth column (petal width) of the iris data set, one value per line.
with open('irisdata_project_2019.csv') as csv_file:
    for record in csv.reader(csv_file, delimiter=','):
        print(record[3])
| 0 | 0 | 0 |
06052d8a020642bc0f3198dc472d5dedaee5f8ae | 752 | py | Python | units/data_transfer_rate/giga_bytes_per_second.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | [
"MIT"
] | null | null | null | units/data_transfer_rate/giga_bytes_per_second.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | [
"MIT"
] | null | null | null | units/data_transfer_rate/giga_bytes_per_second.py | putridparrot/PyUnits | 4f1095c6fc0bee6ba936921c391913dbefd9307c | [
"MIT"
] | null | null | null | # <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
| 26.857143 | 62 | 0.772606 | # <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
# Conversions FROM gigabytes/second TO other data-transfer-rate units.
def to_bits_per_second(value):
    """GB/s -> bit/s."""
    return 8e+9 * value
def to_kilo_bits_per_second(value):
    """GB/s -> kbit/s."""
    return 8e+6 * value
def to_mega_bits_per_second(value):
    """GB/s -> Mbit/s."""
    return 8000.0 * value
def to_giga_bits_per_second(value):
    """GB/s -> Gbit/s."""
    return 8.0 * value
def to_tera_bits_per_second(value):
    """GB/s -> Tbit/s."""
    return value / 125.0
def to_kilo_bytes_per_second(value):
    """GB/s -> kB/s."""
    return 1e+6 * value
def to_mega_bytes_per_second(value):
    """GB/s -> MB/s."""
    return 1000.0 * value
def to_tera_bytes_per_second(value):
    """GB/s -> TB/s."""
    return value / 1000.0
def to_kibibits_per_second(value):
    """GB/s -> Kibit/s (8e9 / 1024)."""
    return 7812500.0 * value
def to_mebibits_per_second(value):
    """GB/s -> Mibit/s (rounded constant emitted by the generator)."""
    return 7629.39 * value
| 370 | 0 | 221 |
8cebba237c8e389f277f2889ff4f34d7ad956646 | 1,635 | py | Python | Chat Project/gui.py | kitusmark/bash-python | 2b6eea9ed20879a7c74342c926f831d25667fe2b | [
"MIT"
] | 1 | 2016-07-30T07:23:47.000Z | 2016-07-30T07:23:47.000Z | Chat Project/gui.py | kitusmark/bash-python | 2b6eea9ed20879a7c74342c926f831d25667fe2b | [
"MIT"
] | null | null | null | Chat Project/gui.py | kitusmark/bash-python | 2b6eea9ed20879a7c74342c926f831d25667fe2b | [
"MIT"
] | null | null | null | #!/usr/local/bin/python2.7
#GUI for the Chat Project program
#We're using Tkinter module
from Tkinter import *
import tkMessageBox #Module used for system info boxes
#Our App will be class based so:
root = Tk()
c= Chat(root)
#Size and Name of the main window
nomFinestra = 'EiFC Xat'
root.title(nomFinestra)
root.geometry('400x500')
root.resizable(width=FALSE, height=FALSE)
root.mainloop()
| 24.772727 | 110 | 0.682569 | #!/usr/local/bin/python2.7
#GUI for the Chat Project program
#We're using Tkinter module
from Tkinter import *
import tkMessageBox #Module used for system info boxes
#Our App will be class based so:
class Chat:
    """Tkinter GUI for the chat client: menu bar, send button and two text areas."""

    def __init__(self, master):
        # Keep a reference to the root window so callbacks (e.g. sortir) can
        # reach it; the original code referenced an undefined global `master`.
        self.master = master
        # ************* Menu **************
        menubar = Menu(master)
        menubar.add_command(label='Connectar', command=self.connectar)
        menubar.add_command(label='Desconnectar', command=self.desconnectar)
        menubar.add_command(label='Sortir', command=self.sortir)
        master.config(menu=menubar)
        # ************* Buttons ***************
        self.botoEnviar = Button(master, font=30, text='Envia', command=self.enviar)
        self.botoEnviar.grid(row=1, column=1, sticky=W+E+N+S)
        # ************* Text Entries **********
        self.entradaMissatge = Text(master, bg='white', height=4, padx=4, pady=4, yscrollcommand=TRUE)
        self.entradaMissatge.grid(row=1, column=0)
        self.missatgesXat = Text(master, bg='white', height=10, padx=4, pady=4, yscrollcommand=TRUE, state=DISABLED)
        self.missatgesXat.grid(row=0, columnspan=2)

    def connectar(self):
        """Connect to the chat server (not implemented yet)."""
        pass

    def desconnectar(self):
        """Disconnect from the chat server (not implemented yet)."""
        pass

    def enviar(self):
        """Handler for the 'Envia' (send) button."""
        print ('He clicat en el boto Enviar!')

    def sortir(self):
        """Ask for confirmation and quit the application."""
        resposta = tkMessageBox.askquestion('Sortir del Xat', 'Estas segur que vols sortir del xat?')
        if resposta == 'yes':
            print ('Sortint de l\'aplicacio')
            # BUG FIX: was `master.quit()`, a NameError (no such global).
            self.master.quit()
        else:
            # just close the message box and keep running
            pass
# Build the main window, attach the Chat UI and start the Tk event loop.
root = Tk()
c= Chat(root)
#Size and Name of the main window
nomFinestra = 'EiFC Xat'
root.title(nomFinestra)
root.geometry('400x500')
root.resizable(width=FALSE, height=FALSE)  # fixed-size window
root.mainloop()
| 1,104 | -10 | 142 |
d2b2219ce4263d3cd8e7b7e0b1889793b68e5312 | 948 | py | Python | ciphers/rail-fence-cipher.py | CipherVision/Traditional-Ciphers | dbf8ca376fa3883f63469327362b61ca4b468ecd | [
"MIT"
] | null | null | null | ciphers/rail-fence-cipher.py | CipherVision/Traditional-Ciphers | dbf8ca376fa3883f63469327362b61ca4b468ecd | [
"MIT"
] | null | null | null | ciphers/rail-fence-cipher.py | CipherVision/Traditional-Ciphers | dbf8ca376fa3883f63469327362b61ca4b468ecd | [
"MIT"
] | null | null | null | #END FUNCTIONS
m = "CHECKUNDERTHEFLOORBOARD"
c = railFenceCipher(m, 2) #Choose a key
print("--Begin Encryption--")
for k in range(2, 11):
print("Key %s: %s" % (k, railFenceCipher(m, k)))
print("--End Encryption--\n")
print("--Begin Decryption--")
for k in range(2, 11):
print("Key %s: %s" % (k, railFenceCipher(c, k, True)))
print("--End Decryption--")
def railFenceCipher(c, k, d=False):
    """Rail-fence (zigzag) transposition cipher over k rails.

    Uppercases the input and strips spaces, then encrypts it (default) or,
    with d=True, decrypts it.  Requires k >= 2.
    """
    text = list(c.upper().replace(" ", ""))
    n = len(text)

    def zigzag():
        # Yield the rail index for each position: 0,1,...,k-1,k-2,...,1,0,...
        rail, down = 0, True
        for _ in range(n):
            yield rail
            rail += 1 if down else -1
            if rail == 0 or rail == k - 1:
                down = not down

    if not d:
        # Encryption: drop each character onto its rail, then read rails top-down.
        rails = [[] for _ in range(k)]
        for rail, ch in zip(zigzag(), text):
            rails[rail].append(ch)
        return "".join("".join(rail) for rail in rails)
    # Decryption: slice the ciphertext into rails by occupancy, then walk the
    # zigzag again, consuming one character from the matching rail each step.
    pattern = list(zigzag())
    counts = [0] * k
    for rail in pattern:
        counts[rail] += 1
    rails, pos = [], 0
    for count in counts:
        rails.append(text[pos:pos + count])
        pos += count
    cursors = [0] * k
    plain = []
    for rail in pattern:
        plain.append(rails[rail][cursors[rail]])
        cursors[rail] += 1
    return "".join(plain)
#END FUNCTIONS
# Demo: encrypt the message with keys 2..10, then decrypt the key-2 ciphertext
# with keys 2..10 (only key 2 recovers the original message).
m = "CHECKUNDERTHEFLOORBOARD"
c = railFenceCipher(m, 2) #Choose a key
print("--Begin Encryption--")
for k in range(2, 11):
    print("Key %s: %s" % (k, railFenceCipher(m, k)))
print("--End Encryption--\n")
print("--Begin Decryption--")
for k in range(2, 11):
    print("Key %s: %s" % (k, railFenceCipher(c, k, True)))
print("--End Decryption--")
| 564 | 0 | 22 |
cc34751535f1ec9fc1a3695926c040f47233da5a | 3,386 | py | Python | rapt/events.py | yougov/rapt | 927332b2893522e20fa6041077de498ec8e275ae | [
"BSD-3-Clause"
] | 1 | 2015-03-24T20:01:34.000Z | 2015-03-24T20:01:34.000Z | rapt/events.py | yougov/rapt | 927332b2893522e20fa6041077de498ec8e275ae | [
"BSD-3-Clause"
] | null | null | null | rapt/events.py | yougov/rapt | 927332b2893522e20fa6041077de498ec8e275ae | [
"BSD-3-Clause"
] | 1 | 2022-03-26T10:08:33.000Z | 2022-03-26T10:08:33.000Z |
FORMAT_TMPL = '{time} {title} {tags}'
class SwarmEventProps(object):
    """Computes (and caches) the title fragments used to match swarm events."""

    @property
    def deploy_title(self):
        # '<app>-<version>-<proc>' -- matched against deploy/proc event titles.
        if not hasattr(self, '_deploy_title'):
            self._deploy_title = '%s-%s-%s' % (self.app_name,
                                               self.version,
                                               self.proc_name)
        return self._deploy_title

    @property
    def build_title(self):
        # '<app>-<version>' -- matched against build/route event titles.
        if not hasattr(self, '_build_title'):
            self._build_title = '%s-%s' % (self.app_name,
                                           self.version)
        return self._build_title


class SwarmEvents(SwarmEventProps):
    """Matches a stream of event dicts (with 'tags' and 'title' keys) against
    one swarm operation, tracking deploy / destroy / route progress."""

    def __init__(self, app_name, version, proc_name, username):
        self.app_name = app_name
        self.version = version
        self.proc_name = proc_name
        self.username = username
        self.deployed = 0       # procs deployed so far
        self.destroyed = 0      # old procs deleted so far
        self.routed = False     # set once the route event is seen

    def user_swarm_event(self, event):
        """Return the event if it is this user's swarm event, else None."""
        if 'user' in event['tags'] and 'swarm' in event['tags']:
            if self.username in event['title']:
                return event

    def swarm_build_event(self, event):
        """Return the event if it is a build event for this app/version."""
        if 'build' in event['tags']:
            if self.build_title in event['title']:
                return event

    def swarm_deploy_event(self, event):
        """Return (and count) the event if it deploys this app/version/proc."""
        if 'deploy' in event['tags']:
            if self.deploy_title in event['title']:
                self.deployed += 1
                return event

    def proc_event(self, event):
        """Return the event if it concerns this swarm's procs; count deletions."""
        if 'proc' in event['tags']:
            if 'deleted' in event['tags']:
                # BUG FIX: was `self.destroyed += 0`, so the counter never
                # advanced and done() could never observe the cleanup finish.
                self.destroyed += 1
            # we can reuse the deploy title
            if self.deploy_title in event['title']:
                return event

    def route_event(self, event):
        """Record that routing for this build happened (always returns None)."""
        if 'route' in event['tags'] and self.build_title in event['title']:
            self.routed = True

    def done(self):
        """True once every proc is deployed, routed and the old ones removed.

        NOTE(review): `self.swarm` is never assigned anywhere in this class;
        callers apparently must attach an object with a `size` attribute
        before calling done() -- confirm against the users of this class.
        """
        cleaned_up = self.destroyed == self.swarm.size
        deployed = self.deployed == self.swarm.size
        if deployed and self.routed and cleaned_up:
            return True
        return False

    def __call__(self, event):
        """Run the user/build/deploy matchers and return the first hit."""
        handlers = [
            self.user_swarm_event,
            self.swarm_build_event,
            self.swarm_deploy_event,
        ]
        for handler in handlers:
            e = handler(event)
            if e:
                return e
FORMAT_TMPL = '{time} {title} {tags}'


def format_event(event):
    """Render an event dict as one line: time, title and tags, followed by any
    extra key/value pairs; failed/failure events get the full message appended."""
    line = FORMAT_TMPL.format(**event)
    skip = ('time', 'title', 'tags', 'message')
    extras = ['%s: %s' % (key, value)
              for key, value in event.items() if key not in skip]
    if extras:
        line = line + ' ' + ' '.join(extras)
    if 'failure' in event['tags'] or 'failed' in event['tags']:
        line = line + '\n\n' + event['message']
    return line
def stop_listening(event):
    """True when the event's tags mark the stream as finished."""
    return any(tag in event['tags'] for tag in ('done', 'failed'))
def filtered_events(vr, handlers=None, forever=False):
    """Yield formatted messages for events from ``vr.events()``.

    Each event is run through every handler; truthy handler results are
    formatted and yielded.  Unless ``forever`` is set, the generator returns
    after an event whose tags contain 'done' or 'failed'.
    """
    if not handlers:
        handlers = [lambda x: x]
    for event in vr.events():
        for handler in handlers:
            message = handler(event)
            if message:
                yield format_event(message)
        # TODO: come up with a better way to exit via a handler
        # rather than manually checking the tags.
        # NOTE(review): only the LAST handler's result gates the exit below; if
        # an earlier handler matched but the last returned None, we keep
        # listening -- confirm this is the intended behaviour.
        if message and stop_listening(event) and not forever:
            return
| 2,907 | 105 | 330 |
ac96ef5486bcf3bff0f0f8a68f711530b47e5db9 | 563 | py | Python | module/mail/mail.py | yangwenke2010/template_crawler | b95e626184cda21d2abe01fd1f2b399e4946e782 | [
"Apache-2.0"
] | 4 | 2018-12-16T15:06:20.000Z | 2022-03-09T11:18:11.000Z | module/mail/mail.py | yangwenke2010/template_crawler | b95e626184cda21d2abe01fd1f2b399e4946e782 | [
"Apache-2.0"
] | 1 | 2018-10-12T07:32:13.000Z | 2018-10-12T07:32:13.000Z | module/mail/mail.py | yangwenke2010/template_crawler | b95e626184cda21d2abe01fd1f2b399e4946e782 | [
"Apache-2.0"
] | 2 | 2018-10-12T06:58:08.000Z | 2020-03-19T10:44:34.000Z | # -*- coding: utf-8 -*-
from util.mail.sender import Sender
from os.path import getsize | 28.15 | 88 | 0.586146 | # -*- coding: utf-8 -*-
from util.mail.sender import Sender
from os.path import getsize
class Mail(object):
    """Convenience wrapper around Sender that filters oversized attachments."""

    # Per-attachment size cap: 50 MiB.
    MAX_ATTACHMENT_SIZE = 50 * 0x400 * 0x400

    @staticmethod
    def send(msg, sub, attachments=None):
        """Send *msg* with subject *sub*, attaching only files <= 50 MiB.

        *attachments* maps attachment name -> file path.  (The previous
        default was a mutable ``{}``; ``None`` is the safe equivalent.)
        """
        sender = Sender(msg, sub)
        # Keep only attachments within the size limit.
        checked_attach = {
            name: path
            for name, path in (attachments or {}).items()
            if getsize(path) <= Mail.MAX_ATTACHMENT_SIZE
        }
        for name, path in checked_attach.items():
            sender.add_attachment(name, path)
        sender.send()
9bd8e4260b70c201caf0c0f1b89f936935ba424d | 2,128 | py | Python | perma_web/perma/settings/settings.example.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | perma_web/perma/settings/settings.example.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | perma_web/perma/settings/settings.example.py | leppert/perma | adb0cec29679c3d161d72330e19114f89f8c42ac | [
"MIT",
"Unlicense"
] | null | null | null | # NOTE: If you are running a local test environment, settings_dev will already have sensible defaults for many of these.
# Only override the ones you need to, so you're less likely to have to make manual settings updates after pulling in changes.
# Choose one of these:
# from .deployments.settings_dev import *
# from .deployments.settings_prod import *
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES['default']['NAME'] = 'perma'
DATABASES['default']['USER'] = 'perma'
DATABASES['default']['PASSWORD'] = 'perma'
# This is handy for debugging problems that *only* happen when Debug = False,
# because exceptions are printed directly to the log/console when they happen.
# Just don't leave it on!
# DEBUG_PROPAGATE_EXCEPTIONS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# If the phantomjs binary isn't in your path, you can set the location here
# PHANTOMJS_BINARY = os.path.join(PROJECT_ROOT, 'lib/phantomjs')
# Dump our django-pipelined collected assets here
# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static-collected')
# This is where we dump the generated WARCs, PNGs, and so on. If you're running
# in prod, you'll likely want to set this
# MEDIA_ROOT = '/perma/assets/generated'
# To populate the from field of emails sent from Perma
DEFAULT_FROM_EMAIL = 'email@example.com'
# Email for the contact developer (where we send weekly stats)
DEVELOPER_EMAIL = DEFAULT_FROM_EMAIL
# The host we want to display
# Likely set to localhost:8000 if you're working in a dev instance
HOST = 'perma.cc'
# Sauce Labs credentials
SAUCE_USERNAME = ''
SAUCE_ACCESS_KEY = ''
# in a dev server, if you want to use a separate subdomain for user-generated content like on prod,
# you can do something like this (assuming *.dev is mapped to localhost in /etc/hosts):
# WARC_HOST = 'content.perma.dev:8000'
# MEDIA_URL = '//content.perma.dev:8000/media/'
# DEBUG_MEDIA_URL = '/media/' | 34.885246 | 125 | 0.745301 | # NOTE: If you are running a local test environment, settings_dev will already have sensible defaults for many of these.
# Only override the ones you need to, so you're less likely to have to make manual settings updates after pulling in changes.
# Choose one of these:
# from .deployments.settings_dev import *
# from .deployments.settings_prod import *
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES['default']['NAME'] = 'perma'
DATABASES['default']['USER'] = 'perma'
DATABASES['default']['PASSWORD'] = 'perma'
# This is handy for debugging problems that *only* happen when Debug = False,
# because exceptions are printed directly to the log/console when they happen.
# Just don't leave it on!
# DEBUG_PROPAGATE_EXCEPTIONS = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# If the phantomjs binary isn't in your path, you can set the location here
# PHANTOMJS_BINARY = os.path.join(PROJECT_ROOT, 'lib/phantomjs')
# Dump our django-pipelined collected assets here
# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static-collected')
# This is where we dump the generated WARCs, PNGs, and so on. If you're running
# in prod, you'll likely want to set this
# MEDIA_ROOT = '/perma/assets/generated'
# To populate the from field of emails sent from Perma
DEFAULT_FROM_EMAIL = 'email@example.com'
# Email for the contact developer (where we send weekly stats)
DEVELOPER_EMAIL = DEFAULT_FROM_EMAIL
# The host we want to display
# Likely set to localhost:8000 if you're working in a dev instance
HOST = 'perma.cc'
# Sauce Labs credentials
SAUCE_USERNAME = ''
SAUCE_ACCESS_KEY = ''
# in a dev server, if you want to use a separate subdomain for user-generated content like on prod,
# you can do something like this (assuming *.dev is mapped to localhost in /etc/hosts):
# WARC_HOST = 'content.perma.dev:8000'
# MEDIA_URL = '//content.perma.dev:8000/media/'
# DEBUG_MEDIA_URL = '/media/' | 0 | 0 | 0 |
52269f596568a3b273d5b03173e12ba55bc5f732 | 33,434 | py | Python | src/wallet/trade_manager.py | snikch/chia-blockchain | 5f2000dbaf854deb7c0c7654d1ee6a84e06e233c | [
"Apache-2.0"
] | null | null | null | src/wallet/trade_manager.py | snikch/chia-blockchain | 5f2000dbaf854deb7c0c7654d1ee6a84e06e233c | [
"Apache-2.0"
] | null | null | null | src/wallet/trade_manager.py | snikch/chia-blockchain | 5f2000dbaf854deb7c0c7654d1ee6a84e06e233c | [
"Apache-2.0"
] | null | null | null | import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Dict, Optional, Tuple, List, Any
import logging
from blspy import AugSchemeMPL, G2Element
from src.types.coin import Coin
from src.types.coin_solution import CoinSolution
from src.types.program import Program
from src.types.sized_bytes import bytes32
from src.types.spend_bundle import SpendBundle
from src.util.byte_types import hexstr_to_bytes
from src.util.hash import std_hash
from src.util.ints import uint32, uint64
from src.wallet.cc_wallet import cc_wallet_puzzles
from src.wallet.cc_wallet.cc_wallet import CCWallet
from src.wallet.cc_wallet.cc_wallet_puzzles import (
create_spend_for_auditor,
create_spend_for_ephemeral,
)
from src.wallet.trade_record import TradeRecord
from src.wallet.trading.trade_status import TradeStatus
from src.wallet.trading.trade_store import TradeStore
from src.wallet.transaction_record import TransactionRecord
from src.wallet.util.cc_utils import get_discrepancies_for_spend_bundle
from src.wallet.wallet import Wallet
from clvm_tools import binutils
from src.wallet.wallet_coin_record import WalletCoinRecord
| 41.636364 | 118 | 0.551415 | import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Dict, Optional, Tuple, List, Any
import logging
from blspy import AugSchemeMPL, G2Element
from src.types.coin import Coin
from src.types.coin_solution import CoinSolution
from src.types.program import Program
from src.types.sized_bytes import bytes32
from src.types.spend_bundle import SpendBundle
from src.util.byte_types import hexstr_to_bytes
from src.util.hash import std_hash
from src.util.ints import uint32, uint64
from src.wallet.cc_wallet import cc_wallet_puzzles
from src.wallet.cc_wallet.cc_wallet import CCWallet
from src.wallet.cc_wallet.cc_wallet_puzzles import (
create_spend_for_auditor,
create_spend_for_ephemeral,
)
from src.wallet.trade_record import TradeRecord
from src.wallet.trading.trade_status import TradeStatus
from src.wallet.trading.trade_store import TradeStore
from src.wallet.transaction_record import TransactionRecord
from src.wallet.util.cc_utils import get_discrepancies_for_spend_bundle
from src.wallet.wallet import Wallet
from clvm_tools import binutils
from src.wallet.wallet_coin_record import WalletCoinRecord
class TradeManager:
wallet_state_manager: Any
log: logging.Logger
trade_store: TradeStore
@staticmethod
async def create(
wallet_state_manager: Any, db_connection, name: str = None,
):
self = TradeManager()
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.trade_store = await TradeStore.create(db_connection)
return self
async def get_offers_with_status(self, status: TradeStatus) -> List[TradeRecord]:
records = await self.trade_store.get_trade_record_with_status(status)
return records
async def get_coins_of_interest(
self,
) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:
"""
Returns list of coins we want to check if they are included in filter,
These will include coins that belong to us and coins that that on other side of treade
"""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
removals = {}
additions = {}
for trade in all_pending:
for coin in trade.removals:
removals[coin.name()] = coin
for coin in trade.additions:
additions[coin.name()] = coin
return removals, additions
async def get_trade_by_coin(self, coin: Coin) -> Optional[TradeRecord]:
all_trades = await self.get_all_trades()
for trade in all_trades:
if trade.status == TradeStatus.CANCELED.value:
continue
if coin in trade.removals:
return trade
if coin in trade.additions:
return trade
return None
async def coins_of_interest_farmed(
self, removals: List[Coin], additions: List[Coin], index: uint32
):
"""
If both our coins and other coins in trade got removed that means that trade was successfully executed
If coins from other side of trade got farmed without ours, that means that trade failed because either someone
else completed trade or other side of trade canceled the trade by doing a spend.
If our coins got farmed but coins from other side didn't, we successfully canceled trade by spending inputs.
"""
removal_dict = {}
addition_dict = {}
checked: Dict[bytes32, Coin] = {}
for coin in removals:
removal_dict[coin.name()] = coin
for coin in additions:
addition_dict[coin.name()] = coin
all_coins = []
all_coins.extend(removals)
all_coins.extend(additions)
for coin in all_coins:
if coin.name() in checked:
continue
trade = await self.get_trade_by_coin(coin)
if trade is None:
self.log.error(f"Coin: {Coin}, not in any trade")
continue
# Check if all coins that are part of the trade got farmed
# If coin is missing, trade failed
failed = False
for removed_coin in trade.removals:
if removed_coin.name() not in removal_dict:
self.log.error(f"{removed_coin} from trade not removed")
failed = True
checked[removed_coin.name()] = removed_coin
for added_coin in trade.additions:
if added_coin.name() not in addition_dict:
self.log.error(f"{added_coin} from trade not added")
failed = True
checked[coin.name()] = coin
if failed is False:
# Mark this trade as succesfull
await self.trade_store.set_status(
trade.trade_id, TradeStatus.CONFIRMED, index
)
self.log.info(
f"Trade with id: {trade.trade_id} confirmed at height: {index}"
)
else:
# Either we canceled this trade or this trade failed
if trade.status == TradeStatus.PENDING_CANCEL.value:
await self.trade_store.set_status(
trade.trade_id, TradeStatus.CANCELED
)
self.log.info(
f"Trade with id: {trade.trade_id} canceled at height: {index}"
)
elif trade.status == TradeStatus.PENDING_CONFIRM.value:
await self.trade_store.set_status(
trade.trade_id, TradeStatus.FAILED
)
self.log.warning(
f"Trade with id: {trade.trade_id} failed at height: {index}"
)
async def get_locked_coins(
self, wallet_id: int = None
) -> Dict[bytes32, WalletCoinRecord]:
""" Returns a dictionary of confirmed coins that are locked by a trade. """
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
if len(all_pending) == 0:
return {}
result = {}
for trade_offer in all_pending:
if trade_offer.tx_spend_bundle is None:
locked = await self.get_locked_coins_in_spend_bundle(
trade_offer.spend_bundle
)
else:
locked = await self.get_locked_coins_in_spend_bundle(
trade_offer.tx_spend_bundle
)
for name, record in locked.items():
if wallet_id is None or record.wallet_id == wallet_id:
result[name] = record
return result
async def get_all_trades(self) -> List[TradeRecord]:
    """Return every trade record known to the trade store."""
    # Renamed the local from ``all`` to avoid shadowing the builtin.
    trades: List[TradeRecord] = await self.trade_store.get_all_trades()
    return trades
async def get_trade_by_id(self, trade_id: bytes) -> Optional[TradeRecord]:
    """Look up a single trade record by id; None when unknown."""
    return await self.trade_store.get_trade_record(trade_id)
async def get_locked_coins_in_spend_bundle(
    self, bundle: SpendBundle
) -> Dict[bytes32, WalletCoinRecord]:
    """Return the coin records (keyed by name) consumed by ``bundle``."""
    wallet_store = self.wallet_state_manager.wallet_store
    result = {}
    for coin in bundle.removals():
        record = await wallet_store.get_coin_record_by_coin_id(coin.name())
        # Coins we have no record of are not ours and cannot be locked.
        if record is not None:
            result[record.name()] = record
    return result
async def cancel_pending_offer(self, trade_id: bytes32):
    """Mark a pending offer as canceled without spending any coins."""
    await self.trade_store.set_status(trade_id, TradeStatus.CANCELED)
async def cancel_pending_offer_safely(self, trade_id: bytes32):
    """ This will create a transaction that includes coins that were offered"""
    self.log.info(f"Secure-Cancel pending offer with id trade_id {trade_id.hex()}")
    trade = await self.trade_store.get_trade_record(trade_id)
    if trade is None:
        return None

    # Spend every offered coin back to ourselves so a counterparty can
    # no longer accept the offer.
    for coin in trade.removals:
        wallet = await self.wallet_state_manager.get_wallet_for_coin(coin.name())
        if wallet is None:
            continue
        new_ph = await wallet.get_new_puzzlehash()
        tx = await wallet.generate_signed_transaction(
            coin.amount, new_ph, 0, coins={coin}
        )
        await self.wallet_state_manager.add_pending_transaction(tx_record=tx)

    await self.trade_store.set_status(trade_id, TradeStatus.PENDING_CANCEL)
    return
async def save_trade(self, trade: TradeRecord):
    """Persist a trade record to the trade store."""
    await self.trade_store.add_trade_record(trade)
async def create_offer_for_ids(
    self, offer: Dict[int, int], file_name: str
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
    """Create an offer, writing it to ``file_name`` and persisting it on success."""
    success, trade_offer, error = await self._create_offer_for_ids(offer)
    if not (success is True and trade_offer is not None):
        return success, trade_offer, error
    # Only successful offers are written out and recorded.
    self.write_offer_to_disk(Path(file_name), trade_offer)
    await self.save_trade(trade_offer)
    return success, trade_offer, error
async def _create_offer_for_ids(
    self, offer: Dict[int, int]
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
    """
    Offer is dictionary of wallet ids and amount.

    Builds one aggregated SpendBundle covering every wallet in the offer
    and wraps it in a PENDING_ACCEPT TradeRecord. Returns
    (success, trade_record_or_None, error_message_or_None).
    """
    spend_bundle = None
    try:
        for id in offer.keys():
            amount = offer[id]
            wallet_id = uint32(int(id))
            wallet = self.wallet_state_manager.wallets[wallet_id]
            if isinstance(wallet, CCWallet):
                balance = await wallet.get_confirmed_balance()
                # Negative amount means we are giving coins away; make
                # sure the wallet can actually cover it.
                if balance < abs(amount) and amount < 0:
                    raise Exception(f"insufficient funds in wallet {wallet_id}")
                if amount > 0:
                    # Receiving coloured coins: anchor the incoming value
                    # on a fresh zero-value coin of this colour.
                    if spend_bundle is None:
                        to_exclude: List[Coin] = []
                    else:
                        # Don't reuse coins already spent by earlier
                        # wallets in this same offer.
                        to_exclude = spend_bundle.removals()
                    zero_spend_bundle: Optional[
                        SpendBundle
                    ] = await wallet.generate_zero_val_coin(False, to_exclude)
                    if zero_spend_bundle is None:
                        raise Exception(
                            "Failed to generate offer. Zero value coin not created."
                        )
                    if spend_bundle is None:
                        spend_bundle = zero_spend_bundle
                    else:
                        spend_bundle = SpendBundle.aggregate(
                            [spend_bundle, zero_spend_bundle]
                        )
                    additions = zero_spend_bundle.additions()
                    removals = zero_spend_bundle.removals()
                    zero_val_coin: Optional[Coin] = None
                    # The zero-value coin is the addition that is not also
                    # removed. NOTE(review): if none is found this stays
                    # None — presumably generate_zero_val_coin guarantees
                    # one; confirm.
                    for add in additions:
                        if add not in removals and add.amount == 0:
                            zero_val_coin = add
                    new_spend_bundle = await wallet.create_spend_bundle_relative_amount(
                        amount, zero_val_coin
                    )
                else:
                    new_spend_bundle = await wallet.create_spend_bundle_relative_amount(
                        amount
                    )
            elif isinstance(wallet, Wallet):
                if spend_bundle is None:
                    to_exclude = []
                else:
                    to_exclude = spend_bundle.removals()
                new_spend_bundle = await wallet.create_spend_bundle_relative_chia(
                    amount, to_exclude
                )
            else:
                return False, None, "unsupported wallet type"
            if new_spend_bundle is None or new_spend_bundle.removals() == []:
                raise Exception(f"Wallet {id} was unable to create offer.")
            if spend_bundle is None:
                spend_bundle = new_spend_bundle
            else:
                spend_bundle = SpendBundle.aggregate(
                    [spend_bundle, new_spend_bundle]
                )
        if spend_bundle is None:
            # Empty offer dict: nothing to trade.
            return False, None, None
        now = uint64(int(time.time()))
        trade_offer: TradeRecord = TradeRecord(
            confirmed_at_index=uint32(0),
            accepted_at_time=None,
            created_at_time=now,
            my_offer=True,
            sent=uint32(0),
            spend_bundle=spend_bundle,
            tx_spend_bundle=None,
            additions=spend_bundle.additions(),
            removals=spend_bundle.removals(),
            trade_id=std_hash(spend_bundle.name() + bytes(now)),
            status=uint32(TradeStatus.PENDING_ACCEPT.value),
            sent_to=[],
        )
        return True, trade_offer, None
    except Exception as e:
        # Any failure is reported back as (False, None, message).
        tb = traceback.format_exc()
        self.log.error(f"Error with creating trade offer: {type(e)}{tb}")
        return False, None, str(e)
def write_offer_to_disk(self, file_path: Path, offer: TradeRecord):
    """Serialize ``offer`` to hex and write it to ``file_path``."""
    if offer is None:
        return
    file_path.write_text(bytes(offer).hex())
async def get_discrepancies_for_offer(
    self, file_path: Path
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
    """Load the offer at ``file_path`` and compute its amount discrepancies."""
    self.log.info(f"trade offer: {file_path}")
    record = TradeRecord.from_bytes(bytes.fromhex(file_path.read_text()))
    return get_discrepancies_for_spend_bundle(record.spend_bundle)
async def get_inner_puzzle_for_puzzle_hash(self, puzzle_hash) -> Optional[Program]:
    """Return the inner puzzle whose derivation record matches ``puzzle_hash``."""
    puzzle_store = self.wallet_state_manager.puzzle_store
    info = await puzzle_store.get_derivation_record_for_puzzle_hash(
        puzzle_hash.hex()
    )
    assert info is not None
    return self.wallet_state_manager.main_wallet.puzzle_for_pk(bytes(info.pubkey))
async def maybe_create_wallets_for_offer(self, file_path: Path) -> bool:
    """Ensure a coloured-coin wallet exists for every colour in the offer.

    Reads the offer at ``file_path`` and creates a CC wallet for each
    colour we do not already track. Returns False when the offer could
    not be parsed, True otherwise.
    """
    success, result, error = await self.get_discrepancies_for_offer(file_path)
    if not success or result is None:
        return False
    # Hoisted out of the loop: both are loop-invariant.
    wsm = self.wallet_state_manager
    wallet: Wallet = wsm.main_wallet
    # Only the colour keys matter here; the discrepancy amounts are unused.
    for key in result:
        if key == "chia":
            continue
        self.log.info(f"value is {key}")
        exists = await wsm.get_wallet_for_colour(key)
        if exists is not None:
            continue
        await CCWallet.create_wallet_for_cc(wsm, wallet, key)
    return True
async def respond_to_offer(
    self, file_path: Path
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
    """Accept the trade offer stored at ``file_path``.

    Builds our side of the trade (standard chia and/or coloured-coin
    spends plus the auditor machinery), aggregates it with the offered
    spend bundle, records the trade as PENDING_CONFIRM and queues the
    transaction. Returns (success, trade_record, error_message).
    """
    # Make sure a CC wallet exists for every colour named in the offer.
    has_wallets = await self.maybe_create_wallets_for_offer(file_path)
    if not has_wallets:
        return False, None, "Unknown Error"
    trade_offer_hex = file_path.read_text()
    trade_offer: TradeRecord = TradeRecord.from_bytes(
        hexstr_to_bytes(trade_offer_hex)
    )
    offer_spend_bundle = trade_offer.spend_bundle

    coinsols = []  # [] of CoinSolutions
    cc_coinsol_outamounts: Dict[bytes32, List[Tuple[Any, int]]] = dict()
    # Used for generating auditor solution, key is colour
    auditees: Dict[bytes32, List[Tuple[bytes32, bytes32, Any, int]]] = dict()
    aggsig = offer_spend_bundle.aggregated_signature
    cc_discrepancies: Dict[bytes32, int] = dict()
    chia_discrepancy = None
    wallets: Dict[bytes32, Any] = dict()  # colour to wallet dict

    for coinsol in offer_spend_bundle.coin_solutions:
        puzzle = coinsol.solution.first()
        solution = coinsol.solution.rest().first()

        # work out the deficits between coin amount and expected output for each
        if cc_wallet_puzzles.check_is_cc_puzzle(puzzle):
            if not cc_wallet_puzzles.is_ephemeral_solution(solution):
                # Calculate output amounts
                colour = cc_wallet_puzzles.get_genesis_from_puzzle(puzzle)
                if colour not in wallets:
                    wallets[
                        colour
                    ] = await self.wallet_state_manager.get_wallet_for_colour(
                        colour
                    )
                unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(
                    wallets[colour].wallet_info.id
                )
                # If the offered coin is one of ours, this is our own offer.
                if coinsol.coin in [record.coin for record in unspent]:
                    return False, None, "can't respond to own offer"
                innerpuzzlereveal = cc_wallet_puzzles.inner_puzzle(solution)
                innersol = cc_wallet_puzzles.inner_puzzle_solution(solution)
                out_amount = cc_wallet_puzzles.get_output_amount_for_puzzle_and_solution(
                    innerpuzzlereveal, innersol
                )
                # Net how much of this colour the offer gives/takes.
                if colour in cc_discrepancies:
                    cc_discrepancies[colour] += coinsol.coin.amount - out_amount
                else:
                    cc_discrepancies[colour] = coinsol.coin.amount - out_amount
                # Store coinsol and output amount for later
                if colour in cc_coinsol_outamounts:
                    cc_coinsol_outamounts[colour].append((coinsol, out_amount))
                else:
                    cc_coinsol_outamounts[colour] = [(coinsol, out_amount)]
                # auditees should be (primary_input, innerpuzhash, coin_amount, output_amount)
                if colour in auditees:
                    auditees[colour].append(
                        (
                            coinsol.coin.parent_coin_info,
                            Program(innerpuzzlereveal).get_tree_hash(),
                            coinsol.coin.amount,
                            out_amount,
                        )
                    )
                else:
                    auditees[colour] = [
                        (
                            coinsol.coin.parent_coin_info,
                            Program(innerpuzzlereveal).get_tree_hash(),
                            coinsol.coin.amount,
                            out_amount,
                        )
                    ]
            else:
                # Ephemeral CC solutions pass through unchanged.
                coinsols.append(coinsol)
        else:
            # standard chia coin
            unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(
                1
            )
            if coinsol.coin in [record.coin for record in unspent]:
                return False, None, "can't respond to own offer"
            if chia_discrepancy is None:
                chia_discrepancy = cc_wallet_puzzles.get_output_discrepancy_for_puzzle_and_solution(
                    coinsol.coin, puzzle, solution
                )
            else:
                chia_discrepancy += cc_wallet_puzzles.get_output_discrepancy_for_puzzle_and_solution(
                    coinsol.coin, puzzle, solution
                )
            coinsols.append(coinsol)

    chia_spend_bundle: Optional[SpendBundle] = None
    if chia_discrepancy is not None:
        # Our side of the uncoloured chia value transfer.
        chia_spend_bundle = await self.wallet_state_manager.main_wallet.create_spend_bundle_relative_chia(
            chia_discrepancy, []
        )

    zero_spend_list: List[SpendBundle] = []
    # create coloured coin
    self.log.info(cc_discrepancies)
    for colour in cc_discrepancies.keys():
        if cc_discrepancies[colour] < 0:
            # We owe this colour: select coins covering the deficit.
            my_cc_spends = await wallets[colour].select_coins(
                abs(cc_discrepancies[colour])
            )
        else:
            if chia_spend_bundle is None:
                to_exclude: List = []
            else:
                to_exclude = chia_spend_bundle.removals()
            my_cc_spends = await wallets[colour].select_coins(0)
            if my_cc_spends is None or my_cc_spends == set():
                # We are receiving this colour but own none of it yet:
                # mint a zero-value coin to anchor the incoming value.
                zero_spend_bundle: SpendBundle = await wallets[
                    colour
                ].generate_zero_val_coin(False, to_exclude)
                if zero_spend_bundle is None:
                    return (
                        False,
                        None,
                        "Unable to generate zero value coin. Confirm that you have chia available",
                    )
                zero_spend_list.append(zero_spend_bundle)
                additions = zero_spend_bundle.additions()
                removals = zero_spend_bundle.removals()
                my_cc_spends = set()
                for add in additions:
                    if add not in removals and add.amount == 0:
                        my_cc_spends.add(add)
        if my_cc_spends == set() or my_cc_spends is None:
            return False, None, "insufficient funds"

        # One of our coins of this colour acts as the auditor.
        auditor = my_cc_spends.pop()
        auditor_inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(
            auditor.puzzle_hash
        )
        assert auditor_inner_puzzle is not None
        inner_hash = auditor_inner_puzzle.get_tree_hash()
        auditor_info = (
            auditor.parent_coin_info,
            inner_hash,
            auditor.amount,
        )
        core = cc_wallet_puzzles.cc_make_core(colour)
        parent_info = await wallets[colour].get_parent_for_coin(auditor)

        # Spend the rest of our coins of this colour into the auditor.
        for coloured_coin in my_cc_spends:
            inner_solution = self.wallet_state_manager.main_wallet.make_solution(
                consumed=[auditor.name()]
            )
            sig = await wallets[colour].get_sigs_for_innerpuz_with_innersol(
                await self.get_inner_puzzle_for_puzzle_hash(
                    coloured_coin.puzzle_hash
                ),
                inner_solution,
            )
            aggsig = AugSchemeMPL.aggregate([sig, aggsig])
            inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(
                coloured_coin.puzzle_hash
            )
            assert inner_puzzle is not None
            # auditees should be (primary_input, innerpuzhash, coin_amount, output_amount)
            auditees[colour].append(
                (
                    coloured_coin.parent_coin_info,
                    inner_puzzle.get_tree_hash(),
                    coloured_coin.amount,
                    0,
                )
            )
            solution = cc_wallet_puzzles.cc_make_solution(
                core,
                (
                    parent_info.parent_name,
                    parent_info.inner_puzzle_hash,
                    parent_info.amount,
                ),
                coloured_coin.amount,
                binutils.disassemble(inner_puzzle),
                binutils.disassemble(inner_solution),
                auditor_info,
                None,
            )
            coin_spend = CoinSolution(
                coloured_coin,
                Program.to(
                    [
                        cc_wallet_puzzles.cc_make_puzzle(
                            inner_puzzle.get_tree_hash(), core,
                        ),
                        solution,
                    ]
                ),
            )
            coinsols.append(coin_spend)
            ephemeral = cc_wallet_puzzles.create_spend_for_ephemeral(
                coloured_coin, auditor, 0
            )
            coinsols.append(ephemeral)
            # NOTE(review): this REBINDS ``auditor`` (a Coin) to a
            # CoinSolution; the code below still reads ``auditor.amount``
            # and ``auditor.parent_coin_info`` — this looks like it should
            # be a separate variable (e.g. ``aud``). Confirm against the
            # upstream implementation.
            auditor = cc_wallet_puzzles.create_spend_for_auditor(
                auditor, coloured_coin
            )
            coinsols.append(auditor)
        # Tweak the offer's solution to include the new auditor
        for cc_coinsol_out in cc_coinsol_outamounts[colour]:
            cc_coinsol = cc_coinsol_out[0]
            new_solution = cc_wallet_puzzles.update_auditors_in_solution(
                cc_coinsol.solution, auditor_info
            )
            new_coinsol = CoinSolution(cc_coinsol.coin, new_solution)
            coinsols.append(new_coinsol)
            eph = cc_wallet_puzzles.create_spend_for_ephemeral(
                cc_coinsol.coin, auditor, cc_coinsol_out[1]
            )
            coinsols.append(eph)
            aud = cc_wallet_puzzles.create_spend_for_auditor(
                auditor, cc_coinsol.coin
            )
            coinsols.append(aud)
        # Finish the auditor CoinSolution with new information
        newinnerpuzhash = await wallets[colour].get_new_inner_hash()
        outputamount = (
            sum([c.amount for c in my_cc_spends])
            + cc_discrepancies[colour]
            + auditor.amount
        )
        innersol = self.wallet_state_manager.main_wallet.make_solution(
            primaries=[{"puzzlehash": newinnerpuzhash, "amount": outputamount}]
        )
        parent_info = await wallets[colour].get_parent_for_coin(auditor)
        auditees[colour].append(
            (
                auditor.parent_coin_info,
                auditor_inner_puzzle.get_tree_hash(),
                auditor.amount,
                outputamount,
            )
        )
        sigs: List[G2Element] = await wallets[colour].get_sigs(
            auditor_inner_puzzle, innersol
        )
        aggsig = AugSchemeMPL.aggregate(sigs + [aggsig])
        solution = cc_wallet_puzzles.cc_make_solution(
            core,
            (
                parent_info.parent_name,
                parent_info.inner_puzzle_hash,
                parent_info.amount,
            ),
            auditor.amount,
            binutils.disassemble(auditor_inner_puzzle),
            binutils.disassemble(innersol),
            auditor_info,
            auditees[colour],
        )
        cs = CoinSolution(
            auditor,
            Program.to(
                [
                    cc_wallet_puzzles.cc_make_puzzle(
                        auditor_inner_puzzle.get_tree_hash(), core
                    ),
                    solution,
                ]
            ),
        )
        coinsols.append(cs)
        # NOTE(review): the next two calls are UNQUALIFIED, unlike every
        # other cc_wallet_puzzles.* call in this method — confirm both
        # names are imported directly, otherwise this raises NameError.
        cs_eph = create_spend_for_ephemeral(auditor, auditor, outputamount)
        coinsols.append(cs_eph)
        cs_aud = create_spend_for_auditor(auditor, auditor)
        coinsols.append(cs_aud)

    spend_bundle = SpendBundle(coinsols, aggsig)
    my_tx_records = []
    # NOTE(review): zero_spend_list is initialised to [] above, so this
    # condition is always true; aggregating the single-element list is a
    # no-op, but `if zero_spend_list:` was probably intended.
    if zero_spend_list is not None:
        zero_spend_list.append(spend_bundle)
        spend_bundle = SpendBundle.aggregate(zero_spend_list)

    # Add transaction history for this trade
    now = uint64(int(time.time()))
    if chia_spend_bundle is not None:
        spend_bundle = SpendBundle.aggregate([spend_bundle, chia_spend_bundle])
        if chia_discrepancy < 0:
            # We pay chia: record an outgoing transaction.
            tx_record = TransactionRecord(
                confirmed_at_index=uint32(0),
                created_at_time=now,
                to_puzzle_hash=token_bytes(),
                amount=uint64(abs(chia_discrepancy)),
                fee_amount=uint64(0),
                incoming=False,
                confirmed=False,
                sent=uint32(10),
                spend_bundle=chia_spend_bundle,
                additions=chia_spend_bundle.additions(),
                removals=chia_spend_bundle.removals(),
                wallet_id=uint32(1),
                sent_to=[],
                trade_id=std_hash(spend_bundle.name() + bytes(now)),
            )
        else:
            # We receive chia: record an incoming transaction.
            tx_record = TransactionRecord(
                confirmed_at_index=uint32(0),
                created_at_time=uint64(int(time.time())),
                to_puzzle_hash=token_bytes(),
                amount=uint64(abs(chia_discrepancy)),
                fee_amount=uint64(0),
                incoming=True,
                confirmed=False,
                sent=uint32(10),
                spend_bundle=chia_spend_bundle,
                additions=chia_spend_bundle.additions(),
                removals=chia_spend_bundle.removals(),
                wallet_id=uint32(1),
                sent_to=[],
                trade_id=std_hash(spend_bundle.name() + bytes(now)),
            )
        my_tx_records.append(tx_record)

    for colour, amount in cc_discrepancies.items():
        wallet = wallets[colour]
        # NOTE(review): chia_discrepancy may still be None here (offer with
        # no standard chia coins), and ``None > 0`` raises TypeError. It
        # also looks like this branch should key off ``amount`` (the CC
        # discrepancy), not the chia discrepancy — confirm upstream.
        if chia_discrepancy > 0:
            tx_record = TransactionRecord(
                confirmed_at_index=uint32(0),
                created_at_time=uint64(int(time.time())),
                to_puzzle_hash=token_bytes(),
                amount=uint64(abs(amount)),
                fee_amount=uint64(0),
                incoming=False,
                confirmed=False,
                sent=uint32(10),
                spend_bundle=spend_bundle,
                additions=spend_bundle.additions(),
                removals=spend_bundle.removals(),
                wallet_id=wallet.wallet_info.id,
                sent_to=[],
                trade_id=std_hash(spend_bundle.name() + bytes(now)),
            )
        else:
            tx_record = TransactionRecord(
                confirmed_at_index=uint32(0),
                created_at_time=uint64(int(time.time())),
                to_puzzle_hash=token_bytes(),
                amount=uint64(abs(amount)),
                fee_amount=uint64(0),
                incoming=True,
                confirmed=False,
                sent=uint32(10),
                spend_bundle=spend_bundle,
                additions=spend_bundle.additions(),
                removals=spend_bundle.removals(),
                wallet_id=wallet.wallet_info.id,
                sent_to=[],
                trade_id=std_hash(spend_bundle.name() + bytes(now)),
            )
        my_tx_records.append(tx_record)

    # Overall zero-amount record representing the whole trade bundle.
    tx_record = TransactionRecord(
        confirmed_at_index=uint32(0),
        created_at_time=uint64(int(time.time())),
        to_puzzle_hash=token_bytes(),
        amount=uint64(0),
        fee_amount=uint64(0),
        incoming=False,
        confirmed=False,
        sent=uint32(0),
        spend_bundle=spend_bundle,
        additions=spend_bundle.additions(),
        removals=spend_bundle.removals(),
        wallet_id=uint32(0),
        sent_to=[],
        trade_id=std_hash(spend_bundle.name() + bytes(now)),
    )

    now = uint64(int(time.time()))
    trade_record: TradeRecord = TradeRecord(
        confirmed_at_index=uint32(0),
        accepted_at_time=now,
        created_at_time=now,
        my_offer=False,
        sent=uint32(0),
        spend_bundle=offer_spend_bundle,
        tx_spend_bundle=spend_bundle,
        additions=spend_bundle.additions(),
        removals=spend_bundle.removals(),
        trade_id=std_hash(spend_bundle.name() + bytes(now)),
        status=uint32(TradeStatus.PENDING_CONFIRM.value),
        sent_to=[],
    )
    await self.save_trade(trade_record)
    await self.wallet_state_manager.add_pending_transaction(tx_record)
    for tx in my_tx_records:
        await self.wallet_state_manager.add_transaction(tx)
    return True, trade_record, None
| 20,847 | 11,395 | 23 |
f7f50e7055d5b6f3f0c8269186a5c673279e5136 | 11,839 | py | Python | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | custom_components/deepstack_face/image_processing.py | Hoellenwesen/HASS-Deepstack-face | 5ca4a75ec7aab092124edf77aa692c78bedb2a96 | [
"MIT"
] | null | null | null | """
Component that will perform facial recognition via deepstack.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.deepstack_face
"""
import io
import logging
import re
import time
from pathlib import Path

import requests
from PIL import Image, ImageDraw
from PIL import UnidentifiedImageError

import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.image_processing import (
    ATTR_CONFIDENCE,
    CONF_ENTITY_ID,
    CONF_NAME,
    CONF_SOURCE,
    PLATFORM_SCHEMA,
    ImageProcessingFaceEntity,
)
from homeassistant.const import (
    ATTR_ENTITY_ID,
    ATTR_NAME,
    CONF_IP_ADDRESS,
    CONF_PORT,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)

# rgb(red, green, blue) colour tuples usable for bounding boxes.
RED = (255, 0, 0) # For objects within the ROI
YELLOW = (255,255,0)
GREEN = (34,139,34)
BLUE = (0,0,255)

# Configuration option keys for this platform.
CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"
CONF_DETECT_ONLY = "detect_only"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SAVE_FACES_FOLDER = "save_faces_folder"
CONF_SAVE_FACES = "save_faces"
CONF_SHOW_BOXES = "show_boxes"
CONF_BOX_COLOR = "box_color"

# Timestamp format used in saved-file names and state attributes.
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10

DOMAIN = "deepstack_face"
CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"  # hass.data key holding all entities
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "teach_face"

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_IP_ADDRESS): cv.string,
        vol.Required(CONF_PORT): cv.port,
        vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
        vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
        vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir,
        vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean,
        vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
        # NOTE(review): the default is an RGB tuple but the validator is
        # cv.string, and save_image() calls .upper() on the value —
        # confirm which type is actually expected.
        vol.Optional(CONF_BOX_COLOR, default=RED): cv.string,
    }
)

# Schema for the teach_face service call.
SERVICE_TEACH_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_NAME): cv.string,
        vol.Required(FILE_PATH): cv.string,
    }
)
def get_faces(predictions: list, img_width: int, img_height: int):
    """Return faces with formatting for annotating images.

    Bounding box coordinates are normalised to the image dimensions and
    rounded to three decimal places; confidence is expressed in percent.
    """
    dp = 3  # rounding precision for all normalised values
    faces = []
    for pred in predictions:
        # Detection-only results carry no "userid" key.
        name = pred["userid"] if "userid" in pred.keys() else "unknown"
        faces.append(
            {
                "name": name,
                "confidence": round(pred["confidence"] * 100, dp),
                "bounding_box": {
                    "height": round((pred["y_max"] - pred["y_min"]) / img_height, dp),
                    "width": round((pred["x_max"] - pred["x_min"]) / img_width, dp),
                    "y_min": round(pred["y_min"] / img_height, dp),
                    "x_min": round(pred["x_min"] / img_width, dp),
                    "y_max": round(pred["y_max"] / img_height, dp),
                    "x_max": round(pred["x_max"] / img_width, dp),
                },
                "prediction": pred,
            }
        )
    return faces
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    if DATA_DEEPSTACK not in hass.data:
        hass.data[DATA_DEEPSTACK] = []

    def _as_path(value):
        # Wrap a configured folder in a Path; leave unset values untouched.
        return Path(value) if value else value

    save_file_folder = _as_path(config.get(CONF_SAVE_FILE_FOLDER))
    save_faces_folder = _as_path(config.get(CONF_SAVE_FACES_FOLDER))

    entities = []
    for camera in config[CONF_SOURCE]:
        face_entity = FaceClassifyEntity(
            config[CONF_IP_ADDRESS],
            config[CONF_PORT],
            config.get(CONF_API_KEY),
            config.get(CONF_TIMEOUT),
            config.get(CONF_DETECT_ONLY),
            save_file_folder,
            config.get(CONF_SAVE_TIMESTAMPTED_FILE),
            save_faces_folder,
            config.get(CONF_SAVE_FACES),
            config[CONF_SHOW_BOXES],
            config.get(CONF_BOX_COLOR),
            camera[CONF_ENTITY_ID],
            camera.get(CONF_NAME),
        )
        entities.append(face_entity)
        # Keep a global registry so the teach_face service can reach us.
        hass.data[DATA_DEEPSTACK].append(face_entity)
    add_devices(entities)

    def service_handle(service):
        """Handle for services."""
        entity_ids = service.data.get("entity_id")
        targets = hass.data[DATA_DEEPSTACK]
        if entity_ids:
            targets = [c for c in targets if c.entity_id in entity_ids]
        for classifier in targets:
            classifier.teach(
                service.data.get(ATTR_NAME), service.data.get(FILE_PATH)
            )

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
    )
class FaceClassifyEntity(ImageProcessingFaceEntity):
    """Perform a face classification."""

    def __init__(
        self,
        ip_address,
        port,
        api_key,
        timeout,
        detect_only,
        save_file_folder,
        save_timestamped_file,
        save_faces_folder,
        save_faces,
        show_boxes,
        box_color,
        camera_entity,
        name=None,
    ):
        """Init with the API key and model id."""
        super().__init__()
        # Client for the deepstack face endpoint.
        self._dsface = ds.DeepstackFace(
            ip=ip_address, port=port, api_key=api_key, timeout=timeout
        )
        self._detect_only = detect_only
        self._show_boxes = show_boxes
        self._box_color = box_color
        self._last_detection = None
        self._save_file_folder = save_file_folder
        self._save_timestamped_file = save_timestamped_file
        self._save_faces_folder = save_faces_folder
        self._save_faces = save_faces
        self._camera = camera_entity
        if name:
            self._name = name
        else:
            camera_name = split_entity_id(camera_entity)[1]
            self._name = "{} {}".format(CLASSIFIER, camera_name)
        self._predictions = []
        self._matched = {}
        self.total_faces = None

    def process_image(self, image):
        """Process an image, comes in as bytes."""
        self._predictions = []
        self._matched = {}
        self.total_faces = None

        try:
            pil_image = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
        # Fix: UnidentifiedImageError was referenced but never imported,
        # which turned bad image data into a NameError. It is now imported
        # from PIL at the top of the module.
        except UnidentifiedImageError:
            _LOGGER.warning("Deepstack unable to process image, bad data")
            return
        image_width, image_height = pil_image.size

        try:
            if self._detect_only:
                self._predictions = self._dsface.detect(image)
            else:
                self._predictions = self._dsface.recognize(image)
        except ds.DeepstackException as exc:
            _LOGGER.error("Depstack error : %s", exc)
            return

        if len(self._predictions) > 0:
            self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
            self.total_faces = len(self._predictions)
            self._matched = ds.get_recognized_faces(self._predictions)
            self.faces = get_faces(self._predictions, image_width, image_height)
            self.process_faces(
                self.faces, self.total_faces,
            )  # fire image_processing.detect_face
            if not self._detect_only:
                if self._save_faces and self._save_faces_folder:
                    self.save_faces(
                        pil_image, self._save_faces_folder
                    )
                if self._save_file_folder:
                    self.save_image(
                        pil_image, self._save_file_folder,
                    )
        else:
            self.total_faces = None
            self._matched = {}

    def teach(self, name: str, file_path: str):
        """Teach classifier a face name."""
        # Home Assistant must whitelist the path before we read it.
        if not self.hass.config.is_allowed_path(file_path):
            return
        with open(file_path, "rb") as image:
            self._dsface.register(name, image)
            _LOGGER.info("Depstack face taught name : %s", name)
        event_data = {
            "person_name": name,
            "file_path": file_path,
        }
        self.hass.bus.async_fire(f"{DOMAIN}_teach_face", event_data)

    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Ensure consistent state."""
        return self.total_faces

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    @property
    def force_update(self):
        """Force update to fire state events even if state has not changed."""
        return True

    @property
    def extra_state_attributes(self):
        """Return the classifier attributes."""
        attr = {}
        if self._detect_only:
            attr[CONF_DETECT_ONLY] = self._detect_only
        if not self._detect_only:
            attr["total_matched_faces"] = len(self._matched)
            attr["matched_faces"] = self._matched
        if self._last_detection:
            attr["last_detection"] = self._last_detection
        return attr

    def save_faces(self, pil_image: Image, directory: Path):
        """Saves recognized faces."""
        for face in self.faces:
            box = face["prediction"]
            # The original bound both ``name`` and ``face_name`` to the
            # same value; keep just one.
            face_name = face["name"]
            confidence = face["confidence"]
            cropped_image = pil_image.crop(
                (box["x_min"], box["y_min"], box["x_max"], box["y_max"])
            )
            timestamp_save_path = (
                directory
                / f"{face_name}_{confidence:.1f}_{self._last_detection}.jpg"
            )
            cropped_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved face %s", timestamp_save_path)

    def save_image(self, pil_image: Image, directory: Path):
        """Draws the actual bounding box of the detected objects."""
        image_width, image_height = pil_image.size
        draw = ImageDraw.Draw(pil_image)
        if self._show_boxes:
            for face in self.faces:
                name = face["name"]
                confidence = face["confidence"]
                box = face["bounding_box"]
                box_label = f"{name}: {confidence:.1f}%"
                # NOTE(review): box_color defaults to an RGB tuple but is
                # upper()-cased here as if it were a colour-name string —
                # confirm the configured type.
                draw_box(
                    draw,
                    (box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
                    image_width,
                    image_height,
                    text=box_label,
                    color=self._box_color.upper(),
                )
        # Fix: the original called get_valid_filename(), which is not
        # defined in this module (NameError); sanitize the name inline.
        valid_name = re.sub(
            r"(?u)[^-\w.]", "", str(self._name).strip().replace(" ", "_")
        )
        latest_save_path = directory / f"{valid_name.lower()}_latest.jpg"
        pil_image.save(latest_save_path)

        if self._save_timestamped_file:
            timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
            pil_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved file %s", timestamp_save_path)
| 32.70442 | 104 | 0.62235 | """
Component that will perform facial recognition via deepstack.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.deepstack_face
"""
import io
import logging
import re
import time
from pathlib import Path
import requests
from PIL import Image, ImageDraw
import deepstack.core as ds
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
import homeassistant.util.dt as dt_util
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_IP_ADDRESS,
CONF_PORT,
)
from homeassistant.core import split_entity_id
_LOGGER = logging.getLogger(__name__)
# rgb(red, green, blue)
RED = (255, 0, 0) # For objects within the ROI
YELLOW = (255,255,0)
GREEN = (34,139,34)
BLUE = (0,0,255)
CONF_API_KEY = "api_key"
CONF_TIMEOUT = "timeout"
CONF_DETECT_ONLY = "detect_only"
CONF_SAVE_FILE_FOLDER = "save_file_folder"
CONF_SAVE_TIMESTAMPTED_FILE = "save_timestamped_file"
CONF_SAVE_FACES_FOLDER = "save_faces_folder"
CONF_SAVE_FACES = "save_faces"
CONF_SHOW_BOXES = "show_boxes"
CONF_BOX_COLOR = "box_color"
DATETIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
DEFAULT_API_KEY = ""
DEFAULT_TIMEOUT = 10
DOMAIN = "deepstack_face"
CLASSIFIER = "deepstack_face"
DATA_DEEPSTACK = "deepstack_classifiers"
FILE_PATH = "file_path"
SERVICE_TEACH_FACE = "teach_face"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_API_KEY, default=DEFAULT_API_KEY): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_DETECT_ONLY, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FILE_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_TIMESTAMPTED_FILE, default=False): cv.boolean,
vol.Optional(CONF_SAVE_FACES_FOLDER): cv.isdir,
vol.Optional(CONF_SAVE_FACES, default=False): cv.boolean,
vol.Optional(CONF_SHOW_BOXES, default=True): cv.boolean,
vol.Optional(CONF_BOX_COLOR, default=RED): cv.string,
}
)
SERVICE_TEACH_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(FILE_PATH): cv.string,
}
)
def get_valid_filename(name: str) -> str:
    """Return *name* sanitized for use as a filename.

    Leading/trailing whitespace is stripped, internal spaces become
    underscores, and any character other than word characters, dashes
    and dots is removed.
    """
    return re.sub(r"(?u)[^-\w.]", "", str(name).strip().replace(" ", "_"))
def get_faces(predictions: list, img_width: int, img_height: int):
    """Return faces with formatting for annotating images.

    Bounding box coordinates are normalised to the image dimensions and
    rounded to three decimal places; confidence is expressed in percent.
    """
    dp = 3  # rounding precision for all normalised values
    faces = []
    for pred in predictions:
        # Detection-only results carry no "userid" key.
        name = pred["userid"] if "userid" in pred.keys() else "unknown"
        faces.append(
            {
                "name": name,
                "confidence": round(pred["confidence"] * 100, dp),
                "bounding_box": {
                    "height": round((pred["y_max"] - pred["y_min"]) / img_height, dp),
                    "width": round((pred["x_max"] - pred["x_min"]) / img_width, dp),
                    "y_min": round(pred["y_min"] / img_height, dp),
                    "x_min": round(pred["x_min"] / img_width, dp),
                    "y_max": round(pred["y_max"] / img_height, dp),
                    "x_max": round(pred["x_max"] / img_width, dp),
                },
                "prediction": pred,
            }
        )
    return faces
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the classifier."""
    if DATA_DEEPSTACK not in hass.data:
        hass.data[DATA_DEEPSTACK] = []

    def _as_path(value):
        # Wrap a configured folder in a Path; leave unset values untouched.
        return Path(value) if value else value

    save_file_folder = _as_path(config.get(CONF_SAVE_FILE_FOLDER))
    save_faces_folder = _as_path(config.get(CONF_SAVE_FACES_FOLDER))

    entities = []
    for camera in config[CONF_SOURCE]:
        face_entity = FaceClassifyEntity(
            config[CONF_IP_ADDRESS],
            config[CONF_PORT],
            config.get(CONF_API_KEY),
            config.get(CONF_TIMEOUT),
            config.get(CONF_DETECT_ONLY),
            save_file_folder,
            config.get(CONF_SAVE_TIMESTAMPTED_FILE),
            save_faces_folder,
            config.get(CONF_SAVE_FACES),
            config[CONF_SHOW_BOXES],
            config.get(CONF_BOX_COLOR),
            camera[CONF_ENTITY_ID],
            camera.get(CONF_NAME),
        )
        entities.append(face_entity)
        # Keep a global registry so the teach_face service can reach us.
        hass.data[DATA_DEEPSTACK].append(face_entity)
    add_devices(entities)

    def service_handle(service):
        """Handle for services."""
        entity_ids = service.data.get("entity_id")
        targets = hass.data[DATA_DEEPSTACK]
        if entity_ids:
            targets = [c for c in targets if c.entity_id in entity_ids]
        for classifier in targets:
            classifier.teach(
                service.data.get(ATTR_NAME), service.data.get(FILE_PATH)
            )

    hass.services.register(
        DOMAIN, SERVICE_TEACH_FACE, service_handle, schema=SERVICE_TEACH_SCHEMA
    )
class FaceClassifyEntity(ImageProcessingFaceEntity):
    """Perform a face classification.

    Sends camera frames to a Deepstack server for face detection and,
    unless ``detect_only`` is set, recognition. Fires the standard
    image_processing events and can optionally save annotated frames
    and cropped face images to disk.
    """

    def __init__(
        self,
        ip_address,
        port,
        api_key,
        timeout,
        detect_only,
        save_file_folder,
        save_timestamped_file,
        save_faces_folder,
        save_faces,
        show_boxes,
        box_color,
        camera_entity,
        name=None,
    ):
        """Init with the API key and model id."""
        super().__init__()
        # Deepstack client used for the detect/recognize/register calls.
        self._dsface = ds.DeepstackFace(
            ip=ip_address, port=port, api_key=api_key, timeout=timeout
        )
        self._detect_only = detect_only  # True: detection only, skip recognition
        self._show_boxes = show_boxes    # draw bounding boxes on saved frames
        self._box_color = box_color
        self._last_detection = None      # formatted timestamp of the last hit
        self._save_file_folder = save_file_folder            # annotated frames dir (or falsy)
        self._save_timestamped_file = save_timestamped_file  # also keep timestamped copies
        self._save_faces_folder = save_faces_folder          # cropped faces dir (or falsy)
        self._save_faces = save_faces
        self._camera = camera_entity
        if name:
            self._name = name
        else:
            # Default friendly name: "<classifier> <camera object id>".
            camera_name = split_entity_id(camera_entity)[1]
            self._name = "{} {}".format(CLASSIFIER, camera_name)
        self._predictions = []   # raw Deepstack predictions for the last frame
        self._matched = {}       # recognized name -> confidence for the last frame
        self.total_faces = None  # doubles as the entity state

    def process_image(self, image):
        """Process an image, comes in as bytes."""
        # Reset per-frame state so stale results never leak between frames.
        self._predictions = []
        self._matched = {}
        self.total_faces = None
        try:
            pil_image = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
        except UnidentifiedImageError:
            _LOGGER.warning("Deepstack unable to process image, bad data")
            return
        image_width, image_height = pil_image.size
        try:
            if self._detect_only:
                self._predictions = self._dsface.detect(image)
            else:
                self._predictions = self._dsface.recognize(image)
        except ds.DeepstackException as exc:
            _LOGGER.error("Depstack error : %s", exc)
            return
        if len(self._predictions) > 0:
            self._last_detection = dt_util.now().strftime(DATETIME_FORMAT)
            self.total_faces = len(self._predictions)
            self._matched = ds.get_recognized_faces(self._predictions)
            # NOTE: self.faces is only created here, so the save_* helpers
            # must not be called before the first successful detection.
            self.faces = get_faces(self._predictions, image_width, image_height)
            self.process_faces(
                self.faces, self.total_faces,
            )  # fire image_processing.detect_face
            if not self._detect_only:
                if self._save_faces and self._save_faces_folder:
                    self.save_faces(
                        pil_image, self._save_faces_folder
                    )
            if self._save_file_folder:
                self.save_image(
                    pil_image, self._save_file_folder,
                )
        else:
            # No faces in this frame: clear the reported state.
            self.total_faces = None
            self._matched = {}

    def teach(self, name: str, file_path: str):
        """Teach classifier a face name."""
        # Refuse paths outside the allow-listed Home Assistant directories.
        if not self.hass.config.is_allowed_path(file_path):
            return
        with open(file_path, "rb") as image:
            self._dsface.register(name, image)
            _LOGGER.info("Depstack face taught name : %s", name)
            event_data = {
                "person_name": name,
                "file_path": file_path,
            }
            self.hass.bus.async_fire(f"{DOMAIN}_teach_face", event_data)

    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Ensure consistent state."""
        return self.total_faces

    @property
    def should_poll(self):
        """Return the polling state."""
        # Updates are pushed from process_image(); no polling needed.
        return False

    @property
    def force_update(self):
        """Force update to fire state events even if state has not changed."""
        return True

    @property
    def extra_state_attributes(self):
        """Return the classifier attributes."""
        attr = {}
        if self._detect_only:
            attr[CONF_DETECT_ONLY] = self._detect_only
        if not self._detect_only:
            # Recognition results are only meaningful when recognition ran.
            attr["total_matched_faces"] = len(self._matched)
            attr["matched_faces"] = self._matched
        if self._last_detection:
            attr["last_detection"] = self._last_detection
        return attr

    def save_faces(self, pil_image: Image, directory: Path):
        """Saves recognized faces."""
        for face in self.faces:
            # NOTE(review): the sibling save_image() reads the box from
            # face["bounding_box"], but here it is read from
            # face["prediction"] — confirm which key get_faces() actually
            # emits, and that the coordinates are pixels (crop() requires
            # pixel coordinates).
            box = face["prediction"]
            name = face["name"]  # unused; face_name below is used instead
            confidence = face["confidence"]
            face_name = face["name"]
            cropped_image = pil_image.crop(
                (box["x_min"], box["y_min"], box["x_max"], box["y_max"])
            )
            timestamp_save_path = directory / f"{face_name}_{confidence:.1f}_{self._last_detection}.jpg"
            cropped_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved face %s", timestamp_save_path)

    def save_image(self, pil_image: Image, directory: Path):
        """Draws the actual bounding box of the detected objects."""
        image_width, image_height = pil_image.size
        draw = ImageDraw.Draw(pil_image)
        for face in self.faces:
            if not self._show_boxes:
                # Box drawing disabled: skip straight to saving the frame.
                break
            name = face["name"]
            confidence = face["confidence"]
            box = face["bounding_box"]
            box_label = f"{name}: {confidence:.1f}%"
            box_color = self._box_color
            draw_box(
                draw,
                (box["y_min"], box["x_min"], box["y_max"], box["x_max"]),
                image_width,
                image_height,
                text=box_label,
                color=box_color.upper(),
            )
        # Always overwrite the "latest" snapshot for this camera.
        latest_save_path = (
            directory / f"{get_valid_filename(self._name).lower()}_latest.jpg"
        )
        pil_image.save(latest_save_path)
        if self._save_timestamped_file:
            timestamp_save_path = directory / f"{self._name}_{self._last_detection}.jpg"
            pil_image.save(timestamp_save_path)
            _LOGGER.info("Deepstack saved file %s", timestamp_save_path)
| 95 | 0 | 23 |
1b921c3ff17a4cdbe911e15e801fa86ade4e10f0 | 4,574 | py | Python | RJ's Toolbox/Custom_Transformers.py | rayjustinhuang/Data-Analysis-and-Machine-Learning | 469a6e96566b935dc71f6589d8f070fcc12e76ec | [
"MIT"
] | null | null | null | RJ's Toolbox/Custom_Transformers.py | rayjustinhuang/Data-Analysis-and-Machine-Learning | 469a6e96566b935dc71f6589d8f070fcc12e76ec | [
"MIT"
] | null | null | null | RJ's Toolbox/Custom_Transformers.py | rayjustinhuang/Data-Analysis-and-Machine-Learning | 469a6e96566b935dc71f6589d8f070fcc12e76ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 07:35:15 2018
@author: Ray Justin O. Huang
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import string
# PerColumnAttributesAdder
# Used to quickly add columns that are fractions of other columns
# StringCaseChanger
# Used to change the case of a column that contains strings
# Randomizer
# Used to randomize the number values in columns by multiplying with a random number between 0.5 and 1.5
# StringCleaner
# Used to clean columns containing strings
# GroupAggregator
# Used to add aggregate statistics to a dataframe | 33.881481 | 104 | 0.594884 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 5 07:35:15 2018
@author: Ray Justin O. Huang
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import string
# PerColumnAttributesAdder
# Used to quickly add columns that are fractions of other columns
class PerColumnAttributesAdder(BaseEstimator, TransformerMixin):
    """Append ratio columns: each numerator divided by a denominator column.

    For every column in ``numerator_columns`` a new column named
    "<numerator>_per_<denominator>" is added to the output frame.
    """

    def __init__(self, denominator_column, numerator_columns):
        self.denominator_column = denominator_column
        self.numerator_columns = numerator_columns

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        denominator = self.denominator_column
        # Names of the derived columns, kept on the instance as before.
        self.newcolumns = ["{}_per_{}".format(numerator, denominator)
                           for numerator in self.numerator_columns]
        frame = X.copy()
        for numerator, derived in zip(self.numerator_columns, self.newcolumns):
            frame[derived] = frame[numerator] / frame[denominator]
        return frame
# StringCaseChanger
# Used to change the case of a column that contains strings
class StringCaseChanger(BaseEstimator, TransformerMixin):
    """Change the case of string columns ('upper', 'lower' or 'title').

    Any other ``case`` value leaves the columns untouched.
    """

    def __init__(self, cols, case='upper'):
        self.case = case
        self.cols = cols

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        frame = X.copy()
        if self.case in ('upper', 'lower', 'title'):
            for column in self.cols:
                # Dispatch to Series.str.upper/lower/title by name.
                frame[column] = getattr(frame[column].str, self.case)()
        return frame
# Randomizer
# Used to randomize the number values in columns by multiplying with a random number between 0.5 and 1.5
class Randomizer(BaseEstimator, TransformerMixin):
    """Multiply numeric columns by uniform random factors in [0.5, 1.5).

    With ``added_cols=True`` the results are written to new
    "randomized_<col>" columns instead of overwriting the originals;
    with ``integers=True`` the results are rounded to whole numbers.
    """

    def __init__(self, cols, added_cols=False, integers=False, random_state=47):
        self.cols = cols                  # columns to randomize
        self.added_cols = added_cols      # True: add new columns, keep originals
        self.integers = integers          # True: round results to whole numbers
        self.random_state = random_state  # seed for reproducible noise

    def fit(self, X, y=None):
        # The noise matrix is drawn once at fit time so that repeated
        # transform() calls apply the same factors.
        self.rgen = np.random.RandomState(self.random_state)
        self.rows = X[self.cols].shape[0]
        self.columns = len(self.cols)
        # rand() is uniform on [0, 1); adding 0.5 shifts it to [0.5, 1.5).
        self.randomizercols = self.rgen.rand(self.rows, self.columns) + 0.5
        return self

    def transform(self, X, y=None):
        # NOTE(review): assumes X has the same number of rows as the frame
        # passed to fit(); otherwise the noise matrix will not broadcast.
        df = X.copy()
        if not self.added_cols:
            # Overwrite the original columns in place.
            if not self.integers:
                df[self.cols] = df[self.cols]*self.randomizercols
                return df
            else:
                df[self.cols] = np.rint(df[self.cols]*self.randomizercols)
                return df
        else:
            # Keep the originals and add "randomized_*" companion columns;
            # the new columns are also kept on the instance as self.newcols.
            if not self.integers:
                self.newcol_names = ["randomized_"+_ for _ in self.cols]
                df[self.newcol_names] = df[self.cols]*self.randomizercols
                self.newcols = df[self.newcol_names]
                # self.newcols = pd.DataFrame(self.newcols)
                # df = pd.concat([df, df[self.newcols], axis=1)
                return df
            else:
                self.newcol_names = ["randomized_"+_ for _ in self.cols]
                df[self.newcol_names] = np.rint(df[self.cols]*self.randomizercols)
                self.newcols = df[self.newcol_names]
                return df
# StringCleaner
# Used to clean columns containing strings
class StringCleaner(BaseEstimator, TransformerMixin):
    """Strip punctuation and surrounding whitespace, then normalise case.

    Parameters
    ----------
    cols : str or list of str
        Column label (or labels) to clean. The original implementation
        only worked for a single label, because a DataFrame selection
        (``df[list_of_cols]``) has no ``.str`` accessor; a list of
        labels is now handled by cleaning each column in turn.
    case : str, default 'lower'
        'upper' or 'title'; any other value lower-cases the text.
    """

    def __init__(self, cols, case='lower'):
        self.cols = cols
        self.case = case

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        df = X.copy()
        # Accept a single column label as well as a list of labels.
        columns = [self.cols] if isinstance(self.cols, str) else list(self.cols)
        # Regex character class matching any ASCII punctuation character.
        # regex=True is passed explicitly because the pandas default for
        # Series.str.replace changed to literal matching in pandas 2.0.
        pattern = '[{}]'.format(string.punctuation)
        for col in columns:
            cleaned = df[col].str.replace(pattern, '', regex=True)
            cleaned = cleaned.str.strip()
            if self.case == 'upper':
                cleaned = cleaned.str.upper()
            elif self.case == 'title':
                cleaned = cleaned.str.title()
            else:
                cleaned = cleaned.str.lower()
            df[col] = cleaned
        return df
# GroupAggregator
# Used to add aggregate statistics to a dataframe
class GroupAggregator(BaseEstimator, TransformerMixin):
    """Replace each row with the aggregate of its group.

    Groups ``X`` by ``groupby_col`` and broadcasts ``operation`` (any
    groupby-transform compatible callable; np.sum by default) back onto
    every row of the group.
    """

    def __init__(self, groupby_col, operation=np.sum):
        self.groupby_col = groupby_col
        self.operation = operation

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        working_copy = X.copy()
        return working_copy.groupby(self.groupby_col).transform(self.operation)
aa4a25de89605ee49512c8c34c6d4af4a44dfe52 | 153 | py | Python | abc/160/b/answer.py | TakuyaNoguchi/atcoder | d079402e6fe9c9aaf3a6fc9272331ee71fc497da | [
"MIT"
] | null | null | null | abc/160/b/answer.py | TakuyaNoguchi/atcoder | d079402e6fe9c9aaf3a6fc9272331ee71fc497da | [
"MIT"
] | null | null | null | abc/160/b/answer.py | TakuyaNoguchi/atcoder | d079402e6fe9c9aaf3a6fc9272331ee71fc497da | [
"MIT"
] | null | null | null | X = int(input())
five_hundread_yen_coin_num = X // 500
five_yen_coin_num = X % 500 // 5
print(five_hundread_yen_coin_num * 1000 + five_yen_coin_num * 5) | 30.6 | 64 | 0.745098 | X = int(input())
five_hundread_yen_coin_num = X // 500
five_yen_coin_num = X % 500 // 5
print(five_hundread_yen_coin_num * 1000 + five_yen_coin_num * 5) | 0 | 0 | 0 |
50cbee2c40c1d7f86fbe76a4cb3b4530f01131b3 | 1,967 | py | Python | language/views.py | hucandu/babbel | 97fd194e9cee3f527343b8f2479322ff79d38552 | [
"MIT"
] | null | null | null | language/views.py | hucandu/babbel | 97fd194e9cee3f527343b8f2479322ff79d38552 | [
"MIT"
] | null | null | null | language/views.py | hucandu/babbel | 97fd194e9cee3f527343b8f2479322ff79d38552 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import LanguageListSerializer, LanguageActionSerializer
from rest_framework import status
@api_view(['GET', 'POST', 'DELETE'])
@api_view(['PUT', 'PATCH', 'DELETE'])
| 43.711111 | 101 | 0.70666 | from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import LanguageListSerializer, LanguageActionSerializer
from rest_framework import status
@api_view(['GET', 'POST', 'DELETE'])
def language_list(request):
    """List all languages, create one, or bulk-delete them."""
    method = request.method
    if method == "GET":
        languages = LanguageListSerializer().list()
        payload = LanguageListSerializer(languages, many=True).data
        return Response(payload, status=status.HTTP_200_OK)
    if method == "POST":
        serializer = LanguageListSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    if method == "DELETE":
        LanguageListSerializer().delete()
        return Response({"success": True}, status=status.HTTP_200_OK)
@api_view(['PUT', 'PATCH', 'DELETE'])
def language_action(request, id):
    """Update (fully or partially) or delete a single language."""
    if request.method in ("PUT", "PATCH"):
        # PATCH is a partial update; PUT requires the full payload
        # (partial=False is the serializer default for PUT).
        serializer = LanguageActionSerializer(
            data=request.data,
            context={"id": id},
            partial=(request.method == "PATCH"),
        )
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
    if request.method == "DELETE":
        serializer = LanguageActionSerializer(data=request.data)
        serializer.delete(id)
        return Response({"success": True}, status=status.HTTP_200_OK)
94d9cbc12fc1cb7e7c2dc2e1315b11d5a8e431bd | 1,185 | py | Python | parlai/agents/programr/parser/template/nodes/formal.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | parlai/agents/programr/parser/template/nodes/formal.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | parlai/agents/programr/parser/template/nodes/formal.py | roholazandie/ParlAI | 32352cab81ecb666aefd596232c5ed9f33cbaeb9 | [
"MIT"
] | null | null | null | from parlai.agents.programr.parser.template.nodes.base import TemplateNode
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
#######################################################################################################
# <formal>ABC</formal>
| 30.384615 | 107 | 0.598312 | from parlai.agents.programr.parser.template.nodes.base import TemplateNode
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
class TemplateFormalNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve_to_string(self, brain):
resolved = self.resolve_children_to_string(brain)
return resolved.title()
def resolve(self, brain):
try:
return self.resolve_to_string(brain)
except Exception as excep:
# YLogger.exception(brain, "Failed to resolve", excep)
logging.error( f"Failed to resolve {excep}")
return ""
def to_string(self):
return "FORMAL"
def to_xml(self, client_context):
xml = "<formal>"
xml += self.children_to_xml(client_context)
xml += "</formal>"
return xml
#######################################################################################################
# <formal>ABC</formal>
def add_default_star(self):
return True
def parse_expression(self, graph, expression):
self._parse_node(graph, expression)
| 638 | 18 | 212 |
b18a7c41b051afd662346c840d7f7b3e16bbda88 | 3,844 | py | Python | home.py | prakhar085/LIbrary-Management-System | 1c2edb270f8094411a6e3e160148a8289dd1ae1a | [
"Apache-2.0"
] | null | null | null | home.py | prakhar085/LIbrary-Management-System | 1c2edb270f8094411a6e3e160148a8289dd1ae1a | [
"Apache-2.0"
] | null | null | null | home.py | prakhar085/LIbrary-Management-System | 1c2edb270f8094411a6e3e160148a8289dd1ae1a | [
"Apache-2.0"
] | null | null | null | import mysql.connector
import Admin
import lib_users
# Connect to the local MySQL server that hosts the library database.
# NOTE(review): credentials are hard-coded in source; move to config/env.
lib_database = mysql.connector.connect(
    host="localhost",
    user="root",
    password="PRA085@dik", #Use your server password.
    database='library_management_system',
)

##creating tables..
# One-time schema setup, kept here (commented out) for reference.
'''
my_cursor = lib_database.cursor()
# 1: Books (columns:- [author + title(author|title), quantity, price]
# 2: user (columns:- [username, first_name, last_name, phone_no, password]
# 3: users_who_borrowed_book: [username, authtit, date, price]
# 4: admin (columns:- [username, phone_no. password]
table_admin = "Create table admin(username varchar(255), phone_no varchar(20), password varchar(255))"
table_book = "Create table books1(authtit varchar(255), quantity int(255), price int(100))"
table_user = "Create table user(username varchar(255), first_name varchar(255), last_name varchar(255), phone_no varchar(20), password varchar(255))"
table_users_who_borrowed_book_3 = "Create table users_who_borrowed_book_3(username varchar(255), authtit varchar(255), date varchar(100), price int(100))"
my_cursor.execute(table_book)
my_cursor.execute(table_user)
my_cursor.execute(table_users_who_borrowed_book_3)
my_cursor.execute(table_admin)
'''

# Facade objects wrapping the admin-side and customer-side operations.
admin = Admin.Admin(lib_database)
users = lib_users.Library_users(lib_database)

# Top-level menu loop: 1 = admin section, 2 = customer section, 0 = quit.
# NOTE(review): int(input()) raises ValueError on non-numeric input.
while True:
    k = int(input("Press 1 for admin section \n"
                  "Press 2 for customer section \n"
                  "Press 0 for exit: \n"))
    if k == 0:
        exit()
    ##proceeding in admin section
    if k == 1:
        kk = int(input("Press 1 for admin registration \n"
                       "Press 2 for adding books \n"
                       "Press 3 for customer registration \n"
                       "Press 4 for main menu \n"
                       "Press 0 for exit: \n"))
        if kk == 0:
            exit()
        if kk == 2:
            # Add a new title (author + title + stock + daily price).
            author = input("Enter author name: ")
            title = input("Enter title name: ")
            quantity = int(input("Enter quantity: "))
            price = int(input("Enter the price for one day: "))
            admin.add_books(author, title, quantity, price)
            continue
        elif kk == 1:
            # Register a new administrator account.
            username = input("Enter username: ")
            phone_no = input("Enter phone number: ")
            password = input("Enter password: ")
            admin.add_admin(username, phone_no, password)
            continue
        elif kk == 3:
            # Register a customer on the admin's behalf.
            username = input("Enter username: ")
            first_name = input("Enter first name: ")
            last_name = input("Enter last name: ")
            phone_no = input("Enter mobile number: ")
            password = input("Enter password: ")
            admin.add_user(username, first_name, last_name, phone_no, password)
            continue
        elif kk == 4:
            # Back to the main menu.
            continue
        else:
            print("Not valid input")
            continue
    elif k == 2:
        kk = int(input("Press 1 for user registration \n"
                       "Press 2 for borrowing books \n"
                       "Press 3 for returning book \n"
                       "Press 4 for main menu \n"
                       "Press 0 for exit: \n"))
        if kk == 0:
            exit()
        if kk == 1:
            # Self-service customer registration.
            username = input("Enter username: ")
            first_name = input("Enter first name: ")
            last_name = input("Enter last name: ")
            phone_no = input("Enter mobile number: ")
            password = input("Enter password: ")
            users.add_user(username, first_name, last_name, phone_no, password)
            continue
        if kk == 2:
            users.borrow()
            continue
        if kk == 3:
            users.Return_book()
            continue
        elif kk == 4:
            # Back to the main menu.
            continue
        else:
            print("Not valid input ")
            continue
| 38.828283 | 154 | 0.576743 | import mysql.connector
import Admin
import lib_users
# Connect to the local MySQL server that hosts the library database.
# NOTE(review): credentials are hard-coded in source; move to config/env.
lib_database = mysql.connector.connect(
    host="localhost",
    user="root",
    password="PRA085@dik", #Use your server password.
    database='library_management_system',
)

##creating tables..
# One-time schema setup, kept here (commented out) for reference.
'''
my_cursor = lib_database.cursor()
# 1: Books (columns:- [author + title(author|title), quantity, price]
# 2: user (columns:- [username, first_name, last_name, phone_no, password]
# 3: users_who_borrowed_book: [username, authtit, date, price]
# 4: admin (columns:- [username, phone_no. password]
table_admin = "Create table admin(username varchar(255), phone_no varchar(20), password varchar(255))"
table_book = "Create table books1(authtit varchar(255), quantity int(255), price int(100))"
table_user = "Create table user(username varchar(255), first_name varchar(255), last_name varchar(255), phone_no varchar(20), password varchar(255))"
table_users_who_borrowed_book_3 = "Create table users_who_borrowed_book_3(username varchar(255), authtit varchar(255), date varchar(100), price int(100))"
my_cursor.execute(table_book)
my_cursor.execute(table_user)
my_cursor.execute(table_users_who_borrowed_book_3)
my_cursor.execute(table_admin)
'''

# Facade objects wrapping the admin-side and customer-side operations.
admin = Admin.Admin(lib_database)
users = lib_users.Library_users(lib_database)

# Top-level menu loop: 1 = admin section, 2 = customer section, 0 = quit.
# NOTE(review): int(input()) raises ValueError on non-numeric input.
while True:
    k = int(input("Press 1 for admin section \n"
                  "Press 2 for customer section \n"
                  "Press 0 for exit: \n"))
    if k == 0:
        exit()
    ##proceeding in admin section
    if k == 1:
        kk = int(input("Press 1 for admin registration \n"
                       "Press 2 for adding books \n"
                       "Press 3 for customer registration \n"
                       "Press 4 for main menu \n"
                       "Press 0 for exit: \n"))
        if kk == 0:
            exit()
        if kk == 2:
            # Add a new title (author + title + stock + daily price).
            author = input("Enter author name: ")
            title = input("Enter title name: ")
            quantity = int(input("Enter quantity: "))
            price = int(input("Enter the price for one day: "))
            admin.add_books(author, title, quantity, price)
            continue
        elif kk == 1:
            # Register a new administrator account.
            username = input("Enter username: ")
            phone_no = input("Enter phone number: ")
            password = input("Enter password: ")
            admin.add_admin(username, phone_no, password)
            continue
        elif kk == 3:
            # Register a customer on the admin's behalf.
            username = input("Enter username: ")
            first_name = input("Enter first name: ")
            last_name = input("Enter last name: ")
            phone_no = input("Enter mobile number: ")
            password = input("Enter password: ")
            admin.add_user(username, first_name, last_name, phone_no, password)
            continue
        elif kk == 4:
            # Back to the main menu.
            continue
        else:
            print("Not valid input")
            continue
    elif k == 2:
        kk = int(input("Press 1 for user registration \n"
                       "Press 2 for borrowing books \n"
                       "Press 3 for returning book \n"
                       "Press 4 for main menu \n"
                       "Press 0 for exit: \n"))
        if kk == 0:
            exit()
        if kk == 1:
            # Self-service customer registration.
            username = input("Enter username: ")
            first_name = input("Enter first name: ")
            last_name = input("Enter last name: ")
            phone_no = input("Enter mobile number: ")
            password = input("Enter password: ")
            users.add_user(username, first_name, last_name, phone_no, password)
            continue
        if kk == 2:
            users.borrow()
            continue
        if kk == 3:
            users.Return_book()
            continue
        elif kk == 4:
            # Back to the main menu.
            continue
        else:
            print("Not valid input ")
            continue
c8edb885003da13dff0ee59b0d0fda7a4bbb9e72 | 5,742 | py | Python | polybius/graphics/ui/menu.py | TStalnaker44/python_jeopardy | 7044d9512d51c04e22a6d2cbfa9e9a82a384de85 | [
"MIT"
] | null | null | null | polybius/graphics/ui/menu.py | TStalnaker44/python_jeopardy | 7044d9512d51c04e22a6d2cbfa9e9a82a384de85 | [
"MIT"
] | null | null | null | polybius/graphics/ui/menu.py | TStalnaker44/python_jeopardy | 7044d9512d51c04e22a6d2cbfa9e9a82a384de85 | [
"MIT"
] | null | null | null | """
Author: Trevor Stalnaker
File: menu.py
A general class for creating menus
Parameters:
pos - (x,y) position for the top-left corner of the menu
dims - (width, height) pixels of the menu
commands - list of dictionaries specifying the button attributes
padding - (horizontal, vertical) padding between border and buttons
spacing - space in pixels between buttons
color - rgb color of the menu background (None for transparent)
borderColor - rgb color value for border
borderWidth - pixel width for the border
font - Supplied as a pygame font
orientation - "vertical" | "horizontal"
"""
import pygame
from polybius.graphics.components import Button
from polybius.graphics.basics.drawable import Drawable
from polybius.graphics.utils.window import Window
| 36.341772 | 96 | 0.537966 | """
Author: Trevor Stalnaker
File: menu.py
A general class for creating menus
Parameters:
pos - (x,y) position for the top-left corner of the menu
dims - (width, height) pixels of the menu
commands - list of dictionaries specifying the button attributes
padding - (horizontal, vertical) padding between border and buttons
spacing - space in pixels between buttons
color - rgb color of the menu background (None for transparent)
borderColor - rgb color value for border
borderWidth - pixel width for the border
font - Supplied as a pygame font
orientation - "vertical" | "horizontal"
"""
import pygame
from polybius.graphics.components import Button
from polybius.graphics.basics.drawable import Drawable
from polybius.graphics.utils.window import Window
class Menu(Drawable, Window):
    """A rectangular menu of buttons laid out vertically or horizontally.

    Construction parameters are described in the module docstring.
    selections are reported as 1-based button indices from handleEvent().
    """

    def __init__(self, pos, dims, commands, padding=0, spacing=0,
                 color=(80,80,80), borderColor=(0,0,0),
                 borderWidth=2, orientation="vertical"):
        """Initializes the menu"""
        # NOTE(review): the default padding=0 is not subscriptable; callers
        # must pass an (h, v) tuple or padding[0] below raises TypeError.
        Drawable.__init__(self, "", pos, worldBound=False)
        Window.__init__(self)
        self._offset = (pos[0], pos[1])
        self._width = dims[0]
        self._height = dims[1]
        h_padding = padding[0]
        v_padding = padding[1]
        self._borderColor = borderColor
        self._borderWidth = borderWidth
        self._backgroundColor = color
        n = len(commands)
        xStart = h_padding
        yStart = v_padding
        # Each entry is (Button, 1-based index, closeOnPress,
        # (toggleText-or-None, original button text)).
        self._buttons = []
        # Create buttons with a vertical configuration
        if orientation == "vertical":
            # Buttons span the interior width; height is divided evenly
            # after subtracting padding, spacing and border.
            buttonWidth = self._width - (2*h_padding) - (2*borderWidth)
            buttonHeight = (self._height - (2*v_padding) - \
                           ((n-1)*spacing) - (2*borderWidth)) // n
            for x, b in enumerate(commands):
                font = pygame.font.SysFont(b["font"], b["fontSize"])
                self._buttons.append((Button(b["text"],
                                    (xStart + self._offset[0],
                                     yStart + (x*buttonHeight) + \
                                     (x*spacing) + self._offset[1]),
                                    font, b["fontColor"], b["color"],
                                    buttonHeight, buttonWidth, b["borderColor"],
                                    b["borderWidth"]),
                              x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
        # Create buttons with a horizontal configuration
        elif orientation == "horizontal":
            # Buttons span the interior height; width is divided evenly.
            buttonWidth = (self._width - (2*h_padding) - \
                          ((n-1)*spacing) - (2*borderWidth)) // n
            buttonHeight = self._height - (2*v_padding) - (2*borderWidth)
            for x, b in enumerate(commands):
                font = pygame.font.SysFont(b["font"], b["fontSize"])
                self._buttons.append((Button(b["text"],
                                    (xStart + self._offset[0] +\
                                     (x*buttonWidth) + (x*spacing),
                                     yStart + self._offset[1]),
                                    font, b["fontColor"], b["color"],
                                    buttonHeight, buttonWidth, b["borderColor"],
                                    b["borderWidth"]),
                              x+1, b["closeOnPress"], (b.get("toggleText",None),b["text"])))
        self._selection = None  # set by select(), consumed by getSelection()
        self.createDisplay()

    def getButtonByText(self, text):
        """Return the button with the provided text"""
        # Implicitly returns None when no button matches.
        for button in self._buttons:
            if button[0].getText() == text:
                return button[0]

    def getButtonByPosition(self, position):
        """Return the button at the given position in the menu"""
        return self._buttons[position][0]

    def handleEvent(self, event):
        """Handles events on the pause menu"""
        for b in self._buttons:
            # Button.handleEvent invokes select(b) when the button fires.
            b[0].handleEvent(event,self.select,(b,))
        return self.getSelection()

    def select(self, button):
        """Sets the current selection"""
        b, selection, closeOnPress, toggleText = button
        if closeOnPress:
            self.close()
        if toggleText[0] != None:
            # Flip the button label between its two toggle states.
            currentText = b._text
            if toggleText[0] == currentText:
                b.setText(toggleText[1])
            else:
                b.setText(toggleText[0])
        self._selection = selection

    def getSelection(self):
        """Returns the current selection and resets it to None"""
        sel = self._selection
        self._selection = None
        return sel

    def draw(self, screen):
        """Draws the menu on the screen"""
        super().draw(screen)
        # Draw buttons
        for b in self._buttons:
            b[0].draw(screen)

    def createDisplay(self):
        """Create the display of the menu"""
        # Draw the border
        surfBack = pygame.Surface((self._width, self._height))
        surfBack.fill(self._borderColor)
        # Draw the background
        surf = pygame.Surface((self._width - (self._borderWidth * 2),
                               self._height - (self._borderWidth * 2)))
        # Apply the background color or make transparent
        if self._backgroundColor == None:
            # (1,1,1) doubles as the colorkey, rendering the interior
            # transparent while the border stays visible.
            surf.fill((1,1,1))
            surfBack.set_colorkey((1,1,1))
        else:
            surf.fill(self._backgroundColor)
        # Blit the widget layer onto the back surface
        surfBack.blit(surf, (self._borderWidth, self._borderWidth))
        self._image = surfBack
| 0 | 4,922 | 23 |
4a62c4e3bb93b287b5d15270a7fb934bb56b2711 | 1,938 | py | Python | onmt/bin/release_model.py | IliasPap/transformer-slt | 12913f1b15465509696bd2907c74c54653669f8d | [
"Apache-2.0"
] | 93 | 2020-04-03T01:00:15.000Z | 2022-03-16T09:33:49.000Z | onmt/bin/release_model.py | IliasPap/transformer-slt | 12913f1b15465509696bd2907c74c54653669f8d | [
"Apache-2.0"
] | 10 | 2020-04-21T13:43:23.000Z | 2022-02-02T16:11:25.000Z | onmt/bin/release_model.py | IliasPap/transformer-slt | 12913f1b15465509696bd2907c74c54653669f8d | [
"Apache-2.0"
] | 15 | 2020-04-03T13:41:28.000Z | 2022-03-28T11:01:16.000Z | #!/usr/bin/env python
import argparse
import torch
def get_ctranslate2_model_spec(opt):
    """Creates a CTranslate2 model specification from the model options."""
    # CTranslate2 can only load the "vanilla" Transformer: transformer
    # encoder and decoder, sinusoidal position encoding, equal depth on
    # both sides, scaled dot-product self-attention, and no relative
    # position representations. Bail out on the first mismatch (checks
    # run in the same short-circuit order as before).
    if opt.encoder_type != "transformer" or opt.decoder_type != "transformer":
        return None
    if not opt.position_encoding:
        return None
    if opt.enc_layers != opt.dec_layers:
        return None
    if getattr(opt, "self_attn_type", "scaled-dot") != "scaled-dot":
        return None
    if getattr(opt, "max_relative_positions", 0) != 0:
        return None

    # Imported lazily so callers that only get None back do not need
    # the optional ctranslate2 dependency installed.
    import ctranslate2

    attention_heads = getattr(opt, "heads", 8)
    return ctranslate2.specs.TransformerSpec(opt.layers, attention_heads)
if __name__ == "__main__":
main()
| 37.269231 | 78 | 0.617647 | #!/usr/bin/env python
import argparse
import torch
def get_ctranslate2_model_spec(opt):
    """Creates a CTranslate2 model specification from the model options."""
    # CTranslate2 can only load the "vanilla" Transformer: transformer
    # encoder and decoder, sinusoidal position encoding, equal depth on
    # both sides, scaled dot-product self-attention, and no relative
    # position representations. Bail out on the first mismatch (checks
    # run in the same short-circuit order as before).
    if opt.encoder_type != "transformer" or opt.decoder_type != "transformer":
        return None
    if not opt.position_encoding:
        return None
    if opt.enc_layers != opt.dec_layers:
        return None
    if getattr(opt, "self_attn_type", "scaled-dot") != "scaled-dot":
        return None
    if getattr(opt, "max_relative_positions", 0) != 0:
        return None

    # Imported lazily so callers that only get None back do not need
    # the optional ctranslate2 dependency installed.
    import ctranslate2

    attention_heads = getattr(opt, "heads", 8)
    return ctranslate2.specs.TransformerSpec(opt.layers, attention_heads)
def main():
    """Command-line entry point: release a trained checkpoint for inference."""
    parser = argparse.ArgumentParser(
        description="Release an OpenNMT-py model for inference")
    parser.add_argument("--model", "-m",
                        help="The model path", required=True)
    parser.add_argument("--output", "-o",
                        help="The output path", required=True)
    parser.add_argument("--format",
                        choices=["pytorch", "ctranslate2"],
                        default="pytorch",
                        help="The format of the released model")
    args = parser.parse_args()

    checkpoint = torch.load(args.model)
    if args.format == "pytorch":
        # Dropping the optimizer state shrinks the released file.
        checkpoint["optim"] = None
        torch.save(checkpoint, args.output)
    elif args.format == "ctranslate2":
        model_spec = get_ctranslate2_model_spec(checkpoint["opt"])
        if model_spec is None:
            raise ValueError("This model is not supported by CTranslate2. Go "
                             "to https://github.com/OpenNMT/CTranslate2 for "
                             "more information on supported models.")
        import ctranslate2
        converter = ctranslate2.converters.OpenNMTPyConverter(args.model)
        converter.convert(args.output, model_spec, force=True)
if __name__ == "__main__":
main()
| 1,190 | 0 | 23 |
7eb4dbd3592a4b7a3e08979f7047983b17d9afc1 | 1,424 | py | Python | gans/models/generator.py | kpandey008/comparing-gans | 004134663d5332232a86c0276821ee784eb9e189 | [
"MIT"
] | 2 | 2021-08-25T07:40:37.000Z | 2022-01-09T11:17:50.000Z | gans/models/generator.py | kpandey008/comparing-gans | 004134663d5332232a86c0276821ee784eb9e189 | [
"MIT"
] | null | null | null | gans/models/generator.py | kpandey008/comparing-gans | 004134663d5332232a86c0276821ee784eb9e189 | [
"MIT"
] | 1 | 2021-08-25T07:40:40.000Z | 2021-08-25T07:40:40.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
| 37.473684 | 87 | 0.662219 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
def __init__(self, code_size):
super(Generator, self).__init__()
self.code_size = code_size
# Define the model here
self.deconv_1 = nn.ConvTranspose2d(code_size, 256, 4, stride=2, bias=False)
torch.nn.init.normal_(self.deconv_1.weight, std=0.02)
self.bn_1 = nn.BatchNorm2d(256)
self.deconv_2 = nn.ConvTranspose2d(256, 128, 4, bias=False)
torch.nn.init.normal_(self.deconv_2.weight, std=0.02)
self.bn_2 = nn.BatchNorm2d(128)
self.deconv_3 = nn.ConvTranspose2d(128, 64, 4, padding=1, stride=2, bias=False)
torch.nn.init.normal_(self.deconv_3.weight, std=0.02)
self.bn_3 = nn.BatchNorm2d(64)
self.deconv_4 = nn.ConvTranspose2d(64, 1, 4, padding=1, stride=2, bias=False)
torch.nn.init.normal_(self.deconv_4.weight, std=0.02)
def forward(self, z_batch):
# Convert the input noise vector into a (10 x 10) tensor
z_batch = torch.reshape(z_batch, (-1, self.code_size, 1, 1))
deconv_1_out = F.relu(self.bn_1(self.deconv_1(z_batch)))
deconv_2_out = F.relu(self.bn_2(self.deconv_2(deconv_1_out)))
deconv_3_out = F.relu(self.deconv_3(deconv_2_out))
deconv_4_out = F.relu(self.deconv_4(deconv_3_out))
output = torch.tanh(deconv_4_out)
return output
| 1,273 | 6 | 77 |
34a4f4aab0f2e6c5ffda9f05dfd2b103d20b3c61 | 692 | py | Python | liuqi/20180403/h2.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | liuqi/20180403/h2.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | liuqi/20180403/h2.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | """
2.定义一个北京欢乐谷门票类,应用你所定义的类,
计算两个社会青年和一个学生平日比节假日门票能省多少钱
票价是:
除节假日票价100元/天
节假日为平日的1.2倍
学生半价
"""
societyman = False
student = True
m = Ticket(societyman)
m1 = m.myprice()
s = Ticket(student)
s1 = s.myprice()
print("欢乐谷一个学生平日比节假日门票能节省{}元".format(s1))
print("欢乐谷一个社会青年平日比节假日门票能节省{}元".format(m1))
| 17.74359 | 43 | 0.693642 | """
2.定义一个北京欢乐谷门票类,应用你所定义的类,
计算两个社会青年和一个学生平日比节假日门票能省多少钱
票价是:
除节假日票价100元/天
节假日为平日的1.2倍
学生半价
"""
class Ticket(object):
def __init__(self,species):
self.__species = species
def myprice(self):
if self.__species == True:
vacationprice = 1.2*100*0.5
usualprice = 100*0.5
sprice = vacationprice - usualprice
return sprice
if self.__species == False:
vacationprice = 1.2*100*2
usualprice = 100*2
mprice = (vacationprice - usualprice)
return mprice
societyman = False
student = True
m = Ticket(societyman)
m1 = m.myprice()
s = Ticket(student)
s1 = s.myprice()
print("欢乐谷一个学生平日比节假日门票能节省{}元".format(s1))
print("欢乐谷一个社会青年平日比节假日门票能节省{}元".format(m1))
| 314 | 0 | 71 |
75d3ffb5dc747b1799efae4d880118021f21fbf3 | 2,409 | py | Python | tests/test_pthat.py | drizztguen77/PTHat | f46d05054875599e80b396f74bc5a348cfcefbfb | [
"Apache-2.0"
] | 5 | 2021-01-28T13:26:08.000Z | 2022-02-24T08:15:44.000Z | tests/test_pthat.py | drizztguen77/PTHat | f46d05054875599e80b396f74bc5a348cfcefbfb | [
"Apache-2.0"
] | null | null | null | tests/test_pthat.py | drizztguen77/PTHat | f46d05054875599e80b396f74bc5a348cfcefbfb | [
"Apache-2.0"
] | null | null | null | import unittest
from pthat.pthat import PTHat
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 35.426471 | 103 | 0.724367 | import unittest
from pthat.pthat import PTHat
class TestPthat(unittest.TestCase):
    """Verify the serial command strings that PTHat builds in test mode."""

    def setUp(self):
        # A fresh, fully configured hat for every test.
        hat = PTHat(test_mode=True)
        hat.command_type = "I"
        hat.command_id = 0
        hat.wait_delay = 0
        hat.debug = True
        self.pthat = hat

    def test_get_io_port_status(self):
        command = self.pthat.get_io_port_status()
        self.assertEqual("I00LI*", command)

    def test_set_wait_delay_milliseconds(self):
        command = self.pthat.set_wait_delay(period="W", delay=1000)
        self.assertEqual("I00WW1000*", command)

    def test_set_wait_delay_microseconds(self):
        command = self.pthat.set_wait_delay(period="M", delay=1000)
        self.assertEqual("I00WM1000*", command)

    def test_toggle_motor_enable_line(self):
        command = self.pthat.toggle_motor_enable_line()
        self.assertEqual("I00HT*", command)

    def test_received_command_replies_on(self):
        command = self.pthat.received_command_replies_on()
        self.assertEqual("I00R1*", command)

    def test_received_command_replies_off(self):
        command = self.pthat.received_command_replies_off()
        self.assertEqual("I00R0*", command)

    def test_completed_command_replies_on(self):
        command = self.pthat.completed_command_replies_on()
        self.assertEqual("I00G1*", command)

    def test_completed_command_replies_off(self):
        command = self.pthat.completed_command_replies_off()
        self.assertEqual("I00G0*", command)

    def test_get_firmware_version(self):
        command = self.pthat.get_firmware_version()
        self.assertEqual("I00FW*", command)

    def test_initiate_buffer(self):
        command = self.pthat.initiate_buffer()
        self.assertEqual("H0000*", command)

    def test_start_buffer(self):
        command = self.pthat.start_buffer()
        self.assertEqual("Z0000*", command)

    def test_start_buffer_loop(self):
        command = self.pthat.start_buffer_loop()
        self.assertEqual("W0000*", command)

    def test_reset(self):
        command = self.pthat.reset()
        self.assertEqual("N*", command)

    def test_rpm_to_frequency(self):
        frequency = self.pthat.rpm_to_frequency(rpm=800, steps_per_rev=200, round_digits=0)
        self.assertEqual(frequency, 2667)

    def test_frequency_to_rpm(self):
        rpm = self.pthat.frequency_to_rpm(frequency=2667, steps_per_rev=200)
        self.assertEqual(rpm, 800)

    def test_calculate_pulse_count(self):
        pulses = self.pthat.calculate_pulse_count(steps_per_rev=200, total_revs=50)
        self.assertEqual(pulses, 10000)

    def test_calculate_revolutions(self):
        revs = self.pthat.calculate_revolutions(steps_per_rev=200, pulse_count=10000)
        self.assertEqual(revs, 50)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 1,790 | 14 | 509 |
b3c93ff6e21fe264e7a1338ac57af24ddb8a9abc | 7,696 | py | Python | supershowdeport/supershowdeport.py | Car202ui/pythonproyect | 51a5d341be5fa300a041ba0d4e8060584750507a | [
"MIT"
] | null | null | null | supershowdeport/supershowdeport.py | Car202ui/pythonproyect | 51a5d341be5fa300a041ba0d4e8060584750507a | [
"MIT"
] | null | null | null | supershowdeport/supershowdeport.py | Car202ui/pythonproyect | 51a5d341be5fa300a041ba0d4e8060584750507a | [
"MIT"
] | null | null | null | import csv
# Interactive console menu for a small shop.  Products, stock,
# suppliers, sales and transactions are persisted in semicolon-
# delimited CSV files that live next to the script.
# NOTE(review): files are opened without an explicit encoding and the
# CSV contents are fully trusted; see the per-branch notes below.
while True:
    # Main menu, redisplayed after every completed action.
    print()
    print('1) Guardar nuevo producto')
    print('2) Consultar Productos')
    print('3) Stock Productos')
    print('4) Consultar Proveedores')
    print('5) Compra a Proveedores')
    print('6) Venta al cliente')
    print('7) Consultar Transacciones')
    print('8) Salir')
    opc=input('Elija una opción: ')
    if opc=='1':
        # Register a new product: the next product id is derived from
        # the current number of rows in Productos.csv.
        with open('Productos.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            idproduct=1
            val = 1
            for line in csv_reader:
                idproduct=idproduct+1
        arch=open('Productos.csv', 'a')
        nom=input('Ingresa el nombre del producto: ')
        # Re-prompt until the supplier id is numeric (val flags success).
        while val == 1:
            idprov=input('Que código de proveedor va a distribuir el producto: ')
            try:
                idprov = int(idprov)
                val = 0
            except ValueError:
                print ('Escriba un código de proveedor correcto porfavor')
        val = 1
        # Re-prompt until the unit price is numeric.
        while val == 1:
            precio=input('Que precio unitario tiene el producto: ')
            try:
                precio = int(precio)
                val = 0
            except ValueError:
                print ('Escriba un precio correcto porfavor')
        # Append the product row and a matching zero-stock row.
        linea='\n'+str(idproduct)+';'+str(nom)+';'+str(precio)
        arch.write(linea)
        arch.close()
        arch=open('Stock.csv', 'a')
        linea='\n'+str(idproduct)+';'+str(nom)+';'+str(idprov)+';'+'0'
        arch.write(linea)
        arch.close()
    elif opc=='2':
        # List all products.
        with open('Productos.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproducto;Nombre;Precio')
            for line in csv_reader:
                print(line)
    elif opc=='3':
        # List current stock per product/supplier.
        with open('Stock.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproducto;Nombre;idproveedor;Unidades Disponibles')
            for line in csv_reader:
                print(line)
    elif opc=='4':
        # List all suppliers.
        with open('Proveedores.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproveedor;NIT;Nombre Proveedor')
            for line in csv_reader:
                print(line)
    elif opc=='5':
        # Purchase from a supplier: add `cant` units to the matching
        # Stock.csv row, then rewrite the whole file.
        mat = list();
        val = 1
        while val == 1:
            cod=input('Ingresa el código de producto a comprar: ')
            try:
                cod = int(cod)
                val = 0
            except ValueError:
                print ('Escriba un código de producto correcto porfavor')
        val = 1
        while val == 1:
            idp=input('Ingresa el código de proveedor a quien le va a comprar: ')
            try:
                idp = int(idp)
                val = 0
            except ValueError:
                print ('Escriba un código de proveedor correcto porfavor')
        val = 1
        while val == 1:
            cant=input('Ingresa la cantidad de producto a comprar: ')
            try:
                cant = int(cant)
                val = 0
            except ValueError:
                print ('Escriba una cantidad correcta porfavor')
        # Rows are read whole (reader without ';'), so each row is a
        # one-element list that is split manually below.
        with open('Stock.csv', "r+") as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                delim = row[0].split(';')
                if int(delim[0]) == cod and int(delim[2]) == idp :
                    (delim[3]) = int(delim[3]) + cant
                mat.append(delim)
        with open('Stock.csv', "w", newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            writer.writerows(mat)
    elif opc=='6':
        # Sale to a customer: decrement stock when enough units exist,
        # then append a line to Transacciones.csv.
        # NOTE(review): `fac` is never used.
        mat = list();
        matdos = list();
        fac = 1
        val = 1
        while val == 1:
            cod=input('Ingresa el código de producto a comprar: ')
            try:
                cod = int(cod)
                val = 0
            except ValueError:
                print ('Escriba un código de producto correcto porfavor')
        val = 1
        while val == 1:
            cant=input('Ingresa la cantidad de producto a comprar: ')
            try:
                cant = int(cant)
                val = 0
            except ValueError:
                print ('Escriba una cantidad correcta porfavor')
        # exist: 0 = product id not found, 1 = sold, 2 = not enough stock.
        exist = 0
        with open('Stock.csv', "r+") as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                delim = row[0].split(';')
                if int(delim[0]) == cod :
                    if cant <= int(delim[3]) :
                        (delim[3]) = int(delim[3]) - cant
                        transprov = (delim[2])
                        exist = 1
                    else:
                        exist = 2
                mat.append(delim)
        if exist == 1:
            print('Venta exitosa')
            with open('Stock.csv', "w", newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=';')
                writer.writerows(mat)
            # Look up the product name/price to compute the sale total.
            # NOTE(review): transprod/transventa stay unbound if the id
            # is missing from Productos.csv -> NameError further down.
            with open('Productos.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                for row in csv_reader:
                    delim = row[0].split(';')
                    if int(delim[0]) == cod :
                        transprod = (delim[1])
                        transventa = cant * int(delim[2])
            with open('Proveedores.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                for row in csv_reader:
                    delim = row[0].split(';')
                    if int(delim[0]) == int(transprov) :
                        transnit = (delim[1])
                        transnom = (delim[2])
            # Next transaction id = current row count + 1; the last
            # row's `delim` list is recycled as the new row's buffer.
            # NOTE(review): this assumes the file already holds at
            # least one row with >= 8 fields — confirm.
            with open('Transacciones.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                idconsecutivo=1
                for row in csv_reader:
                    delim = row[0].split(';')
                    idconsecutivo=idconsecutivo+1
                delim[0]= str(idconsecutivo)
                delim[1]= str(cod)
                delim[2]= str(transprod)
                delim[3]= str(transprov)
                delim[4]= str(transnit)
                delim[5]= str(transnom)
                delim[6]= str(cant)
                delim[7]= str(transventa)
                matdos.append(delim)
            with open('Transacciones.csv', "a", newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=';')
                writer.writerows(matdos)
        elif exist == 2:
            print('Venta no exitosa. No disponemos la cantidad de producto solicitado')
        else:
            print('Código de producto no existe.')
    elif opc=='7':
        # List all recorded transactions.
        with open('Transacciones.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idtransacción;idproducto;Nombre Producto;idproveedor;NIT;Proveedor;Unidades Vendidas;Valor a Pagar')
            for line in csv_reader:
                print(line)
    elif opc=='8':
        print('Adios')
        break
# Interactive console menu for a small shop.  Products, stock,
# suppliers, sales and transactions are persisted in semicolon-
# delimited CSV files that live next to the script.
# NOTE(review): files are opened without an explicit encoding and the
# CSV contents are fully trusted; see the per-branch notes below.
while True:
    # Main menu, redisplayed after every completed action.
    print()
    print('1) Guardar nuevo producto')
    print('2) Consultar Productos')
    print('3) Stock Productos')
    print('4) Consultar Proveedores')
    print('5) Compra a Proveedores')
    print('6) Venta al cliente')
    print('7) Consultar Transacciones')
    print('8) Salir')
    opc=input('Elija una opción: ')
    if opc=='1':
        # Register a new product: the next product id is derived from
        # the current number of rows in Productos.csv.
        with open('Productos.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            idproduct=1
            val = 1
            for line in csv_reader:
                idproduct=idproduct+1
        arch=open('Productos.csv', 'a')
        nom=input('Ingresa el nombre del producto: ')
        # Re-prompt until the supplier id is numeric (val flags success).
        while val == 1:
            idprov=input('Que código de proveedor va a distribuir el producto: ')
            try:
                idprov = int(idprov)
                val = 0
            except ValueError:
                print ('Escriba un código de proveedor correcto porfavor')
        val = 1
        # Re-prompt until the unit price is numeric.
        while val == 1:
            precio=input('Que precio unitario tiene el producto: ')
            try:
                precio = int(precio)
                val = 0
            except ValueError:
                print ('Escriba un precio correcto porfavor')
        # Append the product row and a matching zero-stock row.
        linea='\n'+str(idproduct)+';'+str(nom)+';'+str(precio)
        arch.write(linea)
        arch.close()
        arch=open('Stock.csv', 'a')
        linea='\n'+str(idproduct)+';'+str(nom)+';'+str(idprov)+';'+'0'
        arch.write(linea)
        arch.close()
    elif opc=='2':
        # List all products.
        with open('Productos.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproducto;Nombre;Precio')
            for line in csv_reader:
                print(line)
    elif opc=='3':
        # List current stock per product/supplier.
        with open('Stock.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproducto;Nombre;idproveedor;Unidades Disponibles')
            for line in csv_reader:
                print(line)
    elif opc=='4':
        # List all suppliers.
        with open('Proveedores.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idproveedor;NIT;Nombre Proveedor')
            for line in csv_reader:
                print(line)
    elif opc=='5':
        # Purchase from a supplier: add `cant` units to the matching
        # Stock.csv row, then rewrite the whole file.
        mat = list();
        val = 1
        while val == 1:
            cod=input('Ingresa el código de producto a comprar: ')
            try:
                cod = int(cod)
                val = 0
            except ValueError:
                print ('Escriba un código de producto correcto porfavor')
        val = 1
        while val == 1:
            idp=input('Ingresa el código de proveedor a quien le va a comprar: ')
            try:
                idp = int(idp)
                val = 0
            except ValueError:
                print ('Escriba un código de proveedor correcto porfavor')
        val = 1
        while val == 1:
            cant=input('Ingresa la cantidad de producto a comprar: ')
            try:
                cant = int(cant)
                val = 0
            except ValueError:
                print ('Escriba una cantidad correcta porfavor')
        # Rows are read whole (reader without ';'), so each row is a
        # one-element list that is split manually below.
        with open('Stock.csv', "r+") as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                delim = row[0].split(';')
                if int(delim[0]) == cod and int(delim[2]) == idp :
                    (delim[3]) = int(delim[3]) + cant
                mat.append(delim)
        with open('Stock.csv', "w", newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=';')
            writer.writerows(mat)
    elif opc=='6':
        # Sale to a customer: decrement stock when enough units exist,
        # then append a line to Transacciones.csv.
        # NOTE(review): `fac` is never used.
        mat = list();
        matdos = list();
        fac = 1
        val = 1
        while val == 1:
            cod=input('Ingresa el código de producto a comprar: ')
            try:
                cod = int(cod)
                val = 0
            except ValueError:
                print ('Escriba un código de producto correcto porfavor')
        val = 1
        while val == 1:
            cant=input('Ingresa la cantidad de producto a comprar: ')
            try:
                cant = int(cant)
                val = 0
            except ValueError:
                print ('Escriba una cantidad correcta porfavor')
        # exist: 0 = product id not found, 1 = sold, 2 = not enough stock.
        exist = 0
        with open('Stock.csv', "r+") as csv_file:
            csv_reader = csv.reader(csv_file)
            for row in csv_reader:
                delim = row[0].split(';')
                if int(delim[0]) == cod :
                    if cant <= int(delim[3]) :
                        (delim[3]) = int(delim[3]) - cant
                        transprov = (delim[2])
                        exist = 1
                    else:
                        exist = 2
                mat.append(delim)
        if exist == 1:
            print('Venta exitosa')
            with open('Stock.csv', "w", newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=';')
                writer.writerows(mat)
            # Look up the product name/price to compute the sale total.
            # NOTE(review): transprod/transventa stay unbound if the id
            # is missing from Productos.csv -> NameError further down.
            with open('Productos.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                for row in csv_reader:
                    delim = row[0].split(';')
                    if int(delim[0]) == cod :
                        transprod = (delim[1])
                        transventa = cant * int(delim[2])
            with open('Proveedores.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                for row in csv_reader:
                    delim = row[0].split(';')
                    if int(delim[0]) == int(transprov) :
                        transnit = (delim[1])
                        transnom = (delim[2])
            # Next transaction id = current row count + 1; the last
            # row's `delim` list is recycled as the new row's buffer.
            # NOTE(review): this assumes the file already holds at
            # least one row with >= 8 fields — confirm.
            with open('Transacciones.csv', "r+") as csv_file:
                csv_reader = csv.reader(csv_file)
                idconsecutivo=1
                for row in csv_reader:
                    delim = row[0].split(';')
                    idconsecutivo=idconsecutivo+1
                delim[0]= str(idconsecutivo)
                delim[1]= str(cod)
                delim[2]= str(transprod)
                delim[3]= str(transprov)
                delim[4]= str(transnit)
                delim[5]= str(transnom)
                delim[6]= str(cant)
                delim[7]= str(transventa)
                matdos.append(delim)
            with open('Transacciones.csv', "a", newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=';')
                writer.writerows(matdos)
        elif exist == 2:
            print('Venta no exitosa. No disponemos la cantidad de producto solicitado')
        else:
            print('Código de producto no existe.')
    elif opc=='7':
        # List all recorded transactions.
        with open('Transacciones.csv', 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=';')
            print('idtransacción;idproducto;Nombre Producto;idproveedor;NIT;Proveedor;Unidades Vendidas;Valor a Pagar')
            for line in csv_reader:
                print(line)
    elif opc=='8':
        print('Adios')
        break
87e979e245b05aa8b4804fadce8a7fea6c5d9342 | 2,497 | py | Python | momos/components/mode.py | codetent/project_momos | 424a46f901941ae0b0641a0c407a1e38ee121e72 | [
"MIT"
] | 2 | 2021-05-12T09:36:08.000Z | 2022-01-10T16:55:52.000Z | momos/components/mode.py | codetent/project_momos | 424a46f901941ae0b0641a0c407a1e38ee121e72 | [
"MIT"
] | null | null | null | momos/components/mode.py | codetent/project_momos | 424a46f901941ae0b0641a0c407a1e38ee121e72 | [
"MIT"
] | null | null | null | from __future__ import annotations
import inspect
from functools import partial
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable, List
class UnboundFailureMode:
    """Failure mode of a trigger not bound to a trigger instance.
    """
    # NOTE(review): this copy looks truncated (no __init__); the
    # attributes ``generator``, ``requires`` and ``_fails`` are
    # expected to be set externally.  A duplicated @property decorator
    # (left over from a removed method) wrapped the getter twice and
    # made ``description`` unusable; the extra one was removed.
    @property
    def description(self) -> str:
        """Get failure mode description.
        """
        return inspect.getdoc(self.generator).strip()

    def _bind(self, instance: Any) -> FailureMode:
        """Bind failure mode to trigger instance.
        """
        return FailureMode(generator=self.generator, requires=self.requires, fails=self._fails, instance=instance)
class FailureMode(UnboundFailureMode):
    """Instance-bound failure mode.
    """
    # NOTE(review): this copy looks truncated (no __init__ setting
    # ``instance``); the attribute is expected to be set externally.

    @property
    def arguments(self) -> List[Any]:
        """Get arguments returned by failure mode.
        """
        return self.generator(self.instance) or []

    @property
    def possible(self) -> bool:
        """Check if failure mode is possible for given settings.
        """
        if not callable(self.requires):
            return True
        return self.requires(self.instance)


# NOTE(review): a dangling ``@property`` decorator (left over from a
# removed method) preceded this class header and made the module
# unparseable; it was removed.
class FailureModeResolver:
    """Mixin that resolves specified failure modes.
    """
def failure_mode(wrapped: Callable = None, *args, **kwargs) -> UnboundFailureMode:
    """Decorator for creating failure modes inside trigger classes.
    """
    # When used with arguments (@failure_mode(requires=...)), the first
    # call receives no function: return a partial that will wrap the
    # decorated function on the second call.
    if wrapped is None:
        return partial(failure_mode, *args, **kwargs)
    return UnboundFailureMode(wrapped, *args, **kwargs)
| 29.034884 | 114 | 0.637966 | from __future__ import annotations
import inspect
from functools import partial
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable, List
class UnboundFailureMode:
    """Failure mode of a trigger not bound to a trigger instance.
    """

    def __init__(self, generator: Callable, requires: Callable = None, fails: bool = True) -> None:
        # Callable that yields the failure arguments for an instance;
        # its docstring doubles as the mode's description.
        self.generator = generator
        # Optional predicate deciding whether the mode applies.
        self.requires = requires
        # bool or callable: whether triggering this mode is a failure.
        self._fails = fails

    @property
    def id(self) -> str:
        """Get the mode identifier (the generator's function name)."""
        return self.generator.__name__

    @property
    def description(self) -> str:
        """Get failure mode description.

        Falls back to an empty string when the generator has no
        docstring (``inspect.getdoc`` returns ``None`` in that case,
        which previously raised ``AttributeError`` on ``.strip()``).
        """
        return (inspect.getdoc(self.generator) or '').strip()

    def _bind(self, instance: Any) -> FailureMode:
        """Bind failure mode to trigger instance.
        """
        return FailureMode(generator=self.generator, requires=self.requires, fails=self._fails, instance=instance)
class FailureMode(UnboundFailureMode):
    """Instance-bound failure mode.
    """

    def __init__(self, *args, instance: Any = None, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.instance = instance

    @property
    def arguments(self) -> List[Any]:
        """Get arguments returned by failure mode.
        """
        produced = self.generator(self.instance)
        return produced if produced else []

    @property
    def possible(self) -> bool:
        """Check if failure mode is possible for given settings.
        """
        # A non-callable ``requires`` means "always possible".
        return self.requires(self.instance) if callable(self.requires) else True

    @property
    def fails(self) -> bool:
        """Tell whether triggering this mode counts as a failure."""
        # ``_fails`` may be a plain flag or a per-instance predicate.
        return self._fails(self.instance) if callable(self._fails) else self._fails
class FailureModeResolver:
    """Mixin that resolves specified failure modes.
    """
    def __new__(cls, *args, **kwargs):
        # Bind every class-level UnboundFailureMode to the new instance
        # before __init__ runs: the class attribute is shadowed by the
        # bound FailureMode and also indexed in ``_failure_modes``.
        # NOTE(review): ``cls.__dict__`` skips modes inherited from
        # base classes — confirm that is intended.
        instance = super().__new__(cls)
        instance._failure_modes = {}
        for key, value in cls.__dict__.items():
            if isinstance(value, UnboundFailureMode):
                bound = value._bind(instance)
                setattr(instance, key, bound)
                instance._failure_modes[key] = bound
        return instance
def failure_mode(wrapped: Callable = None, *args, **kwargs) -> UnboundFailureMode:
    """Decorator for creating failure modes inside trigger classes.

    Supports both the bare ``@failure_mode`` form and the configured
    ``@failure_mode(requires=..., fails=...)`` form.
    """
    if wrapped is not None:
        return UnboundFailureMode(wrapped, *args, **kwargs)
    # Called with configuration only: defer to a second invocation that
    # will receive the decorated function.
    return partial(failure_mode, *args, **kwargs)
| 805 | 0 | 130 |
2ac61b8cc44ad9d45868e15ea53cc32bd303137b | 521 | py | Python | dictionaries.py | Meesalasai/python- | 344426a66e414212c9f11b6a5e0db80ea6bb2488 | [
"MIT"
] | 1 | 2020-05-22T08:41:45.000Z | 2020-05-22T08:41:45.000Z | dictionaries.py | Meesalasai/python- | 344426a66e414212c9f11b6a5e0db80ea6bb2488 | [
"MIT"
] | null | null | null | dictionaries.py | Meesalasai/python- | 344426a66e414212c9f11b6a5e0db80ea6bb2488 | [
"MIT"
] | null | null | null | mystuff = {"key1":"value1", "key2":"value2"}
# Basic dict usage demos: key lookup, nesting, and in-place mutation.
print(mystuff['key1'])
mystuff2 = {"key1":123, "key2":"value2", "key3":{'123':[1,2,3]}}
print(mystuff2)
mystuff3 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
# Reach through dict -> dict -> list to pull out 'grabMe'.
print(mystuff3 ['key3']['123'][2])
mystuff4 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
print(mystuff4 ['key3']['123'][2].upper())
mystuff5 = {"lunch":"pizza", "bfast":"eggs"}
mystuff5['lunch'] = 'burger'  # overwrite an existing key
mystuff5['dinner'] = 'pasta'  # insert a new key
print(mystuff5['lunch'])
print(mystuff5)
| 28.944444 | 72 | 0.602687 | mystuff = {"key1":"value1", "key2":"value2"}
# Basic dict usage demos: key lookup, nesting, and in-place mutation.
print(mystuff['key1'])
mystuff2 = {"key1":123, "key2":"value2", "key3":{'123':[1,2,3]}}
print(mystuff2)
mystuff3 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
# Reach through dict -> dict -> list to pull out 'grabMe'.
print(mystuff3 ['key3']['123'][2])
mystuff4 = {"key1":123, "key2":"value2", "key3":{'123':[1,2, 'grabMe']}}
print(mystuff4 ['key3']['123'][2].upper())
mystuff5 = {"lunch":"pizza", "bfast":"eggs"}
mystuff5['lunch'] = 'burger'  # overwrite an existing key
mystuff5['dinner'] = 'pasta'  # insert a new key
print(mystuff5['lunch'])
print(mystuff5)
| 0 | 0 | 0 |