| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
72515571067
|
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQAWithSourcesChain, RetrievalQA
from langchain import OpenAI
from langchain.vectorstores import Chroma
import gradio as gr
from langchain.chains.question_answering import load_qa_chain
qa_chain = load_qa_chain(OpenAI(temperature=0.7), chain_type="map_rerank")
import json
# build documents from the blog JSON export (embeddings are created later)
path = "./source/blog.json"
# load json and get lines and slug
pages = []
slugs = []
def load_json(path=path):
with open(path, "r") as f:
raw = json.load(f)
# get lines and slug
for i, j in enumerate(raw["pages"]):
j = json.loads(j)
# flat to lines
line = " ".join(j["lines"])
if line == "":
continue
pages.append(line)
s = {"source": j["slug"]}
slugs.append(s)
splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = splitter.create_documents(texts=pages, metadatas=slugs)
return docs
def split_documents(docs):
"""documents を tokenize する
Args:
docs (_type_): _description_
Returns:
_type_: _description_
"""
# split chauncs
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
return splitter.split_documents(docs)
def get_embeddings_for_search(texts):
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embedding=embeddings,
#metadatas=[{"source": meta[i]} for i in range(len(meta))]
)
return docsearch
class Chain():
def __init__(self, doc):
self.doc = doc
self.chain = RetrievalQA(combine_documents_chain=qa_chain, retriever=self.doc.as_retriever(), return_source_documents=True)
def get(self, query):
#res = self.chain.run(question=query)
res = self.chain({"query": query})
sources = []
for r in res["source_documents"]:
sources.append(f'https://uni-3.app/{r.metadata["source"]}')
return res["result"], "\n".join(sources)
def main():
# get docsearch
docs = load_json()
# get embeddings
#print("docs", docs)
docsearch = get_embeddings_for_search(docs)
# init chain
c = Chain(docsearch)
# launch gradio app
with gr.Blocks() as demo:
# input query then get_query
input_query = gr.Textbox(label="Query")
# show query result
result = gr.Textbox(label="Result")
source = gr.Markdown(label="source")
b = gr.Button("run")
b.click(c.get, inputs=[input_query], outputs=[result, source])
demo.launch()
if __name__ == '__main__':
main()
|
uni-3/gradio-apps
|
qa_retrieval/app.py
|
app.py
|
py
| 2,753
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71754427069
|
import paramiko
import paramiko.client
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(
paramiko.AutoAddPolicy()
)
client.connect(
"localhost",
username="developer",
password="4linux"
)
(stdin, stdout, stderr) = client.exec_command("ls")
status = stdout.channel.recv_exit_status()
if status == 0:
stdout.flush()
print(stdout.read().decode())
else:
stderr.flush()
print(stderr.read().decode())
client.close()
|
elderlima/4linux
|
ssh/main.py
|
main.py
|
py
| 500
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4487177002
|
"""
Lab 7.4 Document Exceptions
"""
class AbstractException(Exception):
""" Basic class for class exceptions"""
def __init__(self, message):
super().__init__(message)
self.message = message
class WrongInput(AbstractException):
""" Invalid input """
class CursorRangeError(AbstractException):
""" Cursor out of range """
class CallHomeError(AbstractException):
""" Unable to call home method """
class CallEndError(AbstractException):
""" Unable to call end method """
class FileWrongName(AbstractException):
""" Unable to create file """
class Document:
""" Class with information methods to work with the document """
def __init__(self) -> None:
"""
Init class values with empty row and cursor
"""
self.characters = []
self.cursor = Cursor(self)
self.filename = 'example.txt'
def insert(self, character:str) -> None:
"""
Insert character on current cursor position
"""
if not hasattr(character, "character"):
character = Character(character)
self.characters.insert(self.cursor.position, character)
self.cursor.forward()
def delete(self):
"""
Delete character on cursor
"""
if len(self.characters) == 0:
raise CursorRangeError("There are no character to delete")
del self.characters[self.cursor.position]
def save(self) -> None:
"""
Write document information into the file
"""
if self.filename == '':
raise FileWrongName("Invalid file name")
with open(self.filename, "w", encoding='utf-8') as file:
file.write("".join((str(c) for c in self.characters)))
@property
def string(self) -> str:
"""
Return the document's text as a single string
"""
return "".join((str(c) for c in self.characters))
class Cursor:
"""
Cursor class
"""
def __init__(self, document:Document) -> None:
"""
Init cursor values with default position as 0
"""
self.document = document
self.position = 0
def forward(self) -> None:
"""
Move forward on 1 character.
Raise: CursorRangeError if it is last position
"""
if self.position == len(self.document.characters):
raise CursorRangeError( "It is the last position, you can't move forward")
self.position += 1
def back(self) -> None:
"""
Move back on 1 character.
Raise: CursorRangeError if it is the first position
"""
if self.position == 0:
raise CursorRangeError("It is in the first position, you can't move back")
self.position -= 1
def home(self):
"""
Change cursor position to the first one in a row
"""
if len(self.document.characters) == 0:
raise CallHomeError( "Unable to use method on empty document")
while self.position and self.document.characters[self.position - 1].character != "\n":
self.position -= 1
def end(self):
"""
Change cursor position to the last one in a row
"""
if len(self.document.characters) == 0:
raise CallEndError( "Unable to use method on empty document")
while (self.position < len(self.document.characters))\
and (self.document.characters[self.position].character != "\n"):
self.position += 1
class Character:
"""
One character class provides different styles
"""
def __init__(self, character, bold=False, italic=False, underline=False):
"""
Init character values without additional text style as default.
"""
if len(character) != 1:
raise WrongInput("Invlid input character")
self.character = character
self.bold = bold
self.italic = italic
self.underline = underline
def __str__(self):
"""
String representation of the character
"""
bold = "*" if self.bold else ""
italic = "/" if self.italic else ""
underline = "_" if self.underline else ""
return bold + italic + underline + self.character
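# A hypothetical usage sketch (not part of the original lab file, added for illustration only):
#
#     doc = Document()
#     for ch in "hi\n":
#         doc.insert(ch)          # plain strings are wrapped in Character automatically
#     doc.insert(Character("!", bold=True))
#     print(doc.string)           # -> "hi\n*!"
#     doc.cursor.home()           # move back to the start of the current row
#     doc.save()                  # writes the text to example.txt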
|
sviat-l/FP_Labs
|
7_Lab_Exceptions_Tests/4_Document/main.py
|
main.py
|
py
| 4,283
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20678034139
|
from omegaconf import DictConfig
import hydra
from VisualClassificationRunner import VisualClassificationRunner
class CueConflictRunner(VisualClassificationRunner):
def test(self) -> dict:
"""Test the model on the standard dataset and the randomized datasets.
:return: Dictionary with randomized test added
:rtype: dict
"""
self.module.testset.set_shape_or_texture('shape')
shape_test_log = super().test()
self.module.testset.set_shape_or_texture('texture')
texture_test_log = super().test()
return {'shape_bias': 100.0 * shape_test_log['correct'] / (shape_test_log['correct'] + texture_test_log['correct'])}
def extract_test_log(self, log: dict) -> dict:
out_log = {
'correct': log['correct']
}
return out_log
@hydra.main(config_path="../conf", config_name="visual_classification.yaml")
def main(conf: DictConfig):
runner = CueConflictRunner(conf)
runner.main()
if __name__ == '__main__':
main()
|
nathansomavarapu/core-ml
|
src/CueConflictRunner.py
|
CueConflictRunner.py
|
py
| 1,053
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24416364915
|
import grp
import json
import logging
import os
import pkgutil
import tempfile
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from managed_nagios_plugin._compat import text_type
from managed_nagios_plugin.constants import (
BASE_OBJECTS_DIR,
OBJECT_DIR_PERMISSIONS,
OBJECT_OWNERSHIP,
RATE_BASE_PATH,
)
from managed_nagios_plugin.rest_utils import (
get_entities,
run_workflow,
StartWorkflowFailed,
)
from managed_nagios_plugin.utils import (
deploy_configuration_file,
deploy_file,
disable_service,
download_and_deploy_file_from_blueprint,
enable_service,
generate_certs,
reload_systemd_configuration,
run,
start_service,
stop_service,
yum_install,
yum_remove,
_decode_if_bytes
)
SSL_KEY_PATH = '/etc/nagios/ssl.key'
SSL_CERT_PATH = '/etc/nagios/ssl.crt'
BLUEPRINT_SSL_KEY_PATH = 'ssl/{key_file}'
BLUEPRINT_SSL_CERT_PATH = 'ssl/{cert_file}'
NAGIOSREST_SERVICES = ['nagiosrest-gunicorn', 'httpd']
@operation
def create(ctx):
props = ctx.node.properties
ctx.logger.info('Validating SSL properties')
if bool(props['ssl_certificate']) != bool(props['ssl_key']):
raise NonRecoverableError(
'Either ssl_certificate and ssl_key must both be provided, '
'or neither of them must be provided. '
'ssl_certificate was: {ssl_certificate}; '
'ssl_key was: {ssl_key}'.format(
ssl_certificate=props['ssl_certificate'],
ssl_key=props['ssl_key'],
)
)
ctx.logger.info('Enabling EPEL (if required)')
yum_install(text_type('epel-release'))
ctx.logger.info('Installing required packages')
yum_install([
'mod_ssl',
'nagios',
'nagios-plugins-disk',
'nagios-plugins-load',
'nagios-plugins-ping',
'nagios-plugins-snmp',
'nagios-selinux',
'net-snmp',
'net-snmp-utils',
'python-flask',
'python-gunicorn',
'python-jinja2',
'python-requests',
'selinux-policy-devel',
'incron',
])
ctx.logger.info('Deploying SELinux configuration')
# Prepare SELinux context for trap handler
tmp_path = tempfile.mkdtemp()
with open(
os.path.join(tmp_path, 'cloudify-nagios-snmp-trap-handler.te'), 'w',
) as policy_handle:
policy_handle.write(_decode_if_bytes(pkgutil.get_data(
'managed_nagios_plugin',
'resources/selinux/cloudify_nagios_snmp_trap_handler.te',
)))
run(['make', '-f', '/usr/share/selinux/devel/Makefile', '-C', tmp_path],
sudo=True)
run(['semodule',
'-i',
os.path.join(tmp_path, 'cloudify-nagios-snmp-trap-handler.pp')],
sudo=True)
run(['rm', '-rf', tmp_path], sudo=True)
ctx.logger.info('Deploying nagios plugins and SNMP trap handler')
for supporting_lib in ('_compat.py',
'constants.py',
'utils.py',
'snmp_utils.py',
'nagios_utils.py',
'rest_utils.py',
'resources/scripts/nagios_plugin_utils.py',
'resources/scripts/logging_utils.py'):
if supporting_lib.startswith('resources/scripts/'):
destination_filename = supporting_lib[len('resources/scripts/'):]
else:
destination_filename = supporting_lib
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
supporting_lib,
),
destination='/usr/lib64/nagios/plugins/' + destination_filename,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for script in ('check_snmp_numeric',
'check_snmp_aggregate',
'check_group_aggregate',
'check_group_meta_aggregate',
'cloudify_nagios_snmp_trap_handler',
'notify_cloudify',
'check_nagios_command_file',
'check_snmptrap_checks'):
source = os.path.join('resources/scripts/', script)
script_content = pkgutil.get_data('managed_nagios_plugin', source)
destination = os.path.join('/usr/lib64/nagios/plugins', script)
deploy_file(
data=script_content,
destination=destination,
permissions='550',
sudo=True,
)
ctx.logger.info('Deploying nagiosrest')
run(['mkdir', '-p', '/usr/local/www/nagiosrest'], sudo=True)
for nagiosrest_file in ('nagiosrest.py',
'nagiosrest_group.py',
'nagiosrest_target.py',
'nagiosrest_tenant.py',
'logging_utils.py'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/scripts/' + nagiosrest_file,
),
destination='/usr/local/www/nagiosrest/' + nagiosrest_file,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for supporting_lib in ('_compat.py',
'nagios_utils.py',
'utils.py',
'constants.py'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
supporting_lib,
),
destination='/usr/local/www/nagiosrest/' + supporting_lib,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for template in ('hostgroup.template', 'target.template', 'node.template',
'group.template', 'group_check.template',
'meta_group_check.template'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
os.path.join('resources', template),
),
destination='/usr/local/www/nagiosrest/' + template,
ownership='root.nagios',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/systemd_nagiosrest.conf',
),
destination='/usr/lib/systemd/system/nagiosrest-gunicorn.service',
ownership='root.root',
permissions='440',
sudo=True,
)
ctx.logger.info('Deploying notification configuration script')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/scripts/update_notify_cloudify_configuration',
),
destination='/usr/local/bin/update_notify_cloudify_configuration',
ownership='root.root',
permissions='500',
sudo=True,
# Must have the group of the agent user for reconcile operation to
# work correctly
template_params={'group': grp.getgrgid(os.getgid()).gr_name},
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'_compat.py',
),
destination='/usr/local/bin/_compat.py',
ownership='root.root',
permissions='400',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'utils.py',
),
destination='/usr/local/bin/utils.py',
ownership='root.root',
permissions='400',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'constants.py',
),
destination='/usr/local/bin/constants.py',
ownership='root.root',
permissions='400',
sudo=True,
)
ctx.logger.info(
'Creating directory structure for storing temporary rate data'
)
for rate_dir in ('nodes', 'instances'):
rate_storage_path = os.path.join(RATE_BASE_PATH, rate_dir)
run(['mkdir', '-p', rate_storage_path], sudo=True)
run(['chown', 'nagios.', rate_storage_path], sudo=True)
run(['restorecon', rate_storage_path], sudo=True)
if props['ssl_certificate']:
if props['ssl_certificate'].startswith("-----BEGIN CERTIFICATE-----"):
deploy_file(
data=props['ssl_key'],
destination=SSL_KEY_PATH,
ownership='root.root',
permissions='440',
sudo=True,
)
deploy_file(
data=props['ssl_certificate'],
destination=SSL_CERT_PATH,
ownership='root.root',
permissions='444',
sudo=True,
)
else:
download_and_deploy_file_from_blueprint(
source=BLUEPRINT_SSL_KEY_PATH.format(
key_file=props['ssl_key'],
),
destination=SSL_KEY_PATH,
ownership='root.root',
permissions='440',
ctx=ctx,
)
download_and_deploy_file_from_blueprint(
source=BLUEPRINT_SSL_CERT_PATH.format(
cert_file=props['ssl_certificate'],
),
destination=SSL_CERT_PATH,
ownership='root.root',
permissions='444',
ctx=ctx,
)
else:
ctx.logger.info('Generating SSL certificate')
generate_certs(SSL_KEY_PATH, SSL_CERT_PATH, ctx.logger)
with open(SSL_CERT_PATH) as crt_handle:
ctx.instance.runtime_properties['ssl_certificate'] = crt_handle.read()
ctx.logger.info('Reloading systemd configuration')
reload_systemd_configuration()
@operation
def configure(ctx):
props = ctx.node.properties
ctx.logger.info('Configuring nagios web user')
username = props['nagios_web_username']
password = props['nagios_web_password']
tmpdir = tempfile.mkdtemp()
tmp_htpass = os.path.join(tmpdir, 'passwd')
run(['htpasswd', '-bc', tmp_htpass, username, password])
run(['mv', tmp_htpass, '/etc/nagios/passwd'], sudo=True)
run(['rm', '-rf', tmpdir])
run(['chown', 'root.apache', '/etc/nagios/passwd'], sudo=True)
run(['chmod', '640', '/etc/nagios/passwd'], sudo=True)
run(['usermod', '-G', 'nagios', 'apache'], sudo=True)
ctx.logger.info('Deploying automated reaction configuration')
# We're using username+password because current token implementation is
# unsuitable for this.
reaction_configuration = {
'username': props['cloudify_manager_username'],
'password': props['cloudify_manager_password'],
}
deploy_file(
data=json.dumps(reaction_configuration),
destination='/etc/nagios/cloudify_manager.json',
ownership='nagios.{group}'.format(
# Must have the group of the agent user for reconcile operation to
# work correctly
group=grp.getgrgid(os.getgid()).gr_name,
),
permissions='440',
sudo=True,
)
notification_plugin_storage_dir = '/var/spool/nagios/cloudifyreaction'
run(['mkdir', '-p', notification_plugin_storage_dir], sudo=True)
run(['restorecon', notification_plugin_storage_dir], sudo=True)
run(['chown', 'nagios.nagios', notification_plugin_storage_dir],
sudo=True)
run(['chmod', '750', notification_plugin_storage_dir], sudo=True)
ctx.logger.info('Preparing object paths')
run(['rm', '-rf', BASE_OBJECTS_DIR], sudo=True)
object_subdirs = [
'checks',
'commands',
'contacts',
'groups/group_instances',
'groups/tenants',
'groups/types',
'templates',
'timeperiods',
'deployments',
'snmp_traps',
'targets',
'target_types',
'tenants',
]
for subdir in object_subdirs:
subdir = os.path.join(BASE_OBJECTS_DIR, subdir)
run(['mkdir', '-p', subdir], sudo=True)
run(['chown', '-R', OBJECT_OWNERSHIP, BASE_OBJECTS_DIR], sudo=True)
run(['chmod', '-R', OBJECT_DIR_PERMISSIONS, BASE_OBJECTS_DIR], sudo=True)
ctx.logger.info('Deploying nagios object configuration')
config_source_dest_params = (
# Fully qualified paths because these two go outside the objects dir
('cgi.cfg', '/etc/nagios/cgi.cfg', {'user': username}),
('nagios.cfg', '/etc/nagios/nagios.cfg', {}),
# The rest are 'normal' configuration files
('base_system.cfg', 'base_system.cfg', {}),
('command_host_icmp.cfg', 'commands/check_host_icmp.cfg', {}),
('command_no_check.cfg', 'commands/no_check.cfg', {}),
('command_local_load.cfg', 'commands/check_local_load.cfg', {}),
('command_local_disk.cfg', 'commands/check_local_disk.cfg', {}),
('command_snmp_value.cfg', 'commands/check_snmp_value.cfg', {}),
('command_check_nagios_command_file.cfg',
'commands/check_nagios_command_file.cfg', {}),
('command_snmp_aggregate.cfg',
'commands/check_snmp_aggregate.cfg', {}),
('command_group_aggregate.cfg',
'commands/check_group_aggregate.cfg', {}),
('command_group_meta_aggregate.cfg',
'commands/check_group_meta_aggregate.cfg', {}),
('command_snmptrap_checks.cfg',
'commands/check_snmptrap_checks.cfg', {}),
('notification.cfg', 'commands/notify_automation.cfg', {}),
('contact.cfg', 'contacts/automation.cfg', {}),
('template_generic_service.cfg', 'templates/generic_service.cfg', {}),
('template_generic_host.cfg', 'templates/generic_host.cfg', {}),
('template_pseudo_host.cfg', 'templates/pseudo_host.cfg', {}),
('timeperiod_24x7.cfg', 'timeperiods/24x7.cfg', {}),
)
for source, dest, params in config_source_dest_params:
deploy_configuration_file(
ctx.logger,
source=os.path.join('resources/base_configuration', source),
destination=dest,
template_params=params,
# We can't validate before we've put all of the configuration in
# place as it will be invalid until it's finished
validate=False,
# We can't reload, it's not running yet
reload_service=False,
sudo=True,
)
ctx.logger.info('Configuring httpd for ssl')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/httpd.conf',
),
destination='/etc/httpd/conf/httpd.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/ssl.conf',
),
destination='/etc/httpd/conf.d/ssl.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
ctx.logger.info('Configuring httpd for nagiosrest')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/httpd_nagiosrest.conf',
),
destination='/etc/httpd/conf.d/nagiosrest.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
ctx.logger.info('Allowing nagiosrest to restart nagios')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/sudoers-nagiosrest',
),
destination='/etc/sudoers.d/nagios-service-restart',
ownership='root.root',
permissions='440',
sudo=True,
)
ctx.logger.info('Deploying base SNMP configuration')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/snmp',
),
destination='/etc/snmp/snmp.conf',
ownership='root.root',
permissions='440',
sudo=True,
)
trap_community = ctx.node.properties['trap_community']
if trap_community:
ctx.logger.info('Configuring SNMP traps to use handler')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/snmptrapd',
),
destination='/etc/snmp/snmptrapd.conf',
ownership='root.root',
permissions='440',
sudo=True,
template_params={
'trap_community': trap_community,
},
)
ctx.logger.info('Configuring notification script')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/incron.allow',
),
destination='/etc/incron.allow',
ownership='root.root',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/incron_root_spool',
),
destination='/var/spool/incron/root',
ownership='root.root',
permissions='400',
template_params={
'homedir': os.path.expanduser('~'),
},
sudo=True,
)
agent_config_dir = os.path.join(
os.path.expanduser('~'),
'.cfy-agent',
)
agent_configs = [
os.path.join(agent_config_dir, filename)
for filename in os.listdir(agent_config_dir)
]
# We'll use the most recently updated agent config
current_agent_config = max(agent_configs, key=os.path.getmtime)
run(
[
'/usr/local/bin/update_notify_cloudify_configuration',
current_agent_config,
],
sudo=True,
)
ctx.logger.info('Deploying logging configuration')
level = props['component_log_level'].upper()
validate_level = logging.getLevelName(level)
if not isinstance(validate_level, int):
raise NonRecoverableError(
'{level} is not a valid logging level. '
'It is recommended that component_log_level be set to one of '
'DEBUG, INFO, WARNING, ERROR'.format(level=level)
)
component_logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(name)s(%(process)s) [%(levelname)s]: %(message)s',
},
},
'handlers': {
'syslog': {
'formatter': 'default',
'level': level,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
},
},
'loggers': {
'': {
'handlers': ['syslog'],
'level': level,
'propagate': True,
},
},
}
deploy_file(
data=json.dumps(component_logging_config),
destination='/etc/nagios/cloudify_components_logging.cfg',
ownership='root.nagios',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/logrotate_config',
),
destination='/etc/logrotate.d/managed_nagios',
ownership='root.root',
permissions='444',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/rsyslog_config',
),
destination='/etc/rsyslog.d/managed_nagios_logging.conf',
ownership='root.root',
permissions='444',
sudo=True,
)
stop_service('rsyslog')
start_service('rsyslog')
@operation
def start(ctx):
ctx.logger.info('Enabling and starting nagios and httpd services')
services = ['nagios', 'incrond']
if ctx.node.properties['start_nagiosrest']:
services.extend(NAGIOSREST_SERVICES)
if ctx.node.properties['trap_community']:
services.append('snmptrapd')
for service in services:
enable_service(service)
start_service(service)
@operation
def delete(ctx):
ctx.logger.info('Uninstalling nagios and web server packages')
yum_remove([
'nagios',
'httpd', # Installed by nagios, remove it as it is outward facing
'nagios-selinux',
'nagios-plugins-load',
'nagios-plugins-disk',
'nagios-plugins-ping',
'nagios-plugins-snmp',
'net-snmp',
])
ctx.logger.info('Removing nagiosrest')
stop_service('nagiosrest-gunicorn')
disable_service('nagiosrest-gunicorn')
run(['rm', '/usr/lib/systemd/system/nagiosrest-gunicorn.service'],
sudo=True)
reload_systemd_configuration()
ctx.logger.info('Removing leftover data, configuration, and scripts')
for path in (
'/etc/nagios',
'/etc/httpd',
'/usr/lib64/nagios',
'/usr/local/www/nagiosrest',
'/var/spool/nagios',
'/var/log/nagios',
'/etc/snmp',
'/var/spool/incron/root',
):
run(['rm', '-rf', path], sudo=True)
def _node_has_nagiosrest_properties(node):
return 'nagiosrest_monitoring' in node.get('properties', {})
@operation
def reconcile_monitoring(ctx, only_deployments=None, only_tenants=None):
if not only_deployments:
only_deployments = []
if not only_tenants:
only_tenants = []
ctx.logger.info('Getting tenant list')
tenants = [
tenant['name']
for tenant in get_entities(
entity_type='tenants',
tenant='default_tenant',
properties=['name'],
logger=ctx.logger,
)
]
problem_deployments = {}
targets = None
for tenant in tenants:
if only_tenants and tenant not in only_tenants:
ctx.logger.info('Skipping tenant {tenant}'.format(
tenant=tenant,
))
continue
ctx.logger.info('Checking deployments for tenant {tenant}'.format(
tenant=tenant,
))
targets = {}
interesting_nodes = get_entities(
entity_type='nodes',
tenant=tenant,
properties=['deployment_id', 'id'],
logger=ctx.logger,
include=_node_has_nagiosrest_properties,
)
ctx.logger.info(
'Found {num} nodes with monitoring configuration'.format(
num=len(interesting_nodes),
)
)
notified_skipped_deployments = []
for node in interesting_nodes:
dep_id = node['deployment_id']
if only_deployments and dep_id not in only_deployments:
if dep_id not in notified_skipped_deployments:
ctx.logger.info('Skipping deployment {dep}'.format(
dep=dep_id,
))
notified_skipped_deployments.append(dep_id)
continue
if dep_id not in targets:
targets[dep_id] = []
targets[dep_id].append(node['id'])
if targets:
for deployment, nodes in targets.items():
ctx.logger.info(
'Starting monitoring for deployment {deployment}'.format(
deployment=deployment,
)
)
try:
run_workflow(
tenant=tenant,
deployment=deployment,
workflow_id='execute_operation',
parameters={
"node_ids": nodes,
"operation": (
"cloudify.interfaces.monitoring.start"
),
},
allow_custom_parameters=False,
force=False,
logger=ctx.logger,
)
except StartWorkflowFailed as err:
ctx.logger.error(
'{deployment} failed to start workflow: {err}'.format(
deployment=deployment,
err=text_type(err),
)
)
if tenant not in problem_deployments:
problem_deployments[tenant] = []
problem_deployments[tenant].append(deployment)
if targets:
ctx.logger.info('All monitored instances not listed as problems '
'should be re-added to '
'nagios within a short time. See individual '
'deployments for execution states. '
'Problem messages state: '
'Tenant <name> had problems starting workflows, '
'and list which deployments had these problems. '
'If any of these appear you can re-run just those '
'deployments by using the only_deployments '
'argument.')
if problem_deployments:
for tenant in problem_deployments:
ctx.logger.warn(
'Tenant {tenant} had problems starting workflows for '
'deployments: {deps}'.format(
tenant=tenant,
deps=','.join(problem_deployments[tenant]),
)
)
else:
ctx.logger.info('No problems were reported starting workflows.')
else:
ctx.logger.warn('Nothing needed to be done. Either the combination '
'of tenant and deployment filtering left no targets '
'or there are no monitored deployments using the '
'nagiosrest plugin on the cloudify manager.')
@operation
def start_nagiosrest(ctx):
ctx.logger.info('Enabling and starting nagios and httpd services')
services = ['httpd', 'nagiosrest-gunicorn']
for service in services:
enable_service(service)
start_service(service)
|
cloudify-cosmo/cloudify-managed-nagios-plugin
|
managed_nagios_plugin/nagios/tasks.py
|
tasks.py
|
py
| 26,382
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11711962924
|
import os
import numpy as np
import bpy
import mathutils
import math
from satvis.utils.blender_utils import make_sat, make_sun, \
make_camera, make_torch, setup_depth, rotate_sat, \
rotate_earth, save_render, get_data, CAMERA_VIEW_DIRECTION, \
get_sat_path, move_sun, move_chaser
from satvis.orbital_dynamics.sun_position import julian_date, get_sun_dir
from satvis.orbital_dynamics.satellite import ReferenceSatellite
from satvis.orbital_dynamics.clohessy_wiltshire import compute_cw
def setup_scene(configs):
bpy.context.scene.render.resolution_x = configs["height"]
bpy.context.scene.render.resolution_y = configs["height"]
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.world.horizon_color = (0, 0, 0)
bpy.context.scene.world.light_settings.use_environment_light = True
bpy.context.scene.world.light_settings.environment_energy = 0.02
def setup_params_partial(configs, params, depth_path):
# Setup parameters
initial_pos = np.array(configs["initial_pos"], dtype=np.float32) * configs["distance_scale"]
configs["cam_pos"] = np.array(configs["cam_pos"], dtype=np.float32) * configs["distance_scale"]
configs["max_depth"] = configs["max_depth"] * configs["distance_scale"]
configs["attitude"] = [math.radians(float(i)) for i in configs["attitude"]]
# If ang_vel is not a list of euler angles, get the rotation from axis angle
if not isinstance(configs["ang_vel"], (list, np.ndarray)):
axis = configs["axis"]
axis /= np.linalg.norm(axis)
# Rotate axis to line up with the camera view direction
rotation_diff = mathutils.Vector(CAMERA_VIEW_DIRECTION).rotation_difference(mathutils.Vector(configs["cam_dir"]))
axis = mathutils.Vector(axis)
axis.rotate(rotation_diff)
# Work out the rotation per step as a quaternion
angle = math.radians(configs["ang_vel"])
rotation_step = mathutils.Quaternion(axis, angle)
else:
ang_vel = [math.radians(float(i)) for i in configs["ang_vel"]]
rotation_step = mathutils.Euler(ang_vel, 'XYZ').to_quaternion()
# Convert angular velocity into quaternion rotation per step
axis, angle = rotation_step.to_axis_angle()
rotation_step = mathutils.Quaternion(axis, angle*configs["frame_skip"]/configs["fps"])
if params.sun_behind:
configs["sun_dir"] = 0
# If sun_dir is an angle, rotate this much from the view direction, around the world z axis
if not isinstance(configs["sun_dir"], (list, np.ndarray)):
angle = configs["sun_dir"] * np.pi / 180.
Rz = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]]) # rotation matrix about z
sun_dir = Rz @ configs["cam_dir"]
configs["sun_dir"] = sun_dir
# Make objects
earth = bpy.data.objects['Earth']
earth.location = initial_pos
earth.scale = np.array([1,1,1]) * 637100 * configs['distance_scale']
#earth.transform_apply(scale=True)
sat_initial_attitude = mathutils.Euler(configs["attitude"], 'XYZ').to_quaternion()
sat_path = get_sat_path(configs["satObjDir"], configs["target"])
sat = make_sat(sat_path, sat_initial_attitude, configs["distance_scale"])
sun = make_sun(configs["sun_dir"], -10000000*configs["distance_scale"]*np.array(configs["sun_dir"]))
cam = make_camera(configs["cam_pos"], configs["cam_dir"], configs["distance_scale"], configs["fov"])
torch = make_torch(configs["cam_pos"], configs["cam_dir"], configs["distance_scale"], use_torch=not params.no_torch)
setup_depth(depth_path)
# Set clip distance
cam.data.clip_end = 1.5 * np.linalg.norm(initial_pos)
for a in bpy.context.screen.areas:
if a.type == 'VIEW_3D':
for s in a.spaces:
if s.type == 'VIEW_3D':
s.clip_end = 1.5 * np.linalg.norm(initial_pos)
return rotation_step, earth, sat, sun, cam, torch
def setup_params_full(configs, params, depth_path):
# Setup random orbit
inclination, ascending_node, periapsis = np.random.rand(3) * np.pi
mu = float(configs["mu"])
angular_momentum = np.sqrt(mu * configs["radius"])
ref_sat = ReferenceSatellite(0, angular_momentum, mu, inclination, ascending_node, periapsis)
print(ref_sat)
# Start at a random point in orbit
num_steps_in_orbit = ref_sat.period * configs["fps"]
start_iter = np.random.randint(num_steps_in_orbit)
times = np.arange(configs["duration"]*configs["fps"]+1) / configs["fps"] + start_iter
anomalies = times / num_steps_in_orbit * 2*np.pi
anomalies = anomalies % (2*np.pi)
ref_sat.set_states(anomalies)
# Convert date and time to Julian date
date = configs["date"].split('/')
assert(len(date) == 3 and len(date[2]) == 4), 'Date should be in the form dd/mm/yyyy'
year, month, day = [float(i) for i in date]
day += float(configs["time"]) / 24.
jd = julian_date(year, month, day)
# Get initial position of Earth, Sun and chaser
initial_pos = np.array(configs["radius"] * configs["distance_scale"]) * np.array([-1, 0, 0])
configs["max_depth"] = configs["max_depth"] * configs["distance_scale"]
configs["attitude"] = [math.radians(float(i)) for i in configs["attitude"]]
sun_dir = get_sun_dir(jd, 0, ref_sat.get_state()[:3])
# Get states of target and chaser
# If ang_vel is not a list of euler angles, get the rotation from axis angle
if not isinstance(configs["ang_vel"], (list, np.ndarray)):
axis = mathutils.Vector(configs["axis"])
axis.normalize()
# Work out the rotation per step as a quaternion
angle = math.radians(configs["ang_vel"])
rotation_step = mathutils.Quaternion(axis, angle)
else:
ang_vel = [math.radians(float(i)) for i in configs["ang_vel"]]
rotation_step = mathutils.Euler(ang_vel, 'XYZ').to_quaternion()
# Convert angular velocity into quaternion rotation per step
axis, angle = rotation_step.to_axis_angle()
rotation_step = mathutils.Quaternion(axis, angle*configs["frame_skip"]/configs["fps"])
# Get chaser states
chaser_initial_state = np.array([configs["distance"], 0, 0, 0, -2*ref_sat.omega*configs["distance"], 0]) # Initial state for a circular relative orbit
chaser_pos = chaser_initial_state[:3] * configs["distance_scale"]
chaser_dir = -chaser_initial_state[:3] / np.linalg.norm(chaser_initial_state[:3])
chaser_states = compute_cw(chaser_initial_state, ref_sat, anomalies)
# Get sun direction at each step
sun_dirs = []
for i, t in enumerate(times):
ref_sat.set_iter(i)
sun_dirs.append(get_sun_dir(jd, times[i], ref_sat.get_state()))
# Make objects
earth = bpy.data.objects['Earth']
earth.location = initial_pos
earth.scale = np.array([1,1,1]) * 637100 * configs['distance_scale']
#earth.transform_apply(scale=True)
sat_initial_attitude = mathutils.Euler(configs["attitude"], 'XYZ').to_quaternion()
sat_path = get_sat_path(configs["satObjDir"], configs["target"])
sat = make_sat(sat_path, sat_initial_attitude, configs["distance_scale"])
sun = make_sun(sun_dir, -10000000*configs["distance_scale"]*np.array(sun_dir))
cam = make_camera(chaser_pos, chaser_dir, configs["distance_scale"], configs["fov"])
torch = make_torch(chaser_pos, chaser_dir, configs["distance_scale"], use_torch=not params.no_torch)
setup_depth(depth_path)
# Set clip distance
cam.data.clip_end = 1.5 * np.linalg.norm(initial_pos)
for a in bpy.context.screen.areas:
if a.type == 'VIEW_3D':
for s in a.spaces:
if s.type == 'VIEW_3D':
s.clip_end = 1.5 * np.linalg.norm(initial_pos)
return rotation_step, earth, sat, sun, cam, torch, ref_sat.period, chaser_states, sun_dirs
def animate(configs, params, save_path, data_path, rotation_step, earth, sat, sun, cam, torch, period, chaser_states=None, sun_dirs=None):
frame_end = configs["duration"] * configs["fps"]
bpy.context.scene.frame_end = configs["duration"] * configs["fps"]
earth_rot_angle = configs["frame_skip"] / configs["fps"] / period * 2 * np.pi
# Rotate satellite by constant angular velocity
for i in range(frame_end//configs["frame_skip"]+1):
rotate_sat(sat, i*configs["frame_skip"], rotation_step)
rotate_earth(earth, i*configs["frame_skip"], earth_rot_angle)
# If doing a full simulation, move chaser and sun accordingly to the dynamics
if chaser_states is not None:
iter = i * configs["frame_skip"]
move_sun(sun, i, sun_dirs[iter], -1E8*configs["distance_scale"]*sun_dirs[iter])
move_chaser(cam, torch, i, chaser_states[iter,:3]*configs["distance_scale"], -chaser_states[iter,:3]/np.linalg.norm(chaser_states[iter,:3]))
# Save animation
data = []
for i in range(frame_end//configs["frame_skip"]+1):
bpy.context.scene.frame_set(i*configs["frame_skip"])
save_render(i, save_path)
data.append(get_data(sat, cam, i/configs["fps"]*configs["frame_skip"], configs["distance_scale"]))
# Save data to file
np.savetxt(os.path.join(data_path, "data.txt"), data, header="pos[3], target q[4], chaser q[4], time")
|
Ben-Guthrie/satvis
|
utils/vis_utils.py
|
vis_utils.py
|
py
| 9,405
|
python
|
en
|
code
| 3
|
github-code
|
6
|
41117547284
|
"""
Created by hu-jinwen on 2022/4/25
"""
# Open the source and destination files
file_read = open("content")
file_write = open("content[copy]", "w")
# Read and write
while True:
# Read one line of content
text = file_read.readline()
# Check whether any content was read
if not text:
break
file_write.write(text)
# Close the files
file_write.close()
file_read.close()
|
wu-qiqin/python_learn
|
documents/open_write_read_copy.py
|
open_write_read_copy.py
|
py
| 341
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8213198289
|
import sublime
import sublime_plugin
import os
class JaiToolsBuildCommand(sublime_plugin.WindowCommand):
common_parent_dir = None
abs_file_paths = []
rel_file_paths = []
active_rel_file_path = None
def run(self):
paths_set = get_all_jai_project_file_paths()
# Add open files to the paths set, in case they're not part of the project
for view in sublime.active_window().views():
if view_is_jai(view) and view.file_name() is not None:
paths_set.add(view.file_name())
if len(paths_set) == 0:
sublime.error_message('There are no Jai files open or in your project. Save a Jai file and try again.')
return None
self.common_parent_dir = get_common_parent_dir(list(paths_set))
if len(paths_set) == 1:
self.active_rel_file_path = os.path.relpath(list(paths_set)[0], self.common_parent_dir)
self.perform_exec_command()
else:
self.abs_file_paths = sorted(list(paths_set))
self.rel_file_paths = list(map(lambda p: os.path.relpath(p, self.common_parent_dir), self.abs_file_paths))
list_input = map(lambda p: "Run 'jai " + p + "'", self.rel_file_paths)
self.window.show_quick_panel(list(list_input), self.on_quick_panel_done)
def perform_exec_command(self):
exec_args = {
'cmd': ['jai', self.active_rel_file_path],
'working_dir': self.common_parent_dir,
'file_regex': build_result_file_regex
}
self.window.run_command('exec', exec_args)
def on_quick_panel_done(self, file_path_index):
if file_path_index < 0:
return
self.active_rel_file_path = self.rel_file_paths[file_path_index]
self.perform_exec_command()
|
RobinWragg/JaiTools
|
JaiToolsBuildCommand.py
|
JaiToolsBuildCommand.py
|
py
| 1,602
|
python
|
en
|
code
| 28
|
github-code
|
6
|
16557567463
|
from zscript import *
#######################################################################################################################
# p_init = """L = 1350
# albedo = 0.3
# epsilon = 1
# sigma = 5.67*10^-8
# waterdepth = 4000
# heatcapground = 4.2*1000^2*waterdepth
# heatcapair = 100^2*30
#
# csurface = 6.916*10^7
# tsurface = 1.669*10^8
# circlearea = 6.39*10^13
# cshadow = 8.425*10^6
# tshadow = 4.910*10^7
# cair = 226
# cground = 247
# tair = 282
# tground = 303
# tgroundsolar = L*(1-albedo)*tshadow
# cgroundsolar = L*(1-albedo)*cshadow
# cgroundcap = heatcapground*csurface
# caircap = heatcapair*csurface
# tgroundcap = heatcapground*tsurface
# taircap = heatcapair*tsurface
# cgroundcon = cground*cgroundcap
# caircon = cair*caircap
# tgroundcon = tground*tgroundcap
# taircon = tair*taircap
# dt = 60*60*24
#
# tcair = 0
# tcground = 0
# tairspace == (epsilon*tsurface*sigma*tair^4)
# tairground == (epsilon*tsurface*sigma*tair^4)
# tgroundair == epsilon*tsurface*sigma*tground^4
#
# ctair = 0
# ctground = 0
# cairspace == (epsilon*csurface*sigma*cair^4)
# cairground == (epsilon*csurface*sigma*cair^4)
# cgroundair == epsilon*csurface*sigma*cground^4
#
# tairin == tgroundair + ctair
# tairout == tairground + tairspace + tcair
# tgroundin == tairground + ctground + tgroundsolar
# tgroundout == tcground + tgroundair
#
# cairin == cgroundair + tcair
# cairout == cairground + cairspace + tcair
# cgroundin == cairground + tcground + cgroundsolar
# cgroundout == ctground + cgroundair
# """
#
# p_loop = """
# tcair = tairin - tairout
# tcground = tgroundin - tgroundout
# ctair = cairin - cairout
# ctground = cgroundin - cgroundout
#
# cairdelta = cairin - cairout
# cgrounddelta = cgroundin - cgroundout
# tairdelta = tairin - tairout
# tgrounddelta = tgroundin - tgroundout
#
# caircon = caircon + cairdelta*dt
# cgroundcon = cgroundcon + cgrounddelta*dt
# taircon = taircon + tairdelta*dt
# tgroundcon = tgroundcon + tgrounddelta*dt
#
# cair = caircon/caircap
# cground = cgroundcon/cgroundcap
# tair = taircon/taircap
# tground = tgroundcon/tgroundcap
# """
#
# env = {}
# compilerun(p_init, env)
#
# for i in range(100):
# print()
# print()
# print(i)
# compilerun(p_loop, env)
#
# print('--------------------------------------------------------------------------------------------------------------')
# for k, v in env.items():
# print(k, ': ', v(env))
# print('--------------------------------------------------------------------------------------------------------------')
# print(env)
#######################################################################################################################
p = '''
p = 9
c = p < 10
c
'''
print(list(compilerun(p, {})))
f = '''
for i=0; i < 10; i+1 | i in list
a = a + i
b = b + a + a
c = c + b + b
d = d + c + c
next
'''
'''x = []
for i in lexer(f):
x.append(i)
if i[0] == 'NL':
print(x)
x = []
'''
repl()
|
zavierboyd/ZS-Appengine
|
testing.py
|
testing.py
|
py
| 2,943
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4083668934
|
from urllib.parse import urlencode
import httpx
from common.kan_params import crd_params
from common.voe_ipc import KanAgent
#host = 'localhost'
#host = 'kan-agent'
#port = '8088'
class KanAgentClient:
#def __init__(self, host=host, port=port, scope='default', version='v1', ref='v1alpha2.ReferenceK8sCRD'):
def __init__(self, scope='default', version='v1', ref='v1alpha2.ReferenceK8sCRD'):
#self.url = f'http://{host}:{port}/v1alpha2/agent/references'
self.url = f'{KanAgent.Url}/v1alpha2/agent/references'
self.scope = scope
self.version = version
self.ref = ref
def _gen_params(self, crd_name):
return {
'scope': self.scope,
'version': self.version,
'ref': self.ref
}
def _get(self, crd_name, object_name):
params = self._gen_params(crd_name)
params['kind'] = crd_params[crd_name]['plural']
params['group'] = crd_params[crd_name]['group']
params['id'] = object_name
#FIXME error handling
#print(params)
try:
r = httpx.get(self.url, params=params)
except:
raise Exception('Cannot access Kan Agent')
return r.json()
def get_target(self, name):
return self._get('Target', name)
def get_device(self, name):
return self._get('Device', name)
def get_solution(self, name):
return self._get('Solution', name)
def get_instance(self, name):
return self._get('Instance', name)
def get_skill(self, name):
return self._get('Skill', name)
def get_model(self, name):
return self._get('Model', name)
def get_skill_with_instance_name_and_alias(self, name, instance_name, alias):
params = self._gen_params('Skill')
params['kind'] = crd_params['Skill']['plural']
params['group'] = crd_params['Skill']['group']
params['id'] = name
params['instance'] = instance_name
params['alias'] = alias
try:
r = httpx.get(self.url, params=params)
except:
raise Exception('Cannot access Kan Agent')
return r.json()
def export_model(self, name):
params = self._gen_params('Model')
params['kind'] = crd_params['Model']['plural']
params['group'] = crd_params['Model']['group']
params['id'] = name
params['lookup'] = 'download'
params['iteration'] = 'latest'
params['platform'] = 'ONNX'
try:
r = httpx.get(self.url, params=params)
except:
raise Exception('Cannot access Kan Agent')
return r.json()
def _post(self, crd_name, object_name, data):
params = self._gen_params(crd_name)
params['kind'] = crd_params[crd_name]['plural']
params['group'] = crd_params[crd_name]['group']
params['id'] = object_name
try:
r = httpx.post(self.url, params=params, json=data)
except:
raise Exception('Cannot access Kan Agent')
def post_instance_status(self, name, status_code, status_description):
self._post('Instance', name, data={"status_code": status_code, "status_description": status_description})
def post_instance_fps(self, name, skill_name, fps):
self._post('Instance', name, data={f'fps_{skill_name}': str(fps)})
if __name__ == '__main__':
sac = KanAgentClient()
print(sac.get_target('sdsdsd'))
|
Azure/KAN
|
src/edge/EdgeSolution/modules/common/common/kan_agent_client.py
|
kan_agent_client.py
|
py
| 3,527
|
python
|
en
|
code
| 61
|
github-code
|
6
|
4520925854
|
from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView, TokenVerifyView,
TokenObtainSlidingView, TokenRefreshSlidingView
)
from .views import SingUpView, BlacklistRefreshView
urlpatterns = [
path('signup/', SingUpView.as_view(), name='sign_up'),
# path('api/logout/', BlacklistRefreshView.as_view(), name="logout"),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),
]
|
rustamovjavohir/EcommerceSHOP
|
auth_user/urls.py
|
urls.py
|
py
| 638
|
python
|
en
|
code
| 8
|
github-code
|
6
|
6846678287
|
from tkinter import Frame, Label, Menu, ttk
from tkinter.messagebox import showinfo
import tkinter as tk
import requests
class AttackInfo(Frame):
def __init__(self, master=None):
super(AttackInfo, self).__init__(master)
master = master
self.type_label = Label(self, text=" ", font=('Helvetica', 18, "bold"), pady=10)
self.type_label.grid(row=0, column=2)
self.info_label = Label(self, text=" ", font=('Helvetica', 8), wraplength=600)
self.info_label.grid(row=1, column=2, sticky='w')
reply = requests.get('http://127.0.0.1:23432/attack/')
attacks = reply.json()
print(attacks)
attack_titles = [attack['type'] for attack in attacks]
attack_info = [attack['info'] for attack in attacks]
var = tk.Variable(value=attack_titles)
listbox = tk.Listbox(
self,
listvariable=var,
height=len(attack_titles))
listbox.grid(row=0, column=0, rowspan=2, sticky="ns")
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
scrollbar = ttk.Scrollbar(
self,
orient=tk.VERTICAL,
command=listbox.yview
)
listbox['yscrollcommand'] = scrollbar.set
scrollbar.grid(row=0, rowspan=2, column=1, sticky="ns")
def items_selected(event):
# get selected indices
self.info_label.destroy()
self.type_label.destroy()
selected_indices = listbox.curselection()[0]
print(selected_indices)
self.type_label = Label(self, text=attack_titles[selected_indices], font=('Helvetica', 18, "bold"), pady=10)
self.type_label.grid(row=0, column=2, sticky="n")
self.info_label = Label(self, text=attack_info[selected_indices], font=('Helvetica', 8), wraplength=600)
self.info_label.grid(row=1, column=2, sticky='n')
listbox.bind('<<ListboxSelect>>', items_selected)
|
iamcrysun/eqw
|
desktop/views/attackinfo.py
|
attackinfo.py
|
py
| 2,001
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10167784496
|
# (c) Nelen & Schuurmans. MIT licensed, see LICENSE.rst.
from __future__ import unicode_literals
from django.http.multipartparser import parse_header
from rest_framework.renderers import BaseRenderer
COLNAME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
class CSVRenderer(BaseRenderer):
"""
Renderer which serializes to csv.
"""
media_type = 'text/csv'
format = 'csv'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `obj` into csv.
"""
if data is None:
return ''
response = '"datetime (utc)";' + \
';'.join(['"%s"' % column for column in data.columns]) + '\n' + \
''.join(['%s\n' % row for row in \
['"%s";' % timestamp.strftime(COLNAME_FORMAT) + \
';'.join(['"%s"' % row[i] for i, _ in enumerate(data.columns)])
for timestamp, row in data.iterrows()]])
return response
|
ddsc/dikedata-api
|
dikedata_api/renderers.py
|
renderers.py
|
py
| 957
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14959290509
|
import json
from json import JSONEncoder
from yoti_python_sdk.crypto import Crypto
from yoti_python_sdk.http import SignedRequestBuilder
import yoti_python_sandbox
from .anchor import SandboxAnchor
from .attribute import SandboxAttribute
from .endpoint import SandboxEndpoint
from .sandbox_exception import SandboxException
from .token import YotiTokenRequest
from .token import YotiTokenResponse
class SandboxEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, YotiTokenRequest):
return o.__dict__()
if isinstance(o, SandboxAttribute):
return o.__dict__()
if isinstance(o, SandboxAnchor):
return o.__dict__()
return json.JSONEncoder.default(self, o)
class SandboxClient(object):
def __init__(self, sdk_id, pem_file, sandbox_url=None):
if sandbox_url is None:
sandbox_url = yoti_python_sandbox.DEFAULT_SANDBOX_URL
self.sdk_id = sdk_id
self.__endpoint = SandboxEndpoint(sdk_id)
self.__sandbox_url = sandbox_url
if isinstance(pem_file, Crypto):
self.__crypto = pem_file
else:
self.__crypto = Crypto.read_pem_file(pem_file, "SandboxClient.__init__")
def setup_sharing_profile(self, request_token):
"""
Using the supplied YotiTokenRequest, this function will make a request
to the defined sandbox environment to create a profile with the supplied values.
The returned token can be used against the sandbox environment to retrieve the profile
using the standard YotiClient.
:param YotiTokenRequest request_token:
:return: the token for accessing a profile
"""
request_path = self.__endpoint.get_sandbox_path()
payload = json.dumps(request_token, cls=SandboxEncoder).encode("utf-8")
signed_request = (
SignedRequestBuilder()
.with_pem_file(self.__crypto)
.with_base_url(self.__sandbox_url)
.with_endpoint(request_path)
.with_payload(payload)
.with_post()
.build()
)
response_payload = signed_request.execute()
if response_payload.status_code < 200 or response_payload.status_code >= 300:
raise SandboxException(
"Error making request to sandbox service: "
+ str(response_payload.status_code),
response_payload,
)
parsed = json.loads(response_payload.text)
return YotiTokenResponse(parsed["token"])
@staticmethod
def builder():
"""
Creates an instance of the sandbox client builder
:return: instance of SandboxClientBuilder
"""
return SandboxClientBuilder()
class SandboxClientBuilder(object):
def __init__(self):
self.__sdk_id = None
self.__pem_file = None
self.__sandbox_url = None
def for_application(self, sdk_id):
"""
Sets the application ID on the builder
:param str sdk_id: the SDK ID supplied from Yoti Hub
:return: the updated builder
"""
self.__sdk_id = sdk_id
return self
def with_pem_file(self, pem_file):
"""
Sets the pem file to be used on the builder
:param str pem_file: path to the PEM file
:return: the updated builder
"""
self.__pem_file = pem_file
return self
def with_sandbox_url(self, sandbox_url):
"""
Sets the URL of the sandbox environment on the builder
:param str sandbox_url: the sandbox environment URL
:return: the updated builder
"""
self.__sandbox_url = sandbox_url
return self
def build(self):
"""
Using all supplied values, create an instance of the SandboxClient.
:raises ValueError: one or more of the values is None
:return: instance of SandboxClient
"""
if self.__sdk_id is None or self.__pem_file is None:
raise ValueError("SDK ID/PEM file must not be None")
return SandboxClient(self.__sdk_id, self.__pem_file, self.__sandbox_url)
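# A hypothetical usage sketch (the SDK ID and PEM path below are placeholders, not values from this repository):
#
#     client = (
#         SandboxClient.builder()
#         .for_application("your-sdk-id")
#         .with_pem_file("/path/to/sandbox-key.pem")
#         .build()
#     )
#     token_response = client.setup_sharing_profile(request_token)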
|
getyoti/yoti-python-sdk-sandbox
|
yoti_python_sandbox/client.py
|
client.py
|
py
| 4,176
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6105704383
|
import requests
import sys
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
def promote_to_admin(s, url):
# login as the wiener user
login_url = url + "/login"
data_login = {"username": "wiener", "password": "peter"}
r = s.post(login_url, data=data_login, verify=False, proxies=proxies)
res = r.text
if "Log out" in res:
print("(+) Successfully logged in as the wiener user.")
# Exploit access control vulnerability to promote the user to admin
admin_roles_url = url + "/admin-roles?username=wiener&action=upgrade"
r = s.get(admin_roles_url, verify=False, proxies=proxies)
res = r.text
if "Admin panel" in res:
print("(+) Successfully promoted the user to administrator.")
else:
print("(-) Could not promote the user to administrator.")
sys.exit(-1)
else:
print("(-) Could not login as the wiener user.")
sys.exit(-1)
def main():
if len(sys.argv) != 2:
print("(+) Usage: %s <url>" % sys.argv[0])
print("(+) Example: %s www.example.com" % sys.argv[0])
sys.exit(-1)
s = requests.Session()
url = sys.argv[1]
promote_to_admin(s, url)
if __name__ == "__main__":
main()
|
rkhal101/Web-Security-Academy-Series
|
broken-access-control/lab-06/access-control-lab-06.py
|
access-control-lab-06.py
|
py
| 1,364
|
python
|
en
|
code
| 396
|
github-code
|
6
|
20701388833
|
import sqlite3
samira = sqlite3.connect('shallownowschool.db')
cursor = samira.cursor()
cursor.execute("""
INSERT INTO td_estudante(nome, endereco, nascimento, matricula)
VALUES ('Maria da Conceição', 'Rua da Paz', '1902-12-12', 20161382596);
""")
samira.commit()
print("Inserido com sucesso.")
samira.close()
|
kemelynfigueiredo/TopicosEspeciais
|
MeuPrimeiroSQLite/temp.py
|
temp.py
|
py
| 324
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
13511594492
|
"""
This module is unit-tested using doctest when run directly. :)
python duration_string.py
"""
import doctest
import re
__version__ = "1.2.3"
TIMING_MAP = {"s": 1, "m": 60, "h": 60 * 60, "d": 24 * 60 * 60}
def is_valid(string):
"""
>>> is_valid('')
False
>>> is_valid(None)
False
>>> is_valid('f')
False
>>> is_valid('2')
True
>>> is_valid('2x')
False
>>> is_valid('2s')
True
>>> is_valid(2)
True
>>> is_valid(' 2 ')
True
>>> is_valid(' 2h ')
True
>>> is_valid(' 2 h ')
True
"""
val, _ = is_valid_w_reason(string)
return val
def is_valid_w_reason(string):
if not string:
return False, "Valid String value required."
if not isinstance(string, str) and not isinstance(string, int):
return False, "Invalid duration: %s" % string
if isinstance(string, int):
string = str(string)
_, time_string = separate_time_string(string)
if time_string and time_string not in TIMING_MAP:
return (
False,
"Invalid duration string. Valid ones are %s"
% ",".join(list(TIMING_MAP.keys())),
)
return True, ""
def separate_time_string(string):
if isinstance(string, int):
string = str(string)
string = re.sub(r"\s", "", string)
time_value = re.sub(r"[^\d\.]", "", string)
time_string = string.replace(time_value, "")
return time_value, time_string
def get_valid_time_parts(string):
validity, reason = is_valid_w_reason(string)
if not validity:
raise ValueError(reason)
return separate_time_string(string)
def get(string):
"""
>>> get('2h')
7200
>>> get(' 2h ')
7200
>>> get(' 2 h ')
7200
>>> get('2s')
2
>>> get('1m')
60
>>> get('0.5m')
30
>>> get('2')
2
>>> get(2)
2
"""
time_value, time_string = get_valid_time_parts(string)
multiplier = TIMING_MAP.get(time_string, TIMING_MAP["s"])
return int(multiplier * float(time_value))
if __name__ == "__main__":
doctest.testmod()
|
geonyoro/durationstring
|
durationstring.py
|
durationstring.py
|
py
| 2,106
|
python
|
en
|
code
| 2
|
github-code
|
6
|
17625798812
|
import sys
import os
import pandas as pd
from util import load_column_transformers, preprocess_data
from alphagan_class import AlphaGAN
from keras.losses import MeanAbsoluteError
from bigan import BIGAN
import keras.backend as K
import tensorflow as tf
import numpy as np
if __name__ == '__main__':
session = K.get_session()
init = tf.global_variables_initializer()
session.run(init)
ag = AlphaGAN()
ag.load_pretrained_models('./snapshots/3900_')
test_normal_df = pd.read_csv('./data/test_set_normal.csv')
preprocess_data(test_normal_df, './data/ranges.csv')
test_abnomal_df = pd.read_csv('./data/test_set_abnomal.csv')
preprocess_data(test_abnomal_df, './data/ranges.csv')
X_1 = test_normal_df.to_numpy()
X_2 = test_abnomal_df.to_numpy()
Z_hat_1 = ag.encoder.predict(X_1)
X_hat_1 = ag.generator.predict(Z_hat_1)
Z_hat_2 = ag.encoder.predict(X_2)
X_hat_2 = ag.generator.predict(Z_hat_2)
rec_losses_normal = np.linalg.norm(np.subtract(X_1, X_hat_1), axis=1)
rec_losses_fraud = np.linalg.norm(np.subtract(X_2, X_hat_2), axis=1)
num = len(rec_losses_normal) + len(rec_losses_fraud)
print('Number of test samples: %d' % num)
THRESH = 9.25
rec_losses_normal_correct = [loss for loss in rec_losses_normal if loss < THRESH]
print('Precision of normal transactions: %1.2f%%(%d/%d)' % (len(rec_losses_normal_correct) * 100 / len(rec_losses_normal),
len(rec_losses_normal_correct), len(rec_losses_normal)))
rec_losses_fraud_correct = [loss for loss in rec_losses_fraud if loss > THRESH]
print('Precision of fraud transactions: %1.2f%%(%d/%d)' % \
(len(rec_losses_fraud_correct) * 100 / len(rec_losses_fraud), len(rec_losses_fraud_correct), len(rec_losses_fraud)))
|
royalsalute/fraud-creditcard-detection
|
eval.py
|
eval.py
|
py
| 1,784
|
python
|
en
|
code
| 1
|
github-code
|
6
|
30789951381
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.impute import KNNImputer
from sklearn.metrics import f1_score
# # Exploratory Data Analysis (EDA):
#
# Step 1: Loaded the dataset and examined its structure and dimensions.
#
# Step 2: Checked for missing values and handled them appropriately (e.g., imputation or removal).
#
# Step 3: Explored the distribution and summary statistics of each feature.
#
# Step 4: Visualized the relationships between variables using heat map.
# In[2]:
# Load the dataset
data = pd.read_csv(r"C:\Users\Nimisha\OneDrive\Desktop\Assessment\starcraft_player_data.csv")
# Display the first few rows of the dataset
data.head()
# In[3]:
data.info()
# In[4]:
# Check the shape of the dataset
print("Shape of the dataset:", data.shape)
# Check for missing values
print("Missing values:\n", data.isna().sum())
# Summary statistics
print("Summary statistics:\n", data.describe())
# In[5]:
# Check the distribution of the target variable
print("Distribution of the target variable:\n", data['LeagueIndex'].value_counts())
# In[6]:
class_counts = data['LeagueIndex'].value_counts()
plt.bar(class_counts.index, class_counts.values)
plt.xlabel('Class')
plt.ylabel('Count')
plt.title('Class Distribution')
plt.show()
# After looking at the dimension and structure of the dataset , I noticed a few important characteristics about the dataset:
# 1. Three columns are typed as objects: Age, TotalHours and HoursPerWeek. They contain no null values; instead they hold '?' placeholders, which must be either removed or imputed. First, we will simply remove all rows containing '?' from the dataset.
#
# 2. This is a class imbalance problem which we will address later on. As we can see there are very few data points with LeagueIndex 7 and 8.
# Conducted feature selection using correlation analysis and identified relevant features.
# In[7]:
data['Age'].unique()
# In[8]:
data['HoursPerWeek'].unique()
# In[9]:
data['TotalHours'].unique()
# In[10]:
data[data['TotalHours']=='?']
# I checked all three columns containing '?' and found that TotalHours has the most of them. Dropping those rows
# also removes the '?' rows of the other two columns, so that single step resolves the issue.
# In[11]:
data2 = data.drop(data[data['TotalHours'] == '?'].dropna().index)
# In[12]:
data2.head()
# In[13]:
data2.info()
# In[14]:
data2[data2['Age']=='?']
# In[15]:
data2[data2['HoursPerWeek']=='?']
# Then I converted all the 3 columns to integer type to find the correlation between the features.
# In[16]:
#converting them into integer
data2['Age'] = data2['Age'].astype('int64')
data2['HoursPerWeek'] = data2['HoursPerWeek'].astype('int64')
data2['TotalHours'] = data2['TotalHours'].astype('int64')
# In[17]:
data2.isna().sum()
# In[18]:
#Then I checked the correlation between columns to understand what impact the other features have on the target variable.
correl = data2.corr()
trace = go.Heatmap(z=correl.values,
x=correl.index.values,
y=correl.columns.values)
data=[trace]
layout = go.Layout(width=1000, height=900)
fig = go.Figure(data=data, layout=layout)
fig.show()
# In[19]:
sorted_corr = correl['LeagueIndex'].sort_values(ascending=False)
sorted_corr
#found the two least correlated columns to LeagueIndex i.e. GameID and TotalHours
# # Data Preprocessing and Feature Engineering:
#
# Step 1: Split the data into features (X) and the target variable (y) for rank prediction.
#
# Step 2: Scaled the continuous variables using standardization or normalization.
# In[20]:
# Split the dataset into features and target variable
X = data2.drop('LeagueIndex', axis=1)
y = data2['LeagueIndex']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling using StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# # Model Selection, Training and Evaluation:
#
# 1. Selected appropriate models for rank prediction, such as logistic regression, decision trees, random forests, gradient boosting, SVM, or Neural Network.
#
#
# 2. Split the data into training and testing sets for model evaluation.
#
#
# 3. Trained the chosen models on the training set.
#
#
# 4. Evaluated the trained models on the testing set using suitable metrics like F1 score. I used F1 score to evaluate the performance instead of accuracy because this is a class imbalance problem.
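# A small, self-contained illustration with made-up numbers (not taken from this dataset) of why plain
# accuracy can be misleading under class imbalance: a model that always predicts the majority class still
# looks accurate, while the F1 score exposes that the minority class is never predicted correctly.
from sklearn.metrics import accuracy_score, f1_score
y_true_demo = [1] * 90 + [7] * 10   # 90 majority-class samples, 10 minority-class samples
y_pred_demo = [1] * 100             # a trivial model that always predicts the majority class
print("demo accuracy     :", accuracy_score(y_true_demo, y_pred_demo))                  # 0.90
print("demo weighted F1  :", f1_score(y_true_demo, y_pred_demo, average='weighted'))    # ~0.85
print("demo per-class F1 :", f1_score(y_true_demo, y_pred_demo, average=None))          # [~0.95, 0.0]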
# In[21]:
# Create and train different models
models = [
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
SVC(),
MLPClassifier()
]
model_names = [
'Logistic Regression',
'Decision Tree',
'Random Forest',
'Gradient Boosting',
'SVM',
'Neural Network'
]
scores = []
# Evaluate models and print accuracy
for model, name in zip(models, model_names):
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
f1score = f1_score(y_test, y_pred, average='weighted')
print(f"{name} f1 Score: {f1score}")
scores.append(f1score)
# In[22]:
# Plotting the F1 scores
plt.figure(figsize=(8, 6))
plt.bar(model_names, scores)
plt.xlabel('Models')
plt.ylabel('F1 Score')
plt.title('Comparison of F1 Scores for Different Models')
plt.xticks(rotation=45)
plt.ylim(0, 1) # Set the y-axis limit
plt.show()
# # Class Imbalance Problem
# Now we will address the class imbalance problem by class weighting. Assign higher weights to the minority class samples or
# lower weights to the majority class samples during model training. This gives more importance to the minority class during the
# learning process. I added weights and re-evaluated the decision tree classifier.
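# An equivalent shortcut to the manual weights computed in the next cell (an alternative, not part of the
# original analysis) is scikit-learn's built-in balanced weighting, which assigns every class the weight
# n_samples / (n_classes * class_count):
balanced_dt = DecisionTreeClassifier(class_weight='balanced')  # could be fitted in place of dt_classifier below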
# In[23]:
# Calculate class weights as the inverse of class frequency, so minority classes get larger weights
classes, class_counts = np.unique(y_train, return_counts=True)
class_weights = dict(zip(classes, len(y_train) / (len(classes) * class_counts)))
# Create and train the decision tree classifier with class weights
dt_classifier = DecisionTreeClassifier(class_weight = class_weights)
dt_classifier.fit(X_train_scaled, y_train)
# Make predictions on the testing data
y_pred = dt_classifier.predict(X_test_scaled)
# Compute the weighted F1 score
f1score = f1_score(y_test, y_pred, average='weighted')
print("f1 Score:",f1score)
# # Removed least correlated columns
# In[24]:
#Next, we remove the two least correlated columns to LeagueIndex.
data3 = data2.drop(columns=['GameID','TotalHours'])
# In[25]:
# Split the dataset into features and target variable
X = data3.drop('LeagueIndex', axis=1)
y = data3['LeagueIndex']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling using StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Create and train different models
models = [
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
SVC(),
MLPClassifier()
]
model_names = [
'Logistic Regression',
'Decision Tree',
'Random Forest',
'Gradient Boosting',
'SVM',
'Neural Network'
]
# Evaluate models and print accuracy
for model, name in zip(models, model_names):
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
f1score = f1_score(y_test, y_pred, average="weighted")
print(f"{name} F1 Score: {f1score}")
# # K-Nearest Neighbors Classifier
# In[26]:
knn_model = KNeighborsClassifier(n_neighbors=14)
knn_model.fit(X_train_scaled, y_train)
knn_pred = knn_model.predict(X_test_scaled)
f1score = f1_score(y_test, knn_pred, average="weighted")
print("KNN F1 Score:", f1score)
# # Imputation using KNN
# Now we will perform imputation. Instead of dropping all the rows with '?', we will fill the missing values through imputation.
# In[27]:
sampledata = pd.read_csv(r"C:\Users\Nimisha\OneDrive\Desktop\Assessment\starcraft_player_data.csv")
# In[28]:
sampledata[['Age','TotalHours','HoursPerWeek']] = sampledata[['Age','TotalHours','HoursPerWeek']].replace('?', None)
# In[29]:
sampledata.info()
# In[30]:
sampledata.isna().sum()
# In[31]:
#imputing the values using knn
missingdata = sampledata[['Age','TotalHours','HoursPerWeek']]
# In[32]:
k = 5
knn_imputer = KNNImputer(n_neighbors=k)
imputed_data = knn_imputer.fit_transform(missingdata)
# In[33]:
df_imputed = pd.DataFrame(imputed_data, columns=missingdata.columns)
# In[34]:
df_imputed.info()
# In[35]:
sampledata[['Age','TotalHours','HoursPerWeek']] = df_imputed[['Age','TotalHours','HoursPerWeek']]
# In[36]:
sampledata.info()
# In[37]:
sampledata.isna().sum()
# In[38]:
# Split the dataset into features and target variable
X = sampledata.drop('LeagueIndex', axis=1)
y = sampledata['LeagueIndex']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=50)
X_train
X_test
y_train
y_test
# In[39]:
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_pred = rf_model.predict(X_test)
f1score = f1_score(y_test, rf_pred,average= "weighted")
print("Random Forest F1 Score:", f1score)
# Finally, let's address the hypothetical scenario where stakeholders want to collect more data and seek guidance. Based on the EDA and model results, I would suggest the following:
#
# 1. Collect more samples for the minority classes: Since the dataset is imbalanced, collecting more data for the underrepresented rank levels can improve the model's performance.
#
# 2. Gather additional features: If there are relevant features that are not present in the current dataset, collecting additional data with those features can enhance the model's predictive power.
#
# 3. Monitor data quality: Ensure that the new data collection process maintains data quality standards, such as avoiding missing values, outliers, or inconsistencies.
#
# 4. Perform iterative model updates: As more data becomes available, it's beneficial to periodically update and retrain the model using the augmented dataset to capture any evolving patterns or changes in player performance.
#
# These recommendations aim to enhance the predictive capabilities of the model and provide more accurate rank predictions.
# In[ ]:
| nimishakhaitan/Evil-Geniuses-assessment | Assessment_Data_Science.py | Assessment_Data_Science.py | py | 11,117 | python | en | code | 0 | github-code | 6 |
13659068319 |
from django.http import HttpResponse
from django.shortcuts import redirect, render
from .forms import ContactForm
from django.core.mail import send_mail, BadHeaderError
from config.settings import RECIPIENTS_EMAIL, DEFAULT_FROM_EMAIL
# Create your views here.
def contact_view(request):
if request.method == "GET":
form = ContactForm()
elif request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
from_email = form.cleaned_data['from_email']
message = form.cleaned_data['message']
try:
send_mail(f'{full_name} от {from_email}',
message,
DEFAULT_FROM_EMAIL,
[DEFAULT_FROM_EMAIL],
RECIPIENTS_EMAIL)
except BadHeaderError:
return HttpResponse('Ошибка в теме письма.')
return redirect('success')
else:
return HttpResponse('Неверный запрос.')
return render(request, "email/contact.html", {'form': form})
def success_view(request):
return render(request, "email/consuc.html")
| Dauka03/food_project_back | sendemail/views.py | views.py | py | 1,235 | python | en | code | 1 | github-code | 6 |
24519278933 |
import argparse
import os
import cv2
from wand.image import Image
import numpy as np
#ArgumentParser object: holds all the information needed to parse the command line into Python data types
#Create the ArgumentParser object
ap=argparse.ArgumentParser()
ap.add_argument("-i","--images",required=True, help="absolute path to the input image")
ap.add_argument("-c","--cascade",default="C:/Users/cat7892/Documents/GitHub/catholic/catface_detection/haarcascade_frontalcatface.xml",help="absolute path to detector haar cascade")
#parse_args(): converts the argument strings to objects and assigns them as attributes of the namespace
args=vars(ap.parse_args())
count=0
#imagefolder->only heic, jpg type images
imagefolder=args['images'].replace('\\','/')
for image in os.listdir(args["images"]):
print(image)
#If the image file format is 'heic', it is converted to 'jpg'.
if image[-3:]!='jpg':
img=Image(filename=imagefolder+'/'+image)
img.format='jpg'
image=image.replace('heic', 'jpg')
img.save(filename=imagefolder+'/'+image)
img.close()
os.remove(imagefolder+'/'+image.replace('jpg','heic'))
'''
# load image+convert grayscale
color=cv2.imread(imagefolder+'/'+image)
gray=cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
#load detector, then detect
detector=cv2.CascadeClassifier(args["cascade"])
rects=detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=10)
#loop over detect and save file
for (i, (x,y,w,h)) in enumerate(rects):
roi=color[y:y+h,x:x+w].copy()
print(x,y,w,h)
cv2.imwrite('C:/Users/cat7892/Documents/GitHub/catholic/catface_detection/test_detect/'+str(count)+'.jpg',roi)
cv2.waitKey()
count+=1
'''
| Zzang-yeah/catholic | cat_detection/feic_to_jpg.py | feic_to_jpg.py | py | 1,733 | python | en | code | 0 | github-code | 6 |
34607069404 |
import os
import json
import pandas as pd
import numpy as np
import networkx as nx
# Build the network graph from all nodes and edges
def form_graph(parent_path):
    every_pro_path = os.path.join(parent_path, r'dataset\secondExperiments\COVID_Node2Vec.csv')
    simGO_path = os.path.join(parent_path, r'dataset\secondExperiments\train_COVID_AllHuman_GoSim.xls')
    # Construct the graph
    G = nx.Graph()
    pro_file = pd.read_csv(every_pro_path)
    pro = np.array(pro_file["name"])
    proList = []  # index of each protein
for i in range(len(pro)):
proList.append(pro[i])
G.add_node(i)
edges_df = pd.read_excel(simGO_path)
source = np.array(edges_df["protein"])
target = np.array(edges_df["neghbor_protein"])
sim = np.array(edges_df["sim"])
edgesList = []
for j in range(len(source)):
edgesList.append((source[j].split("_")[1], target[j].split("_")[1]))
G.add_edge(int(source[j].split("_")[0]), int(target[j].split("_")[0]))
G[int(source[j].split("_")[0])][int(target[j].split("_")[0])]['weight'] = sim[j]
print("图形已构成")
return G
# Read all core (seed) nodes from the clustering result file
def readresultjson(parent_path, graph):
result_corefile = os.path.join(parent_path, r'result\secondExperiments\train_COVID_result.json')
with open(result_corefile, 'r', encoding='utf8')as fp:
json_data = json.load(fp)
seed = []
for core_k, core_v in json_data.items():
core = core_v[0]["cover"]
seed.extend(core)
seed = list(set(seed))
return json_data,seed
# Get the first-order neighbor nodes of each core
def getfirstneighbor(json_data,seed,graph):
adj = graph._adj
neighbordict = {}
for k,v in json_data.items():
allneighborNode = []
neighborNode = []
core = v[0]["cover"]
for i in core:
everyneigh = adj[int(i)].keys()
neighborNode.extend(everyneigh)
neighborNode = list(set(neighborNode))
for i in neighborNode:
if i not in seed:
allneighborNode.append(i)
neighbordict[k] = neighborNode
return adj,neighbordict
# Fitness function: scores a complex by a weighted combination of its edge density and modularity
def fitness_function(adj, complex, core, graph):
    sum_degree_in = 0   # sum of the internal (within-complex) degree over all vertices
    sum_degree_out = 0  # sum of the external (outgoing) degree over all vertices
    E = 0
    for i in range(len(complex)):
        degree_in = 0   # internal degree of this node
        degree_out = 0  # external degree of this node
i_adj = adj[complex[i]].keys()
for z in i_adj:
if z in complex:
if (complex[i], int(z)) in graph.edges():
degree_in += graph[complex[i]][int(z)]['weight']
else:
degree_in += graph[int(z)][complex[i]]['weight']
else:
if (complex[i], int(z)) in graph.edges():
degree_out += graph[complex[i]][int(z)]['weight']
else:
degree_out += graph[int(z)][complex[i]]['weight']
for j in range(len(complex)):
if i < j:
if (complex[i], complex[j]) in graph.edges() or (
complex[j], complex[i]) in graph.edges():
E += 1
sum_degree_in += degree_in
sum_degree_out += degree_out
a = 0.8
modularity = (sum_degree_in - sum_degree_out) / (sum_degree_out + sum_degree_in)
density = (2 * E) / (len(complex) * (len(complex) - 1))
score = a*density+(1-a)*modularity
return score
# Build core-attachment complexes according to the fitness score
def core_accessories(json_data,neighbordict,adj,graph):
resultDict = {}
for k, v in json_data.items():
complexjson = {}
resultList = []
core = [int(i) for i in v[0]["cover"]]
complex = core+neighbordict[k]
        # Total functional similarity between each first-order neighbor and the core
score_neighbordict = {}
for j in neighbordict[k]:
score = 0
for z in core:
if (int(z), int(j)) in graph.edges():
score += graph[int(z)][int(j)]['weight']
elif (int(j), int(z)) in graph.edges():
score += graph[int(j)][int(z)]['weight']
else:
score += 0
score_neighbordict[j] = score
score_neighbordict = sorted(score_neighbordict.items(), key=lambda item: item[1])
if len(complex) > 3:
core_score = fitness_function(adj, complex, core, graph)
for i in score_neighbordict:
# for i in neighbordict[k]:
if len(complex) > 3:
complex.remove(i[0])
complex_score = fitness_function(adj, complex, core, graph)
if complex_score >= core_score:
core_score = complex_score
else:
complex.append(i[0])
else:
break
elif len(complex) == 3:
core_score = fitness_function(adj, complex, core, graph)
else:
# continue
core_score = 0
# if len(core) > 1:
# core_score = fitness_function(adj,core)
# else:
# core_score = 0
# complex = core
# for i in neighbordict[k]:
# complex.append(i)
# complex_score = fitness_function(adj, complex)
# if complex_score >= core_score:
# core_score = complex_score
# else:
# complex.remove(i)
complexjson["cover"] = complex
complexjson["score"] = core_score
resultList.append(complexjson)
resultDict[k] = resultList
return resultDict
def savecomplex(parent_path,complexjson):
result_path = parent_path + r'\result\secondExperiments\train_COVID_resultComplex.json'
with open(result_path, 'w') as fw:
json.dump(complexjson, fw)
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.abspath('__file__'))
parent_path = os.path.dirname(ROOT_DIR)
graph = form_graph(parent_path)
json_data,seed = readresultjson(parent_path, graph)
adj,neighbordict = getfirstneighbor(json_data,seed,graph)
complexjson = core_accessories(json_data,neighbordict,adj,graph)
print(complexjson.__len__())
savecomplex(parent_path,complexjson)
| LittleBird120/DiseaseGenePredicition | DiseaseGenePredicition/20210315covering-clustering-algorithm - COVID/algorithm/formComplex.py | formComplex.py | py | 6,315 | python | en | code | 0 | github-code | 6 |
1414123757 |
import pytest
import os
import shutil
import tngsdk.project.workspace as workspace
from tngsdk.project.workspace import Workspace
from tngsdk.project.project import Project
class TestProjectUnit:
# create and return a temporary workspace 'test-ws'
@pytest.fixture(scope='module')
def workspace(self):
# start clean without workspace
if os.path.isdir('test-ws'):
shutil.rmtree('test-ws')
args = workspace.parse_args_workspace([
'-w', 'test-ws',
'--debug'
])
workspace.init_workspace(args)
assert os.path.isdir('test-ws')
yield 'test-ws'
shutil.rmtree('test-ws')
# test descriptors of project 'example-project'
def test_example_project_descriptors(self, workspace):
ws = Workspace.load_workspace(workspace)
example_project = Project.load_project('example-project', workspace=ws)
example_project.status()
vnfds = example_project.get_vnfds()
assert vnfds == ['tango_vnfd0.yml']
nsds = example_project.get_nsds()
assert nsds == ['tango_nsd.yml']
| sonata-nfv/tng-sdk-project | tests/test_project_unit.py | test_project_unit.py | py | 1,124 | python | en | code | 5 | github-code | 6 |
41283122101 |
# -*- coding: utf-8 -*-
import math
import numpy as np
from scipy.interpolate import interp1d
class SpatialParameters:
def __init__(self, signal, fs, window_size, running_step):
self.time_steps, self.iacc, self.tiacc, self.wiacc = self.__get_xcorr_descriptors(
signal, fs, window_size, running_step)
def __get_xcorr_descriptors(self, signal, fs, window_size, running_step):
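        # Sliding-window cross-correlation between the left (column 0) and right (column 1) channels:
        #   iacc  - peak of the normalized cross-correlation within a +/- 1 ms lag window
        #   tiacc - lag (ms) at which that peak occurs
        #   wiacc - width of the peak, measured where the correlation falls to 90% of the peak value
        # A window of `window_size` seconds is advanced by `running_step` seconds on every iteration.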
w_samples = round(window_size * fs)
rs_samples = round(running_step * fs)
time_steps = np.array([])
iacc = np.array([])
tiacc = np.array([])
wiacc = np.array([])
wiacc_d = np.array([])
wiacc_u = np.array([])
max_delay = 1 # (ms)
dT = math.floor(max_delay * 0.001 * fs)
t_axis_length = (2*dT) + 1
T_AXIS = np.linspace(-max_delay, max_delay, t_axis_length)
delta = t_axis_length//2
iD = 0
iU = w_samples
lim = math.ceil(t_axis_length/2)-1
while iU < len(signal):
c = 0
idx = 0
max_value = 0
ts = np.round(iD / fs, 2)
time_steps = np.append(time_steps,ts)
cll0 = np.correlate(signal[iD:iU,0], signal[iD:iU,0])
crr0 = np.correlate(signal[iD:iU,1], signal[iD:iU,1])
if((cll0 == 0) or (crr0 == 0)):
iacc = np.append(iacc, 0)
wiacc = np.append(wiacc, 0)
wiacc_d = np.append(wiacc_d, 0)
wiacc_u = np.append(wiacc_u, 0)
if((cll0 == 0) and (crr0 != 0)):
tiacc = np.append(tiacc, 1)
if((cll0 != 0) and (crr0 == 0)):
tiacc = np.append(tiacc, -1)
else:
tiacc = np.append(tiacc, 0)
scale = math.sqrt(cll0*crr0)
iacf = np.zeros([(2*delta)+1])
for tau in range(2*delta, -1, -1):
L = [0]
R = [0]
if(c < lim):
L = signal[iD+delta-c:iU, 0]
R = signal[iD:iU-delta+c, 1]
elif(c == lim):
L = signal[iD:iU, 0]
R = signal[iD:iU, 1]
else:
L = signal[iD:iU-c+delta, 0]
R = signal[iD+c-delta:iU, 1]
xcorr = np.correlate(L, R) / scale
                    iacf[tau] = xcorr  # store the normalized cross-correlation at this lag
c += 1
if(xcorr > max_value):
max_value = xcorr
idx = tau
iacc = np.append(iacc, max_value)
tiacc_window = T_AXIS[idx]
tiacc = np.append(tiacc, tiacc_window)
alpha = 0.9*max_value
idx_minus = np.linspace(idx, 0, idx+1)
idx_plus = np.linspace(idx, t_axis_length-1, t_axis_length-idx)
t_minus = -1;
t_plus = 1;
i_m = 0
i_p = 0
m_found = False
if(len(idx_minus) > 1):
for i in idx_minus:
if(iacf[int(i)] < alpha):
i_m = int(i)
if(idx-i > 0):
m_found = True
x_m = iacf[i_m:idx+1]
y_m = T_AXIS[i_m:idx+1]
t_f = interp1d(x_m, y_m, kind='linear')
t_minus = t_f(alpha)
break
p_found = False
if(len(idx_plus) > 1):
for i in idx_plus:
if(iacf[int(i)] < alpha):
i_p = int(i)
if(i-idx > 0):
p_found = True
x_p = iacf[idx:i_p+1]
y_p = T_AXIS[idx:i_p+1]
t_f = interp1d(x_p, y_p, kind='linear')
t_plus = t_f(alpha)
break
wiacc_d = np.append(wiacc_d, t_minus)
wiacc_u = np.append(wiacc_u, t_plus)
if(not(m_found)):
wiacc = np.append(wiacc, (t_plus - tiacc_window) * 2)
elif(not(p_found)):
wiacc = np.append(wiacc, (tiacc_window - t_minus) * 2)
else:
wiacc = np.append(wiacc, (t_plus - t_minus))
iD += rs_samples
iU += rs_samples
return time_steps, iacc, tiacc, wiacc
| kgordillo-hub/SoundMonitor-MetricsCalculator | SpatialParameters.py | SpatialParameters.py | py | 4,821 | python | en | code | 0 | github-code | 6 |
25273398870 |
import sys
sys.stdin = open('input.txt', 'r')
def check(L, i):
c = 0
cnt = 0
for j in range(N):
if L[i][j]:
cnt += 1
if not L[i][j] or j == N-1:
if cnt == K:
c += 1
cnt = 0
return c
for t in range(1, int(input())+1):
N, K = map(int, input().split())
box = [list(map(int, input().split())) for _ in range(N)]
box_rev = list(zip(*box))
total = 0
for i in range(N):
total += check(box, i)
total += check(box_rev, i)
print('#{} {}'.format(t, total))
| powerticket/algorithm | Practice/실습/D10_문제풀이.py | D10_문제풀이.py | py | 575 | python | en | code | 0 | github-code | 6 |
2416697564 |
import pygame
import sys
from random import randint
display = True
class snakeGame:
snake = [(16, 16),(16,15)]
apple = (18, 18)
is_dead = False
is_left = False
is_right = False
def move_left(self):
self.is_left = True
self.move_forward()
self.is_left = False
self.is_right = False
def move_right(self):
self.is_right = True
self.move_forward()
self.is_left = False
self.is_right = False
def move_forward(self):
vect = (self.snake[0][0] - self.snake[1][0], self.snake[0][1] - self.snake[1][1])
if self.is_right:
vect = (-vect[1], vect[0])
elif self.is_left:
vect = (vect[1], -vect[0])
dest_pos = (self.snake[0][0] + vect[0],self.snake[0][1]+ vect[1])
if not(0 <= dest_pos[0] < 32 and 0 <= dest_pos[1] < 32):
self.is_dead =True
elif dest_pos in self.snake:
self.is_dead = True
elif dest_pos == self.apple:
prev_tail = self.snake[-1]
for i in range(len(self.snake) - 1,0,-1):
self.snake[i] = self.snake[i-1]
self.snake[0] = dest_pos
self.snake.append(prev_tail)
#APPLE CREATION
            self.apple = (randint(0, 31), randint(0, 31))  # board cells are indexed 0..31
flag = True
while flag:
flag = False
for pos in self.snake:
if pos[0] == self.apple[0] and pos[1] == self.apple[1]:
                        self.apple = (randint(0, 31), randint(0, 31))
flag = True
else:
for i in range(len(self.snake) - 1,0,-1):
self.snake[i] = self.snake[i-1]
self.snake[0] = dest_pos
def game(self):
if display:
pygame.init()
DisplaySurface = pygame.display.set_mode((640, 640))
# Game Name Pong
pygame.display.set_caption("Pong")
self.is_dead = False
while not self.is_dead:
# User Events;
if display:
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT]:
self.move_right()
elif keys[pygame.K_LEFT]:
self.move_left()
self.move_forward()
if display:
DisplaySurface.fill((0,0,0))
self.update(DisplaySurface)
(pygame.time.Clock()).tick(16)
pygame.quit()
sys.exit()
def update(self, surface):
for elem in self.snake:
pygame.draw.rect(surface, (255,255,255), (elem[0]*20,elem[1]*20,20,20))
pygame.draw.rect(surface, (255, 0, 0), (self.apple[0] * 20,self.apple[1] * 20, 20, 20))
snakeGame().game()
| dogancanalgul/Pong | scratch.py | scratch.py | py | 3,004 | python | en | code | 0 | github-code | 6 |
43364784094 |
from pathlib import Path
from argparse import ArgumentParser
# The path the project resides in
BASE_PATH = Path(__file__).parent.parent
# Alarm net dimensions
ALARM_HUGE = (5000, 2000, 500, 200)
ALARM_BIG = (1000, 500, 200, 75)
ALARM_SMALL = (100, 50, 25, 10)
# Standard arguments
def add_standard_arguments(parser: ArgumentParser):
"""
Add basic arguments useful to all experiments
:param parser: argument parser object
:return: argument parser with standard arguments added
"""
parser.add_argument(
"random_seed", type=int, help="Seed to fix randomness"
)
parser.add_argument(
"data_split", type=str, help="Data split on which to evaluate the performance (i.e. val or test)"
)
parser.add_argument(
"--p_contamination", default=0.0, type=float,
help="Fraction of contamination, i.e. anomalies in the training data"
)
parser.add_argument(
"--n_train_anomalies", default=0, type=int,
help="Number of known anomalies in the training set"
)
parser.add_argument(
"--plot_freq", default=0, type=int,
help="Plotting frequency to visualise the latent and image space"
)
parser.add_argument(
"--n_epochs", default=500, type=int,
help="Number of epochs"
)
parser.add_argument(
"--learning_rate", default=.0001, type=float,
help="Learning rate for adam"
)
parser.add_argument(
"--sample_stddev", default=None, type=float,
help="Standard deviation of the generated anomalies (in the code layer)"
)
parser.add_argument(
"--model_path", default=BASE_PATH / "models", type=Path, help="Base output path for the models"
)
parser.add_argument(
"--result_path", default=None, type=Path,
help="Base output path for the results, if None use the model path"
# default = BASE_PATH / "results"
)
parser.add_argument(
"--is_override", default=False, type=bool,
help="Override existing models"
)
return parser
| Fraunhofer-AISEC/DA3D | libs/constants.py | constants.py | py | 2,063 | python | en | code | 1 | github-code | 6 |
30896806208 |
import cv2
# loading the images
img1 = cv2.imread("png//yy.jpg")
img2 = cv2.imread("png//ra.jpg")
# resizing both images to the same resolution
scale_percent = 60 # percent of original size
width = int(img1.shape[1] * scale_percent / 90)
height = int(img2.shape[0] * scale_percent / 90)
dim = (width, height)
# resize image
reimg1 = cv2.resize(img1,dsize= dim, interpolation = cv2.INTER_AREA)
reimg2 = cv2.resize(img2, dsize=dim, interpolation = cv2.INTER_AREA)
#including face detection
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray1 = cv2.cvtColor(reimg1,cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(reimg2,cv2.COLOR_BGR2GRAY)
face1 = face_cascade.detectMultiScale(gray1,scaleFactor = 1.05, minNeighbors=5)
face2 = face_cascade.detectMultiScale(gray2,scaleFactor = 1.05 ,minNeighbors=5)
# putting the rectangle on the faces
for x,y ,w,h in face1:
reimg1 = cv2.rectangle(reimg1, (x, y), (x+w, y+h),(255, 0, 0), 3)
for x,y ,w,h in face2:
reimg2 = cv2.rectangle(reimg2, (x, y), (x+w, y+h), (0, 0, 250), 3)
# difference img of the images
diff = cv2.subtract(reimg1, reimg2)
cv2.imshow("diff",diff)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
print("Difference : ",diff)
# comparing the two images to check whether they are exactly the same or not
b, g, r = cv2.split(diff)
if cv2.countNonZero(b)==0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
print("Both Images Are Identical !")
else:
print("Images Are Not Identical")
# finding the similarities of two images
sift = cv2.xfeatures2d.SIFT_create()
kp1, desc1 = sift.detectAndCompute(reimg2, None) # key-points corresponds to the position
kp2, desc2 = sift.detectAndCompute(reimg1, None)
index_params = dict(algorithm=0, trees=5)
search_params = dict()
# Fast Library for Approximate Nearest Neighbors
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(desc1, desc2, k=2) # to get the best matches
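# Lowe's ratio test (applied in the loop below): a match is kept only when its distance is below 60% of the
# distance to the second-best candidate, which filters out ambiguous correspondences.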
good_points = [] # correct matches
ratio = 0.6
for m, n in matches:
if m.distance < ratio*n.distance:
good_points.append(m)
# showing the both images (compare mode)
result = cv2.drawMatches(reimg1, kp1, reimg2, kp2, good_points, None)
cv2.imshow("result", result)
cv2.imshow("img1", reimg1)
cv2.imshow("img2", reimg2)
cv2.waitKey(0)
cv2.destroyAllWindows()
acc = len(good_points)*100/len(matches)
print("Good Points : {}".format(len(good_points)))
print("Total Matches : {}".format(len(matches)))
print("Accuracy : {}".format(acc))
if acc > 0:
print("Both Are The same person")
else:
print("Different Persons")
| singhsaurabh1998/OpenCv | SimilarImg.py | SimilarImg.py | py | 2,656 | python | en | code | 0 | github-code | 6 |
37693718022 |
from enum import Enum, auto
from typing import Any, Tuple
from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import ContinuousSpace
from mesa.time import SimultaneousActivation
from autonomous_intersection.agents.direction import Direction
from autonomous_intersection.agents.visualcell import VisualCell
from autonomous_intersection.constants import PIXEL_PER_METER, STEPS_PER_SECOND
from autonomous_intersection.managers.advanced_reservation_manager import AdvancedReservationBasedManager
from autonomous_intersection.managers.prediction_manager import PredictionBasedManager
from autonomous_intersection.managers.reservation_manager import ReservationBasedManager
from autonomous_intersection.managers.traffic_light_manager import TrafficLightManager
from autonomous_intersection.rect import Rect
class Manager(Enum):
TrafficLight = auto()
BasicReservation = auto()
AdvancedReservation = auto()
Prediction = auto()
class Intersection(Model):
def __init__(self, height=1000, width=1000, spawn_rate=10, manager: str = Manager.TrafficLight.name, *args: Any,
**parameters: Any):
super().__init__(*args, **parameters)
self.schedule = SimultaneousActivation(self)
self.space = ContinuousSpace(height, width, False)
self.width = width
self.height = height
self.road_width = 7 * PIXEL_PER_METER
self.manager = self.get_manager(manager)(self.width, self.height, self.road_width, parameters, self)
self.build_background()
self.agent_id = 0
self.running = True
self.spawn_rate = spawn_rate / 100
self.car_height = int(1.5 * PIXEL_PER_METER)
self.data_collector = DataCollector(model_reporters={"Throughput [cars / min]": self.get_agent_rate})
@staticmethod
def get_manager(manager):
if manager == Manager.TrafficLight.name: return TrafficLightManager
if manager == Manager.BasicReservation.name: return ReservationBasedManager
if manager == Manager.AdvancedReservation.name: return AdvancedReservationBasedManager
return PredictionBasedManager
def get_agent_id(self):
self.agent_id += 1
return self.agent_id
def build_background(self):
for cell in self.manager.build_background():
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
def spawn_car(self, entry, width, height):
cell = self.manager.create_new_car(entry, (width, height), self.get_agent_id())
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
def add_new_agents(self):
for entry in Direction:
if not self.manager.is_entry_occupied(entry) and self.random.random() < self.spawn_rate:
self.spawn_car(entry, *self.random_car_size(self.car_height))
def random_car_size(self, height) -> Tuple[int, int]:
return self.random.randint(round(height * 1.3), height * 2), height
def step(self):
self.add_new_agents()
self.manager.remove_cars(self.space, self.schedule)
self.manager.control_cars()
self.schedule.step()
self.data_collector.collect(self)
def draw_debug_object(self, rect: Rect, color: str) -> VisualCell:
cell = VisualCell((rect.left, rect.top), (rect.width, rect.height), self, color, 2)
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
return cell
def delete_debug_object(self, cell: VisualCell) -> None:
self.space.remove_agent(cell)
self.schedule.remove(cell)
def get_agent_rate(self):
if self.manager.first_step is None: return 0
steps = self.manager.steps - self.manager.first_step + 1
if steps < 50: return 0
return ((STEPS_PER_SECOND * 60) * self.manager.agent_count) // steps
| GrzegorzNieuzyla/Autonomous-Intersection | autonomous_intersection/model.py | model.py | py | 3,906 | python | en | code | 0 | github-code | 6 |
39363863586 |
import networkx as nx
from node import Node
class Graph(nx.Graph):
def __init__(self):
super().__init__()
self.arcs=[]
self.nao_servido = 0
def increment_nao_servido(self):
self.nao_servido += 1
    def decrement_nao_servido(self):
        self.nao_servido -= 1
def create_node(self, id: str) -> Node:
node = Node(id)
return node
def create_nodes(self, matrix):
listaN = []
dict_ap = {}
for i in range(1, len(matrix[0])):
node_name = matrix[0][i]
node = self.create_node(node_name)
listaN.append(node)
dict_ap[node_name] = node
self.add_nodes_from(listaN)
multiple = set()
i = 0
for i in range(1, len(matrix)):
for j in range(1, len(matrix[i])):
if matrix[i][j] == '1':
if(not (matrix[0][j], matrix[i][0]) in multiple):
dict_ap[matrix[0][j]].add_neighbor(
dict_ap[matrix[i][0]])
dict_ap[matrix[i][0]].add_neighbor(
dict_ap[matrix[0][j]])
self.add_edge(dict_ap[matrix[0][j]],
dict_ap[matrix[i][0]])
multiple.add((matrix[0][j], matrix[i][0]))
multiple.add((matrix[i][0], matrix[0][j]))
def create_arcs(self):
for i in self.nodes:
for j in i.vizinhos:
if ((i.id, j.id) not in self.arcs) and ((j.id, i.id) not in self.arcs):
self.arcs.append((i, j))
| marcoscezar1/Simulador | Simulação I/graph.py | graph.py | py | 1,642 | python | en | code | 0 | github-code | 6 |
12755233630 |
"""
This file contains the 16-state EKF for INS/GNSS integration in direct configuration
The following states are estimated in three dimensions
position, velocity, orientation, accel bias, gyro bias
References
https://github.com/NorthStarUAS/insgnss_tools/blob/main/insgnss_tools/Kinematics.py
https://github.com/PX4/PX4-ECL/tree/master/EKF/python/ekf_derivation
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3812568/
"""
import math
import numpy as np
from ins_data_utilities.quaternion_utils import *
# Model Constants
def create_skew(w):
"""
Create skew symmetric matrix from vector
:param w:
:return:
"""
return np.array([
[0.0, -w[2], w[1]],
[w[2], 0.0, -w[0]],
[-w[1], w[0], 0.0]])
class NavEKF16:
"""
16-state EKF for INS/GNSS integration in direct configuration
"""
gravity_const = 9.81 # Acceleration due to gravity
_I2 = np.identity(2)
_I3 = np.identity(3)
_I5 = np.identity(5)
_I16 = np.identity(16)
def __init__(self):
# Sensor variances (as standard deviation) and models (tau)
self.aNoiseSigma_mps2 = 0.5 # Std dev of Accelerometer Wide Band Noise (m/s^2)
self.aMarkovSigma_mps2 = 0.1 # Std dev of Accelerometer Markov Bias
self.tau_acc_bias = 300.0 # Correlation time or time constant
self.wNoiseSigma_rps = math.radians(0.1) # Std dev of rotation rate output noise (rad/s)
self.wMarkovSigma_rps = math.radians(0.015) # Std dev of correlated rotation rate bias (rad/s)
# self.tau_angular_acc_bias = 1000.0 # Correlation time or time constant
        # TODO: Figure out why it does not work when the real GNSS measurement noise is decreased to its correct value of 2.5 cm.
        # The velocity seems to be updated incorrectly in that case.
self.rNoiseSigma_NE_m = 3 # GPS measurement noise std dev (m)
self.rNoiseSigma_U_m = 1 # GPS measurement noise std dev (m)
self.vNoiseSigma_NE_mps = 0.5 # GPS measurement noise std dev (m/s)
self.vNoiseSigma_D_mps = 1.0 # GPS measurement noise std dev (m/s)
# Initial set of covariance
self.pErrSigma_Init_m = 10.0 # Std dev of initial position error (m)
self.vErrSigma_Init_mps = 1.0 # Std dev of initial velocity error (m/s)
self.attErrSigma_Init_rad = math.radians(20) # Std dev of initial attitude (phi and theta) error (rad)
self.hdgErrSigma_Init_rad = math.radians(90) # Std dev of initial Heading (psi) error (rad)
self.aBiasSigma_Init_mps2 = 0.1 * self.gravity_const # Std dev of initial acceleration bias (m/s^2)
self.wBiasSigma_Init_rps = math.radians(1) # Std dev of initial rotation rate bias (rad/s)
# Kalman Matrices
self.H = np.zeros((6, 16)) # Observation matrix
self.R = np.zeros((6, 6)) # Covariance of the Observation Noise (associated with MeasUpdate())
self.Rw = np.zeros((6, 6)) # Covariance of the Sensor Noise (associated with TimeUpdate())
self.S = np.zeros((6, 6)) # Innovation covariance
self.P = np.zeros((16, 16)) # Covariance estimate
self.K = np.zeros((16, 6)) # Kalman gain
self.Q = np.zeros((16, 16)) # Process noise covariance
self.Fx = self._I16 # State transition matrix
# State estimates
self.est_pos = np.zeros(3) # Estimated position in ENU
self.est_vel = np.zeros(3) # Estimated velocity in ENU
self.est_ori = np.zeros(4) # Quaternion of B wrt Local
self.est_acc_bias = np.zeros(3) # acceleration bias
self.est_rate_of_rot_bias = np.zeros(3) # rotation rate bias
# Estimates from previous data
self.est_acc = np.zeros(3) # Estimated acceleration in Body
self.est_rate_rotation = np.zeros(3) # Bias free rate of rotation
self._configure()
def _configure(self):
# Observation matrix (H)
self.H[0:5, 0:5] = self._I5
# Covariance of the Process Noise (associated with TimeUpdate())
self.Rw[0:3, 0:3] = self.aNoiseSigma_mps2 ** 2 * self._I3
self.Rw[3:6, 3:6] = self.wNoiseSigma_rps ** 2 * self._I3
# self.Rw[6:9, 6:9] = 2 / self.tau_acc_bias * self.aMarkovSigma_mps2 ** 2 * self._I3 # TODO: Check this
# self.Rw[9:12, 9:12] = 2 / self.tau_angular_acc_bias * self.wMarkovSigma_rps ** 2 * self._I3
# Covariance of the Observation Noise (associated with MeasUpdate())
self.R[0:2, 0:2] = self.rNoiseSigma_NE_m ** 2 * self._I2
self.R[2, 2] = self.rNoiseSigma_U_m ** 2
self.R[3:5, 3:5] = self.vNoiseSigma_NE_mps ** 2 * self._I2
self.R[5, 5] = self.vNoiseSigma_D_mps ** 2
# Initial Innovation Covariance Estimate (S)
# Zeros
# Initial Covariance Estimate (P)
self.P[0:3, 0:3] = self.pErrSigma_Init_m ** 2 * self._I3
self.P[3:6, 3:6] = self.vErrSigma_Init_mps ** 2 * self._I3
self.P[6:8, 6:8] = self.attErrSigma_Init_rad ** 2 * self._I2
self.P[8, 8] = self.hdgErrSigma_Init_rad ** 2
self.P[9:12, 9:12] = self.aBiasSigma_Init_mps2 ** 2 * self._I3
self.P[12:15, 12:15] = self.wBiasSigma_Init_rps ** 2 * self._I3
def get_euler_angles(self):
# Euler angles from quaternion
return quaternion_to_euler(self.est_ori)
def initialize(self, r0, v0, s0):
# Initialize Position and Velocity
self.est_pos = r0 # Position in ENU
self.est_vel = v0 # Velocity in ENU
# Euler to quaternion
self.est_ori = euler_to_quaternion(s0)
def create_state_transition_matrix(self, dt) -> np.ndarray:
q0, q1, q2, q3 = self.est_ori
dvx, dvy, dvz = self.est_acc # delta velocity
dax, day, daz = self.est_rate_rotation # delta angle
dvx_b, dvy_b, dvz_b = self.est_acc_bias # Accelerometer bias
dax_b, day_b, daz_b = self.est_rate_of_rot_bias
g = self.gravity_const
# tau_dvb = self.tau_acc_bias
# tau_dab = self.tau_angular_acc_bias
return np.array(
[[1, 0, 0, dt, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, dt, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, dt, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, dt * (2 * q2 * (dvz - dvz_b - g) - 2 * q3 * (dvy - dvy_b)),
dt * (2 * q2 * (dvy - dvy_b) + 2 * q3 * (dvz - dvz_b - g)),
dt * (2 * q0 * (dvz - dvz_b - g) + 2 * q1 * (dvy - dvy_b) - 4 * q2 * (dvx - dvx_b)),
dt * (-2 * q0 * (dvy - dvy_b) + 2 * q1 * (dvz - dvz_b - g) - 4 * q3 * (dvx - dvx_b)), 0, 0, 0,
dt * (2 * q2 ** 2 + 2 * q3 ** 2 - 1), dt * (2 * q0 * q3 - 2 * q1 * q2),
dt * (-2 * q0 * q2 - 2 * q1 * q3)],
[0, 0, 0, 0, 1, 0, dt * (-2 * q1 * (dvz - dvz_b - g) + 2 * q3 * (dvx - dvx_b)),
dt * (-2 * q0 * (dvz - dvz_b - g) - 4 * q1 * (dvy - dvy_b) + 2 * q2 * (dvx - dvx_b)),
dt * (2 * q1 * (dvx - dvx_b) + 2 * q3 * (dvz - dvz_b - g)),
dt * (2 * q0 * (dvx - dvx_b) + 2 * q2 * (dvz - dvz_b - g) - 4 * q3 * (dvy - dvy_b)), 0, 0, 0,
dt * (-2 * q0 * q3 - 2 * q1 * q2), dt * (2 * q1 ** 2 + 2 * q3 ** 2 - 1),
dt * (2 * q0 * q1 - 2 * q2 * q3)],
[0, 0, 0, 0, 0, 1, dt * (2 * q1 * (dvy - dvy_b) - 2 * q2 * (dvx - dvx_b)),
dt * (2 * q0 * (dvy - dvy_b) - 4 * q1 * (dvz - dvz_b - g) + 2 * q3 * (dvx - dvx_b)),
dt * (-2 * q0 * (dvx - dvx_b) - 4 * q2 * (dvz - dvz_b - g) + 2 * q3 * (dvy - dvy_b)),
dt * (2 * q1 * (dvx - dvx_b) + 2 * q2 * (dvy - dvy_b)), 0, 0, 0, dt * (2 * q0 * q2 - 2 * q1 * q3),
dt * (-2 * q0 * q1 - 2 * q2 * q3), dt * (2 * q1 ** 2 + 2 * q2 ** 2 - 1)],
[0, 0, 0, 0, 0, 0, 1, -0.5 * dax + 0.5 * dax_b, -0.5 * day + 0.5 * day_b, -0.5 * daz + 0.5 * daz_b,
0.5 * q1, 0.5 * q2, 0.5 * q3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.5 * dax - 0.5 * dax_b, 1, 0.5 * daz - 0.5 * daz_b, -0.5 * day + 0.5 * day_b,
-0.5 * q0, 0.5 * q3, -0.5 * q2, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.5 * day - 0.5 * day_b, -0.5 * daz + 0.5 * daz_b, 1, 0.5 * dax - 0.5 * dax_b,
-0.5 * q3, -0.5 * q0, 0.5 * q1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0.5 * daz - 0.5 * daz_b, 0.5 * day - 0.5 * day_b, -0.5 * dax + 0.5 * dax_b, 1, 0.5 * q2,
-0.5 * q1, -0.5 * q0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]
)
def create_process_update_matrix(self, dt):
q0, q1, q2, q3 = self.est_ori
return np.array(
[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, dt * (-2 * q2 ** 2 - 2 * q3 ** 2 + 1), dt * (-2 * q0 * q3 + 2 * q1 * q2),
dt * (2 * q0 * q2 + 2 * q1 * q3)],
[0, 0, 0, dt * (2 * q0 * q3 + 2 * q1 * q2), dt * (-2 * q1 ** 2 - 2 * q3 ** 2 + 1),
dt * (-2 * q0 * q1 + 2 * q2 * q3)],
[0, 0, 0, dt * (-2 * q0 * q2 + 2 * q1 * q3), dt * (2 * q0 * q1 + 2 * q2 * q3),
dt * (-2 * q1 ** 2 - 2 * q2 ** 2 + 1)], [-0.5 * q1, -0.5 * q2, -0.5 * q3, 0, 0, 0],
[0.5 * q0, -0.5 * q3, 0.5 * q2, 0, 0, 0], [0.5 * q3, 0.5 * q0, -0.5 * q1, 0, 0, 0],
[-0.5 * q2, 0.5 * q1, 0.5 * q0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
)
def predict(self, measured_angular_vel, measured_acc, dt):
# A-priori accel and rotation rate estimate
self.est_acc = measured_acc - self.est_acc_bias
self.est_rate_rotation = measured_angular_vel - self.est_rate_of_rot_bias
# Kalman Time Update (Prediction)
# Compute DCM (Body to/from ENU) Transformations from Quaternion
local_to_body_trans = quaternion_to_dcm(self.est_ori)
body_to_local_trans = local_to_body_trans.T
# Attitude Update
attitude_delta = np.hstack((1, 0.5 * dt * self.est_rate_rotation))
self.est_ori = quaternion_mult(self.est_ori, attitude_delta)
# Avoid quaternion flips sign
if self.est_ori[0] < 0:
self.est_ori = -self.est_ori
# Velocity Update
gravity_vector = np.array([0.0, 0.0, -self.gravity_const])
self.est_vel += (body_to_local_trans @ self.est_acc + gravity_vector) * dt
# Position Update
self.est_pos += (dt * self.est_vel)
self.Fx = self.create_state_transition_matrix(dt)
# Process Noise Covariance (Discrete approximation)
Fu = self.create_process_update_matrix(dt)
# Discrete Process Noise
self.Q = Fu @ self.Rw @ Fu.T
self.Q = 0.5 * (self.Q + self.Q.T)
# Covariance Time Update
self.P = self.Fx @ self.P @ self.Fx.T + self.Q
self.P = 0.5 * (self.P + self.P.T)
def correct(self, measured_position, measured_velocity, measured_angular_vel, measured_acc):
# Kalman Measurement Update
# Position Error
pos_err = measured_position - self.est_pos
# Velocity Error
vel_err = measured_velocity - self.est_vel
# Create measurement Y, as Error between Measures and Outputs
y = np.zeros(6)
y[0:3] = pos_err
y[3:6] = vel_err
# Innovation covariance
self.S = self.H @ self.P @ self.H.T + self.R
# Kalman gain
self.K = self.P @ self.H.T @ np.linalg.inv(self.S)
# Covariance update
I_KH = self._I16 - self.K @ self.H
self.P = I_KH @ self.P @ I_KH.T + self.K @ self.R @ self.K.T
# State update, x = K * y
x = self.K @ y
# Pull apart x terms to update the Position, velocity, orientation, and sensor biases
# Position update
pos_delta = x[0:3] # Position Deltas in ENU
self.est_pos += pos_delta
# Velocity update
vel_delta = x[3:6] # Velocity Deltas in ENU
self.est_vel += vel_delta
# Attitude correction
quat_delta = x[6:10] # Quaternion Delta
attitude_delta = np.array([quat_delta[0], quat_delta[1], quat_delta[2], quat_delta[3]])
self.est_ori = quaternion_mult(self.est_ori, attitude_delta)
# Update biases from states
acc_bias_delta = x[10:13] # Accel Bias Deltas
self.est_acc_bias += acc_bias_delta
w_bias_delta = x[13:16] # Rotation Rate Bias Deltas
self.est_rate_of_rot_bias += w_bias_delta
# Post-priori accel and rotation rate estimate, biases updated in MeasUpdate()
self.est_acc = measured_acc - self.est_acc_bias # only for users convenience
self.est_rate_rotation = measured_angular_vel - self.est_rate_of_rot_bias # only for users convenience
| Birkehoj/gazebo_ins_analysis | ins_samples/ins_ekf_16_states.py | ins_ekf_16_states.py | py | 13,019 | python | en | code | 0 | github-code | 6 |
42253873273 |
from tkinter import IntVar
from tkinter.constants import N, NE, S, TRUE, W
from controllers.edocsController import *
from helpers import helper
import json
class view:
def __init__(self, program):
self.program = program
self.frame_bgc = "#f5f5f5"
self.controller = edocsController()
def index(self):
try:
self.program.header("Configuracion de cuenta EDOC's")
general_info_frame = self.program.newFrame(Row = 0 , Col = 0, Pos='NW', Margin = 5, Bg=self.frame_bgc)
self.program.newLabel( LabelName = "API URL:", Row = 0, Col = 0, Bg=self.frame_bgc , into_frame = general_info_frame)
self.program.newEntry( Row = 0, Col = 1, Pos = 'W', Long = 80 , Data = self.controller.val1 , into_frame = general_info_frame)
self.program.newLabel( LabelName = "Usuario:", Row = 1, Col = 0, Bg=self.frame_bgc , into_frame = general_info_frame)
self.program.newEntry( Row = 1, Col = 1, Pos = 'W', Long = 20 , Data = self.controller.val2 , into_frame = general_info_frame)
self.program.newLabel( LabelName = "Contraseña:", Row = 2, Col = 0, Bg=self.frame_bgc , into_frame = general_info_frame)
self.program.newEntry( Row = 2, Col = 1, Pos = 'W', Long = 20 , Data = self.controller.val3 , into_frame = general_info_frame , mask = True)
self.program.newLabel( LabelName = "Token:", Row = 3, Col = 0, Bg=self.frame_bgc , into_frame = general_info_frame)
self.program.newEntry( Row = 3, Col = 1, Pos = 'W', Long = 50 , Data = self.controller.val4 , into_frame = general_info_frame , mask = True)
self.program.newButton( Name = "Conectar", Row = 4 , Col = 0, Pos = 'W', Command= lambda : self.controller.save() , into_frame = general_info_frame )
except Exception as e :
messagebox.showerror(message=str(e) , title="Error")
| REYJDR/COLOMETRIC_ANALISYS | views/Settings/Edocs.py | Edocs.py | py | 1,958 | python | en | code | 0 | github-code | 6 |
5095318867 |
import random
import torchvision
from torchvision import datasets, transforms
from data.RPSLS.types import *
transformations = transforms.Compose([
transforms.ToTensor(),
])
dataset = datasets.ImageFolder(
root='../../../data/RPSLS/rock-paper-scissors-lizard-spock',
transform = transformations
)
def generate_data(dataset):
examples = list()
i = list(range(len(dataset)))
random.shuffle(i)
i = iter(i)
    while True:
        try:
            examples.append(next_example(i, dataset))
        except StopIteration:
            break
    return examples
def next_example(i,dataset):
x1, x2 = next(i), next(i)
y = winner(x1, x2,dataset)
return x1, x2, y
generate_data(dataset)
| AytugAltin/ProblogRPSLS | examples/RPSLS/Rock-Paper-Scissors/generate_data.py | generate_data.py | py | 698 | python | en | code | 1 | github-code | 6 |
34511399562 |
from django import forms
from .models import Producto
from django.forms import ModelForm
class ProductosForm(forms.ModelForm):
class Meta:
model = Producto
fields = ('nombre','material','cantidad','categoria')
labels = {
'nombre':'Nombre',
'cantidad':'n. cantidad'
}
def __init__(self, *args, **kwargs):
super(ProductosForm,self).__init__(*args, **kwargs)
self.fields['categoria'].empty_label = "Select"
self.fields['cantidad'].required = False
| SteveManfred/eco_facil_mio | test/electivo_2023/productos/forms.py | forms.py | py | 543 | python | es | code | 0 | github-code | 6 |
28051281167 |
"""
Tests for the petl.fluent module.
"""
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import csv
from nose.tools import eq_
import petl
import petl.interactive as etl
from petl.testutils import ieq
def test_basics():
t1 = (('foo', 'bar'),
('A', 1),
('B', 2))
w1 = etl.wrap(t1)
eq_(('foo', 'bar'), w1.header())
eq_(petl.header(w1), w1.header())
ieq((('A', 1), ('B', 2)), w1.data())
ieq(petl.data(w1), w1.data())
w2 = w1.cut('bar', 'foo')
expect2 = (('bar', 'foo'),
(1, 'A'),
(2, 'B'))
ieq(expect2, w2)
ieq(petl.cut(w1, 'bar', 'foo'), w2)
w3 = w1.cut('bar', 'foo').cut('foo', 'bar')
ieq(t1, w3)
def test_staticmethods():
f = NamedTemporaryFile(delete=False)
writer = csv.writer(f, delimiter='\t')
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
for row in table:
writer.writerow(row)
f.close()
actual = etl.fromcsv(f.name, delimiter='\t')
expect = (('foo', 'bar'),
('a', '1'),
('b', '2'),
('c', '2'))
ieq(expect, actual)
ieq(expect, actual) # verify can iterate twice
def test_container():
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
actual = etl.wrap(table)[0]
expect = ('foo', 'bar')
eq_(expect, actual)
actual = len(etl.wrap(table))
expect = 4
eq_(expect, actual)
def test_repr_html():
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
expect = u"""<table class='petl'>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
</tr>
</thead>
<tbody>
<tr>
<td>a</td>
<td style='text-align: right'>1</td>
</tr>
<tr>
<td>b</td>
<td style='text-align: right'>2</td>
</tr>
<tr>
<td>c</td>
<td style='text-align: right'>2</td>
</tr>
</tbody>
</table>
"""
actual = etl.wrap(table)._repr_html_()
for l1, l2 in zip(expect.split('\n'), actual.split('\r\n')):
eq_(l1, l2)
def test_repr_html_limit():
table = (('foo', 'bar'),
('a', 1),
('b', 2),
('c', 2))
# lower repr limit
etl.repr_html_limit = 2
expect = u"""<table class='petl'>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
</tr>
</thead>
<tbody>
<tr>
<td>a</td>
<td style='text-align: right'>1</td>
</tr>
<tr>
<td>b</td>
<td style='text-align: right'>2</td>
</tr>
</tbody>
</table>
<p><strong>...</strong></p>
"""
actual = etl.wrap(table)._repr_html_()
for l1, l2 in zip(expect.split('\n'), actual.split('\r\n')):
eq_(l1, l2)
| podpearson/petl | src/petl/test/test_interactive.py | test_interactive.py | py | 2,716 | python | en | code | null | github-code | 6 |
33585138405 |
__author__ = 'Vivek'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# @param A : head node of linked list
# @return the head node in the linked list
def mergeTwoLists(self, A, B):
curA, curB = A, B
if A == None :
return B
elif B == None :
return A
if curA.val <= curB.val :
C = curA
curA = curA.next
else :
C = curB
curB = curB.next
curC = C
while curA != None and curB != None :
if curA.val <= curB.val :
curC.next = curA
curA = curA.next
curC = curC.next
else :
curC.next = curB
curB = curB.next
curC = curC.next
while curA != None :
curC.next = curA
curA = curA.next
curC = curC.next
while curB != None :
curC.next = curB
curB = curB.next
curC = curC.next
return C
def split(self, start) :
if start == None or start.next == None :
first = start
second = None
else :
slow = start
fast = start
while fast != None and fast.next != None :
slow = slow.next
fast = fast.next.next
first = start
second = slow.next
slow.next = None
return first, second
def sortList(self, A):
if A == None or A.next == None :
return A
firstHalf = None
secondHalf = None
firstHalf, secondHalf = self.split(A)
        firstHalf = self.sortList(firstHalf)
        secondHalf = self.sortList(secondHalf)
#self.mergeTwoLists(firstHalf, secondHalf)
return self.mergeTwoLists(firstHalf, secondHalf)
| viveksyngh/InterviewBit | Linked List/SORTLIST.py | SORTLIST.py | py | 1,904 | python | en | code | 3 | github-code | 6 |
37740591228 |
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from app.auth import main as auths
from app.users import main as users
from app.blogs import main as blogs, sub as blogs_sub
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(auths.app)
app.include_router(users.app)
app.include_router(blogs.app)
app.include_router(blogs_sub.app)
| tokusumi/fastapi-nuxt-blog | backend/app/app/main.py | main.py | py | 524 | python | en | code | 6 | github-code | 6 |
17813388652 |
from typing import Callable, Any, Type
from lyrid import Address
from lyrid.base import ActorSystemBase
from lyrid.core.node import NodeSpawnProcessMessage
from lyrid.core.process import Process
from lyrid.core.system import Placement
from tests.factory.system import create_actor_system
from tests.mock.messenger import MessengerMock
from tests.mock.placement_policy import PlacementPolicyMatcherMock, PlacementPolicyMock
from tests.mock.randomizer import RandomizerMock
def assert_pass_process_type_to_policy_matcher(spawn_process: Callable[[ActorSystemBase], Any], type_: Type[Process]):
matcher = PlacementPolicyMatcherMock()
system = create_actor_system(placements=[Placement(match=matcher, policy=PlacementPolicyMock())],
node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
spawn_process(system)
assert matcher.match__type == type_
def assert_send_node_spawn_process_message_to_the_address_from_policy(spawn_process: Callable[[ActorSystemBase], Any]):
messenger = MessengerMock()
policy = PlacementPolicyMock(get_placement_node__return=Address("#node1"))
system = create_actor_system(messenger=messenger,
placements=[Placement(PlacementPolicyMatcherMock(match__return=True), policy)],
node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
spawn_process(system)
assert messenger.send__receiver == Address("#node1") and \
isinstance(messenger.send__message, NodeSpawnProcessMessage)
def assert_use_node_address_from_first_matched_policy(spawn_process: Callable[[ActorSystemBase], Any]):
messenger = MessengerMock()
placements = [
Placement(
match=PlacementPolicyMatcherMock(match__return=False),
policy=PlacementPolicyMock(get_placement_node__return=Address("#node0")),
),
Placement(
match=PlacementPolicyMatcherMock(match__return=True),
policy=PlacementPolicyMock(get_placement_node__return=Address("#node1")),
),
Placement(
match=PlacementPolicyMatcherMock(match__return=True),
policy=PlacementPolicyMock(get_placement_node__return=Address("#node2")),
),
]
# noinspection DuplicatedCode
system = create_actor_system(messenger=messenger, placements=placements,
node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
spawn_process(system)
assert messenger.send__receiver == Address("#node1") and \
isinstance(messenger.send__message, NodeSpawnProcessMessage)
def assert_use_random_node_when_no_matched_policy(spawn_process: Callable[[ActorSystemBase], Any]):
messenger = MessengerMock()
placements = [
Placement(
match=PlacementPolicyMatcherMock(match__return=False),
policy=PlacementPolicyMock(get_placement_node__return=Address("#node0")),
),
Placement(
match=PlacementPolicyMatcherMock(match__return=False),
policy=PlacementPolicyMock(get_placement_node__return=Address("#node1")),
),
]
randomizer = RandomizerMock(randrange__return=2)
system = create_actor_system(messenger=messenger, placements=placements, randomizer=randomizer,
node_addresses=[Address("#node0"), Address("#node1"), Address("#node2")])
spawn_process(system)
assert messenger.send__receiver == Address("#node2") and \
isinstance(messenger.send__message, NodeSpawnProcessMessage)
|
SSripilaipong/lyrid
|
tests/system/actor_placement/_assertion.py
|
_assertion.py
|
py
| 3,625
|
python
|
en
|
code
| 12
|
github-code
|
6
|
36511035270
|
def read_file(fileName):
content = []
with open(fileName, 'r', encoding='utf-8-sig') as f:
for line in f:
content.append(line.strip())
return content
def convert(textContent):
output = []
person = None
for line in textContent:
if line == 'Allen':
person = 'Allen'
continue
elif line == 'Tom':
person = 'Tom'
continue
if person:
output.append(person + ": " + line)
return output
def write_file(fileName, lines):
with open(fileName, 'w') as f:
for line in lines:
f.write(line + '\n')
def main():
lines = read_file('input.txt')
lines = convert(lines)
write_file('output.txt', lines)
main()
|
taylorchen78/chat
|
chat.py
|
chat.py
|
py
| 634
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21832705996
|
from .piece import Piece
class King(Piece):
"""
Class that represents a king.
"""
def __init__(self, color, row, column, board, rooks):
Piece.__init__(self, color, row, column, board)
self.offsets = [(1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1)]
self.rooks = rooks
def generate_legal_moves(self, king, prev_move):
#glitch where king can take a defended piece
end_squares = self.threatened_squares()
possible_moves = [(self.row, self.column, square[0], square[1]) for square in end_squares]
opposite_color = "B" if self.color == "W" else "W"
#maybe make this more efficient
enemy_threatened_squares = self.board.get_threatened_squares(opposite_color)
legal_moves = self.filter_checks_and_pins(king, possible_moves)
if not (self.row, self.column) in enemy_threatened_squares and not king.get_has_moved():
self.castle(king, legal_moves, enemy_threatened_squares)
return legal_moves
def castle(self, king, legal_moves, enemy_threatened_squares):
for rook in self.rooks:
if rook.get_has_moved() or rook.is_killed():
continue
rook_row = rook.get_row()
rook_column = rook.get_column()
king_column = king.get_column()
king_row = king.get_row()
exceptions = [(rook_row, rook_column), (king_row, king_column)]
can_castle = True
#adapted for chess 960
#queenside
if king_column < rook_column:
#make sure can't castle into check
#check [?][5] and [?][4] for not being occupied - exception of occupied by
# king or correct rook
#4 = rook, 5 = king
if (self.row, 5) in enemy_threatened_squares: #make sure can't castle into check
continue
#make sure final squares for rook and king are not occupied by other pieces
if self.board.is_occupied(self.row, 4) and (self.row, 4) not in exceptions:
continue
elif self.board.is_occupied(self.row, 5) and (self.row, 5) not in exceptions:
continue
#make sure no pieces are inbetween king and rook
for column in range(king_column + 1, rook_column):
if self.board.is_occupied(self.row, column):
can_castle = False
break
#make sure all squares king has to move over aren't attacked
for column in range(king_column, 6):
if (king_row, column) in enemy_threatened_squares:
can_castle = False
break
if can_castle:
legal_moves.append((self.row, self.column, self.row, rook_column))
for column in range(king_column + 2, rook_column):
legal_moves.append((self.row, self.column, self.row, column))
#kingside
else:
#make sure can't castle into check
# check [?][1] and [?][2] for not being occupied
#2 = rook, 1 = king
if (self.row, 1) in enemy_threatened_squares:
continue
elif self.board.is_occupied(self.row, 1) and (self.row, 1) not in exceptions:
continue
elif self.board.is_occupied(self.row, 2) and (self.row, 2) not in exceptions:
continue
for column in range(king_column - 1, rook_column, -1):
if self.board.is_occupied(self.row, column):
can_castle = False
break
for column in range(king_column, 0, -1):
if (king_row, column) in enemy_threatened_squares:
can_castle = False
break
if can_castle:
legal_moves.append((self.row, self.column, self.row, rook_column))
for column in range(king_column - 2, rook_column, -1):
legal_moves.append((self.row, self.column, self.row, column))
def threatened_squares(self):
possible_moves = []
for row_offset, col_offset in self.offsets:
curr_row, curr_column = self.row + row_offset, self.column + col_offset
if curr_row < 0 or curr_row >= 8 or curr_column < 0 or curr_column >= 8:
continue
elif self.board.is_occupied(curr_row, curr_column):
if self.board.get_piece_color(curr_row, curr_column) != self.color:
possible_moves.append((curr_row, curr_column))
else:
possible_moves.append((curr_row, curr_column))
return possible_moves
def is_attacked(self):
# opposite_color = "B" if self.color == "W" else "W"
# threatened_squares = self.board.get_threatened_squares(opposite_color)
# if (self.row, self.column) in threatened_squares:
# return True
# return False
king_row, king_column = self.row, self.column
straight_offsets = [(1, 0), (0, 1), (0, -1), (-1, 0)]
for row_offset, column_offset in straight_offsets:
curr_row, curr_column = king_row + row_offset, king_column + column_offset
while curr_row >= 0 and curr_row < 8 and curr_column >= 0 and curr_column < 8:
square = self.board.get_square(curr_row, curr_column)
if square:
if square.get_color() == self.color:
break
else:
if square.get_symbol()[1] == "k":
if abs(king_row - curr_row) <= 1 and abs(king_column - curr_column) <= 1:
return True
#if opposite color and its a piece that can attack vertically
elif square.get_symbol()[1] in "rq":
return True
else:
break
curr_row, curr_column = curr_row + row_offset, curr_column + column_offset
diagonal_offsets = [(1, 1), (1, -1),(-1, 1), (-1, -1)]
for row_offset, column_offset in diagonal_offsets:
curr_row, curr_column = king_row + row_offset, king_column + column_offset
while curr_row >= 0 and curr_row < 8 and curr_column >= 0 and curr_column < 8:
square = self.board.get_square(curr_row, curr_column)
if square:
if square.get_color() == self.color:
break
else:
#if opposite color and its a piece that can attack vertically
if square.get_symbol()[1] == "k":
if abs(king_row - curr_row) <= 1 and abs(king_column - curr_column) <= 1:
return True
elif square.get_symbol()[1] == "p":
if self.color == "W":
if curr_row == king_row + 1 and abs(curr_column - king_column) == 1:
return True
else:
break
else:
if curr_row == king_row - 1 and abs(curr_column - king_column) == 1:
return True
else:
break
elif square.get_symbol()[1] in "kbq":
return True
else:
break
curr_row, curr_column = curr_row + row_offset, curr_column + column_offset
knight_offsets = [(2, 1), (2, -1), (-2, 1), (-2, -1), (1, 2), (-1, 2), (1, -2), (-1, -2)]
for row_offset, column_offset in knight_offsets:
curr_row, curr_column = king_row + row_offset, king_column + column_offset
if curr_row >= 0 and curr_row < 8 and curr_column >= 0 and curr_column < 8:
square = self.board.get_square(curr_row, curr_column)
if square:
if square.get_color() != self.color:
#if opposite color and its a knight
if square.get_symbol()[1] == "n":
return True
else:
continue
return False
def get_rooks(self):
return self.rooks
def __str__(self):
return f"""
King. Coordinates: [{self.row}][{self.column}]
"""
def get_symbol(self):
return self.color + "k"
|
wuhw234/chess_bot
|
chess_game/pieces/king.py
|
king.py
|
py
| 9,001
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35091600325
|
import unittest
from Soda import Soda
class TestSoda(unittest.TestCase):
def test_valid_no_tasty(self):
self.soda = Soda('')
self.assertEqual(self.soda.show_my_drink(), 'Обычная газировка')
def test_valid_add_tasty(self):
self.soda = Soda('клубника')
self.assertEqual(self.soda.show_my_drink(), 'Газировка и клубника')
if __name__ == '__main__':
unittest.main()
|
Sensor45/oop
|
soda_test.py
|
soda_test.py
|
py
| 453
|
python
|
ru
|
code
| 0
|
github-code
|
6
|
15484480802
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
if sys.version_info >= (3, 0):
import tkinter
else:
import Tkinter as tkinter
import interaction
import canvas
import FigureManager
# The size of the button (width, height) for buttons in root gui.
SIZE_BUTTON = (18, 4)
def find_show_image():
"""Search, open and show an pmg image.
"""
filename = interaction.find_pmg()
if filename:
FigureManager.g_figure_manager.add_pmg(filename)
canvas.show_figure_from_manager(
FigureManager.g_figure_manager,
title=os.path.basename(filename))
def main():
"""The main entry of the program.
"""
root = tkinter.Tk()
root.title('Pytena')
tkinter.Button(
root,
text='Script',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.load_python_script).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Image',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=find_show_image).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Command',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.start_text_box).pack(side=tkinter.TOP)
tkinter.Button(
root,
text='Help',
height=SIZE_BUTTON[1],
width=SIZE_BUTTON[0],
command=interaction.show_help_box).pack(side=tkinter.TOP)
root.mainloop()
if __name__ == '__main__':
main()
|
t-lou/pytena
|
main.py
|
main.py
|
py
| 1,537
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21138659052
|
from neo4j import GraphDatabase
# neo4j connection
driver = GraphDatabase.driver("bolt://127.0.0.1:7687", auth=("neo4j", "neo4j"))
# random walk
k = 10 # Number of neighbors
pre_weight = 2 # Weight of return
n = -1 # number of users to use, -1 means using all the users.
batch_size = 1000 # batchsize to save
cores = 12 # Multi threads
# Neo4j SQL to sample the motif.
motif_sql = '''
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)-[:msg|click]->(f) return "RESPOND" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n1}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
'''
"""
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)-[:msg|click]->(f) return "RESPOND" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n1}
union
match (a:User {{user_id: {id} }})<-->(m)-[:msg|click]->(f) return "SEND" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n1}
union
match (a:User {{user_id: {id} }})<-[:msg|click]-(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n2}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
union
match (a:User {{user_id: {id} }})<-->(m)<-[:msg|click]-(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 2 as weight limit {n2}
union
match (a:User {{user_id: {id} }})-[:msg|click]->(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 3 as weight limit {n2}
union
match (a:User {{user_id: {id} }})<-->(m)<-->(f) return "DOUBLE" as r1, m.user_id as middle, f.user_id as final, 4 as weight limit {n2}
"""
raw_walk_path = "../data/sjjy_data/motif_random_walk_path_M1+M4_b_{}.txt".format(pre_weight) # Path of the raw random walk sequences
raw_emb_path = "../model/sjjy_motif_walk_M1+M4_b_{}.emb".format(pre_weight) # Path of the raw embedding path
emb_save_path = "../model/sjjy_motif_walk_M1+M4_b_{}.emb".format(pre_weight) # No need for data Sjjy
# motif random walk
raw_train_data_path = "../data/sjjy_data/train_data_v4.csv"  # train user pairs file path (original user-pair IDs)
raw_test_data_path = '../data/sjjy_data/test_data_v4.csv'  # test file path
train_data_path = "../data/sjjy_data/rec_data_train_M1+M4_b_{}.csv".format(pre_weight) # train user pairs with neighbors
test_data_path = "../data/sjjy_data/rec_data_train_test_M1+M4_b_{}.csv".format(pre_weight)
# train
uid2idx_path = "../data/uid_2_idx.pkl" # user_id to id
model_save_path = "../model/recommend_M1+M4_b_{}.pb".format(pre_weight) # final model save path
check_point_path = "../checkpoint/recommend_M1+M4_b_{}.pth".format(pre_weight) # checkpoint path
feature_dict_path = "../data/sjjy_data/enc_feature_dict.pkl"
|
RManLuo/MotifGNN
|
src_sjjy/pipline_config.py
|
pipline_config.py
|
py
| 3,021
|
python
|
en
|
code
| 7
|
github-code
|
6
|
36636572184
|
import random
import pyxel
import utils
import stage
TYPE_AGGRESSIVE = 0
TYPE_MILD = 1
TYPE_RANDOM_SLOW = 2
TYPE_RANDOM_FAST = 3
TYPES = [
TYPE_AGGRESSIVE,
TYPE_MILD,
TYPE_RANDOM_SLOW,
TYPE_RANDOM_FAST
]
TICKS_PER_FRAME = 10
MAX_FRAME = 4
MAX_SPEED = 0.4
MAX_RESPAWN_TICKS = 300 # 5 secs
class Spinner:
def __init__(self, x, y, type):
self.x = x
self.y = y
self.type = 2
if type in TYPES:
self.type = type
self.vx = random.choice([-MAX_SPEED, MAX_SPEED])
self.vy = random.choice([-MAX_SPEED, MAX_SPEED])
self.radius = 4
self.frame = 0
self.frame_ticks = 0
self.is_dead = False
self.respawn_ticks = MAX_RESPAWN_TICKS
def _set_new_position(self, stageObj):
px = stageObj.player.x
py = stageObj.player.y
loc = None
loclist = [
stage.SPAWN_SECTOR_TOPLEFT,
stage.SPAWN_SECTOR_BOTTOMLEFT,
stage.SPAWN_SECTOR_TOPRIGHT,
stage.SPAWN_SECTOR_BOTTOMRIGHT
]
if px < 80:
if py < 75:
loclist.remove(stage.SPAWN_SECTOR_TOPLEFT)
else:
loclist.remove(stage.SPAWN_SECTOR_BOTTOMLEFT)
else:
if py < 75:
loclist.remove(stage.SPAWN_SECTOR_TOPRIGHT)
else:
loclist.remove(stage.SPAWN_SECTOR_BOTTOMRIGHT)
loc = stageObj.get_random_spawn_loc(random.choice(loclist))
self.x = loc[0]
self.y = loc[1]
def kill(self):
self.is_dead = True
self.respawn_ticks = MAX_RESPAWN_TICKS
def _do_collisions(self, stage):
new_x = self.x + self.vx
for b in stage.solid_rects:
if utils.circle_rect_overlap(new_x, self.y, self.radius,
b[0], b[1], b[2], b[3]):
if self.x > b[0] + b[2]: # was prev to right of border.
new_x = b[0] + b[2] + self.radius
elif self.x < b[0]: # was prev to left of border.
new_x = b[0] - self.radius
self.vx *= -1
break
new_y = self.y + self.vy
for b in stage.solid_rects:
if utils.circle_rect_overlap(self.x, new_y, self.radius,
b[0], b[1], b[2], b[3]):
if self.y > b[1] + b[3]: # was prev below border.
new_y = b[1] + b[3] + self.radius
elif self.y < b[1]: # was prev above border.
new_y = b[1] - self.radius
self.vy *= -1
break
self.x = new_x
self.y = new_y
def respawn(self):
self.is_dead = False
def update(self, stage):
if self.is_dead:
self.respawn_ticks -= 1
if self.respawn_ticks == 0:
self.respawn()
elif self.respawn_ticks == 30:
self._set_new_position(stage)
else:
self._do_collisions(stage)
self.frame_ticks += 1
if self.frame_ticks == TICKS_PER_FRAME:
self.frame_ticks = 0
self.frame += 1
if self.frame == MAX_FRAME:
self.frame = 0
def draw(self, shake_x, shake_y):
if self.is_dead:
framex = None
if self.respawn_ticks < 10:
framex = 42
elif self.respawn_ticks < 20:
framex = 63
elif self.respawn_ticks < 30:
framex = 84
if framex is not None:
pyxel.blt(
self.x + shake_x - 10,
self.y + shake_y - 10,
0,
framex,
231,
21, 21,
8
)
else:
pyxel.blt(
self.x + shake_x - 4,
self.y + shake_y - 4,
0,
160 + self.frame*9,
8,
9, 9,
8
)
|
helpcomputer/megaball
|
megaball/spinner.py
|
spinner.py
|
py
| 4,317
|
python
|
en
|
code
| 7
|
github-code
|
6
|
1568812252
|
#mark ericson
#9/12/2022
#This program creates a cubic grid
import rhinoscriptsyntax as rs
from random import uniform
def cubic_grid(x_number, y_number, z_number, cell_size):
rs.EnableRedraw(False)
point_list = []
for i in range(0, x_number,cell_size):
x = i
for j in range(0, y_number, cell_size):
y = j
for p in range(0, z_number, cell_size):
z = p
point = (x,y,z)
point_list.append(point)
rs.AddPoint(point)
return(point_list)
x_value = rs.GetInteger("Please provide the number of units in the x direction.")
y_value = rs.GetInteger("Please provide the number of units in the y direction.")
z_value = rs.GetInteger("Please provide the number of units in the z direction.")
cell_value = rs.GetInteger("Please provide a unit size for the grid.")
points = cubic_grid(x_value, y_value,z_value, cell_value)
for i in points:
radius = uniform(1,5)
rs.AddSphere(i,radius)
print (points)
|
mcericson/arch_441_repo_fall_22
|
03_week/sphere_grid_inclasss.py
|
sphere_grid_inclasss.py
|
py
| 1,037
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21998531046
|
from collections import Counter
class Solution:
def minWindow(self, s: str, t: str) -> str:
s_len = len(s)
t_len = len(t)
begin = 0
win_freq = {}
t_freq = dict(Counter(t))
min_len = s_len + 1
distance = 0
left = 0
right = 0
while right < s_len:
if s[right] in t_freq and t_freq[s[right]] == 0:
right += 1
continue
win_freq.setdefault(s[right], 0)
if s[right] in t_freq and win_freq[s[right]] < t_freq[s[right]]:
distance += 1
win_freq[s[right]] += 1
right += 1
            # When the window satisfies the condition, shrink it from the left edge
while distance == t_len:
# win_freq.setdefault(s[left], 0)
if right - left < min_len:
min_len = right - left
begin = left
if s[left] not in t_freq:
left += 1
continue
if s[left] in t_freq and win_freq[s[left]] == t_freq[s[left]]:
distance -= 1
win_freq[s[left]] -= 1
left += 1
if min_len == s_len + 1:
return ""
return s[begin:begin + min_len]
so = Solution()
print(so.minWindow(s="ADOBECODEBANC", t="ABC"))
|
hangwudy/leetcode
|
1-99/76. 最小覆盖子串.py
|
76. 最小覆盖子串.py
|
py
| 1,359
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1004121962
|
from flask import Flask, render_template, request
import pypandoc
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/convert', methods=['POST'])
def convert():
input_markup = request.form['input_markup']
output_markup = pypandoc.convert(input_markup, format='mediawiki', to='markdown_github')
return render_template('index.html',
input_markup=input_markup,
output_markup=output_markup)
if __name__ == '__main__':
app.run(debug=True)
|
myw/wiki-converter
|
converter.py
|
converter.py
|
py
| 552
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19203182463
|
import os
from dotenv import load_dotenv
from minio import Minio
from io import BytesIO
from data_pipeline.classes.data_loader.DataLoader import DataLoader
class MinIOLoader(DataLoader):
def __init__(self, endpoint, bucket_name):
super().__init__(endpoint)
self._bucket_name = bucket_name
self._create_bucket()
def _open_connection(self):
load_dotenv()
access_key = os.getenv("MINIO_USER")
secret_key = os.getenv("MINIO_PASSWORD")
return Minio(self._endpoint, access_key=access_key, secret_key=secret_key, secure=False)
def _create_bucket(self):
bucket_exists = self._connection.bucket_exists(self._bucket_name)
if not bucket_exists:
return self._connection.make_bucket(f"{self._bucket_name}")
else:
print(f"Bucket {self._bucket_name} already exists")
def load_from_csv(self, upload_filepath, file_name, file_type='csv'):
self._load_file(upload_filepath, file_name, file_type)
def load_from_dataframe(self, dataframe, file_name, file_type='csv'):
csv_file = dataframe.to_csv().encode('utf-8')
csv_buffer = BytesIO(csv_file)
self._connection.put_object(
self._bucket_name,
file_name,
csv_buffer,
length=len(csv_file),
content_type=f'application/{file_type}'
)
def load_from_json(self, upload_filepath, file_name, file_type='json'):
self._load_file(upload_filepath, file_name, file_type)
def _load_file(self, upload_filepath, file_name, file_type):
self._connection.fput_object(
self._bucket_name,
file_name,
upload_filepath,
content_type=f'application/{file_type}'
)
|
robbailiff/data_pipeline
|
src/data_pipeline/classes/data_loader/MinIOLoader.py
|
MinIOLoader.py
|
py
| 1,836
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72519791227
|
import json
from warnings import warn
# def init_from_config(meas_cls, config: dict):
# arg_str = ''
#
# for key, value in config.items():
# arg_str = key+'='+value
def export_measurement_config(obj, attr_keys=None):
if attr_keys is None:
attr_keys = obj.__init__.__code__.co_varnames
params = {}
for key in attr_keys:
flag = 0
if isinstance(obj, dict):
if key in obj.keys():
param = obj[key]
flag = 1
else:
if key != 'self' and hasattr(obj, key):
param = obj.__getattribute__(key)
flag = 1
if flag:
if param.__class__.__name__ in ['dict', 'list', 'tuple', 'str', 'int',
'float', 'bool', 'NoneType']:
params[key] = param
else:
warn('The parameter \'%s\' of type \'%s\' is not JSON serializable and is skipped.' %
(key, param.__class__.__name__))
return params
def save_config(config, filename):
with open(filename, 'w') as fp:
json.dump(config, fp, indent='\t')
def load_config(filename):
with open(filename, 'r') as fp:
config = json.load(fp)
return config
|
yyzidea/measurement-automation
|
utilities/measurement_helper.py
|
measurement_helper.py
|
py
| 1,278
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8068261091
|
from torchvision.models.detection import maskrcnn_resnet50_fpn
from rigl_torch.models import ModelFactory
@ModelFactory.register_model_loader(model="maskrcnn", dataset="coco")
def get_maskrcnn(*args, **kwargs):
return maskrcnn_resnet50_fpn(
weights=None, weights_backbone=None, trainable_backbone_layers=5
)
if __name__ == "__main__":
model = get_maskrcnn()
print(model)
|
calgaryml/condensed-sparsity
|
src/rigl_torch/models/maskrcnn.py
|
maskrcnn.py
|
py
| 400
|
python
|
en
|
code
| 10
|
github-code
|
6
|
69868074749
|
# The dict values() method returns a view of all the values in the dictionary; use list() to convert it to a list.
# dict.values()
# Return value: an iterable view object.
Object = {
"name": "Tom",
"age": 18,
"other": "其他"
}
print(Object.values()) # dict_values(['Tom', 18, '其他'])
for value in Object.values():
print('value', value) # value Tom\value 18\value 其他
print(list(Object.values())) # ['Tom', 18, '其他']
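# Note (added illustration, not in the original): values() returns a dynamic view,
# so it reflects later changes to the dictionary.
# Object["age"] = 20
# print(list(Object.values()))  # ['Tom', 20, '其他']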
|
yangbaoxi/dataProcessing
|
python/字典(对象)/遍历字典/values.py
|
values.py
|
py
| 467
|
python
|
zh
|
code
| 1
|
github-code
|
6
|
38160567413
|
#import bpy
from random import seed
from random import uniform
import numpy as np
import cv2
# seed random number generator
seed(1)
"""
def test1():
# make mesh
vertices = [(1, 0, 0),(1,0,5),(0,1,0)]
edges = []
faces = []
faces.append([0,1,2])
faces.append([2,0,3])
#new_mesh = bpy.data.meshes.new('new_mesh')
#new_mesh.from_pydata(vertices, edges, faces)
#knew_mesh.update()
# make object from mesh
#new_object = bpy.data.objects.new('new_object', new_mesh)
# make collection
#new_collection = bpy.data.collections.new('new_collection')
#bpy.context.scene.collection.children.link(new_collection)
# add object to scene collection
#new_collection.objects.link(new_object)
def add_mesh(name, verts, faces, edges=None, col_name="Collection"):
if edges is None:
edges = []
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(mesh.name, mesh)
col = bpy.data.collections.get(col_name)
col.objects.link(obj)
bpy.context.view_layer.objects.active = obj
mesh.from_pydata(verts, edges, faces)
"""
def get_translation_between_points(pt1,pt2):
transl = (pt2[0] - pt1[0], pt2[1] - pt1[1])
return transl
class Geometry2d:
    def __init__(self, points=None):
        # Avoid the shared mutable default argument: each instance gets its own list
        self.points = points if points is not None else []
def reverse(self):
self.points.reverse()
def get_last_point(self):
return self.points[-1]
def get_first_point(self):
return self.points[0]
def remove_first_element(self):
self.points.pop(0)
def remove_last_element(self):
self.points.pop()
def translate_points(self, transl):
points = self.points
for i,p in enumerate(points):
p = (p[0]+transl[0], p[1]+transl[1])
points[i] = p
return Geometry2d(points)
def scale_points(self, scale):
points = self.points
for i,p in enumerate(points):
p = (p[0]*scale, p[1]*scale)
points[i] = p
return Geometry2d(points)
def flip_y(self):
points = self.points
for i,p in enumerate(self.points):
points[i] = (p[0],-p[1])
return Geometry2d(points)
def draw(self):
width = 300
height = 300
        img = np.zeros((height, width))
scale_obj = self.scale_points(30)
draw_obj = scale_obj.flip_y()
draw_obj = draw_obj.translate_points((int(width/2), int(height/2)))
for point in draw_obj.points:
print(point)
point = (int(point[0]), int(point[1]))
cv2.circle(img, point, 2, (255,0,0), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
def combine_at_first(self, geo2d_obj):
new_geo2d_obj = Geometry2d()
first_pt_obj1 = self.get_first_point()
print("first_pt")
print(first_pt_obj1)
first_pt_obj2 = geo2d_obj.get_first_point()
print("last pt")
print(first_pt_obj2)
transl = get_translation_between_points(first_pt_obj2, first_pt_obj1)
print("transl")
print(transl)
transl_obj = geo2d_obj.translate_points(transl)
print("translated points")
print(transl_obj.points)
transl_obj.points.pop(0)
transl_obj.points = self.points + transl_obj.points
return transl_obj
def combine_at_last(self, geo2d_obj):
first_pt_obj1 = self.get_last_point()
print("first_pt")
print(first_pt_obj1)
first_pt_obj2 = geo2d_obj.get_first_point()
print("last pt")
print(first_pt_obj2)
transl = get_translation_between_points(first_pt_obj2, first_pt_obj1)
print("transl")
print(transl)
transl_obj = geo2d_obj.translate_points(transl)
print("translated points")
print(transl_obj.points)
transl_obj.points.pop(0)
transl_obj.points = self.points + transl_obj.points
return transl_obj
"""
def generate_2d_corner(height, width, corner_point):
p1 = (corner_point[0] + width, corner_point[1])
p2 = corner_point
p3 = (corner_point[0], corner_point[1]+height)
verts = []
# create bottom vertices
for i in range(num_points):
i = i/num_points*x_stop
x = i+uniform(-step,step)
y = -width/2
z = uniform(0,z_limit)
point = (x,y,z)
verts.append(point)
y = width/2
point = (x,y,z)
verts.append(point)
faces = []
for point in verts:
print(point)
# create faces
num_points = len(verts)
print(f'num points: {num_points}')
for i in range(0,num_points-2, 2):
faces.append([i+1, i, i+2, i+3])
print("Faces:")
print(faces)
for face in faces:
print(face)
add_mesh("testsets", verts, faces)
#verts = [( 1.0, 1.0, 0.0),
# ( 1.0, -1.0, 0.0),
# (-1.0, -1.0, 0.0),
# (-1.0, 1.0, 0.0),
#]
#faces = [[0, 1, 2, 3]]
#add_mesh("myBeautifulMesh_1", verts, faces)
#verts = [( 3.0, 1.0, 0.0),
# ( 3.0, -1.0, 0.0),
# ( 2.0, -1.0, 0.0),
# ( 2.0, 1.0, 0.0),
# ]
#add_mesh("myBeautifulMesh_2", verts, faces)
"""
if __name__ == '__main__':
geo2d_obj = Geometry2d([(1,0), (0,0),(0,1),(0,2), (0,3)])
geo2d_obj1 = Geometry2d([(0,0), (1,0), (1,2), (2,1.5),(3,2), (3,0), (4,0)])
geo2d_obj1 = geo2d_obj1.translate_points((4,4))
geo_comb = geo2d_obj.combine_at_last(geo2d_obj1)
geo_comb.draw()
|
olaals/masteroppgave-old
|
src/testing/blender/generate-mesh/generate-alu-parts/generate-test.py
|
generate-test.py
|
py
| 5,601
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39019535632
|
from typing import Union
import numpy as np
from numpy import typing as npt
from .outlier_removal_interface import OutlierRemovalInterface
class AbsoluteOutlierRemoval(OutlierRemovalInterface):
    def __init__(self, top: float = None, bottom=None) -> None:
super().__init__(top, bottom)
def fit(self, X: npt.ArrayLike):
self.lower_filter_ = self._bottom if self._bottom is not None else np.min(X)
self.top_filter_ = self._top if self._top is not None else np.max(X)
return self
def transform(self, X: npt.ArrayLike) -> npt.ArrayLike:
return np.where(
(X >= self.lower_filter_) &
(X <= self.top_filter_)
)
if __name__ == '__main__':
a = np.asarray([1, 2, 1, 2, 3, 2, 20, 30])
iqr_removal = AbsoluteOutlierRemoval(bottom=2)
iqr_removal.fit(a)
print(a)
print(a[iqr_removal.transform(a)])
|
Diogo364/AirBnB_PriceRegressor
|
utils/outlier_removal/absolute_outlier_removal.py
|
absolute_outlier_removal.py
|
py
| 893
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2521814692
|
#!/usr/bin/env python
import random
def merge(left, right):
res = []
while True:
if len(left) == 0:
res += right
break
elif len(right) == 0:
res += left
break
elif left[0] > right[0]:
res += [ right.pop(0) ]
else:
res += [ left.pop(0) ]
if len(left) == 0 and len(right) == 0:
break
#print(res)
return res
def merge_sort(a):
N = len(a)
if N == 1:
return a
else:
m = N//2
left = a[0:m]
right = a[m:]
#print('%s %s' % (left, right))
left = merge_sort(left)
right = merge_sort(right)
return merge(left, right)
N = 100
a = []
for i in range(N):
a.append(random.randint(0, 10000))
b = merge_sort(a)
s = True
c = sorted(a)
for i in range(len(a)):
if b[i] != c[i]:
s = False
print(s)
|
pmediaandy/bullpen
|
hackerrank/merge_sort.py
|
merge_sort.py
|
py
| 918
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37304550340
|
import pyrealsense2 as rs
import numpy as np
import cv2
WIDTH = 640
HEIGHT = 480
FPS = 30
# file name which you want to open
FILE = './data/stairs.bag'
def main():
# stream(Depth/Color) setting
config = rs.config()
config.enable_stream(rs.stream.color, WIDTH, HEIGHT, rs.format.rgb8, FPS)
config.enable_stream(rs.stream.depth, WIDTH, HEIGHT, rs.format.z16, FPS)
config.enable_device_from_file(FILE)
# Start streaming
pipeline = rs.pipeline()
pipeline.start(config)
try:
while True:
# Wait for frames(Color/Depth)
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.08), cv2.COLORMAP_JET)
color_image = np.asanyarray(color_frame.get_data())
# Show images
color_image_s = cv2.resize(color_image, (WIDTH, HEIGHT))
depth_colormap_s = cv2.resize(depth_colormap, (WIDTH, HEIGHT))
images = np.hstack((color_image_s, depth_colormap_s))
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
INTERVAL = 10
if cv2.waitKey(INTERVAL) & 0xff == 27: # End with ESC
cv2.destroyAllWindows()
break
finally:
# Stop streaming
pipeline.stop()
if __name__ == '__main__':
main()
|
masachika-kamada/realsense-matome
|
play_bagfile.py
|
play_bagfile.py
|
py
| 1,686
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35566573513
|
from locale import windows_locale
import turtle
import tkinter as tk
from tkinter import ttk
"""
Koch Snowflake Program
Author: Katherine Butt
This program uses a recursive algorithm to draw a Koch snowflake.
The user can use a slider and a button to control the number of times the recursion occurs.
Depending on how many times the recursion occurs, different orders of the snowflake building process are drawn.
"""
"""
The main purpose of this function is the recursion required to produce the snowflake.
This function is called from the drawsnowflake function once the user input has been gained.
This function recurses for as many orders as given by the user.
"""
def snowflake(sidelength, i):
if i == 0:
t.forward(sidelength)
return
sidelength /= 3.0
snowflake(sidelength, i-1)
t.left(60)
snowflake(sidelength, i-1)
t.right(120)
snowflake(sidelength, i-1)
t.left(60)
snowflake(sidelength, i-1)
check = "True"
"""
This function is responsible for getting the user input and initiating the snowflake drawing.
This function holds all the commands to get the turtle element of the program to begin running
"""
def drawsnowflake():
global size
t.clear()
order = (inputval.get()-1)
while(check == "True"):
size = root.winfo_width()/2
t.penup()
t.pendown()
screen.tracer(0)
for i in range(3):
snowflake(size, order)
t.right(120)
screen.tracer(1)
resizechecker()
def resizechecker():
if(size != root.winfo_width()/2):
t.clear()
drawsnowflake()
"""
The following code produces the gui window and allows for the tkinter and turtle modules to be on the same window
It sets all the defaults required for the gui window and the interactions that allow it to be resizeable.
"""
root = tk.Tk()
root.geometry("500x500")#This sets the base size of the window
root.minsize(300,300)
root.resizable(True,True) #This allows the window to be resizeable by both the width and the height
canvas = tk.Canvas(master = root) #Adding the canvas to the root window
canvas.pack(fill = tk.BOTH, expand=tk.YES) #Allowing the canvas to be expanded
root.update()
"""
This section of code adds all the elements required to the gui window
This means that all elements are shown to the user.
"""
screen = turtle.TurtleScreen(canvas)
t = turtle.RawTurtle(screen)
t.hideturtle() #This line hides the drawing icon
inputval = tk.DoubleVar()
s = tk.Scale(master = root, label='Pick your order', from_=1, to=10, orient=tk.HORIZONTAL,
length=200, showvalue=0,tickinterval=1, variable=inputval).pack(side=tk.LEFT, anchor="sw")
tk.Button(master = root, text = "Next", command = drawsnowflake).pack(side=tk.LEFT, anchor="sw")
tk.mainloop()
root.mainloop()
|
Katherinebutt/COSC-326
|
Etude 3/snowflake.py
|
snowflake.py
|
py
| 2,777
|
python
|
en
|
code
| 0
|
github-code
|
6
|
46046574096
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
REQUIREMENTS = open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-hooks',
version='0.2.0-pre',
description='A plugin system for django.',
author='Esteban Castro Borsani',
author_email='ecastroborsani@gmail.com',
long_description=README,
url='https://github.com/nitely/django-hooks',
packages=[
'hooks',
'hooks.templatetags',
],
include_package_data=True,
zip_safe=False,
install_requires=REQUIREMENTS,
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
nitely/django-hooks
|
setup.py
|
setup.py
|
py
| 1,303
|
python
|
en
|
code
| 16
|
github-code
|
6
|
35470658937
|
import queue, time
import threading
'''This is a basic reference script that shows how to use Queues & different types of Queues in python
There are basically 3 types of Queues :-
1. FIFO (default)
2. LIFO
3. Priority
'''
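# A minimal sketch (added for illustration, not part of the original script) of the
# other two queue types listed above; uncomment to try them:
# lq = queue.LifoQueue()
# lq.put(1); lq.put(2)
# print(lq.get())              # -> 2 (last in, first out)
# pq = queue.PriorityQueue()
# pq.put((2, 'low')); pq.put((1, 'high'))
# print(pq.get())              # -> (1, 'high') (lowest priority value comes out first)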
# q = queue.Queue()
# q.put(5)
# print(q.get()) # to fetch the items in the Queue
# print(q.empty()) # once we fetch the object from the queue the item is removed automatically from the queue
# for i in range(1,5):
# print('inserting inside queue ....{} \n'.format(i))
# q.put(i)
# while not q.empty():
#     '''Always check q.empty() first: with only a single thread, q.get() on an empty queue will
#     freeze (block) until the next item arrives. So ALWAYS USE DIFFERENT THREADS TO ACCESS THE ONE QUEUE.
# '''
# print(q.get(), end=' ')
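# (Added illustration, not in the original) Alternatively, q.get() accepts a timeout so it
# raises queue.Empty instead of blocking forever on an empty queue:
# try:
#     item = q.get(timeout=1)
# except queue.Empty:
#     item = None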
def putting_thread(q):
while True:
print('Starting thread \n')
time.sleep(5)
q.put(5)
print('put something \n')
q = queue.Queue()
t = threading.Thread(target=putting_thread, args=(q,), daemon=True)  # daemon thread: it is stopped automatically when the main function finishes
t.start()
q.put(55)
print('first item gotten \n')
# print(q.get())
while True:
print(q.get())
print('finished')
|
AbhishekMaity001/Python-Code-Snippets
|
Queue-Threading-1.py
|
Queue-Threading-1.py
|
py
| 1,311
|
python
|
en
|
code
| 1
|
github-code
|
6
|
36846615388
|
from typing import cast
from .kotlin_entities import (
KotlinEntity,
KotlinProperty,
KotlinEntityEnumeration,
PARSING_ERRORS_PROP_NAME,
ENTITY_STATIC_CREATOR
)
from ..base import Generator
from ... import utils
from ...config import GenerationMode, GeneratedLanguage, TEMPLATE_SUFFIX
from ...schema.modeling.entities import (
StringEnumeration,
EntityEnumeration,
Entity,
Object,
ObjectFormat,
)
from ...schema.modeling.text import Text, EMPTY
class KotlinGenerator(Generator):
def __init__(self, config):
super(KotlinGenerator, self).__init__(config)
self.kotlin_annotations = config.generation.kotlin_annotations
self._error_collectors = config.generation.errors_collectors
self._generate_equality = config.generation.generate_equality
self.generate_serialization = config.generation.generate_serialization
def filename(self, name: str) -> str:
return f'{utils.capitalize_camel_case(name)}.kt'
def _entity_declaration(self, entity: Entity) -> Text:
entity: KotlinEntity = cast(KotlinEntity, entity)
entity.__class__ = KotlinEntity
entity.eval_errors_collector_enabled(self._error_collectors)
entity.update_bases()
if entity.generate_as_protocol:
return self.__declaration_as_interface(entity)
result: Text = self.__main_declaration_header(entity)
is_template = entity.generation_mode.is_template
if is_template:
result += EMPTY
result += ' constructor ('
result += ' env: ParsingEnvironment,'
result += f' parent: {utils.capitalize_camel_case(entity.name)}? = null,'
result += ' topLevel: Boolean = false,'
result += ' json: JSONObject'
result += ' ) {'
result += ' val logger = env.logger'
constructor = entity.constructor_body(with_commas=False).indented(indent_width=8)
if constructor.lines:
result += constructor
result += ' }'
result += EMPTY
result += entity.value_resolving_declaration.indented(indent_width=4)
if self.generate_serialization:
result += EMPTY
result += entity.serialization_declaration.indented(indent_width=4)
if not is_template and self._generate_equality and not entity.instance_properties:
result += EMPTY
result += self.__manual_equals_hash_code_declaration.indented(indent_width=4)
if not is_template:
patch = entity.copy_with_new_array_declaration
if patch:
result += patch
static_declarations = entity.static_declarations(self.generate_serialization)
if static_declarations.lines:
result += EMPTY
result += ' companion object {'
result += static_declarations.indented(indent_width=8)
result += ' }'
result += EMPTY
if entity.inner_types:
for inner_type in filter(lambda t: not isinstance(t, StringEnumeration) or not is_template,
entity.inner_types):
result += EMPTY
result += self._main_declaration(inner_type).indented(indent_width=4)
result += '}'
return result
@staticmethod
def __declaration_as_interface(entity: KotlinEntity) -> Text:
result = Text(f'interface {utils.capitalize_camel_case(entity.name)} {{')
for prop in entity.instance_properties_kotlin:
result += prop.declaration(overridden=False,
in_interface=True,
with_comma=False,
with_default=False).indented(indent_width=4)
result += '}'
return result
def __main_declaration_header(self, entity: KotlinEntity) -> Text:
result = Text()
for annotation in self.kotlin_annotations.classes:
result += annotation
data_prefix = 'data '
if entity.generation_mode.is_template or not self._generate_equality or not entity.instance_properties:
data_prefix = ''
prefix = f'{data_prefix}class {utils.capitalize_camel_case(entity.name)}'
interfaces = ['JSONSerializable'] if self.generate_serialization else []
protocol_plus_super_entities = entity.protocol_plus_super_entities()
if protocol_plus_super_entities is not None:
interfaces.append(protocol_plus_super_entities)
interfaces = ', '.join(interfaces)
suffix = f' : {interfaces}' if interfaces else ''
suffix += ' {'
def add_instance_properties(text: Text, is_template: bool) -> Text:
mixed_properties = entity.instance_properties_kotlin
if entity.errors_collector_enabled:
mixed_properties.append(KotlinProperty(
name=PARSING_ERRORS_PROP_NAME,
description='',
description_translations={},
dict_field='',
property_type=Object(name='List<Exception>', object=None, format=ObjectFormat.DEFAULT),
optional=True,
is_deprecated=False,
mode=GenerationMode.NORMAL_WITHOUT_TEMPLATES,
supports_expressions_flag=False,
default_value=None,
platforms=None
))
for prop in mixed_properties:
overridden = False
if entity.implemented_protocol is not None:
overridden = any(p.name == prop.name for p in entity.implemented_protocol.properties)
text += prop.declaration(
overridden=overridden,
in_interface=False,
with_comma=not is_template,
with_default=not is_template
).indented(indent_width=4)
return text
if entity.generation_mode.is_template:
result += prefix + suffix
if entity.instance_properties:
result = add_instance_properties(text=result, is_template=True)
else:
constructor_prefix = ''
if self.kotlin_annotations.constructors:
constructor_annotations = ', '.join(self.kotlin_annotations.constructors)
constructor_prefix = f' {constructor_annotations} constructor '
if not entity.instance_properties:
result += f'{prefix}{constructor_prefix}(){suffix}'
else:
result += f'{prefix}{constructor_prefix}('
result = add_instance_properties(text=result, is_template=False)
result += f'){suffix}'
return result
@property
def __manual_equals_hash_code_declaration(self) -> Text:
result = Text('override fun equals(other: Any?) = javaClass == other?.javaClass')
result += EMPTY
result += 'override fun hashCode() = javaClass.hashCode()'
return result
def _entity_enumeration_declaration(self, entity_enumeration: EntityEnumeration) -> Text:
entity_enumeration: KotlinEntityEnumeration = cast(KotlinEntityEnumeration, entity_enumeration)
entity_enumeration.__class__ = KotlinEntityEnumeration
declaration_name = utils.capitalize_camel_case(entity_enumeration.name)
entity_declarations = list(map(utils.capitalize_camel_case, entity_enumeration.entity_names))
default_entity_decl = utils.capitalize_camel_case(str(entity_enumeration.default_entity_declaration))
result = Text()
for annotation in self.kotlin_annotations.classes:
result += annotation
interfaces = ['JSONSerializable'] if self.generate_serialization else []
interfaces.append(entity_enumeration.mode.protocol_name(
lang=GeneratedLanguage.KOTLIN,
name=entity_enumeration.resolved_prefixed_declaration))
interfaces = ', '.join(filter(None, interfaces))
suffix = f' : {interfaces}' if interfaces else ''
suffix += ' {'
result += f'sealed class {declaration_name}{suffix}'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'class {naming}(val value: {decl}) : {declaration_name}()'
result += Text(indent_width=4, init_lines=decl)
result += EMPTY
result += f' fun value(): {entity_enumeration.common_interface(GeneratedLanguage.KOTLIN) or "Any"} {{'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'is {naming} -> value'
result += Text(indent_width=12, init_lines=decl)
result += ' }'
result += ' }'
result += EMPTY
if self.generate_serialization:
result += ' override fun writeToJSON(): JSONObject {'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
decl = f'is {naming} -> value.writeToJSON()'
result += Text(indent_width=12, init_lines=decl)
result += ' }'
result += ' }'
result += EMPTY
if entity_enumeration.mode.is_template:
self_name = entity_enumeration.resolved_prefixed_declaration
result += f' override fun resolve(env: ParsingEnvironment, data: JSONObject): {self_name} {{'
result += ' return when (this) {'
for decl in entity_declarations:
case_name = entity_enumeration.format_case_naming(decl)
line = f'is {case_name} -> {self_name}.{case_name}(value.resolve(env, data))'
result += Text(indent_width=12, init_lines=line)
result += ' }'
result += ' }'
result += EMPTY
result += ' val type: String'
result += ' get() {'
result += ' return when (this) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
line = f'is {naming} -> {decl}.TYPE'
result += Text(indent_width=16, init_lines=line)
result += ' }'
result += ' }'
result += EMPTY
elif self._generate_equality:
result += ' override fun equals(other: Any?): Boolean {'
result += ' if (this === other) { return true }'
result += f' if (other is {declaration_name}) {{'
result += ' return value().equals(other.value())'
result += ' }'
result += ' return false'
result += ' }'
result += EMPTY
if not self.generate_serialization:
result += '}'
return result
result += ' companion object {'
result += ' @Throws(ParsingException::class)'
source_name = 'json'
source_type = 'JSONObject'
read_type_expr = 'json.read("type", logger = logger, env = env)'
read_type_opt_expr = 'json.readOptional("type", logger = logger, env = env)'
throwing_expr = 'throw typeMismatch(json = json, key = "type", value = type)'
if entity_enumeration.mode.is_template:
def deserialization_args(s):
return f'env, parent?.value() as {s}?, topLevel, {source_name}'
result += ' operator fun invoke('
result += ' env: ParsingEnvironment,'
result += ' topLevel: Boolean = false,'
result += f' {source_name}: {source_type}'
result += f' ): {declaration_name} {{'
result += ' val logger = env.logger'
if default_entity_decl:
result += f' val receivedType: String = {read_type_opt_expr} ?: {default_entity_decl}Template.TYPE'
else:
result += f' val receivedType: String = {read_type_expr}'
result += f' val parent = env.templates[receivedType] as? {declaration_name}'
result += ' val type = parent?.type ?: receivedType'
else:
def deserialization_args(s):
return f'env, {source_name}'
result += ' @JvmStatic'
result += ' @JvmName("fromJson")'
args = f'env: ParsingEnvironment, {source_name}: {source_type}'
result += f' operator fun invoke({args}): {declaration_name} {{'
result += ' val logger = env.logger'
if default_entity_decl:
result += f' val type: String = {read_type_opt_expr} ?: {default_entity_decl}.TYPE'
else:
result += f' val type: String = {read_type_expr}'
result += ' when (type) {'
for decl in entity_declarations:
naming = entity_enumeration.format_case_naming(decl)
line = f'{decl}.TYPE -> return {naming}({decl}({deserialization_args(decl)}))'
result += Text(indent_width=16, init_lines=line)
if entity_enumeration.mode is GenerationMode.NORMAL_WITH_TEMPLATES:
result += ' }'
name = utils.capitalize_camel_case(entity_enumeration.name + TEMPLATE_SUFFIX)
template_type = entity_enumeration.template_declaration_prefix + name
result += f' val template = env.templates.getOrThrow(type, json) as? {template_type}'
result += ' if (template != null) {'
result += f' return template.resolve(env, {source_name})'
result += ' } else {'
result += f' {throwing_expr}'
result += ' }'
else:
result += f' else -> {throwing_expr}'
result += ' }'
result += ' }'
static_creator_lambda = f'env: ParsingEnvironment, it: JSONObject -> {declaration_name}(env, json = it)'
result += f' val {ENTITY_STATIC_CREATOR} = {{ {static_creator_lambda} }}'
result += ' }'
result += '}'
return result
def _string_enumeration_declaration(self, string_enumeration: StringEnumeration) -> Text:
declaration_name = utils.capitalize_camel_case(string_enumeration.name)
cases_declarations = list(map(lambda s: Text(indent_width=16, init_lines=f'{s}.value -> {s}'),
map(lambda s: utils.fixing_first_digit(utils.constant_upper_case(s[0])),
string_enumeration.cases)))
result = Text(f'enum class {declaration_name}(private val value: String) {{')
for ind, case in enumerate(string_enumeration.cases):
terminal = ',' if ind != (len(cases_declarations) - 1) else ';'
name = utils.fixing_first_digit(utils.constant_upper_case(case[0]))
value = case[1]
result += Text(indent_width=4, init_lines=f'{name}("{value}"){terminal}')
result += EMPTY
result += ' companion object Converter {'
result += f' fun toString(obj: {declaration_name}): String {{'
result += ' return obj.value'
result += ' }'
result += EMPTY
result += f' fun fromString(string: String): {declaration_name}? {{'
result += ' return when (string) {'
result += cases_declarations
result += ' else -> null'
result += ' }'
result += ' }'
result += EMPTY
result += ' val FROM_STRING = { string: String ->'
result += ' when (string) {'
result += cases_declarations
result += ' else -> null'
result += ' }'
result += ' }'
result += ' }'
result += '}'
return result
|
divkit/divkit
|
api_generator/api_generator/generators/kotlin/generator.py
|
generator.py
|
py
| 16,470
|
python
|
en
|
code
| 1,940
|
github-code
|
6
|
38959212336
|
obj=open("Demo.txt","w+")
obj.write("Hello World")
obj.seek(3) #Move to 4th Byte
print(obj.tell()) # Gives the byte number (in terms of index)
print(obj.read())
obj.seek(2,0)#Move 2 bytes further from beginning(0[First character at 0]+2=2)
print(obj.read())
obj.close()
obj=open("Demo.txt","rb")
obj.seek(1)
obj.seek(5,1)#Move 5 bytes further from current position[1[Currently at 2nd byte]+5=6]
print(obj.read().decode("utf-8"))
obj.seek(3)
obj.seek(-2,1)#Move 2 bytes back from current position
print(obj.read().decode("utf-8"))
obj.seek(-3,2) #Move 3 bytes backward[Reference position is from end]
print(obj.read().decode("utf-8"))
|
9Mugen/int108
|
seeking_another.py
|
seeking_another.py
|
py
| 646
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41058690136
|
# Nicolas Gomollon, Lab 6
class Poly:
def __init__(self,*terms):
# __str__ uses the name self.terms for the dictionary of terms
# So __init__ should build this dictionary from terms
self.terms = {}
for coeff, power in terms:
assert type(coeff) in (int, float), "Poly.__init__: illegal coefficient in : {}".format((coeff, power))
assert (type(power) is int) and (power >= 0), "Poly.__init__: illegal power in : {}".format((coeff, power))
assert power not in self.terms, "Poly.__init__: power({}) can only be defined once".format(power)
if coeff != 0: self.terms[power] = coeff
        # Fill in the rest of this method, using *terms to initialize self.terms
# I have written str(...) because it is used in the bsc.txt file and
# it is a bit subtle to get correct. Notice that it assumes that
# every Poly object stores a dict whose keys are powers and whose
# associated values are coefficients. This function does not depend
# on any other method in this class being written correctly.
def __str__(self):
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self):
return "Poly({})".format(', '.join((str((coeff, power)) for power, coeff in self.terms.items())))
def __len__(self):
if len(self.terms) == 0: return 0
return max(self.terms)
def __call__(self,arg):
return sum([(coeff * (arg ** power)) for power, coeff in self.terms.items()])
def __iter__(self):
for power, coeff in sorted(self.terms.items(), reverse=True):
yield (coeff, power)
def __getitem__(self,index):
if (type(index) is not int) or (index < 0):
raise TypeError("Poly.__getitem__: illegal index : {}".format(index))
if index not in self.terms:
return 0
return self.terms[index]
def __setitem__(self,index,value):
if (type(index) is not int) or (index < 0):
raise TypeError("Poly.__setitem__: illegal index : {}".format(index))
if (value != 0):
self.terms[index] = value
elif index in self.terms:
self.terms.pop(index)
return None
def __delitem__(self,index):
if (type(index) is not int) or (index < 0):
raise TypeError("Poly.__delitem__: illegal index : {}".format(index))
if index in self.terms:
self.terms.pop(index)
return None
def _add_term(self,c,p):
if type(c) not in (int, float):
raise TypeError("Poly._add_term: illegal coefficient in : {}".format((c, p)))
if (type(p) is not int) or (p < 0):
raise TypeError("Poly._add_term: illegal power in : {}".format((c, p)))
if (c != 0) and (p not in self.terms):
self[p] = c
elif (p in self.terms):
self[p] += c
return None
def __add__(self,right):
if type(right) is Poly:
new_terms = dict(self.terms)
for power, coeff in right.terms.items():
if power in new_terms:
if (new_terms[power] + coeff) != 0:
new_terms[power] += coeff
else:
new_terms.pop(power)
else:
new_terms[power] = coeff
return Poly(*[(coeff, power) for power, coeff in new_terms.items()])
elif type(right) in (int, float):
new_terms = [(coeff, power) for power, coeff in self.terms.items() if power > 0]
new_terms += [(self[0] + right, 0)]
return Poly(*new_terms)
raise TypeError("Poly.__add__: illegal operand : {}".format(right))
def __radd__(self,left):
return self.__add__(left)
def __mul__(self,right):
if type(right) is Poly:
new_terms = {}
for p1, c1 in self.terms.items():
for p2, c2 in right.terms.items():
new_p = p1 + p2
new_c = c1 * c2
if new_p in new_terms:
new_terms[new_p] += new_c
else:
new_terms[new_p] = new_c
return Poly(*[(coeff, power) for power, coeff in new_terms.items()])
elif type(right) in (int, float):
new_terms = dict(self.terms)
for power, coeff in new_terms.items():
new_terms[power] *= right
return Poly(*[(coeff, power) for power, coeff in new_terms.items()])
raise TypeError("Poly.__mul__: illegal operand : {}".format(right))
def __rmul__(self,left):
return self.__mul__(left)
def __eq__(self,right):
if type(right) is Poly:
return self.terms == right.terms
elif type(right) in (int, float):
if (len(self.terms) == 1) and (len(self) == 0):
return self.terms[0] == right
return False
raise TypeError("Poly.__eq__: illegal operand : {}".format(right))
if __name__ == '__main__':
# Some simple tests; you can comment them out and/or add your own before
# the driver is called.
print('Start simple tests')
p = Poly((3,2),(-2,1), (4,0))
print(' For Polynomial: 3x^2 - 2x + 4')
print(' str(p):',p)
print(' repr(p):',repr(p))
print(' len(p):',len(p))
print(' p(2):',p(2))
print(' list collecting iterator results:',[t for t in p])
print(' p+p:',p+p)
print(' p+2:',p+2)
print(' p*p:',p*p)
print(' p*2:',p*2)
print('End simple tests\n')
import driver
#driver.default_show_exception=True
#driver.default_show_exception_message=True
#driver.default_show_traceback=True
driver.driver()
|
solomc1/python
|
ics 33/solutions/ile2 solutions/Lab 6/GomollonNicolas/poly.py
|
poly.py
|
py
| 6,377
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1282652745
|
import datetime
import pandas as pd
from tqdm import tqdm
from emailer import Emailer
from shipping import Shipping
from shipstation import Shipstation
def main():
# Instantiate objects to be used throughout the script
shipstation = Shipstation()
shipping = Shipping()
# Get all shipment information from ShipStation
print("\nGetting shipments...", end="")
all_shipments = shipstation.get_shipments()
print("done!\n")
# Filter shipments for only those that were delivered, per ShipStation
print("\nFiltering for delivered shipments...", end="")
delivered_shipments = [
shipment for shipment in all_shipments if shipment["confirmation"] == "delivery"
]
print("done!\n")
# Filter delivered shipments created in the last week for those with orders created in the last week
print("\nFiltering for orders within the last week...", end="")
good_shipments = []
for shipment in tqdm(delivered_shipments, position=0, leave=True):
order_response = shipstation.get_order(shipment["orderId"])
order_date = datetime.datetime.strptime(
order_response["orderDate"], "%Y-%m-%dT%H:%M:%S.%f0"
)
if order_date > datetime.datetime.now() - datetime.timedelta(days=8):
if len(order_response["items"]) == 1:
good_shipments.append((shipment, order_response))
print("done!\n")
# Get tracking info from USPS and UPS
print("\nGetting tracking info...", end="")
usps_info = {}
usps_tracking_numbers = [
s[0]["trackingNumber"] for s in good_shipments if "usps" in s[0]["serviceCode"]
]
for tracking_number in usps_tracking_numbers:
usps_info[tracking_number] = shipping.get_ups_tracking(tracking_number)
ups_info = {}
ups_tracking_numbers = [
s[0]["trackingNumber"] for s in good_shipments if "ups" in s[0]["serviceCode"]
]
for tracking_number in ups_tracking_numbers:
ups_info[tracking_number] = shipping.get_ups_tracking(tracking_number)
# Combine tracking info into one dictionary
tracking_info = {**ups_info, **usps_info}
print("done!\n")
# Filter shipments for those that were confirmed as delivered during the previous business day by USPS or UPS
print("\nFiltering for deliveries confirmed by the carrier...", end="")
actually_delivered = [
s
for s in good_shipments
if tracking_info.get(s[0]["trackingNumber"], [0, False])[1]
and datetime.datetime.strptime(
tracking_info.get(s[0]["trackingNumber"], ["2023-01-01 00:00", False])[0],
"%Y-%m-%d %H:%M",
).date()
== (datetime.datetime.now() - datetime.timedelta(days=3)).date()
]
print("done!\n")
# Create pandas DataFrame for data to be exported
print("\nSending to CSV...", end="")
filename = "shipstation_delivered.csv"
values = []
for i in range(len(actually_delivered)):
shipment_id = actually_delivered[i][0]["shipmentId"]
order_id = actually_delivered[i][0]["orderId"]
email = actually_delivered[i][0]["customerEmail"]
ship_date = actually_delivered[i][0]["shipDate"]
order_date = actually_delivered[i][1]["createDate"]
bill_to = actually_delivered[i][1]["billTo"]
ship_to = actually_delivered[i][1]["shipTo"]
item = actually_delivered[i][1]["items"][0]["sku"]
quantity = actually_delivered[i][1]["items"][0]["quantity"]
tracking_number = actually_delivered[i][0]["trackingNumber"]
values.append(
(
shipment_id,
order_id,
email,
ship_date,
order_date,
bill_to,
ship_to,
item,
quantity,
tracking_number,
)
)
df = pd.DataFrame(
values,
columns=[
"shipmentId",
"orderId",
"customerEmail",
"shipDate",
"orderDate",
"billTo",
"shipTo",
"sku",
"quantity",
"trackingNumber",
],
)
df["deliveryDate"] = df["trackingNumber"].map(
{k: v[0] for k, v in usps_info.items()}
)
df["deliveryDate"] = pd.to_datetime(df["deliveryDate"])
df.to_csv(filename, index=False)
print("done!\n")
# Sending email to relevant parties
emailer = Emailer(to_address="matt@jmac.com")
print("\nSending email...", end="")
subject = "ShipStation Daily Report"
body = f"""
Attached are the {len(df)} cherry-picked orders/shipments that were delivered during the
previous business day.
"""
emailer.send_email(subject, body, filename)
print("done!\n")
if __name__ == "__main__":
main()
|
mattgrcia/review-booster
|
main.py
|
main.py
|
py
| 4,870
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21894452141
|
from dsa_stack import DSAStack
import sys
from typing import Union
class TowersOfHanoi:
def __init__(self, num_pegs: int, num_disks: int) -> None:
self.num_pegs = num_pegs
self.num_disks = num_disks
self.pegs = [
DSAStack(num_disks),
DSAStack(num_disks),
DSAStack(num_disks),
]
def place_disk(self, peg: int, disk: int) -> None:
peg = self.pegs[peg]
if peg.is_empty() or disk < peg.top():
peg.push(disk)
else:
raise ValueError(
"Disk of size {} cannot be placed on disk of size {}.".format(disk, peg.top()))
def remove_disk(self, peg: int) -> int:
peg = self.pegs[peg]
return peg.pop()
def move_disk(self, src: int, dst: int) -> None:
self.place_disk(dst, self.remove_disk(src))
# Gets the disk at the given peg and index from bottom, or None if none
# exists.
def disk_at(self, peg: int, i: int) -> Union[int, None]:
p = self.pegs[peg].as_list()
if i < len(p):
d = p[-1 - i]
else:
d = None
return d
# Moves n disks from peg src to peg dst (1-indexed).
def solve(n: int, src: int, dst: int) -> None:
src -= 1
dst -= 1
towers = TowersOfHanoi(3, n)
for i in range(n, 0, -1):
towers.place_disk(src, i)
step = 0
display_progress(towers, step)
solve_impl(towers, n, src, dst, step)
# Moves n disks from peg src to peg dst (0-indexed).
# Returns the new step count.
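# Recursive scheme: move the top n-1 disks to the spare peg, move the largest disk to
# dst, then move those n-1 disks on top of it (2**n - 1 moves in total).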
def solve_impl(towers: TowersOfHanoi, n: int, src: int, dst: int, step: int) -> int:
if n <= 0:
raise AssertionError("n must be > 0.")
elif n == 1:
towers.move_disk(src, dst)
step += 1
display_progress(towers, step)
else:
other = 3 - src - dst
step = solve_impl(towers, n - 1, src, other, step)
towers.move_disk(src, dst)
step += 1
display_progress(towers, step)
step = solve_impl(towers, n - 1, other, dst, step)
return step
def display_progress(towers: TowersOfHanoi, step: int) -> None:
header = "Step {}:".format(step)
indent = " " * (len(header) + 2)
disk_width = len(str(towers.num_disks))
print(header)
for i in range(towers.num_disks - 1, -1, -1):
print(indent, end="")
for j in range(towers.num_pegs):
disk = towers.disk_at(j, i)
if disk is None:
s = "|"
else:
s = str(disk)
# Padding for when disk could be multiple columns wide.
s = " " * (disk_width - len(s)) + s
print(s + " ", end="")
print()
print()
if len(sys.argv) != 4:
print("Usage: python {} num_disks src_peg dst_peg".format(sys.argv[0]))
else:
try:
num_disks = int(sys.argv[1])
src = int(sys.argv[2])
dst = int(sys.argv[3])
except ValueError:
print("Parameters must be integers.")
else:
if num_disks < 1:
print("num_disks must be > 0.")
# Currently needs 8 extra stack frames to run, will require adjustment
# if implementation changes.
elif sys.getrecursionlimit() < num_disks + 8:
print("Solving with num_disks={} would exceed max call stack depth."
.format(num_disks))
elif not 0 < src <= 3:
print("src_peg must be > 0 and <= 3.")
elif not 0 < dst <= 3:
print("dst_peg must be > 0 and <= 3.")
else:
print("Solving Towers of Hanoi with {} pegs and {} disks, starting from peg {} and ending at peg {}."
.format(3, num_disks, src, dst))
print("Number of moves required: {}.".format(2 ** num_disks - 1))
print()
solve(num_disks, src, dst)
|
MC-DeltaT/DSA-Practicals
|
P2/towers_of_hanoi.py
|
towers_of_hanoi.py
|
py
| 3,845
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26333498275
|
# Face detection is done using classifier
# A classifier is an algorithm that decides whether a face is present or not.
# A classifier needs to be trained on thousands of images with and without faces.
# OpenCV ships pretrained classifiers such as the Haar cascade and Local Binary Pattern.
import cv2 as cv
img = cv.imread('Images/group.jpg')
cv.imshow('group', img)
# First we convert the image to grayscale, because face detection does not depend on color
# The Haar cascade looks at objects in the image and uses edges to find the faces
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('GrayScale', gray)
# Now read in the Haar cascade file
haar_cascade = cv.CascadeClassifier('haar_face.xml')
# Detection of Face
face_rect = haar_cascade.detectMultiScale(gray, 1.1, minNeighbors=3) # detects faces and returns a list of rectangle coordinates, one per face
print(f'No. of faces detected : {len(face_rect)}')
# print(face_rect)
for (x,y,w,h) in face_rect:
cv.rectangle(img, (x,y), (x+w,y+h), (0,0,255), thickness=2)
# For a group photo the Haar cascade is quite sensitive to noise, so it may report more faces than are actually present
# A lower minNeighbors value yields more detections (and more false positives), and vice versa
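# Illustrative tuning (hypothetical values, not part of the original script): a larger
# scaleFactor and minNeighbors usually reduce false positives, e.g.
# face_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=6)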
cv.imshow('Detected_Faces', img)
cv.waitKey(0)
|
JinalSinroja/OpenCV
|
Face_Detection.py
|
Face_Detection.py
|
py
| 1,299
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26435457278
|
# Import all necessary libraries
from distutils.cmd import Command
import tkinter as tk
from tkinter import ttk
from tkinter import *
from tkinter import font as tkFont
import math
# Initialize tkinter and set the display
root = tk.Tk()
root.configure(bg = "green")
root.title("Calculator MIN 3000")
root.geometry("294x389")
root.resizable(False, False)
options = {"padx": 1, "pady": 1}
relation = ""
#Function to update entered number and syntax in entry box
def button_click(num):
"""This function collects all the selected characters and displays them"""
global relation
relation = relation + str(num)
equation.set(relation)
#Function to evaluate the final expression
def click_equal():
"""This function evaluates the expression when equal is selected"""
try:
global relation
total = str(eval(relation))
equation.set(total)
relation = ""
except:
equation.set("error")
relation = ""
#Function to clear and delete the contents of text entry box
def delete():
"""This function deletes contents of text entry box"""
global relation
relation = ""
equation.set("")
# Define variables and constants
equation = StringVar()
helv36 = tkFont.Font(family='Helvetica', size=20, weight=tkFont.BOLD)
sqrt = math.sqrt
prct = 1/100
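# sqrt and prct are kept at module level so that eval() inside click_equal can resolve
# the "sqrt" and "prct" tokens inserted by the corresponding buttons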
# Configuration of the display and buttons
result_display = ttk.Entry(
root,
textvariable = equation ,
width=19,
font=helv36
).grid(
column=0,
row=0,
columnspan=4,
**options
)
button_percent = Button(
root,
text="%",
width=3,
font=helv36,
bg="#ADD8E6",
command= lambda: button_click("prct")
).grid(
column=0,
row=1,
sticky="nsew",
**options
)
button_left_parenthesis = Button(
root,
text="(",
width=3,
command=lambda: button_click("("),
font=helv36,
bg="#ADD8E6"
).grid(
column=1,
row=1,
sticky="nsew",
**options
)
button_right_parenthesis = Button(
root,
text=")",
width=3,
command=lambda: button_click(")"),
font=helv36,
bg="#ADD8E6"
).grid(
column=2,
row=1,
sticky="nsew",
**options
)
button_del = Button(
root,
text="DEL",
width=3,
font=helv36,
command = delete,
bg="#ADD8E6",
fg="red"
).grid(
column=3,
row=1,
sticky="nsew",
**options
)
button_square = Button(
root,
text="x^2",
width=3,
command=lambda: button_click("**2"),
font=helv36,
bg="#ADD8E6"
).grid(
column=0,
row=2,
sticky="nsew",
**options
)
button_sqrt = Button(
root,
text="√",
width=3,
font=helv36,
bg="#ADD8E6",
command=lambda: button_click("sqrt")
).grid(
column=1,
row=2,
sticky="nsew",
**options
)
button_1_devide_x = Button(
root,
text="1⁄x",
width=3,
font=helv36,
bg="#ADD8E6"
).grid(
column=2,
row=2,
sticky="nsew",
**options
)
button_multiply = Button(
root,
text="×",
width=3,
command= lambda: button_click("*"),
font=helv36,
bg="#ADD8E6"
).grid(
column=3,
row=2,
sticky="nsew",
**options
)
button_7 = Button(
root,
text="7",
width=3,
command=lambda: button_click(7),
font=helv36
).grid(
column=0,
row=3,
sticky="nsew",
**options
)
button_8 = Button(
root,
text="8",
width=3,
command=lambda: button_click(8),
font=helv36
).grid(
column=1,
row=3,
sticky="nsew",
**options
)
button_9 = Button(
root,
text="9",
width=3,
command=lambda: button_click(9),
font=helv36
).grid(
column=2,
row=3,
sticky="nsew",
**options
)
button_devide = Button(
root,
text="÷",
width=3,
command=lambda: button_click("/"),
font=helv36,
bg="#ADD8E6"
).grid(
column=3,
row=3,
sticky="nsew",
**options
)
button_4 = Button(
root,
text="4",
width=3,
command=lambda: button_click(4),
font=helv36
).grid(
column=0,
row=4,
sticky="nsew",
**options
)
button_5 = Button(
root,
text="5",
width=3,
command=lambda: button_click(5),
font=helv36
).grid(
column=1,
row=4,
sticky="nsew",
**options
)
button_6 = Button(
root,
text="6",
width=3,
command=lambda: button_click(6),
font=helv36
).grid(
column=2,
row=4,
sticky="nsew",
**options
)
button_subtract = Button(
root,
text="-",
width=3,
command=lambda: button_click("-"),
font=helv36,
bg="#ADD8E6"
).grid(
column=3,
row=4,
sticky="nsew",
**options
)
button_1 = Button(
root,
text="1",
width=3,
command=lambda: button_click(1),
font=helv36
).grid(
column=0,
row=5,
sticky="nsew",
**options
)
button_2 = Button(
root,
text="2",
width=3,
command=lambda: button_click(2),
font=helv36
).grid(
column=1,
row=5,
sticky="nsew",
**options
)
button_3 = Button(
root,
text="3",
width=3,
command=lambda: button_click(3),
font=helv36
).grid(
column=2,
row=5,
sticky="nsew",
**options
)
button_add = Button(
root,
text="+",
width=3,
command=lambda: button_click("+"),
font=helv36,
bg="#ADD8E6"
).grid(
column=3,
row=5,
sticky="nsew",
**options
)
button_0 = Button(
root,
text="0",
width=6,
command=lambda: button_click(0),
font=helv36
).grid(
column=0,
row=6,
columnspan=2,
sticky="nsew",
**options
)
button_dot = Button(
root,
text="∙",
width=3,
command=lambda: button_click("."),
font=helv36
).grid(
column=2,
row=6,
sticky="nsew",
**options
)
button_equal = Button(
root,
text="=",
width=3,
command = click_equal,
font=helv36,
bg="#ADD8E6"
).grid(
column=3,
row=6,
sticky="nsew",
**options
)
root.mainloop()
|
IonMurzac/tkinter_calculator
|
P_13_Calculator.py
|
P_13_Calculator.py
|
py
| 7,168
|
python
|
en
|
code
| 0
|
github-code
|
6
|
575142348
|
#breakout.py
#A. Colwell (2015)
# graphics2 module returns a TUPLE when using .getOverlap()
# The tuple will have two numbers, indicating which objects
# have overlapped. Only useful if you know the corresponding
# object. In this case, the first brick (brick1) is the first
# object drawn in the window, and therefore the object 1.
# All the bricks are drawn first, so their numbers are 1-36
# Paddle is number 37
# Ball is number 38
# Submitting this document will give you 50% plus:
# 10% if you can add 10 points for every brick hit Yes
# 10% display points in the window Yes
# 10% add 30 points for every brick in top row only No
# 10% vary direction of ball movement (any angle) No
# 10% reset and replay game after finished Yes
Power = 'On'
while Power == 'On':
Reset = 'y'
from graphics2 import *
from time import sleep
while Reset == 'y':
winx=600
winy=400
win = GraphWin('Brick Breaker',600,400)
move={'Left':(-1,0),'Right':(1,0),'':(0,0)}
speed = 4
#--------------------Start Game-----------------------------
Start = Text(Point(300,200),'Click anywhere to start the game')
Start.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<600 and y<400:
Start.undraw()
Esc = Text(Point(500,10),'Press Esc key to close the game')
Esc.setSize(10)
Esc.draw(win)
#--------------------Bricks-----------------------------
brick1 = Rectangle(Point(0,40),Point(49,49))
brick1.setFill('red')
brick1.setOutline('lightgrey')
brick1.draw(win)
brick2 = Rectangle(Point(50,40),Point(99,49))
brick2.setFill('blue')
brick2.setOutline('lightgrey')
brick2.draw(win)
brick3 = Rectangle(Point(100,40),Point(149,49))
brick3.setFill('yellow')
brick3.setOutline('lightgrey')
brick3.draw(win)
brick4 = Rectangle(Point(150,40),Point(199,49))
brick4.setFill('green')
brick4.setOutline('lightgrey')
brick4.draw(win)
brick5 = Rectangle(Point(200,40),Point(249,49))
brick5.setFill('purple')
brick5.setOutline('lightgrey')
brick5.draw(win)
brick6 = Rectangle(Point(250,40),Point(299,49))
brick6.setFill('red')
brick6.setOutline('lightgrey')
brick6.draw(win)
brick7 = Rectangle(Point(300,40),Point(349,49))
brick7.setFill('blue')
brick7.setOutline('lightgrey')
brick7.draw(win)
brick8 = Rectangle(Point(350,40),Point(399,49))
brick8.setFill('green')
brick8.setOutline('lightgrey')
brick8.draw(win)
brick9 = Rectangle(Point(400,40),Point(449,49))
brick9.setFill('yellow')
brick9.setOutline('lightgrey')
brick9.draw(win)
brick10 = Rectangle(Point(450,40),Point(499,49))
brick10.setFill('purple')
brick10.setOutline('lightgrey')
brick10.draw(win)
brick11 = Rectangle(Point(500,40),Point(549,49))
brick11.setFill('red')
brick11.setOutline('lightgrey')
brick11.draw(win)
brick12 = Rectangle(Point(550,40),Point(599,49))
brick12.setFill('blue')
brick12.setOutline('lightgrey')
brick12.draw(win)
brick13 = Rectangle(Point(0,30),Point(49,39))
brick13.setFill('green')
brick13.setOutline('lightgrey')
brick13.draw(win)
brick14 = Rectangle(Point(50,30),Point(99,39))
brick14.setFill('yellow')
brick14.setOutline('lightgrey')
brick14.draw(win)
brick15 = Rectangle(Point(100,30),Point(149,39))
brick15.setFill('purple')
brick15.setOutline('lightgrey')
brick15.draw(win)
brick16 = Rectangle(Point(150,30),Point(199,39))
brick16.setFill('red')
brick16.setOutline('lightgrey')
brick16.draw(win)
            brick17 = Rectangle(Point(200,30),Point(249,39))
brick17.setFill('blue')
brick17.setOutline('lightgrey')
brick17.draw(win)
brick18 = Rectangle(Point(250,30),Point(299,39))
brick18.setFill('green')
brick18.setOutline('lightgrey')
brick18.draw(win)
brick19 = Rectangle(Point(300,30),Point(349,39))
brick19.setFill('yellow')
brick19.setOutline('lightgrey')
brick19.draw(win)
brick20 = Rectangle(Point(350,30),Point(399,39))
brick20.setFill('purple')
brick20.setOutline('lightgrey')
brick20.draw(win)
brick21 = Rectangle(Point(400,30),Point(449,39))
brick21.setFill('red')
brick21.setOutline('lightgrey')
brick21.draw(win)
brick22 = Rectangle(Point(450,30),Point(499,39))
brick22.setFill('blue')
brick22.setOutline('lightgrey')
brick22.draw(win)
brick23 = Rectangle(Point(500,30),Point(549,39))
brick23.setFill('green')
brick23.setOutline('lightgrey')
brick23.draw(win)
brick24 = Rectangle(Point(550,30),Point(599,39))
brick24.setFill('yellow')
brick24.setOutline('lightgrey')
brick24.draw(win)
brick25 = Rectangle(Point(0,20),Point(49,29))
brick25.setFill('purple')
brick25.setOutline('lightgrey')
brick25.draw(win)
brick26 = Rectangle(Point(50,20),Point(99,29))
brick26.setFill('red')
brick26.setOutline('lightgrey')
brick26.draw(win)
brick27 = Rectangle(Point(100,20),Point(149,29))
brick27.setFill('blue')
brick27.setOutline('lightgrey')
brick27.draw(win)
brick28 = Rectangle(Point(150,20),Point(199,29))
brick28.setFill('green')
brick28.setOutline('lightgrey')
brick28.draw(win)
brick29 = Rectangle(Point(200,20),Point(249,29))
brick29.setFill('yellow')
brick29.setOutline('lightgrey')
brick29.draw(win)
brick30 = Rectangle(Point(250,20),Point(299,29))
brick30.setFill('purple')
brick30.setOutline('lightgrey')
brick30.draw(win)
brick31 = Rectangle(Point(300,20),Point(349,29))
brick31.setFill('red')
brick31.setOutline('lightgrey')
brick31.draw(win)
brick32 = Rectangle(Point(350,20),Point(399,29))
brick32.setFill('blue')
brick32.setOutline('lightgrey')
brick32.draw(win)
brick33 = Rectangle(Point(400,20),Point(449,29))
brick33.setFill('green')
brick33.setOutline('lightgrey')
brick33.draw(win)
brick34 = Rectangle(Point(450,20),Point(499,29))
brick34.setFill('yellow')
brick34.setOutline('lightgrey')
brick34.draw(win)
brick35 = Rectangle(Point(500,20),Point(549,29))
brick35.setFill('purple')
brick35.setOutline('lightgrey')
brick35.draw(win)
brick36 = Rectangle(Point(550,20),Point(599,29))
brick36.setFill('red')
brick36.setOutline('lightgrey')
brick36.draw(win)
            #bricks: 600/12 = 50 px per column, so each brick is drawn 49 px wide
#bricks 9 will be height of brick
#brickname will be based on 25-36 ************
# 13-24 ************
# 1-12 ************
#brickList is used to undraw the bricks in main program
#if a brick is hit, it is undrawn then a 0 is placed in the list
brickList=[[brick25,brick13,brick1],[brick26,brick14,brick2],
[brick27,brick15,brick3],[brick28,brick16,brick4],
[brick29,brick17,brick5],[brick30,brick18,brick6],
[brick31,brick19,brick7],[brick32,brick20,brick8],
[brick33,brick21,brick9],[brick34,brick22,brick10],
[brick35,brick23,brick11],[brick36,brick24,brick12]]
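            # Equivalent loop-based construction (sketch only, not executed; the explicit
            # bricks above are kept as written):
            #   for row in range(3):              # rows at y = 40, 30, 20
            #       for col in range(12):         # 12 columns, 50 px apart
            #           Rectangle(Point(col*50, 40 - 10*row), Point(col*50 + 49, 49 - 10*row))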
Reset = 'n'
#--------------------Paddle-----------------------------
px = winx/2-30
py = winy-30
paddle = Rectangle(Point(px,py),Point(px+60,py+10))
paddle.setFill('black')
paddle.setOutline('lightgrey')
paddle.draw(win)
#--------------------Ball-------------------------------
bx = winx/2-5
by = winy/4-5
dx = 1
dy = 1
ball = Circle(Point(bx,by),5)
ball.setFill('grey')
ball.setOutline('black')
ball.draw(win)
#--------------------Functions--------------------------
#--------------------Main Loop--------------------------
score = 0
scoreText = Text(Point(58,10),score)
scoreText2 = Text(Point(24,10),'Score:')
scoreText2.draw(win)
game = True #used in code to stop the game
while win.isOpen() == True and game == True:
sleep(.005) # speed or slow the game
#look after the paddle
m = win.checkKey()
if m == 'Escape':
win.close()
else:
try:
x,y = move[m]
except:
pass
paddle.move(x*speed,y*speed)
c=paddle.getCenter()
if c.getX() > (600-30):
paddle.move(-1*(c.getX() - 570),0)
if c.getX() < (0+30):
paddle.move((-1*c.getX()+30),0)
#look after ball movement
bc = ball.getCenter()
if bc.getX() > 595:
dx = dx*-1
ball.move((bc.getX()-595)*-1,0)
if bc.getX() < 5:
dx = dx*-1
ball.move((bc.getX()-5)*-1,0)
if bc.getY() < 5:
dy = dy*-1
ball.move((bc.getY()-5)*-1,0)
ball.move(dx,dy)
#check for ball collisions
if bc.getY() < 50: #bricks collision section
x=int(bc.getX()//50) #convert width of window to x value (column)
y=int(bc.getY()//10) - 2 #convert height of bricks area to y value (row)
if brickList[x][y] != 0 : #check to see if brick has already been undrawn
brickList[x][y].undraw() #undraw brick
brickList[x][y]=0 #replace brick object in list with number 0
score = score+10
scoreText.undraw()
scoreText = Text(Point(58,10),score)
scoreText.draw(win)
dy = dy*-1 #change direction of ball movement
ball.move(dx,dy+2) #move ball with a nudge of 2
                if 365 < bc.getY() <= 395: #paddle collision section
if len(ball.getOverlap())>1:
dy*=-1
ball.move(dx,dy-2)
elif bc.getY()>395: #closes window if ball passes by paddle
Replay = 'n'
win.close()
#--------------------Retry--------------------------
win= GraphWin('Brick Breaker',600,400)
label1 = Rectangle(Point(0,0),Point(300,405))
label1.draw(win)
label2 = Rectangle(Point(300,0),Point(600,405))
label2.draw(win)
label1.setFill('red')
label2.setFill('blue')
gameOver = Text(Point(300,20),'Game has ended. Your score was')
gameOver.setSize(25)
gameOver.draw(win)
finalScore = Text(Point(300,50),score)
finalScore.setSize(23)
finalScore.draw(win)
Retry = Text(Point(300,70),'Would you like to play again?')
Retry.setSize(15)
Retry.draw(win)
yes = Text(Point(150,200),'Yes')
no = Text(Point(450,200),'No')
yes.setSize(20)
no.setSize(20)
yes.draw(win)
no.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<300 and y<400:
Reset = 'y'
win.close()
if x>300 and y<400:
Reset = 'n'
win.close()
if Reset == 'n':
Power = 'Off'
#--------------------Final Message--------------------------
win= GraphWin('Brick Breaker',600,200)
win.setBackground('yellow')
End = Text(Point(300,100),'Thank You For Playing!!! :)')
End.draw(win)
End.setSize(36)
End.setTextColor('blue')
Closing = Text(Point(300,150),'Click anywhere to close')
Closing.draw(win)
coord=win.getMouse()
x=coord.getX()
y=coord.getY()
if x<600 and y<200:
win.close()
|
MrColwell/PythonProfessionalLearning
|
PythonForTeachers/StudentCode/Example6_BrickBreaker.py
|
Example6_BrickBreaker.py
|
py
| 13,772
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11137230909
|
#!/usr/bin/python
''' Advent of code - Day 1: Chronal Calibration - Problem 1.2
https://adventofcode.com/2018/day/1#part2
'''
import os, sys
# Reading file
filename = 'input1.txt'
lines = []
input_file_path = "%s/%s" % (os.path.abspath(os.path.dirname(__file__)), filename)
try:
with open(input_file_path, 'r') as f:
lines = [line.strip() for line in f]
except Exception as e:
sys.exit("Error - while reading input file: %s" % e)
# Computing frequency reached twice
total = 0
reachedTwice = False
frequencyDict = {0: 1} # 0 was already reached
while (not reachedTwice):
for num in lines:
num = int(num)
total += num
if total in frequencyDict:
reachedTwice = total
break
else:
frequencyDict[total] = 1
if reachedTwice:
print("First frequency reached twice is: %d" % reachedTwice)
else:
print("No frequency was reached twice")
|
dvdantunes/adventofcode-2018
|
day-01/day-01-02.py
|
day-01-02.py
|
py
| 940
|
python
|
en
|
code
| 0
|
github-code
|
6
|
906797873
|
import os
from gptwntranslator.helpers.config_helper import Config
from gptwntranslator.helpers.file_helper import write_md_as_epub
from gptwntranslator.helpers.text_helper import write_novel_md, parse_chapters
from gptwntranslator.helpers.ui_helper import print_title, wait_for_user_input
from gptwntranslator.storage.json_storage import JsonStorage
from gptwntranslator.ui.page_base import PageBase
from gptwntranslator.ui.page_message import PageMessage
from gptwntranslator.ui.ui_resources import get_resources
class PageNovelExporting(PageBase):
def __init__(self) -> None:
pass
def render(self, screen, **kwargs) -> tuple[PageBase, dict]:
resources = get_resources()
novel_code = kwargs["novel_url_code"]
targets = kwargs["target"]
novel_origin = kwargs["novel_origin"]
storage = JsonStorage()
config = Config()
target_language = config.data.config.translator.target_language
# Print title
last_y = print_title(screen, resources["title"], 0)
last_y += 2
screen.print_at(f"Exporting targets: {targets}", 2, last_y)
while True:
last_y += 2
try:
message = "(1/3) Parsing chapter targets... "
screen.print_at(message, 2, last_y)
screen.refresh()
targets = parse_chapters(targets)
screen.print_at("success.", 2 + len(message), last_y)
screen.refresh()
last_y += 1
except Exception as e:
screen.print_at("failed.", 2 + len(message), last_y)
last_y += 1
messages = [
f"Error: Chapter targets parsing failed.",
f"Error: {e}"]
target = PageMessage
params = {"messages": messages, "return_page": self.args["return_page"], "return_kwargs": self.args["return_kwargs"]}
break
try:
message = "(2/3) Loading local storage... "
screen.print_at(message, 2, last_y)
screen.refresh()
novels = storage.get_data()
novel = [novel for novel in novels if novel.novel_code == novel_code and novel.novel_origin == novel_origin][0]
screen.print_at("success.", 2 + len(message), last_y)
screen.refresh()
last_y += 1
except Exception as e:
screen.print_at("failed.", 2 + len(message), last_y)
last_y += 1
messages = [
f"Error: Error loading local storage.",
f"Error: {e}"]
target = PageMessage
params = {"messages": messages, "return_page": self.args["return_page"], "return_kwargs": self.args["return_kwargs"]}
break
try:
message = "(3/3) Exporting novel to epub... "
screen.print_at(message, 2, last_y)
screen.refresh()
md_text = write_novel_md(novel, targets)
output = os.path.join(config.vars["output_path"], f"{novel_origin}-{novel.novel_code}-{target_language}.epub")
write_md_as_epub(md_text, output)
screen.print_at("success.", 2 + len(message), last_y)
screen.refresh()
last_y += 1
target, params = self.args["return_page"], self.args["return_kwargs"]
except Exception as e:
screen.print_at("failed.", 2 + len(message), last_y)
last_y += 1
messages = [
f"Error: Error exporting novel to epub.",
f"Error: {e}"]
target = PageMessage
params = {"messages": messages, "return_page": self.args["return_page"], "return_kwargs": self.args["return_kwargs"]}
break
last_y += 1
screen.refresh()
wait_for_user_input(screen, 2, last_y)
return target, params
|
combobulativedesigns/gptwntranslator
|
src/gptwntranslator/ui/page_novel_exporting.py
|
page_novel_exporting.py
|
py
| 4,092
|
python
|
en
|
code
| 18
|
github-code
|
6
|
41559253356
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.common.action_chains import ActionChains as ac
from selenium.common.exceptions import NoSuchElementException
import pandas as pd
import time
# Log out of Seamless
def logout(driver):
driver.find_element_by_xpath('/html/body/div/div/div/div/div[2]/div[2]/span/button').click()
driver.find_element_by_xpath('/html/body/div/div/div/div/div[2]/div[2]/ul/li[6]/a').click()
driver.close()
# Delete the scraped companies
def delete_companies(driver):
wait(driver, 20).until(EC.element_to_be_clickable((By.XPATH,
'/html/body/div/div/div/div[2]/div/div[2]/table/thead/tr/th/div/span/div/div/div/label/span'))).click()
time.sleep(1)
wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/span/button'))).click()
time.sleep(1)
wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div[2]/div[2]/div[2]/ul/li/a'))).click()
time.sleep(1)
wait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div/input'))).send_keys(
'delete')
time.sleep(1)
wait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, '/html/body/div[7]/div[2]/div/div/div[3]/button[2]'))).send_keys(
Keys.ENTER)
time.sleep(5)
def scrap(keyword, start_page, end_page, columns, file_name):
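    # Log in to seamless.ai, scrape the company search results for `keyword` from
    # start_page to end_page, open each scraped company's detail view to collect its
    # fields, and write the selected `columns` to the Excel file `file_name`.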
driver = webdriver.Chrome('chromedriver.exe')
driver.get('https://login.seamless.ai/login')
driver.set_window_size(1500, 1000)
time.sleep(1)
driver.find_element_by_name('username').send_keys('********')
driver.find_element_by_name('password').send_keys('********')
driver.find_element_by_css_selector('form > button').click()
time.sleep(3)
try:
driver.get('https://login.seamless.ai/search/companies?page=' + str(
start_page) + '&locations=1&companiesExactMatch=false&companyKeywords=' + keyword)
driver.execute_script("location.reload(true);")
time.sleep(1)
driver.find_element_by_css_selector('button > svg').click()
for p1 in range(start_page, end_page + 1):
driver.find_element_by_css_selector('body').send_keys(Keys.HOME)
time.sleep(5)
            # Scrape the entire page in one go
wait(driver, 60).until(EC.element_to_be_clickable((By.XPATH,
'/html/body/div/div/div/div[2]/div/div[2]/div[2]/table/thead/tr/th/div/span/div/div/div/label/span'))).click()
time.sleep(1)
wait(driver, 60).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div[2]/div/div[2]/button'))).click()
if p1 < end_page:
driver.find_element_by_css_selector('body').send_keys(Keys.END)
time.sleep(20)
wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div[2]/div[2]/div/button[2]'))).send_keys(
Keys.ENTER)
else:
time.sleep(10)
        # Fetch the details of the scraped companies
        all_data = list()  # list that will hold the collected data
wait(driver, 10).until(
EC.element_to_be_clickable(
                    (By.XPATH, '/html/body/div/div/div/div/div/div[3]/a'))).click()  # go to the list of scraped companies
time.sleep(5)
item_info = wait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, '/html/body/div/div/div/div[2]/div/div[2]/div/div/div[2]'))).text
item_len = int(item_info.split()[-1])
pages = (item_len - 1) // 15 + 1
for p2 in range(pages):
if p2 < pages - 1:
items = 15
else:
items = item_len % 15 if item_len % 15 else 15
for i in range(items):
company_data = list()
wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,
'/html/body/div/div/div/div[2]/div/div[2]/table/tbody/tr[' + str(
i + 1) + ']/td[2]/div/div/button'))).send_keys(
Keys.ENTER)
time.sleep(5)
wait(driver, 60).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div/div[2]/button'))).send_keys(Keys.ENTER)
time.sleep(5)
wait(driver, 30).until(EC.element_to_be_clickable((By.XPATH,
'/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/span/span/span[4]/span/span/a'))).send_keys(
Keys.ENTER)
name = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]'))).text
desc = wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,
'/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[2]/div[2]/div/span/span'))).text
website = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[3]/div[2]'))).text
industry = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[4]/div[2]'))).text
size = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[5]/div[2]'))).text
founded = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[6]/div[2]'))).text
company_type = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[7]/div[2]'))).text
revenue = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[8]/div[2]'))).text
location = wait(driver, 10).until(EC.element_to_be_clickable(
(By.XPATH, '/html/body/div[7]/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div[9]/div[2]'))).text
company_data.extend([name, desc, website, industry, size, founded, company_type, revenue, location])
all_data.append(company_data)
time.sleep(20)
driver.find_element_by_css_selector('body').send_keys(Keys.ESCAPE)
if i % 5 == 4:
time.sleep(1)
driver.find_element_by_css_selector('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
time.sleep(1)
driver.find_element_by_css_selector('body').send_keys(Keys.HOME)
time.sleep(2)
delete_companies(driver)
except Exception:
time.sleep(1)
driver.find_element_by_css_selector('body').send_keys(Keys.ESCAPE)
time.sleep(1)
wait(driver, 10).until(
EC.element_to_be_clickable(
                (By.XPATH, '/html/body/div/div/div/div/div/div[3]/a'))).click()  # go to the list of scraped companies
time.sleep(1)
ac(driver).move_by_offset(0, 500).click().perform()
time.sleep(5)
try:
item_info = wait(driver, 30).until(EC.element_to_be_clickable((
By.CSS_SELECTOR, 'div.RecordCount__RecordCountContainer-jdtFHI'))).text
item_len = int(item_info.split()[-1])
pages = (item_len - 1) // 15 + 1
for p in range(pages):
delete_companies(driver)
except NoSuchElementException:
pass
else:
all_columns = [
'Company Name', 'Description', 'Website', 'Industry', 'Company Size', 'Founded', 'Company Type',
'Revenue',
'Location'
]
all_data.reverse()
all_data = pd.DataFrame(all_data, columns=all_columns, index=list(range(1, item_len + 1)))[columns]
all_data.to_excel(file_name, encoding='utf-8-sig')
finally:
logout(driver)
|
cermen/SecondCompanyScraping
|
scrap.py
|
scrap.py
|
py
| 8,973
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4501146166
|
import asyncio
import contextlib
import types
import unittest
import pytest
from lsst.ts import salobj, watcher
from lsst.ts.idl.enums.Watcher import AlarmSeverity
# Timeout for normal operations (seconds)
STD_TIMEOUT = 5
class GetRuleClassTestCase(unittest.TestCase):
"""Test `lsst.ts.watcher.get_rule_class`."""
def test_good_names(self):
for classname, desired_class in (
("Enabled", watcher.rules.Enabled),
("test.NoConfig", watcher.rules.test.NoConfig),
("test.ConfiguredSeverities", watcher.rules.test.ConfiguredSeverities),
):
rule_class = watcher.get_rule_class(classname)
assert rule_class == desired_class
def test_bad_names(self):
for bad_name in (
"NoSuchRule", # no such rule
"test.NoSuchRule", # no such rule
"test.Enabled", # wrong module
"NoConfig", # wrong module
"test_NoConfig", # wrong separator
):
with pytest.raises(ValueError):
watcher.get_rule_class(bad_name)
class ModelTestCase(unittest.IsolatedAsyncioTestCase):
def setUp(self):
salobj.set_random_lsst_dds_partition_prefix()
@contextlib.asynccontextmanager
async def make_model(self, names, enable, escalation=(), use_bad_callback=False):
"""Make a Model as self.model, with one or more Enabled rules.
Parameters
----------
names : `list` [`str`]
Name and index of one or more CSCs.
Each entry is of the form "name" or name:index".
The associated alarm names have a prefix of "Enabled.".
enable : `bool`
Enable the model?
escalation : `list` of `dict`, optional
Escalation information.
See `CONFIG_SCHEMA` for the format of entries.
use_bad_callback : `bool`
If True then specify an invalid callback function:
one that is synchronous. This should raise TypeError.
"""
if not names:
raise ValueError("Must specify one or more CSCs")
self.name_index_list = [salobj.name_to_name_index(name) for name in names]
configs = [dict(name=name_index) for name_index in names]
watcher_config_dict = dict(
disabled_sal_components=[],
auto_acknowledge_delay=3600,
auto_unacknowledge_delay=3600,
rules=[dict(classname="Enabled", configs=configs)],
escalation=escalation,
)
watcher_config = types.SimpleNamespace(**watcher_config_dict)
self.read_severities = dict()
self.read_max_severities = dict()
self.controllers = []
for name_index in names:
name, index = salobj.name_to_name_index(name_index)
self.controllers.append(salobj.Controller(name=name, index=index))
if use_bad_callback:
def bad_callback():
pass
alarm_callback = bad_callback
else:
alarm_callback = self.alarm_callback
self.model = watcher.Model(
domain=self.controllers[0].domain,
config=watcher_config,
alarm_callback=alarm_callback,
)
for name, rule in self.model.rules.items():
rule.alarm.init_severity_queue()
self.read_severities[name] = []
self.read_max_severities[name] = []
controller_start_tasks = [
controller.start_task for controller in self.controllers
]
await asyncio.gather(self.model.start_task, *controller_start_tasks)
if enable:
await self.model.enable()
for rule in self.model.rules.values():
assert rule.alarm.nominal
assert not rule.alarm.acknowledged
assert not rule.alarm.muted
self.assert_not_muted(rule.alarm)
try:
yield
finally:
await self.model.close()
controller_close_tasks = [
asyncio.create_task(controller.close())
for controller in self.controllers
]
await asyncio.gather(*controller_close_tasks)
async def alarm_callback(self, alarm):
"""Callback function for each alarm.
Updates self.read_severities and self.read_max_severities,
dicts of alarm_name: list of severity/max_severity.
"""
self.read_severities[alarm.name].append(alarm.severity)
self.read_max_severities[alarm.name].append(alarm.max_severity)
# Print the state to aid debugging test failures.
print(
f"alarm_callback({alarm.name}, severity={alarm.severity!r}): "
f"read_severities={self.read_severities[alarm.name]}"
)
async def write_states(self, index, states):
"""Write a sequence of summary states to a specified controller."""
controller = self.controllers[index]
controller_name_index = f"{controller.salinfo.name}:{controller.salinfo.index}"
rule_name = f"Enabled.{controller_name_index}"
rule = self.model.rules[rule_name]
previous_state = None
for state in states:
await controller.evt_summaryState.set_write(
summaryState=state, force_output=True
)
if self.model.enabled and previous_state != state:
await asyncio.wait_for(
rule.alarm.severity_queue.get(), timeout=STD_TIMEOUT
)
assert rule.alarm.severity_queue.empty()
elif self.model.enabled:
                # State didn't change, so no new severity event should be received
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(
rule.alarm.severity_queue.get(), timeout=STD_TIMEOUT
)
assert rule.alarm.severity_queue.empty()
else:
# We don't have any event we can wait for, so sleep a bit
# to give the model time to react to the data.
await asyncio.sleep(0.1)
previous_state = state
def assert_muted(self, alarm, muted_severity, muted_by):
"""Assert that the specified alarm is muted.
Parameters
----------
alarm : `lsst.ts.watcher.Alarm`
Alarm to test.
muted_severity : `lsst.ts.idl.enums.Watcher.AlarmSeverity`
Expected value for rule.severity.
muted_by : `str`
Expected value for rule.muted_by.
"""
assert alarm.muted
assert alarm.muted_severity == muted_severity
assert alarm.muted_by == muted_by
def assert_not_muted(self, alarm):
"""Assert that the specified alarm is not muted.
Parameters
----------
alarm : `lsst.ts.watcher.Alarm`
Alarm to test.
"""
assert not alarm.muted
assert alarm.muted_severity == AlarmSeverity.NONE
assert alarm.muted_by == ""
async def test_constructor_bad_callback(self):
remote_names = ["ScriptQueue:5", "Test:7"]
with pytest.raises(TypeError):
async with self.make_model(
names=remote_names, enable=False, use_bad_callback=True
):
pass
async def test_acknowledge_full_name(self):
user = "test_ack_alarm"
remote_names = ["ScriptQueue:5", "Test:7"]
nrules = len(remote_names)
async with self.make_model(names=remote_names, enable=True):
full_rule_name = f"Enabled.{remote_names[0]}"
assert full_rule_name in self.model.rules
# Send STANDBY to all controllers to put all alarms into warning.
for index in range(nrules):
await self.write_states(index=index, states=[salobj.State.STANDBY])
for name, rule in self.model.rules.items():
assert not rule.alarm.nominal
assert rule.alarm.severity == AlarmSeverity.WARNING
assert rule.alarm.max_severity == AlarmSeverity.WARNING
# Acknowledge one rule by full name but not the other.
await self.model.acknowledge_alarm(
name=full_rule_name, severity=AlarmSeverity.WARNING, user=user
)
for name, rule in self.model.rules.items():
if name == full_rule_name:
assert rule.alarm.acknowledged
assert rule.alarm.acknowledged_by == user
else:
assert not rule.alarm.acknowledged
assert rule.alarm.acknowledged_by == ""
async def test_acknowledge_regex(self):
user = "test_ack_alarm"
remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:62"]
nrules = len(remote_names)
async with self.make_model(names=remote_names, enable=True):
assert len(self.model.rules) == nrules
# Send STANDBY to all controllers to put all alarms into warning.
for index in range(nrules):
await self.write_states(index=index, states=[salobj.State.STANDBY])
for rule in self.model.rules.values():
assert not rule.alarm.nominal
assert rule.alarm.severity == AlarmSeverity.WARNING
assert rule.alarm.max_severity == AlarmSeverity.WARNING
# Acknowledge the ScriptQueue alarms but not Test.
await self.model.acknowledge_alarm(
name="Enabled.ScriptQueue:*", severity=AlarmSeverity.WARNING, user=user
)
for name, rule in self.model.rules.items():
if "ScriptQueue" in name:
assert rule.alarm.acknowledged
assert rule.alarm.acknowledged_by == user
else:
assert not rule.alarm.acknowledged
assert rule.alarm.acknowledged_by == ""
async def test_enable(self):
remote_names = ["ScriptQueue:5", "Test:7"]
async with self.make_model(names=remote_names, enable=True):
assert len(self.model.rules) == 2
# Enable the model and write ENABLED several times.
# This triggers the rule callback but that does not
# change the state of the alarm.
await self.model.enable()
for index in range(len(remote_names)):
await self.write_states(
index=index,
states=(
salobj.State.ENABLED,
salobj.State.ENABLED,
salobj.State.ENABLED,
),
)
for name, rule in self.model.rules.items():
assert rule.alarm.nominal
assert self.read_severities[name] == [AlarmSeverity.NONE]
assert self.read_max_severities[name] == [AlarmSeverity.NONE]
# Disable the model and issue several events that would
# trigger an alarm if the model was enabled. Since the
# model is disabled the alarm does not change states.
self.model.disable()
for index in range(len(remote_names)):
await self.write_states(
index=index, states=(salobj.State.FAULT, salobj.State.STANDBY)
)
for name, rule in self.model.rules.items():
assert rule.alarm.nominal
assert self.read_severities[name] == [AlarmSeverity.NONE]
assert self.read_max_severities[name] == [AlarmSeverity.NONE]
# Enable the model. This will trigger a callback with
# the current state of the event (STANDBY).
            # Note that the earlier FAULT event is ignored
# because it arrived while disabled.
await self.model.enable()
for name, rule in self.model.rules.items():
await rule.alarm.assert_next_severity(AlarmSeverity.WARNING)
assert not rule.alarm.nominal
assert rule.alarm.severity == AlarmSeverity.WARNING
assert rule.alarm.max_severity == AlarmSeverity.WARNING
assert self.read_severities[name] == [
AlarmSeverity.NONE,
AlarmSeverity.WARNING,
]
assert self.read_max_severities[name] == [
AlarmSeverity.NONE,
AlarmSeverity.WARNING,
]
# Issue more events; they should be processed normally.
for index in range(len(remote_names)):
await self.write_states(
index=index, states=(salobj.State.FAULT, salobj.State.STANDBY)
)
for name, rule in self.model.rules.items():
assert not rule.alarm.nominal
assert rule.alarm.severity == AlarmSeverity.WARNING
assert rule.alarm.max_severity == AlarmSeverity.CRITICAL
assert self.read_severities[name] == [
AlarmSeverity.NONE,
AlarmSeverity.WARNING,
AlarmSeverity.CRITICAL,
AlarmSeverity.WARNING,
]
assert self.read_max_severities[name] == [
AlarmSeverity.NONE,
AlarmSeverity.WARNING,
AlarmSeverity.CRITICAL,
AlarmSeverity.CRITICAL,
]
async def test_escalation(self):
remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:1", "Test:2", "Test:52"]
# Escalation info for the first two rules;
# check that case does not have to match.
esc_info12 = dict(
alarms=["enabled.scriptqueue:*"],
responder="chaos",
delay=0.11,
)
# Escalation info for the next two rules
esc_info34 = dict(
alarms=["Enabled.Test:?"],
responder="stella",
delay=0.12,
)
# Escalation info that does not match any alarm names
esc_notused = dict(
alarms=["Enabled.NoMatch"],
responder="someone",
delay=0.13,
)
async with self.make_model(
names=remote_names,
enable=False,
escalation=[esc_info12, esc_info34, esc_notused],
):
alarms = [rule.alarm for rule in self.model.rules.values()]
assert len(alarms) == len(remote_names)
for alarm in alarms[0:2]:
assert alarm.escalation_responder == esc_info12["responder"]
assert alarm.escalation_delay == esc_info12["delay"]
for alarm in alarms[2:4]:
assert alarm.escalation_responder == esc_info34["responder"]
assert alarm.escalation_delay == esc_info34["delay"]
for alarm in alarms[4:]:
assert alarm.escalation_responder == ""
assert alarm.escalation_delay == 0
for alarm in alarms:
assert alarm.timestamp_escalate == 0
async def test_get_rules(self):
remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:1", "Test:2", "Test:52"]
async with self.make_model(names=remote_names, enable=False):
rules = self.model.get_rules("NoSuchName")
assert len(list(rules)) == 0
# Search starts at beginning, so Enabled.foo works
# but foo does not.
rules = self.model.get_rules("ScriptQueue")
assert len(list(rules)) == 0
rules = self.model.get_rules(".*")
assert len(list(rules)) == len(remote_names)
rules = self.model.get_rules("Enabled")
assert len(list(rules)) == len(remote_names)
rules = self.model.get_rules("Enabled.ScriptQueue")
assert len(list(rules)) == 2
rules = self.model.get_rules("Enabled.Test")
assert len(list(rules)) == 3
async def test_mute_full_name(self):
"""Test mute and unmute by full alarm name."""
user = "test_mute_alarm"
remote_names = ["ScriptQueue:5", "Test:7"]
async with self.make_model(names=remote_names, enable=True):
full_rule_name = f"Enabled.{remote_names[0]}"
assert full_rule_name in self.model.rules
# Mute one rule by full name.
await self.model.mute_alarm(
name=full_rule_name,
duration=5,
severity=AlarmSeverity.WARNING,
user=user,
)
for name, rule in self.model.rules.items():
if name == full_rule_name:
self.assert_muted(
rule.alarm, muted_severity=AlarmSeverity.WARNING, muted_by=user
)
else:
self.assert_not_muted(rule.alarm)
            # Unmute one rule by full name.
await self.model.unmute_alarm(name=full_rule_name)
for rule in self.model.rules.values():
self.assert_not_muted(rule.alarm)
async def test_mute_regex(self):
"""Test mute and unmute by regex."""
user = "test_mute_alarm"
remote_names = ["ScriptQueue:1", "ScriptQueue:2", "Test:62"]
nrules = len(remote_names)
async with self.make_model(names=remote_names, enable=True):
assert len(self.model.rules) == nrules
# Mute the ScriptQueue alarms but not Test.
await self.model.mute_alarm(
name="Enabled.ScriptQueue.*",
duration=5,
severity=AlarmSeverity.WARNING,
user=user,
)
for name, rule in self.model.rules.items():
if "ScriptQueue" in name:
self.assert_muted(
rule.alarm, muted_severity=AlarmSeverity.WARNING, muted_by=user
)
else:
self.assert_not_muted(rule.alarm)
# Unmute the ScriptQueue alarms but not Test.
await self.model.unmute_alarm(name="Enabled.ScriptQueue.*")
for rule in self.model.rules.values():
self.assert_not_muted(rule.alarm)
|
lsst-ts/ts_watcher
|
tests/test_model.py
|
test_model.py
|
py
| 18,435
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7323087980
|
from __future__ import division
from collections import deque
class BiGramDict(object):
def __init__(self):
self.count = dict()
self.dictionary = dict()
def put(self, key, value):
if key not in self.dictionary:
self.count[key] = 0
self.dictionary[key] = dict()
if value not in self.dictionary[key]:
self.dictionary[key][value] = 0
self.dictionary[key][value] += 1
self.count[key] += 1
def normalize(self):
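        # Turn the raw co-occurrence counts into conditional probabilities
        # P(value | key) by dividing each count by the key's total.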
for key_dict in self.dictionary.iterkeys():
for value_dict in self.dictionary[key_dict].iterkeys():
self.dictionary[key_dict][value_dict] /= self.count[key_dict]
def get(self, key, value):
if key in self.dictionary and value in self.dictionary[key]:
return self.dictionary[key][value]
return False
class TriGramDict(object):
def __init__(self):
        # No separate count needed here; the BiGramDict stored for each first gram keeps its own tally
self.dictionary = dict()
def put(self, first_gram, second_gram, third_gram):
if first_gram not in self.dictionary:
self.dictionary[first_gram] = BiGramDict()
self.dictionary[first_gram].put(second_gram, third_gram)
def normalize(self):
for gram in self.dictionary.iterkeys():
self.dictionary[gram].normalize()
def get(self, first_gram, second_gram, third_gram):
if first_gram not in self.dictionary:
return False
return self.dictionary[first_gram].get(second_gram, third_gram)
class HMM(object):
def __init__(self):
self.states = set()
self.trigramDictionary = TriGramDict()
self.bigramStateDictionary = BiGramDict()
self.emissionDictionary = BiGramDict()
self.initDictionary = BiGramDict()
pass
def trainHMM(self, filename):
prevPosTag = None
prevPosTags = [None] * 2
with open(filename, "r") as train_file:
for line in train_file:
line = line.strip()
if line == "###/###":
prevPosTags[1] = None
continue
word, posTag = line.split('/')
posTag = posTag.replace('\n', '')
self.emissionDictionary.put(posTag, word)
previous_BiGram_nexists = prevPosTags[1] == None
if previous_BiGram_nexists:
self.initDictionary.put('init', posTag)
prevPosTags[1] = posTag
continue
self.bigramStateDictionary.put(prevPosTags[1], posTag)
prevPosTags[0], prevPosTags[1] = prevPosTags[1], posTag
self.trigramDictionary.put(prevPosTags[0], prevPosTags[1], posTag)
self.emissionDictionary.normalize()
self.bigramStateDictionary.normalize()
self.initDictionary.normalize()
self.states.update(self.bigramStateDictionary.dictionary.keys())
def __initial_probability__(self, state):
return self.initDictionary.get('init', state) or 0
def __emission_probability__(self, state, observation):
return self.emissionDictionary.get(state, observation) or 0.0000015
def __transition_probability__(self, prevState, nextState):
return self.bigramStateDictionary.get(prevState, nextState) or 0.0000015
def testHMM(self, filename):
with open(filename, "r") as test_file:
actual_count = 0
successful_count = 0
true_states = []
obs = []
for line in test_file:
line = line.strip()
if line != '###/###':
word, tag = line.split('/')
true_states.append(tag)
obs.append(word)
continue
if len(true_states) == 0:
continue
(probability, pred_states) = self.__predict_tagsets__(obs)
for i in xrange(len(true_states)):
if true_states[i] == pred_states[i]:
successful_count += 1
actual_count += 1
#print pred_states
#print true_states
print (actual_count - successful_count)/actual_count * 100
obs = []
true_states = []
#raw_input()
def __predict_tagsets__(self, observations):
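        # Viterbi decoding over the bigram HMM:
        #   viterbi[t][s] = max over s' of viterbi[t-1][s'] * P(s | s') * P(obs[t] | s)
        # path[s] keeps the best tag sequence ending in state s at the current step.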
viterbi = [{}]
path = {}
for s0 in self.states:
viterbi[0][s0] = self.__initial_probability__(s0) * self.__emission_probability__(s0, observations[0])
path[s0] = [s0]
for t in range(1, len(observations)):
viterbi.append({})
newpath = {}
for sj in self.states:
(prob, state) = max((viterbi[t-1][si] * self.__transition_probability__(si, sj) * self.__emission_probability__(sj, observations[t]), si) for si in self.states)
viterbi[t][sj] = prob
newpath[sj] = path[state] + [sj]
path = newpath
n = 0
if len(observations) != 1:
n = len(observations) - 1
(prob, state) = max((viterbi[n][y], y) for y in self.states)
return (prob, path[state])
if __name__ == '__main__':
hmm = HMM()
hmm.trainHMM("entrain.txt")
hmm.testHMM("entest.txt")
|
vigneshwerv/HMM
|
hmm.py
|
hmm.py
|
py
| 4,436
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38831039266
|
# Module 5
# Programming Assignment 6
# Prob-2.py
# Esther Pisano
from graphics import *
def main():
# creating a label "win" for the graph that we wish to draw in.
# titled it "Squares"
win = GraphWin("Squares", 200, 200)
# created first rectangle
shape = Rectangle(Point(50, 50), Point(20, 20))
# colored the shape
shape.setOutline("red")
shape.setFill("red")
# executed the drawing function
shape.draw(win)
# made a loop to make more rectangles.
# This will loop 5 times
for i in range(5):
p = win.getMouse()
c = shape.getCenter()
dx = p.getX() - c.getX()
dy = p.getY() - c.getY()
# code below will add the new coordinate to the first coordinate (50,50)
# it will then create a new rectangle where the new float number is (which is where the mouse was clicked)
new_shape = Rectangle(Point(50+dx, 50+dy), Point(20+dx, 20+dy))
new_shape.setOutline("red")
new_shape.setFill("red")
new_shape.draw(win)
# create a label to show window is ending
label = Text(Point(100, 120), "Click once more to quit")
label.draw(win)
win.getMouse()
win.close()
main()
|
CTEC-121-Spring-2020/mod-4-programming-assignment-EPisano526
|
Prob-2/Prob-2.py
|
Prob-2.py
|
py
| 1,215
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2544504801
|
import cv2
import numpy as np
###Color detection
def empty(a):
pass
def stackImages(scale,imgArray):
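    # Resize every image to match the first one, convert any grayscale image to BGR so
    # the shapes agree, then hstack each row and vstack the rows into a single display
    # image (handles both a flat list of images and a list of rows).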
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range ( 0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor= np.hstack(imgArray)
ver = hor
return ver
cv2.namedWindow("trackBars")
cv2.resizeWindow("trackBars",640,240)
#name of window, window the trackbar is added to, initial value, max value, callback executed when the trackbar value changes
#Hue is normally 0-359 degrees, but OpenCV scales it down to 0-179, so the max value here is 179
cv2.createTrackbar("Hue Min","trackBars",0,179,empty)
cv2.createTrackbar("Hue Max","trackBars",13,179,empty)
cv2.createTrackbar("Sat Min","trackBars",24,255,empty)
cv2.createTrackbar("Sat Max","trackBars",250,255,empty)
cv2.createTrackbar("Value Min","trackBars",119,255,empty)
cv2.createTrackbar("Value Max","trackBars",255,255,empty)
while True:
img = cv2.imread("lambo.png")
imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#trackbar name, window name is which it belongs
h_min = cv2.getTrackbarPos("Hue Min","trackBars")
h_max = cv2.getTrackbarPos("Hue Max","trackBars")
s_min = cv2.getTrackbarPos("Sat Min","trackBars")
s_max = cv2.getTrackbarPos("Sat Max","trackBars")
v_min = cv2.getTrackbarPos("Value Min","trackBars")
v_max = cv2.getTrackbarPos("Value Max","trackBars")
print(h_min,h_max,s_min,s_max,v_min,v_max)
lower = np.array([h_min,s_min,v_min])
upper = np.array([h_max,s_max,v_max])
#creating a mask
mask = cv2.inRange(imgHSV,lower,upper)
#cv2.imshow("lambo ",img)
#cv2.imshow("lamboHSV ",imgHSV)
#cv2.imshow("mask ", mask)
    # pixels outside the mask stay black, so anything you do not want is suppressed
    # bitwise_and combines the two images into a new one: a pixel is kept only where it is
    # present in both inputs and allowed by the mask, otherwise the output pixel is zero
    # cv2.bitwise_and(img, img, mask=mask) -> first source image, second source image, mask
imgResult = cv2.bitwise_and(img,img,mask=mask)
#cv2.imshow("Result masked image ", imgResult)
imgStack = stackImages(0.6,([img,imgHSV],[mask,imgResult]))
cv2.imshow("Stack Images",imgStack)
cv2.waitKey(1)
|
monsterpit/openCVDemo
|
Resources/chapter7.py
|
chapter7.py
|
py
| 3,458
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19399743449
|
from typing import List
import collections
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
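        # BFS over partial combinations: seed the queue with every single number, then
        # extend each prefix with strictly larger numbers. Once the front of the queue
        # reaches length k, every remaining entry is already a full combination.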
q = collections.deque()
for i in range(1, n + 1):
q.append([i])
while q:
e = q.popleft()
if len(e) == k:
q.appendleft(e)
break
else:
for i in range(e[-1] + 1, n + 1):
a = e[:]
a.append(i)
q.append(a)
return list(q)
n = 3
k = 3
r = Solution().combine(n, k)
print(r)
|
Yigang0622/LeetCode
|
combine.py
|
combine.py
|
py
| 576
|
python
|
en
|
code
| 1
|
github-code
|
6
|
20521842050
|
import time
from telapi import tas_api
from database import db
api_caller = tas_api(db.phone1.accountId, db.phone1.mailboxId)
api_callee = tas_api(db.phone2.accountId, db.phone2.mailboxId)
session_id, caller_party_id = api_caller.callout(db.phone1.deviceId, db.phone2.number)
callee_party_id = api_caller.get_party_by_number(session_id, 2)
if callee_party_id is None:
print("Impossible to obtain callee party ID.")
exit(-1)
if api_callee.get_amount_devices() > 1:
time.sleep(5)
api_callee.answer(session_id, callee_party_id, db.phone2.deviceId)
time.sleep(2)
api_caller.transfer_to_park_orbit(caller_party_id, session_id, db.parkOrbitId)
print(api_caller.get_session(session_id))
time.sleep(2)
body = api_caller.pickup(callee_party_id, session_id, db.phone1.deviceId)
print(body)
|
annoviko/sandbox
|
applications/telapi/scenario_pickup_outgoing.py
|
scenario_pickup_outgoing.py
|
py
| 829
|
python
|
en
|
code
| 5
|
github-code
|
6
|
6017725196
|
import re
re.findall(r'(\w+)=(\d+)', 'set width=20 and height=10')
# findall: return all matches, or [] if there is none: [('width', '20'), ('height', '10')]
def dashreplace(matchobj):
    """Plain replacement callback."""
if matchobj.group(0) == '-':
return ' '
else:
return ''
re.sub('-{1,4}', dashreplace, 'pro----gram-files')
# program files
def dashreplace2(matchobj):
    """Replacement callback using named groups."""
# print(matchobj.groupdict()) # {'first_name': 'Malcolm', 'last_name': 'Reynolds'}
first_name = matchobj.group('first_name')
last_name = matchobj.group('last_name')
return f"{first_name}-{last_name}"
re.sub(r'(?P<first_name>\w+) (?P<last_name>\w+)',
dashreplace2, "Malcolm Reynolds")
# search: look for a match anywhere in the string, returning the first one, or None if there is none
res = re.search(r"\w+", "abc def gh abc")
# <re.Match object; span=(0, 3), match='abc'>
# match: check for a match only at the beginning of the string
m = re.match(r"(\d+)\.(\d+)", "24.1632")
# m.groups() => ('24', '1632')
m = re.match(r"(?P<first_name>\w+) (?P<last_name>\w+)", "Malcolm Reynolds")
# m.groupdict() # {'first_name': 'Malcolm', 'last_name': 'Reynolds'}
# fullmatch: the entire string must match the pattern
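# Illustrative example (not in the original snippet): fullmatch succeeds only when the
# whole string matches the pattern
re.fullmatch(r'\d+', '1632')     # <re.Match object; span=(0, 4), match='1632'>
re.fullmatch(r'\d+', '24.1632')  # None, because of the '.'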
|
Yuelioi/Program-Learning
|
Python/Basic/标准库/05.文本处理服务/_re.py
|
_re.py
|
py
| 1,135
|
python
|
en
|
code
| 0
|
github-code
|
6
|
69986222269
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
class CustomUser(AbstractUser):
phone = models.CharField(max_length=13, blank=True, null=True)
bonus_coin = models.IntegerField(default=0)
class NameIt(models.Model):
name = models.CharField(max_length=255)
class Meta:
abstract = True
def __str__(self):
return self.name
class Category(NameIt):
pass
class Product(NameIt):
price = models.IntegerField(null=False)
category = models.ForeignKey(Category, on_delete=models.CASCADE, null=False)
compound = models.TextField(null=True)
description = models.TextField(null=True)
class ProductImage(models.Model):
image = models.ImageField(upload_to='images', verbose_name='Изображение_товара')
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
is_main = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
def __str__(self):
return "%s" % self.id
class Reviews(models.Model):
body = models.TextField()
publish_date = models.DateTimeField(blank=True, null=True)
is_published = models.BooleanField(default=False)
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
def __str__(self):
return self.body
# Create your models here.
| repo_name: Pdnky/MySite | sub_path: FoodDelivery/core/models.py | file_name: models.py | file_ext: py | file_size_in_byte: 1,427 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 11120994067
import logging
import typing as tp
from collections import deque
from librarius.domain.messages import (
AbstractMessage,
AbstractEvent,
AbstractCommand,
AbstractQuery,
)
from librarius.service.uow import AbstractUnitOfWork
from librarius.domain.exceptions import SkipMessage
logger = logging.getLogger(__name__)
class MessageBus:
def __init__(
self,
uow: AbstractUnitOfWork,
event_handlers: dict[tp.Type[AbstractEvent], list[tp.Callable]],
command_handlers: dict[tp.Type[AbstractCommand], tp.Callable],
query_handlers: dict[tp.Type[AbstractQuery], tp.Callable],
):
self.queue: deque[AbstractMessage] = deque()
self.uow = uow
self.event_handlers = event_handlers
self.command_handlers = command_handlers
self.query_handlers = query_handlers
def handle(self, message: AbstractMessage):
self.queue.append(message)
try:
while self.queue:
message = self.queue.popleft()
if isinstance(message, AbstractEvent):
self.handle_event(message)
elif isinstance(message, AbstractCommand):
self.handle_command(message)
elif isinstance(message, AbstractQuery):
return self.handle_query(message)
else:
raise Exception(f"{message} was not an Event, Command or Query")
except SkipMessage as error:
logger.warning(f"Skipping message {message.uuid} because {error.reason}")
def handle_event(self, event: AbstractEvent) -> None:
for handler in self.event_handlers[type(event)]:
try:
logger.debug(f"Handling event {event} with handler {handler}")
handler(event)
self.queue.extend(self.uow.collect_new_events())
except Exception:
logger.exception(f"Exception handling event {event}")
continue
def handle_command(self, command: AbstractCommand) -> None:
logger.debug(f"Handling command {command}")
try:
handler = self.command_handlers[type(command)]
handler(command)
self.queue.extend(self.uow.collect_new_events())
except Exception:
logger.exception(f"Exception handling command {command}")
raise
def handle_query(self, query: AbstractQuery):
logger.debug(f"Handling query {query}")
try:
handler = self.query_handlers[type(query)]
results = handler(query)
self.queue.extend(self.uow.collect_new_events())
return results
except Exception:
logger.exception(f"Exception handling query {query}")
raise
| repo_name: adriangabura/vega | sub_path: librarius/service/message_bus.py | file_name: message_bus.py | file_ext: py | file_size_in_byte: 2,802 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |

seq_id: 8213590960
import numpy as np
from ..helpers import unify_tags, flatten_fillins
from .special_tokens import BLANK_TOK
def create_blanked_sents(doc, indexes=None):
if indexes:
if type(indexes[0]) == int:
indexes = [indexes]
indexes_list = indexes #[indexes]
else:
indexes_list = get_random_idxes(
doc, is_token_only=False, max_count=3)
blanks = set([flatten_fillins(
doc, indexes, [BLANK_TOK] * len(indexes)) \
for indexes in indexes_list])
return blanks
# the function for placing BLANKS.
def get_one_random_idx_set(
doc, max_blank_block=3, req_dep=None, blank_type_prob=None,
pre_selected_idxes=None, is_token_only=False):
if req_dep is not None:
if type(req_dep) == str: req_dep = [req_dep]
idx_range = [i for i, token in enumerate(doc) if token.dep_ in req_dep or unify_tags(token.dep_) in req_dep]
else:
idx_range = list(range(len(doc)))
# only keep those pre_selected_idxes
if pre_selected_idxes is not None:
idx_range = [i for i in idx_range if i in pre_selected_idxes]
max_blank_block = min(len(idx_range), max_blank_block)
#print(req_dep, idx_range)
selected_indexes = []
while max_blank_block > 0 and not selected_indexes:
# if fixed the thing to change, then do one specific change
n_perturb = np.random.choice(list(range(1, max_blank_block+1))) #if req_dep is None else 1
replace_idx, total_run = -1, 1000
while (total_run > 0 and n_perturb > 0): #and len(span_and_edits) == 0:
replace_idx = np.random.choice(idx_range)
token = doc[replace_idx]
if token.is_punct:
total_run -= 1
continue
if blank_type_prob: p = blank_type_prob
else:
# if fixed the tree, then mostly use the tree
if is_token_only: p = [0.7, 0, 0.3]
elif req_dep is None: p = [0.4, 0.35, 0.25]
else: p = [0.1, 0.7, 0.2]
is_replace_subtree = np.random.choice(["token", "subtree", "insert"], p=p)
if is_replace_subtree == "subtree":
start, end = token.left_edge.i, token.right_edge.i+1
elif is_replace_subtree == "token":
start, end = token.i, token.i+1
else:
start, end = token.i, token.i
if all([end < sstart or start > send for sstart, send in selected_indexes]):
selected_indexes.append([start, end])
n_perturb -= 1
total_run -= 1
return sorted(selected_indexes, key=lambda idx: (idx[0], idx[1]))
def get_random_idxes(doc,
pre_selected_idxes=None,
deps=None, is_token_only=False,
max_blank_block=3, max_count=None):
unique_blanks = {str([[0, len(doc)]]): [[0, len(doc)]]}
default_deps = [None, "", ["subj","obj"], ["aux", "ROOT"], ["conj", "modifier", "clause"]]
if is_token_only:
unique_blanks = {}
if deps is None: deps = default_deps
for dep in deps:
# for each different dep, get some blank
rounds = 1 if dep is not None else 2
if is_token_only:
rounds = 5
for _ in range(rounds):
curr_idx = get_one_random_idx_set(
doc, req_dep=dep,
max_blank_block=max_blank_block,
pre_selected_idxes=pre_selected_idxes,
is_token_only=is_token_only) if dep != "" else None
if curr_idx is not None:
unique_blanks[str(curr_idx)] = curr_idx
unique_blanks = list(unique_blanks.values())
if max_count is not None:
try:
unique_blanks = list(np.random.choice(
np.array(unique_blanks, dtype="object"),
min(len(unique_blanks), max_count),
replace=False))
except:
unique_blanks = unique_blanks[:max_count]
return unique_blanks
| repo_name: tongshuangwu/polyjuice | sub_path: polyjuice/generations/create_blanks.py | file_name: create_blanks.py | file_ext: py | file_size_in_byte: 3,984 | program_lang: python | lang: en | doc_type: code | stars: 89 | dataset: github-code | pt: 6 |

seq_id: 23091348874
'''
Epidemic modelling
YOUR NAME
Functions for running a simple epidemiological simulation
'''
import random
import sys
import click
# This seed should be used for debugging purposes only! Do not refer
# to this variable in your code.
TEST_SEED = 20170217
def has_an_infected_neighbor(city, location):
'''
Determine whether a person at a specific location has an infected
neighbor in a city modelled as a ring.
Args:
city (list of tuples): the state of all people in the simulation
at the start of the day
location (int): the location of the person to check
Returns (boolean): True, if the person has an infected neighbor;
False otherwise.
'''
# The location needs to be a valid index for the city list.
assert 0 <= location < len(city)
# This function should only be called when the person at location
# is susceptible to infection.
disease_state, _ = city[location]
assert disease_state == "S"
disease_state_left, _ = city[location-1]
disease_state_right, _ = city[(location+1) % len(city)]
# these define the state of the neighbors to the immediate left or right of the selected person
if disease_state_left == "I" or disease_state_right == "I":
# if the person has an infected neighbor to their left or their right, it is true that they neighbor an infected person
return True
# REPLACE False WITH AN APPROPRIATE RETURN VALUE
return False
# if the person doesn't have an infected neighbor, it is false that they would neighbor an infectee
def advance_person_at_location(city, location, days_contagious):
'''
Compute the next state for the person at the specified location.
Args:
city (list): the state of all people in the simulation at the
start of the day
location (int): the location of the person to check
days_contagious (int): the number of a days a person is infected
Returns (string, int): the disease state and the number of days
the person has been in that state after simulating one day.
'''
disease_state, _ = city[location]
assert 0 <= location < len(city)
state, days_in_state = city[location]
days_in_state +=1
# the day increases by one every time we advance a person (a day has passed for their condition to be rechecked)
if state == "S":
if has_an_infected_neighbor(city, location):
#if the person is susceptible and it is true that their neighbor is an infected person
state = "I"
days_in_state = 0
# the susceptible person becomes infected and has been so for zero days
if disease_state == "I":
if days_in_state >= days_contagious:
# if the person was already infected and has been so for at least the contagious period
state = "R"
days_in_state = 0
# they recover and have been so for 0 days
# We don't add a condition for recovered people.
#Their condition cannot change so all that happens is a day passes in their life
# REPLACE ("R", 0) WITH AN APPROPRIATE RETURN VALUE
return (state, days_in_state)
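# A small illustrative walk-through (hypothetical city, not part of the assignment data):
# with city = [("I", 0), ("S", 0), ("R", 1)], calling
# advance_person_at_location(city, 1, 2) returns ("I", 0): the susceptible person at
# location 1 has an infected left neighbor, so they become newly infected on this day.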
def simulate_one_day(starting_city, days_contagious):
'''
Move the simulation forward a single day.
Args:
starting_city (list): the state of all people in the simulation at the
start of the day
days_contagious (int): the number of a days a person is infected
Returns (list of tuples): the state of the city after one day
'''
ending_city = []
# we set an empty set, which will be the city after one day
for location in range(len(starting_city)):
# for a person in the city
ending_city.append(advance_person_at_location(starting_city, location, days_contagious))
# we advance a person through a day, and add them to the new city
# REPLACE [] WITH AN APPROPRIATE RETURN VALUE
return ending_city
# this leaves us with an ending city, where the people have all gone through one day
# thus, a day has been simulated
def is_transmission_possible(city):
"""
Is there at least one susceptible person who has an infected neighbor?
Args:
city (list): the current state of the city
Returns (boolean): True if the city has at least one susceptible person
with an infected neighbor, False otherwise.
"""
# YOUR CODE HERE
for location in range(len(city)):
state, _ = city[location]
# we define the state of each person in the city
if state == "S" and has_an_infected_neighbor(city, location):
return True
# if a person is susceptible and neighbors an infected person, we say that transmission can occur
# REPLACE False WITH AN APPROPRIATE RETURN VALUE
return False
# In any other case, the city has no susceptible people next to sick neighbors
def run_simulation(starting_city, days_contagious):
'''
Run the entire simulation
Args:
starting_city (list): the state of all people in the city at the
start of the simulation
days_contagious (int): the number of a days a person is infected
Returns tuple (list of tuples, int): the final state of the city
and the number of days actually simulated.
'''
city = starting_city
days = 0
while is_transmission_possible(city):
# while susceptible people in the city can be infected
city=simulate_one_day(city, days_contagious)
days +=1
# we simulate a day, and do so until no more susceptible people can get infected
# REPLACE ([], 0) WITH AN APPROPRIATE RETURN VALUE
return (city, days)
def vaccinate_person(vax_tuple):
'''
Attempt to vaccinate a single person based on their current
disease state and personal eagerness to be vaccinated.
Args:
vax_tuple (string, int, float): information about a person,
including their eagerness to be vaccinated.
Returns (string, int): a person tuple
'''
# YOUR CODE HERE
state, days, chance = vax_tuple
# we only check the case for susceptible people, as recovered or infected people aren't allowed to get vaccinated
if state =="S" and random.random() < chance:
# if the person is susceptible and they pass the probability test
state = "V"
days = 0
# they become vaccinated, and have been so for 0 days
# REPLACE ("R", 0) WITH AN APPROPRIATE RETURN VALUE
return (state, days)
def vaccinate_city(city_vax_tuples, random_seed):
'''
Vaccinate the people in the city based on their current state and
eagerness to be vaccinated.
Args:
city_vax_tuples (list of (string, int, float) triples):
state of all people in the simulation at the start
of the simulation, including their eagerness to be vaccinated.
random_seed (int): seed for the random number generator
Returns (list of (string, int) tuples): state of the people in the
city after vaccination
'''
# YOUR CODE HERE
random.seed(random_seed)
city_end = []
# empty city_end will represent the city after one day has passed
for person in city_vax_tuples:
city_end.append(vaccinate_person(person))
# we check if any person gets vaccinated given the above function, and move these post-day people to city_end
# REPLACE [] WITH AN APPROPRIATE RETURN VALUE
return city_end
def vaccinate_and_simulate(city_vax_tuples, days_contagious, random_seed):
"""
Vaccinate the city and then simulate the infection spread
Args:
city_vax_tuples (list): a list with the state of the people in the city,
including their eagerness to be vaccinated.
days_contagious (int): the number of days a person is infected
random_seed (int): the seed for the random number generator
Returns (list of tuples, int): the state of the city at the end of the
simulation and the number of days simulated.
"""
# YOUR CODE HERE
city = vaccinate_city(city_vax_tuples, random_seed)
# this returns the city after we perform the above simulation of a day where people can get vaccinated
# REPLACE ([], 0) WITH AN APPROPRIATE RETURN VALUE
return run_simulation(city, days_contagious)
# this returns the city after a simulated day, where people can get infected or recover
# now, vaccinated people can't get infected
################ Do not change the code below this line #######################
def run_trials(vax_city, days_contagious, random_seed, num_trials):
"""
Run multiple trials of vaccinate_and_simulate and compute the median
result for the number of days until infection transmission stops.
Args:
vax_city (list of (string, int, float) triples): a list with vax
tuples for the people in the city
days_contagious (int): the number of days a person is infected
random_seed (int): the seed for the random number generator
num_trials (int): the number of trial simulations to run
Returns:
(int) the median number of days until infection transmission stops
"""
days = []
for i in range(num_trials):
if random_seed:
_, num_days_simulated = vaccinate_and_simulate(vax_city,
days_contagious,
random_seed+i)
else:
_, num_days_simulated = vaccinate_and_simulate(vax_city,
days_contagious,
random_seed)
days.append(num_days_simulated)
# quick way to compute the median
return sorted(days)[num_trials // 2]
def parse_city_file(filename, is_vax_tuple):
"""
Read a city represented as person tuples or vax tuples from
a file.
Args:
filename (string): the name of the file
is_vax_tuple (boolean): True if the file is expected to contain
(string, int) pairs. False if the file is expected to contain
(string, int, float) triples.
Returns: list of tuples or None, if the file does not exist or
cannot be parsed.
"""
try:
with open(filename) as f:
residents = [line.split() for line in f]
except IOError:
print("Could not open:", filename, file=sys.stderr)
return None
ds_types = ('S', 'I', 'R', 'V')
rv = []
if is_vax_tuple:
try:
for i, res in enumerate(residents):
ds, nd, ve = res
num_days = int(nd)
vax_eagerness = float(ve)
if ds not in ds_types or num_days < 0 or \
vax_eagerness < 0 or vax_eagerness > 1.0:
raise ValueError()
rv.append((ds, num_days, vax_eagerness))
except ValueError:
emsg = ("Error in line {}: vax tuples are represented "
"with a disease state {}"
"a non-negative integer, and a floating point value "
"between 0 and 1.0.")
print(emsg.format(i, ds_types), file=sys.stderr)
return None
else:
try:
for i, res in enumerate(residents):
ds, nd = res
num_days = int(nd)
if ds not in ds_types or num_days < 0:
raise ValueError()
rv.append((ds, num_days))
except ValueError:
emsg = ("Error in line {}: persons are represented "
"with a disease state {} and a non-negative integer.")
print(emsg.format(i, ds_types), file=sys.stderr)
return None
return rv
@click.command()
@click.argument("filename", type=str)
@click.option("--days-contagious", default=2, type=int)
@click.option("--task-type", default="no_vax",
type=click.Choice(['no_vax', 'vax']))
@click.option("--random-seed", default=None, type=int)
@click.option("--num-trials", default=1, type=int)
def cmd(filename, days_contagious, task_type, random_seed, num_trials):
'''
Process the command-line arguments and do the work.
'''
city = parse_city_file(filename, task_type == "vax")
if not city:
return -1
if task_type == "no_vax":
print("Running simulation ...")
final_city, num_days_simulated = run_simulation(
city, days_contagious)
print("Final city:", final_city)
print("Days simulated:", num_days_simulated)
elif num_trials == 1:
print("Running one vax clinic and simulation ...")
final_city, num_days_simulated = vaccinate_and_simulate(
city, days_contagious, random_seed)
print("Final city:", final_city)
print("Days simulated:", num_days_simulated)
else:
print("Running multiple trials of the vax clinic and simulation ...")
median_num_days = run_trials(city, days_contagious,
random_seed, num_trials)
print("Median number of days until infection transmission stops:",
median_num_days)
return 0
if __name__ == "__main__":
cmd() # pylint: disable=no-value-for-parameter
| repo_name: MaxSaint01/pa1 | sub_path: sir.py | file_name: sir.py | file_ext: py | file_size_in_byte: 13,436 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |

seq_id: 11324258537
import pygame
from random import randint
from pygame.locals import *
pygame.init()
display_widht = 600
display_height = 360
spaceship_widht = 84
spaceship_height = 50
shots_x = []
shots_y = []
asteroids_x = []
asteroids_y = []
asteroids_type = []
gameDisplay = pygame.display.set_mode((display_widht, display_height))
pygame.display.set_caption('The battle of death')
clock = pygame.time.Clock()
spaceshipImg = pygame.image.load('spaceship.png')
backgroundImg = pygame.image.load('background.png')
laserImg = pygame.image.load('laser.png')
asteroidImg = pygame.image.load('asteroid.png')
def spaceship(x,y):
gameDisplay.blit(spaceshipImg, (x,y))
def shot(x,y):
x += 4
gameDisplay.blit(laserImg, (x,y))
gameDisplay.blit(laserImg, (x, y + spaceship_height - 7))
shots_x.append(x)
shots_y.append(y)
def move_shoots():
for i in range(len(shots_x)):
shots_x[i] += 8
if shots_x[i] < display_widht:
gameDisplay.blit(laserImg, (shots_x[i],shots_y[i]))
gameDisplay.blit(laserImg, (shots_x[i],shots_y[i] + spaceship_height - 7))
def create_asteroid():
up_side = randint(0,2)
x = randint(1, display_widht)
y = randint(1, display_height)
asteroids_type.append(up_side)
if up_side == 0:
y = 0
asteroids_x.append(x)
asteroids_y.append(y)
else:
x = display_widht - 40
asteroids_y.append(y)
asteroids_x.append(x)
gameDisplay.blit(asteroidImg, (x,y))
def move_asteroids():
global asteroids_x
global asteroids_y
for i in range(len(asteroids_x)):
if (asteroids_x[i] < display_widht or asteroids_x[i] > 0) and asteroids_type[i] != 0:
asteroids_x[i] -= 7
gameDisplay.blit(asteroidImg, (asteroids_x[i], asteroids_y[i]))
else:
asteroids_y[i] += 7
gameDisplay.blit(asteroidImg, (asteroids_x[i], asteroids_y[i]))
def game_loop():
x = 0
y = display_height * 0.5
x_change = 0
y_change = 0
gameExit = False
while not gameExit:
gameDisplay.blit(backgroundImg, (0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
x_change = 4
if event.key == pygame.K_LEFT:
x_change = -4
if event.key == pygame.K_UP:
y_change = -4
if event.key == pygame.K_DOWN:
y_change = 4
if event.key == pygame.K_SPACE:
shot(x,y)
create_asteroid()
if event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT or event.key == pygame.K_LEFT:
x_change = 0
if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
y_change = 0
x += x_change
y += y_change
if y > display_height - spaceship_height:
y = display_height - spaceship_height
if x > display_widht - spaceship_widht:
x = display_widht - spaceship_widht
if x < 0:
x = 0
if y < 0:
y = 0
spaceship(x,y)
move_shoots()
move_asteroids()
pygame.display.update()
clock.tick(60)
game_loop()
pygame.quit()
quit()
| repo_name: macelai/star-wars | sub_path: game.py | file_name: game.py | file_ext: py | file_size_in_byte: 3,439 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 22868194593
import requests
from googletrans import Translator, LANGUAGES
import pickle
import webScraping
with open('Resources/API key/oxford.pck', 'rb') as file:
api_key = pickle.load(file)
app_id = api_key['app id']
app_key = api_key['app key']
url_base = 'https://od-api.oxforddictionaries.com/api/v2/'
language_code = 'en-us'
def lemmatize(word):
endpoint = 'lemmas'
url = url_base + endpoint + '/' + language_code + '/' + word
res = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
if format(res.status_code) != '404':
return res.json()['results'][0]['lexicalEntries'][0]['inflectionOf'][0]['id']
else:
return ''
def Definition(word):
word = lemmatize(word)
if word != '':
endpoint = 'entries'
url = url_base + endpoint + '/' + language_code + '/' + word
res = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
try:
return res.json()['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['definitions'][0]
except:
return None
else:
return None
def Synonyms(word):
word = lemmatize(word)
if word != '':
endpoint = 'entries'
url = url_base + endpoint + '/' + language_code + '/' + word
res = requests.get(url, headers={"app_id": app_id, "app_key": app_key})
try:
list_of_synonyms = res.json()['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['synonyms']
result_list = []
for i in range(min(5, len(list_of_synonyms))):
result_list.append(list_of_synonyms[i]['text'])
return result_list
except:
return None
else:
return None
def Antonyms(word):
if word.find(' ') != -1:
return None
word = lemmatize(word)
return webScraping.Get_Antonyms(word)
def lang_translate(text,language):
if language in LANGUAGES.values():
translator = Translator()
result = translator.translate(text, src='en', dest=language)
return result
else:
return None
| repo_name: TroySigX/smartbot | sub_path: dictionary.py | file_name: dictionary.py | file_ext: py | file_size_in_byte: 2,176 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6 |

seq_id: 39263007416
import datetime as datetime
import json
from django.db.models import Q
from django.test import override_settings
from mock import MagicMock, patch
from rest_framework.status import HTTP_403_FORBIDDEN, HTTP_201_CREATED
from eums.models import MultipleChoiceAnswer, TextAnswer, Flow, Run, \
NumericAnswer, Alert, RunQueue
from eums.test.api.authorization.authenticated_api_test_case import AuthenticatedAPITestCase
from eums.test.config import BACKEND_URL
from eums.test.factories.consignee_factory import ConsigneeFactory
from eums.test.factories.delivery_factory import DeliveryFactory
from eums.test.factories.delivery_node_factory import DeliveryNodeFactory
from eums.test.factories.flow_factory import FlowFactory
from eums.test.factories.option_factory import OptionFactory
from eums.test.factories.purchase_order_factory import PurchaseOrderFactory
from eums.test.factories.purchase_order_item_factory import PurchaseOrderItemFactory
from eums.test.factories.question_factory import TextQuestionFactory, MultipleChoiceQuestionFactory, \
NumericQuestionFactory
ENDPOINT_URL = BACKEND_URL + 'web-answers'
class WebAnswerEndpointTest(AuthenticatedAPITestCase):
mock_get = MagicMock(return_value={})
mock_distribution_alert_raise = MagicMock()
def setUp(self):
super(WebAnswerEndpointTest, self).setUp()
self.setup_flow_with_questions(Flow.Label.IMPLEMENTING_PARTNER)
def setup_flow_with_questions(self, flow_type):
flow = FlowFactory(label=flow_type)
delivery_received_qn = MultipleChoiceQuestionFactory(label='deliveryReceived', flow=flow)
OptionFactory(question=delivery_received_qn, text='Yes')
OptionFactory(question=delivery_received_qn, text='No')
TextQuestionFactory(label='dateOfReceipt', flow=flow)
good_order_qn = MultipleChoiceQuestionFactory(label='isDeliveryInGoodOrder', flow=flow)
OptionFactory(question=good_order_qn, text='Yes')
OptionFactory(question=good_order_qn, text='No')
OptionFactory(question=good_order_qn, text='Incomplete')
satisfied_qn = MultipleChoiceQuestionFactory(label='areYouSatisfied', flow=flow)
OptionFactory(question=satisfied_qn, text='Yes')
OptionFactory(question=satisfied_qn, text='No')
TextQuestionFactory(label='additionalDeliveryComments', flow=flow)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_answers(self):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
answer_for_delivery_received = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'deliveryReceived')
answer_for_date_of_receipt = self._get_answer_for(TextAnswer, delivery.id, 'dateOfReceipt')
answer_for_delivery_order = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'isDeliveryInGoodOrder')
answer_for_satisfaction = self._get_answer_for(MultipleChoiceAnswer, delivery.id, 'areYouSatisfied')
answer_for_additional_comments = self._get_answer_for(TextAnswer, delivery.id, 'additionalDeliveryComments')
self.assertEqual(response.status_code, 201)
self.assertEqual(answer_for_delivery_received.value.text, 'Yes')
self.assertEqual(answer_for_date_of_receipt.value, date_of_receipt)
self.assertEqual(answer_for_delivery_order.value.text, 'Yes')
self.assertEqual(answer_for_satisfaction.value.text, 'Yes')
self.assertEqual(answer_for_additional_comments.value, good_comment)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.models.DistributionPlan.confirm')
def test_should_confirm_delivery_when_answers_are_saved(self, mock_confirm):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(mock_confirm.called)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.services.response_alert_handler.ResponseAlertHandler')
def test_should_format_answers_to_rapidpro_hook_api_and_handle_corresponding_alerts(self, mock_alert_handler):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
rapidpro_formatted_answers = [
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'deliveryReceived'},
{"category": {'eng': date_of_receipt, 'base': date_of_receipt}, 'label': 'dateOfReceipt'},
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'isDeliveryInGoodOrder',},
{"category": {'eng': 'Yes', 'base': 'Yes'}, 'label': 'areYouSatisfied'},
{"category": {'eng': good_comment, 'base': good_comment}, 'label': 'additionalDeliveryComments'}
]
self.assertTrue(mock_alert_handler.called_once_with(delivery, rapidpro_formatted_answers))
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
@patch('eums.services.response_alert_handler.ResponseAlertHandler.process')
def test_should_process_alerts(self, mock_process):
delivery = DeliveryFactory()
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertTrue(mock_process.called)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_create_alerts_integration(self):
purchase_order = PurchaseOrderFactory(order_number=5678)
purchase_order_item = PurchaseOrderItemFactory(purchase_order=purchase_order)
consignee = ConsigneeFactory(name="Liverpool FC")
delivery = DeliveryFactory(consignee=consignee)
DeliveryNodeFactory(item=purchase_order_item, distribution_plan=delivery)
date_of_receipt = self.__get_current_date()
good_comment = "All is good"
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'No'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt},
{'question_label': 'isDeliveryInGoodOrder', 'value': 'Yes'},
{'question_label': 'areYouSatisfied', 'value': 'Yes'},
{'question_label': 'additionalDeliveryComments', 'value': good_comment}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(response.status_code, 201)
alert = Alert.objects.get(consignee_name="Liverpool FC", order_number=5678)
self.assertEqual(alert.issue, Alert.ISSUE_TYPES.not_received)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_cancel_existing_runs_when_saving_a_new_set_of_answers(self):
delivery = DeliveryFactory()
data = {
'runnable': delivery.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'}
]}
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
runs = Run.objects.filter(runnable=delivery)
self.assertEqual(len(runs), 1)
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
runs = Run.objects.filter(runnable=delivery)
self.assertEqual(len(runs), 2)
self.assertEqual(len(Run.objects.filter(runnable=delivery, status='cancelled')), 1)
self.assertEqual(len(Run.objects.filter(runnable=delivery, status='completed')), 1)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_delivery_node_answers(self):
self.setup_flow_with_questions(Flow.Label.WEB)
node = DeliveryNodeFactory()
date_of_receipt = self.__get_current_date()
data = {
'runnable': node.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt}
]}
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
runs = Run.objects.filter(runnable=node)
self.assertEqual(len(runs), 1)
self.assertEqual(len(TextAnswer.objects.filter(run__runnable=node)), 1)
self.assertEqual(len(MultipleChoiceAnswer.objects.filter(run__runnable=node)), 1)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_delivery_node_answers_to_web_flow(self):
self.setup_flow_with_questions(Flow.Label.WEB)
node = DeliveryNodeFactory()
date_of_receipt = self.__get_current_date()
data = {
'runnable': node.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'dateOfReceipt', 'value': date_of_receipt}
]}
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
web_flow = Flow.objects.get(label=Flow.Label.WEB)
self.assertEqual(len(TextAnswer.objects.filter(question__flow=web_flow)), 1)
self.assertEqual(len(MultipleChoiceAnswer.objects.filter(question__flow=web_flow)), 1)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_save_numeric_answers(self):
self.setup_flow_with_questions(Flow.Label.WEB)
web_flow = Flow.objects.filter(label=Flow.Label.WEB).first()
NumericQuestionFactory(label='quantityDelivered', flow=web_flow)
node = DeliveryNodeFactory()
data = {
'runnable': node.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'quantityDelivered', 'value': '2'}
]}
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
self.assertEqual(len(NumericAnswer.objects.filter(question__flow=web_flow)), 1)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_should_dequeue_next_run_in_the_queue(self):
first_delivery_to_be_answered = DeliveryFactory(track=True)
contact = {'name': 'Some name', 'phone': '098765433'}
first_delivery_to_be_answered.build_contact = MagicMock(return_value=contact)
self._schedule_run_for(first_delivery_to_be_answered)
second_delivery_to_be_answered = DeliveryFactory(track=True)
self._schedule_run_for(second_delivery_to_be_answered)
data = {
'runnable': first_delivery_to_be_answered.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'}]
}
next_run = RunQueue.objects.filter(
Q(contact_person_id=second_delivery_to_be_answered.contact_person_id) & Q(
status='not_started')).order_by(
'-run_delay').first()
self.client.post(ENDPOINT_URL, data=json.dumps(data), content_type='application/json')
first_runs = Run.objects.filter(runnable=first_delivery_to_be_answered)
next_run = RunQueue.objects.get(id=next_run.id)
self.assertEqual(len(first_runs), 2)
self.assertEqual(next_run.status, 'started')
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
def _get_answer_for(self, answer_type, delivery_id, question_label):
return answer_type.objects.filter(run__runnable=delivery_id, question__label=question_label).first()
def _schedule_run_for(self, runnable):
if runnable.completed_run() is None:
if Run.has_scheduled_run(runnable.contact_person_id):
RunQueue.enqueue(runnable, 0)
else:
contact = runnable.build_contact()
task = '231x31231231'
Run.objects.create(scheduled_message_task_id=task, runnable=runnable,
status=Run.STATUS.scheduled, phone=contact['phone'] if contact else None)
def __get_current_date(self):
return datetime.datetime.strftime(datetime.datetime.now().date(), '%Y-%m-%d')
def test_unicef_admin_should_not_have_permission_to_create_web_answer(self):
self.log_and_assert_create_web_answer_permission(self.log_unicef_admin_in, HTTP_403_FORBIDDEN)
def test_unicef_editor_should_not_have_permission_to_create_web_answer(self):
self.log_and_assert_create_web_answer_permission(self.log_unicef_editor_in, HTTP_403_FORBIDDEN)
def test_unicef_viewer_should_not_have_permission_to_create_web_answer(self):
self.log_and_assert_create_web_answer_permission(self.log_unicef_viewer_in, HTTP_403_FORBIDDEN)
@override_settings(CELERY_LIVE=True)
@patch('eums.services.contact_service.ContactService.get', mock_get)
@patch('eums.services.flow_scheduler.distribution_alert_raise', mock_distribution_alert_raise)
def test_ip_editor_should_have_permission_to_create_web_answer(self):
self.log_and_assert_create_web_answer_permission(self.log_ip_editor_in, HTTP_201_CREATED)
self.assertTrue(self.mock_distribution_alert_raise.delay.called)
def test_ip_viewer_should_not_have_permission_to_create_web_answer(self):
self.log_and_assert_create_web_answer_permission(self.log_ip_viewer_in, HTTP_403_FORBIDDEN)
def log_and_assert_create_web_answer_permission(self, log_func, expected_status_code):
log_func()
self.setup_flow_with_questions(Flow.Label.WEB)
web_flow = Flow.objects.filter(label=Flow.Label.WEB).first()
NumericQuestionFactory(label='quantityDelivered', flow=web_flow)
node = DeliveryNodeFactory()
request_body = {
'runnable': node.id, 'answers': [
{'question_label': 'deliveryReceived', 'value': 'Yes'},
{'question_label': 'quantityDelivered', 'value': '2'}
]}
response = self.client.post(ENDPOINT_URL, data=json.dumps(request_body), content_type='application/json')
self.assertEqual(response.status_code, expected_status_code)
| repo_name: unicefuganda/eums | sub_path: eums/test/api/test_web_answers_end_point.py | file_name: test_web_answers_end_point.py | file_ext: py | file_size_in_byte: 18,674 | program_lang: python | lang: en | doc_type: code | stars: 9 | dataset: github-code | pt: 6 |

seq_id: 38979909130
from datetime import datetime, timedelta, timezone
import pytz
tokyo_tz = pytz.timezone('Asia/Tokyo')
# def delete_feed_with_too_many_entries(reader, db, url):
# entries = list(reader.get_entries())
# if len(entries) > 300:
# print("deleting feeds: ", url)
# reader.delete_feed(url)
# return setup_feed(db, url)
# return reader
def delete_old_entries(reader):
entries = list(reader.get_entries())
for entry in entries:
if entry.published.replace(tzinfo=None) < datetime.now() - timedelta(days=1):
print(entry)
reader.delete_entry(entry)
| repo_name: kei49/rss-to-slack | sub_path: src/feed.py | file_name: feed.py | file_ext: py | file_size_in_byte: 637 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 34891232871
import todo
def main():
run = 1
todo.create_table()
while run:
print("\n")
print("1. Insert task in todo list \n"
"2. View data from todo list \n"
"3. Delete task from todo list \n"
"4. Exit \n")
x = int(input("Choose any of the above options: "))
if x==1:
task = str(input("Enter your todo: "))
todo.data_entry(task)
elif x==2:
todo.printData()
elif x==3:
indexToDelete = int(input("Enter the number of task to be deleted"))
todo.data_delete(indexToDelete)
elif x==4:
run=0
else:
print("Please choose valid option")
todo.closeCursor()
if __name__ == '__main__': main()
| repo_name: hrishikesh-godbole/Python_Daily | sub_path: TODO App/my_todo.py | file_name: my_todo.py | file_ext: py | file_size_in_byte: 808 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 17321484534
from uiplib.setWallpaper import change_background
import os
from uiplib.constants import CURR_DIR, PICS_FOLDER, WEBSITE, TIMEOUT
import random
import time
from uiplib.scrape import get_images
from threading import Thread
import sys
from select import select
try:
import msvcrt
except ImportError:
#not on windows
pass
class scheduler():
def __init__(self, offline):
directory = os.path.join(CURR_DIR, PICS_FOLDER)
if not offline:
fetch = Thread(target=self.initFetch)
# all child threads need to be daemons to die upon main thread exit
fetch.setDaemon(True)
fetch.start()
while not ((os.path.isdir(os.path.join(CURR_DIR, PICS_FOLDER)) and
os.listdir(directory) != [])):
print('Downloading images..')
time.sleep(60)
elif not os.path.exists(directory):
os.makedirs(directory)
if os.listdir(directory) != []:
print("You can wait for next wallpaper or skip this wallpaper"
" by just pressing enter.")
self.change_random()
self.setStartTime(time.time())
self.changeCycle()
else:
print("No downloaded images. Try again in online mode.")
def initFetch(self):
try:
get_images(WEBSITE)
except ValueError as e:
print("File could not be retrieved.", e)
def change_random(self):
directory = os.path.join(CURR_DIR, PICS_FOLDER)
filename = random.choice(os.listdir(directory))
path = os.path.join(directory, filename)
print("changing desktop wallpaper to: ", path)
change_background(path)
def kbhit(self):
''' Returns True if keyboard character was hit, False otherwise.
'''
if os.name == 'nt':
return msvcrt.kbhit()
else:
dr,dw,de = select([sys.stdin], [], [], 0)
return dr != []
def getch(self):
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
s = ''
if os.name == 'nt':
return msvcrt.getch().decode('utf-8')
else:
return sys.stdin.read(1)
def changeCycle(self):
while True:
if not self.kbhit():
delta = self.deltaTime()
if delta >= TIMEOUT:
self.change_random()
self.time = time.time()
else:
self.getch()
print("Skipping this wallpaper")
self.change_random()
self.time = time.time()
def setStartTime(self, time):
self.time = time
def deltaTime(self):
return (time.time()-self.time)
| repo_name: teja-315/UIP | sub_path: uiplib/scheduler.py | file_name: scheduler.py | file_ext: py | file_size_in_byte: 2,856 | program_lang: python | lang: en | doc_type: code | stars: null | dataset: github-code | pt: 6 |

seq_id: 14362032569
import math
infile = open("bank1.cc", "r")
outfile = open("bank2.cc", "w")
i=0
for line in infile:
i=i+1
if i==1:
val = 2
else:
val = int(math.log(i/2, 10) + 2)
line2 = line[val:]
outfile.write (line2)
| repo_name: Skeletrox/Dump | sub_path: zz.py | file_name: zz.py | file_ext: py | file_size_in_byte: 213 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 1466500793
from dataclasses import dataclass, field
from src.shared.general_functions import sum_all_initialized_int_attributes
@dataclass
class ShareholdersEquity:
"""Shareholders' equity is the amount that the owners of a company have invested in their business. This includes
the money they've directly invested and the accumulation of income the company has earned and that has been
reinvested since inception."""
preferred_Stock: int
common_stock: int
retained_earnings: int
accumulated_other_comprehensive_income_loss: int
other_total_stockholders_equity: int
minority_interest: int
total_shareholders_equity: int = field(init=False)
def __post_init__(self):
self.total_shareholders_equity = sum_all_initialized_int_attributes(self)
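# A minimal usage sketch with made-up figures; it assumes the imported helper simply
# sums the integer fields that were passed to __init__, in the declared field order.
if __name__ == "__main__":
    demo = ShareholdersEquity(0, 50_000, 120_000, -5_000, 1_000, 2_000)
    # total_shareholders_equity is computed in __post_init__: 168_000 with these numbers
    print(demo.total_shareholders_equity)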
| repo_name: hakunaprojects/stock-investing | sub_path: src/domain/financial_statements/balance_sheet_statement/shareholders_equity.py | file_name: shareholders_equity.py | file_ext: py | file_size_in_byte: 785 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 18959073144
import boto3
import time
import json
import configparser
from botocore.exceptions import ClientError
redshift_client = boto3.client('redshift', region_name='ap-southeast-1')
ec2 = boto3.resource('ec2', region_name='ap-southeast-1')
def create_udacity_cluster(config):
"""Create an Amazon Redshift cluster
Args:
config: configurations file
Returns:
response['Cluster']: return cluster dictionary information
Raises:
ClientError
"""
try:
response = redshift_client.create_cluster(
ClusterIdentifier='udacity-cluster',
ClusterType='multi-node',
NumberOfNodes=2,
NodeType='dc2.large',
PubliclyAccessible=True,
DBName=config.get('CLUSTER', 'DB_NAME'),
MasterUsername=config.get('CLUSTER', 'DB_USER'),
MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD'),
Port=int(config.get('CLUSTER', 'DB_PORT')),
IamRoles=[config.get('IAM_ROLE', 'ROLE_ARN')],
VpcSecurityGroupIds=['sg-077f9a08ba80c09e4']
)
except ClientError as e:
print(f'ERROR: {e}')
return None
else:
return response['Cluster']
def wait_for_creation(cluster_id):
"""Wait for cluster creation
Args:
cluster_id: Cluster identifier
Returns:
cluster_info: return cluster dictionary information
Raises:
None
"""
while True:
response = redshift_client.describe_clusters(ClusterIdentifier=cluster_id)
cluster_info = response['Clusters'][0]
if cluster_info['ClusterStatus'] == 'available':
break
time.sleep(30)
return cluster_info
def opentcp(config,cluster_info):
"""Open an incoming TCP port to access the cluster endpoint
Args:
config: configurations file
cluster_info: cluster dictionary information
Returns:
None
Raises:
None
"""
try:
vpc = ec2.Vpc(id=cluster_info['VpcId'])
defaultSg = list(vpc.security_groups.all())[0]
print(defaultSg)
defaultSg.authorize_ingress(
GroupName=defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(config.getint('CLUSTER', 'DB_PORT')),
ToPort=int(config.getint('CLUSTER', 'DB_PORT'))
)
except Exception as e:
print(e)
def main():
"""Create cluster"""
config = configparser.ConfigParser()
config.read('../dwh.cfg')
cluster_info = create_udacity_cluster(config)
if cluster_info is not None:
print('Cluster is being created')
cluster_info = wait_for_creation(cluster_info['ClusterIdentifier'])
print(f'Cluster has been created.')
print(f"Endpoint to copy={cluster_info['Endpoint']['Address']}")
opentcp(config,cluster_info)
if __name__ == '__main__':
main()
| repo_name: hieutdle/bachelor-thesis | sub_path: airflow/scripts/create_cluster.py | file_name: create_cluster.py | file_ext: py | file_size_in_byte: 2,942 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6 |

seq_id: 6371745337
#!/usr/bin/python3
"""
this module defines the Pascal's Triangle function
"""
def pascal_triangle(n):
"""
n is the number of rows of Pascal's triangle to generate
"""
if n <= 0:
return []
triangle = [[1]]
while len(triangle) != n:
th_triangle = triangle[-1]
new = [1]
for d in range(len(th_triangle) - 1):
new.append(th_triangle[d] + th_triangle[d + 1])
new.append(1)
triangle.append(new)
return (triangle)
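# Illustrative check of the function above:
# pascal_triangle(4) evaluates to [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]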
| repo_name: Laabk/alx-higher_level_programming | sub_path: 0x0B-python-input_output/12-pascal_triangle.py | file_name: 12-pascal_triangle.py | file_ext: py | file_size_in_byte: 490 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 |

seq_id: 22164374876
from .bayesianPwLinearRegression import BayesianPieceWiseLinearRegression
from .bayesianLinearRegression import BayesianLinearRegression
from .seqCoupledBayesianPwLinReg import SeqCoupledBayesianPieceWiseLinearRegression
from .globCoupBayesianPwLinReg import GlobCoupledBayesianPieceWiseLinearRegression
from .vvglobCoup import VVglobCoupled
from .scores import calculateFeatureScores, adjMatrixRoc, credible_interval, \
credible_score, get_betas_over_time, get_scores_over_time, beta_post_matrix, score_beta_matrix
from .fullParentsBpwLinReg import FPBayesianPieceWiseLinearRegression
from .fpBayesianLinearRegression import FpBayesianLinearRegression
from .fpGlobCoupBpwLinReg import FpGlobCoupledBayesianPieceWiseLinearRegression
from .fpSeqCoupBpwlinReg import FpSeqCoupledBayesianPieceWiseLinearRegression
from .fpvvGlobCoup import FpVVglobCoupled
import numpy as np
class Network():
'''
Class that serves as the waypoint to infer the network topology
of a dataset using various implemented algorithms
Attributes:
data : numpy.ndarray
numpy array with shape (num_samples, variables)
chain_length : int
integer containing the chain length
burn_in : int
integer that determines the burn_in interval of the MCMC chain
'''
def __init__(self, data, chain_length, burn_in, lag, change_points = []):
self.data = data
self.lag = lag
self.change_points = change_points
self.network_configuration = None
self.chain_length = chain_length
self.burn_in = burn_in
#self.method = 'nh_dbn'
self.true_adj_matrix = None
self.proposed_adj_matrix = [] # proposed adj matrix
self.edge_scores = None
self.chain_results = None
self.scores_over_time = [] # scores over time list of matrices
self.betas_over_time = [] # we also want the betas over time for diagnostics
self.cps_over_response = [] # we want all the different computed chains
self.network_configurations = [] # the list of all the design matrices of all the network configs
self.network_args = {
'model': None,
'type': None,
'length': str(self.chain_length),
'burn-in': str(self.burn_in),
'thinning': 'modulo 10',
'scoring_method': None,
'network_configs': []
} # dict where we are going to save the args used by the network
def set_network_configuration(self, configuration):
'''
Method transforms and sets the 'raw' data using the
given configuration into a dictionary of the form:
{
'features': {
'X1': numpy.ndarray
...
}
'response': {
'y': numpy.ndarray
}
}
Args:
configuration : int
integer that indicates which variable X_i is the current response
'''
network_list = self.data # retrieve the network data
dims = self.data[0].shape[1] # dimensions of the data points
dimsVector = [x for x in range(dims)]
num_samples = 0
for segment in network_list:
# add the length of the segment
num_samples = segment.data.shape[0] + num_samples
#num_samples = self.data.shape[0] # number of data points
currResponse = configuration # Which column will be the response for the configuration
# You have to evaluate because the filter returns an obj
currFeatures = list(filter(lambda x: x != configuration, dimsVector))
data_dict = {
'features': {},
'response': {}
}
# add an aditional matrix for each max lag
count_label = 1
for lag in range(self.lag):
# Add the features to the dict
for el in currFeatures:
# if the lag is greater than 1 then label will be the count
if lag + 1 > 1:
label = count_label
else:
label = el
col_name = 'X' + str(label)
feature_data = np.array([]) # data initialized as empty
for segment in network_list:
curr_segment_len = segment.shape[0]
# select all but the last data point
segment_data = segment[:curr_segment_len - (lag + 1), el]
# we do the insert only if lag > 1
if lag + 1 > 1:
# insert a 0 at the beginning for each lag beyond 1
for _ in range(lag):
# if we have a length greater than 1 then we append 0s to match the
# dimensions of the original design matrix
segment_data = np.insert(segment_data, 0, [0])
# concatenate(stack) the segment data into the data of the curr feature
feature_data = np.concatenate((feature_data, segment_data)) if feature_data.size else segment_data
# add to the dict
data_dict['features'][col_name] = feature_data
count_label = count_label + 1 # sum 1 to the label
# Select + stack the data for the response
resp_data = np.array([]) # resp init as empty
for segment in network_list:
curr_resp_len = segment.shape[0]
segment_data = segment[1:curr_resp_len, currResponse] # select curr resp data
# concatenate the resp data
resp_data = np.concatenate((resp_data, segment_data), axis = 0) if resp_data.size else segment_data
data_dict['response']['y'] = resp_data
self.network_configuration = data_dict # add the current config to the network
self.network_configurations.append(data_dict) # append the current network config
# dictionary that will save the args used in the conf
formated_dict = {
'features': list(data_dict['features'].keys()),
'response': 'X' + str(currResponse)
}
self.network_args['network_configs'].append(formated_dict)
def fit(self, method):
'''
Method that will the current data configuration of the network
using the provided method (algorithm)
Args:
method : str
string that will determine which method we are going to use
'''
num_samples = self.network_configuration['response']['y'].shape[0] # Number of data points
if method == 'varying_nh_dbn': # call the nh-dbn with varying cps
self.network_args['model'] = 'Bayesian Non-Homogeneous'
self.network_args['type'] = 'Varying Parents'
baReg = BayesianPieceWiseLinearRegression(
self.network_configuration, # Current data config
'varying_nh', # varying changepoints non-homogeneous
num_samples, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # Call the fit method of the regressor
self.chain_results = baReg.results # Set the results
elif method == 'fp_varying_nh_dbn': # full parents credible intervals method
self.network_args['model'] = 'Bayesian Non-Homogeneous'
self.network_args['type'] = 'Full Parents'
baReg = FPBayesianPieceWiseLinearRegression(
self.network_configuration, # Current data config
'varying_nh', # varying changepoints non-homogeneous
num_samples, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # Call the fit method of the regressor
self.chain_results = baReg.results # Set the results
elif method == 'fixed_nh_dbn': # call the nh-dbn with fixed cps
self.network_args['model'] = 'Bayesian Non-Homogeneous'
self.network_args['type'] = 'Varying Parents-Fixed changepoints'
baReg = BayesianPieceWiseLinearRegression(
self.network_configuration, # Current data config of the network
'fixed_nh', # fixed cps non-homogeneous
num_samples - 1, # number of data points
self.chain_length, # length of the MCMC
self.change_points # predefined cps
)
baReg.fit() # call the fit method of the regressor
self.chain_results = baReg.results # set the results
elif method == 'h_dbn': # call the h-dbn
self.network_args['model'] = 'Bayesian Homogeneous'
self.network_args['type'] = 'Varying Parents'
baReg = BayesianLinearRegression(
self.network_configuration, # current data config of the network
num_samples + 1, # number of samples
self.chain_length # length of the MCMC chain
)
baReg.fit() # call to the fit method of the regressor
self.chain_results = baReg.results # set the results
elif method == 'fp_h_dbn': # call the full parents h-dbn
self.network_args['model'] = 'Bayesian Homogeneous'
self.network_args['type'] = 'Full Parents'
baReg = FpBayesianLinearRegression(
self.network_configuration, # current data config of the network
num_samples + 1, # number of samples
self.chain_length # length of the MCMC chain
)
baReg.fit() # call the fit method of the regressor
self.chain_results = baReg.results # set the results
elif method == 'seq_coup_nh_dbn':
self.network_args['model'] = 'Sequentially Coupled Non-Homogeneous'
self.network_args['type'] = 'Varying Parents'
baReg = SeqCoupledBayesianPieceWiseLinearRegression(
self.network_configuration, # Current data config
'seq_coup_nh', # varying changepoints non-homogeneous seq coupled
num_samples - 1, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call the fit method of the regressor
self.chain_results = baReg.results # set the results
elif method == 'fp_seq_coup_nh_dbn':
self.network_args['model'] = 'Sequentially Coupled Non-Homogeneous'
self.network_args['type'] = 'Full Parents'
baReg = FpSeqCoupledBayesianPieceWiseLinearRegression(
self.network_configuration, # Current data config
'seq_coup_nh', # varying changepoints non-homogeneous seq coupled
num_samples - 1, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call the fit method of the regressor
self.chain_results = baReg.results # set the results
elif method == 'glob_coup_nh_dbn':
self.network_args['model'] = 'Globally Coupled Non-Homogeneous'
self.network_args['type'] = 'Varying Parents'
baReg = GlobCoupledBayesianPieceWiseLinearRegression(
self.network_configuration, # current data config
'glob_coup_nh', # glob coup additional functions
num_samples, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call to the fit method of the glob coup regressor
self.chain_results = baReg.results
elif method == 'fp_glob_coup_nh_dbn':
self.network_args['model'] = 'Globally Coupled Non-Homogeneous'
self.network_args['type'] = 'Full Parents'
baReg = FpGlobCoupledBayesianPieceWiseLinearRegression(
self.network_configuration, # current data config
'glob_coup_nh', # glob coup additional functions
num_samples, # number of data points
self.chain_length, # length of the chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call to the fit method of the glob coup regressor
self.chain_results = baReg.results
elif method == 'var_glob_coup_nh_dbn':
self.network_args['model'] = 'Varying Globally Coupled Non-Homogeneous'
self.network_args['type'] = 'Varying Parents'
baReg = VVglobCoupled(
self.network_configuration, # current data config
'var_glob_coup_nh', # glob coup additional functions
num_samples, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call to the fit method of the glob coup regressor
self.chain_results = baReg.results
elif method == 'fp_var_glob_coup_nh_dbn':
self.network_args['model'] = 'Varying Globally Coupled Non-Homogeneous'
self.network_args['type'] = 'Full Parents'
baReg = FpVVglobCoupled(
self.network_configuration, # current data config
'var_glob_coup_nh', # glob coup additional functions
num_samples, # number of data points
self.chain_length, # len of chain
[num_samples + 2] # just the last pseudo cp []
)
baReg.fit() # call to the fit method of the glob coup regressor
self.chain_results = baReg.results
def score_edges(self, currResponse, method):
'''
        Calculates the edge scores for the current configuration of the network
Args:
currResponse : int
integer referencing which variable X_i is the
current response of the configuration
method : str
string that contains the type of method used so we can evaluate
with the chain_results of the pi_vector or with the credible intervals
for the full parent sets
'''
# current features + data dimensions according to the lag
currFeatures = [int(string[1:]) for string in list(self.network_configuration['features'])]
dims = self.data[0].shape[1] # dimensions of the data points
#dims = len(currFeatures) + 1
# check if the method is for full parents
# this should only check the first 2 letters of the method
if (method == 'fp_varying_nh_dbn'
or method == 'fp_h_dbn'
or method == 'fp_seq_coup_nh_dbn'
or method == 'fp_glob_coup_nh_dbn'
or method == 'fp_var_glob_coup_nh_dbn'):
# thin + burn the chain on the global mean chain
if (method == 'fp_glob_coup_nh_dbn'
or method == 'fp_var_glob_coup_nh_dbn'):
# if the method is from the glob coup we will use the global mean vector
burned_chain = self.chain_results['mu_vector'][self.burn_in:]
thinned_chain = [burned_chain[x] for x in range(len(burned_chain)) if x%10==0]
# necessary so the beta matrix is built correctly
mu_thinned_chain = [[element] for element in thinned_chain]
# shift the betas by 2 so it fits with the cps
betas_chain = self.chain_results['betas_vector'][2:]
# burn the shifted chain
burned_chain = betas_chain[self.burn_in:]
# thin both of the chains
betas_thinned_chain = [burned_chain[x] for x in range(len(burned_chain)) if x%10==0]
# we only have chainpoints in the non-homogeneous model
if method != 'fp_h_dbn':
# burn the cps chain
burned_cps = self.chain_results['tau_vector'][self.burn_in:]
thinned_changepoints = [burned_cps[x] for x in range(len(burned_cps)) if x%10==0]
else:
thinned_changepoints = [] # if not then we just assign an empty list
# This will get the betas over time as diagnostic
if (method == 'fp_varying_nh_dbn'
or method == 'fp_seq_coup_nh_dbn'
or method == 'fp_glob_coup_nh_dbn'
or method == 'fp_var_glob_coup_nh_dbn'):
# get the len of the time-series
time_pts = self.network_configuration['response']['y'].shape[0]
betas_over_time = get_betas_over_time(time_pts, thinned_changepoints, betas_thinned_chain, dims) #TODO add the dims
self.betas_over_time.append(betas_over_time) # append to the network
scores_over_time = get_scores_over_time(betas_over_time, currFeatures, dims)
self.scores_over_time.append(scores_over_time) # append to the network
if method == 'fp_glob_coup_nh_dbn':
# if we are using a glob coup model change the scores to the global vector
# TODO make this a user input
betas_thinned_chain = mu_thinned_chain
betas_matrix = beta_post_matrix(betas_thinned_chain) # construct the betas post matrix
edge_scores = score_beta_matrix(betas_matrix, currFeatures, currResponse) # score the matrix
self.network_args['scoring_method'] = 'fraq-score'
self.proposed_adj_matrix.append(edge_scores) # append to the proposed adj matrix
self.cps_over_response.append(thinned_changepoints) # append the cps chain over the curr response
else: # we are doing the varying parents model(s)
            # TODO make a single prune_chain() method; pruning = burn-in + thinning
# burn + thin the features (parents) chain
burned_chain = self.chain_results['pi_vector'][self.burn_in:]
            thinned_chain = [burned_chain[x] for x in range(len(burned_chain)) if x%10==0] # keep every 10th sample (thinning), consistent with the other chains
self.edge_scores = calculateFeatureScores(
#self.chain_results['pi_vector'][self.burn_in:],
thinned_chain,
dims,
currFeatures,
currResponse)
self.network_args['scoring_method'] = 'edge-scores'
self.proposed_adj_matrix.append(self.edge_scores) # append to the proposed adj matrix
            # betas chain pruning (burn-in + thinning)
# shift the betas by 2 so it fits with the cps
betas_chain = self.chain_results['padded_betas'][2:]
burned_chain = betas_chain[self.burn_in:]
betas_thinned_chain = [burned_chain[x] for x in range(len(burned_chain)) if x%10==0]
# cps chain pruning
if len(self.chain_results['tau_vector']) == 0:
# we are on the homogeneous dbn model
beta_dim = len(currFeatures) + 1 # we sum 1 because of the intercept
time_pts = self.network_configuration['response']['y'].shape[0] # get the len of the time-series
# -> create an artificial cps set that has just 1 cp as cp length + 2 so get_betas_over_time works
thinned_changepoints = [[time_pts + 2] for _ in range(len(betas_thinned_chain))]
betas_over_time = get_betas_over_time(time_pts, thinned_changepoints, betas_thinned_chain, beta_dim) #TODO add the dims
self.betas_over_time.append(betas_over_time) # append to the network
self.cps_over_response.append(thinned_changepoints) # append the cps chain over the curr response
else:
burned_cps = self.chain_results['tau_vector'][self.burn_in:]
thinned_changepoints = [burned_cps[x] for x in range(len(burned_cps)) if x%10==0]
# get the dims of the betas
beta_dim = len(currFeatures) + 1 # we sum 1 because of the intercept
time_pts = self.network_configuration['response']['y'].shape[0] # get the len of the time-series
betas_over_time = get_betas_over_time(time_pts, thinned_changepoints, betas_thinned_chain, beta_dim) #TODO add the dims
self.betas_over_time.append(betas_over_time) # append to the network
self.cps_over_response.append(thinned_changepoints) # append the cps chain over the curr response
def infer_network(self, method):
'''
Infers the network topology on the data by changing to all
possible configurations of the network
Args:
method : str
string with the name of the method we are going to use
to fit the data
'''
# because data is now a list we have to select the first allways
# existing element
dims = self.data[0].shape[1] # dimensions of the data points
dimsVector = [x for x in range(dims)]
for configuration in dimsVector:
self.set_network_configuration(configuration)
self.fit(method)
self.score_edges(configuration, method)
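
# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# The class constructor and data-loading steps are not shown in this excerpt, so the
# network object is assumed to be built elsewhere; only names defined above
# (infer_network, proposed_adj_matrix) are used here.
def run_inference_demo(network, method='fp_h_dbn'):
    """Drive an already-configured network object through inference with the chosen
    model string and return its per-response edge-score vectors."""
    network.infer_network(method)        # fits and scores every response configuration
    return network.proposed_adj_matrix   # one list of edge scores per response variable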
|
charx7/DynamicBayesianNetworks
|
src/dyban/network.py
|
network.py
|
py
| 19,868
|
python
|
en
|
code
| 16
|
github-code
|
6
|
45385920976
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
##### System wide lib #####
import sys
import os
import operator
from StringIO import StringIO
##### Theory lib #####
from theory.apps.command.baseCommand import SimpleCommand
from theory.gui import field
from theory.apps import apps
from theory.db.migrations import Migration
from theory.db.migrations.loader import MigrationLoader
from theory.db.migrations.autodetector import MigrationAutodetector
from theory.db.migrations.questioner import MigrationQuestioner, InteractiveMigrationQuestioner
from theory.db.migrations.state import ProjectState
from theory.db.migrations.writer import MigrationWriter
from theory.gui.color import noStyle
from theory.utils.six import iteritems
from theory.utils.six.moves import reduce
##### Theory third-party lib #####
##### Local app #####
##### Theory app #####
##### Misc #####
class MakeMigration(SimpleCommand):
"""
Creates new migration(s) for apps.
"""
name = "makeMigration"
verboseName = "makeMigration"
_notations = ["Command",]
_drums = {"Terminal": 1, }
class ParamForm(SimpleCommand.ParamForm):
appLabelLst = field.ListField(
field.TextField(maxLength=32),
label="Application Name",
helpText='Specify the app label(s) to create migrations for.',
required=False,
initData=[],
)
isDryRun = field.BooleanField(
label="is dry run",
helpText=(
"Just show what migrations would be made; "
"don't actually write them."
),
required=False,
initData=False,
)
isMerge = field.BooleanField(
label="is merge",
helpText="Enable fixing of migration conflicts.",
required=False,
initData=False,
)
isEmpty = field.BooleanField(
label="is empty",
helpText="Create an empty migration.",
required=False,
initData=False,
)
def run(self):
options = self.paramForm.clean()
self.verbosity = options.get('verbosity')
self.interactive = False
self.dryRun = options.get('isDryRun', False)
self.merge = options.get('isMerge', False)
self.empty = options.get('isEmpty', False)
self.stdout = StringIO()
self.style = noStyle()
# Make sure the app they asked for exists
appLabels = set(options["appLabelLst"])
badAppLabels = set()
for appLabel in appLabels:
try:
apps.getAppConfig(appLabel)
except LookupError:
badAppLabels.add(appLabel)
if badAppLabels:
for appLabel in badAppLabels:
self.stdout.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % appLabel)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignoreNoMigrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detectConflicts()
# If appLabels is specified, filter out conflicting migrations for unspecified apps
if appLabels:
conflicts = dict(
(appLabel, conflict) for appLabel, conflict in iteritems(conflicts)
if appLabel in appLabels
)
if conflicts and not self.merge:
nameStr = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError("Conflicting migrations detected (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % nameStr)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
self._stdOut = self.stdout.getvalue()
self.stdout.close()
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
self._stdOut = self.stdout.getvalue()
self.stdout.close()
return self.handleMerge(loader, conflicts)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.projectState(),
ProjectState.fromApps(apps),
InteractiveMigrationQuestioner(specifiedApps=appLabels, dryRun=self.dryRun),
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not appLabels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrangeForGraph
changes = dict(
(app, [Migration("custom", app)])
for app in appLabels
)
changes = autodetector.arrangeForGraph(changes, loader.graph)
self.writeMigrationFiles(changes)
self._stdOut = self.stdout.getvalue()
self.stdout.close()
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trimToApps=appLabels or None,
convertApps=appLabels or None,
)
# No changes? Tell them.
if not changes and self.verbosity >= 1:
if len(appLabels) == 1:
self.stdout.write("No changes detected in app '%s'" % appLabels.pop())
elif len(appLabels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(appLabels)))
else:
self.stdout.write("No changes detected")
self._stdOut = self.stdout.getvalue()
self.stdout.close()
return
self.writeMigrationFiles(changes)
def writeMigrationFiles(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directoryCreated = {}
for appLabel, appMigrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % appLabel) + "\n")
for migration in appMigrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dryRun:
# Write the migrations file to the disk.
migrationsDirectory = os.path.dirname(writer.path)
if not directoryCreated.get(appLabel, False):
if not os.path.isdir(migrationsDirectory):
os.mkdir(migrationsDirectory)
initPath = os.path.join(migrationsDirectory, "__init__.py")
if not os.path.isfile(initPath):
open(initPath, "w").close()
# We just do this once per app
directoryCreated[appLabel] = True
migrationString = writer.asString()
with open(writer.path, "wb") as fh:
fh.write(migrationString)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING("Full migrations file '%s':" % writer.filename) + "\n")
self.stdout.write("%s\n" % writer.asString())
def handleMerge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'askMerge': True})
for appLabel, migrationNames in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
mergeMigrations = []
for migrationName in migrationNames:
migration = loader.getMigration(appLabel, migrationName)
migration.ancestry = loader.graph.forwardsPlan((appLabel, migrationName))
mergeMigrations.append(migration)
commonAncestor = None
for level in zip(*[m.ancestry for m in mergeMigrations]):
if reduce(operator.eq, level):
commonAncestor = level[0]
else:
break
if commonAncestor is None:
raise ValueError("Could not find common ancestor of %s" % migrationNames)
# Now work out the operations along each divergent branch
for migration in mergeMigrations:
migration.branch = migration.ancestry[
(migration.ancestry.index(commonAncestor) + 1):
]
migration.mergedOperations = []
for nodeApp, nodeName in migration.branch:
migration.mergedOperations.extend(
loader.getMigration(nodeApp, nodeName).operations
)
# In future, this could use some of the Optimizer code
# (canOptimizeThrough) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % appLabel))
for migration in mergeMigrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.mergedOperations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.askMerge(appLabel):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parseNumber(migration.name)
for migration in mergeMigrations
]
try:
biggestNumber = max([x for x in numbers if x is not None])
except ValueError:
biggestNumber = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(appLabel, migration.name) for migration in mergeMigrations],
})
newMigration = subclass("%04iMerge" % (biggestNumber + 1), appLabel)
writer = MigrationWriter(newMigration)
with open(writer.path, "wb") as fh:
fh.write(writer.asString())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
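
# Hedged illustration (editor's addition, not part of the original command):
# handleMerge() above names the merge file one past the highest existing migration
# number, e.g. two conflicting 0003_* migrations produce "0004Merge".
def _mergeMigrationName(numbers):
    """Reproduce the numbering rule used in handleMerge for a list of parsed
    migration numbers (None entries, i.e. unnumbered migrations, are ignored)."""
    try:
        biggestNumber = max(x for x in numbers if x is not None)
    except ValueError:
        biggestNumber = 1
    return "%04iMerge" % (biggestNumber + 1)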
|
grapemix/theory
|
theory/apps/command/makeMigration.py
|
makeMigration.py
|
py
| 10,198
|
python
|
en
|
code
| 1
|
github-code
|
6
|
39680498179
|
""" General functions for data_tables and data_table_manager
We are using a class here just to make it easier to pass around
"""
import logging
import pprint
import subprocess
from pathlib import Path
import re
from typing import Union
import matplotlib.pyplot as mpl
import numpy as np
import pandas as pd
from pylibrary.plotting import plothelpers as PH
from pylibrary.tools import cprint
from pyqtgraph.Qt import QtGui
import ephys.datareaders as DR
from ephys.ephys_analysis import spike_analysis
from ephys.tools import utilities
import ephys
UTIL = utilities.Utility()
CP = cprint.cprint
class CustomFormatter(logging.Formatter):
grey = "\x1b[38;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
white = "\x1b[37m"
reset = "\x1b[0m"
lineformat = "%(asctime)s - %(levelname)s - (%(filename)s:%(lineno)d) %(message)s "
FORMATS = {
logging.DEBUG: grey + lineformat + reset,
logging.INFO: white + lineformat + reset,
logging.WARNING: yellow + lineformat + reset,
logging.ERROR: red + lineformat + reset,
logging.CRITICAL: bold_red + lineformat + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def get_git_hashes():
process = subprocess.Popen(["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE)
git_head_hash = process.communicate()[0].strip()
ephyspath = Path(ephys.__file__).parent
process = subprocess.Popen(
["git", "-C", str(ephyspath), "rev-parse", "HEAD"],
shell=False,
stdout=subprocess.PIPE,
)
ephys_git_hash = process.communicate()[0].strip()
return {"project": git_head_hash, "ephys": ephys_git_hash}
def create_logger(
log_name: str = "Log Name",
log_file: str = "log_file.log",
log_message: str = "Starting Logging",
):
logging.getLogger("fontTools.subset").disabled = True
Logger = logging.getLogger(log_name)
level = logging.DEBUG
Logger.setLevel(level)
# create file handler which logs even debug messages
logging_fh = logging.FileHandler(filename=log_file)
logging_fh.setLevel(level)
logging_sh = logging.StreamHandler()
logging_sh.setLevel(level)
log_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s (%(filename)s:%(lineno)d) - %(message)s "
)
logging_fh.setFormatter(log_formatter)
logging_sh.setFormatter(CustomFormatter()) # log_formatter)
Logger.addHandler(logging_fh)
Logger.addHandler(logging_sh)
Logger.info(log_message)
return Logger
Logger = create_logger(
log_name="Spike Analysis",
log_file="spike_analysis.log",
log_message="Starting Process Spike Analysis",
)
PrettyPrinter = pprint.PrettyPrinter
datacols = [
"holding",
"RMP",
"RMP_SD",
"Rin",
"taum",
"dvdt_rising",
"dvdt_falling",
"current",
"AP_thr_V",
"AP_HW",
"AP15Rate",
"AdaptRatio",
"AHP_trough_V",
"AHP_depth_V",
"tauh",
"Gh",
"FiringRate",
]
iv_keys: list = [
"holding",
"WCComp",
"CCComp",
"BridgeAdjust",
"RMP",
"RMP_SD",
"RMPs",
"Irmp",
"taum",
"taupars",
"taufunc",
"Rin",
"Rin_peak",
"tauh_tau",
"tauh_bovera",
"tauh_Gh",
"tauh_vss",
]
spike_keys: list = [
"FI_Growth",
"AdaptRatio",
"FI_Curve",
"FiringRate",
"AP1_Latency",
"AP1_HalfWidth",
"AP1_HalfWidth_interpolated",
"AP2_Latency",
"AP2_HalfWidth",
"AP2_HalfWidth_interpolated",
"FiringRate_1p5T",
"AHP_Depth",
"AHP_Trough",
"spikes",
"iHold",
"pulseDuration",
"baseline_spikes",
"poststimulus_spikes",
]
# map spike measurements to top level keys
mapper: dict = {
"AP1_HalfWidth": "halfwidth",
"AP1_HalfWidth_interpolated": "halfwidth_interpolated",
"AHP_trough_V": "trough_V",
"AHP_Trough": "trough_T",
"AHP_depth_V": "trough_V",
"AP1_Latency": "AP_latency",
"AP_thr_V": "AP_begin_V",
"AP_HW": "halfwidth",
"dvdt_rising": "dvdt_rising",
"dvdt_falling": "dvdt_falling",
}
# map summary/not individual spike data to top level keys
mapper1: dict = {
"AP15Rate": "FiringRate_1p5T",
"AdaptRatio": "AdaptRatio",
}
iv_mapper: dict = {
"tauh": "tauh_tau",
"Gh": "tauh_Gh",
"taum": "taum",
"Rin": "Rin",
"RMP": "RMP",
}
def print_spike_keys(row):
if pd.isnull(row.IV):
return row
# print(row.IV)
return row
class Functions:
def __init__(self):
self.textbox = None
pass
def get_row_selection(self, table_manager):
"""
Find the selected rows in the currently managed table, and if there is a valid selection,
return the index to the first row and the data from that row
"""
self.selected_index_rows = table_manager.table.selectionModel().selectedRows()
if self.selected_index_rows is None:
return None, None
else:
index_row = self.selected_index_rows[0]
selected = table_manager.get_table_data(index_row) # table_data[index_row]
if selected is None:
return None, None
else:
return index_row, selected
def get_multiple_row_selection(self, table_manager):
"""
Find the selected rows in the currently managed table, and if there is a valid selection,
        return a list of indexes of the selected rows.
"""
self.selected_index_rows = table_manager.table.selectionModel().selectedRows()
if self.selected_index_rows is None:
return None, None
else:
return self.selected_index_rows
def get_datasummary_protocols(self, datasummary):
"""
Print a configuration file-like text of all the datasummary protocols, as categorized here.
"""
data_complete = datasummary["data_complete"].values
print("# of datasummary entries: ", len(data_complete))
protocols = []
for i, prots in enumerate(data_complete):
prots = prots.split(",")
for prot in prots:
protocols.append(prot[:-4].strip(" ")) # remove trailing "_000" etc
allprots = sorted(list(set(protocols)))
print("# of unique protocols: ", len(allprots))
# print(allprots)
# make a little table for config dict:
txt = "protocols:\n"
txt += " CCIV:"
ncciv = 0
prots_used = []
for i, prot in enumerate(allprots):
if "CCIV".casefold() in prot.casefold():
computes = "['RmTau', 'IV', 'Spikes', 'FI']"
if "posonly".casefold() in prot.casefold(): # cannot compute rmtau for posonly
computes = "['IV', 'Spikes', 'FI']"
txt += f"\n {prot:s}: {computes:s}"
prots_used.append(i)
ncciv += 1
if ncciv == 0:
txt += " None"
txt += "\n VCIV:"
nvciv = 0
for i, prot in enumerate(allprots):
if "VCIV".casefold() in prot.casefold():
computes = "['VC']"
txt += f"\n {prot:s}: {computes:s}"
nvciv += 1
prots_used.append(i)
if nvciv == 0:
txt += " None"
txt += "\n Maps:"
nmaps = 0
for i, prot in enumerate(allprots):
if "Map".casefold() in prot.casefold():
computes = "['Maps']"
txt += f"\n {prot:s}: {computes:s}"
nmaps += 1
prots_used.append(i)
if nmaps == 0:
txt += " None"
txt += "\n Minis:"
nminis = 0
for i, prot in enumerate(allprots):
cprot = prot.casefold()
if "Mini".casefold() in cprot or "VC_Spont".casefold() in cprot:
computes = "['Mini']"
txt += f"\n {prot:s}: {computes:s}"
nminis += 1
prots_used.append(i)
if nminis == 0:
txt += " None"
txt += "\n PSCs:"
npsc = 0
for i, prot in enumerate(allprots):
if "PSC".casefold() in prot.casefold():
computes = "['PSC']"
txt += f"\n {prot:s}: {computes:s}"
npsc += 1
prots_used.append(i)
if npsc == 0:
txt += " None"
txt += "\n Uncategorized:"
allprots = [prot for i, prot in enumerate(allprots) if i not in prots_used]
nother = 0
for i, prot in enumerate(allprots):
if len(prot) == 0 or prot == " ":
prot = "No Name"
computes = "None"
txt += f"\n {prot:s}: {computes:s}"
nother += 1
if nother == 0:
txt += "\n None"
print(f"\n{txt:s}\n")
# this print should be pasted into the configuration file (watch indentation)
def moving_average(self, data, window_size):
"""moving_average Compute a triangular moving average on the data over a window
        Parameters
        ----------
        data : array-like
            1-D sequence of values to smooth.
        window_size : int
            Width of the triangular (Bartlett) smoothing window, in samples.
        Returns
        -------
        numpy.ndarray
            Smoothed data; the length is reduced by window_size - 1 ('valid' convolution).
        """
window = np.bartlett(window_size)
# Normalize the window
window /= window.sum()
        return np.convolve(data, window, "valid")  # the window already sums to 1, so no further scaling is needed
def get_slope(self, y, x, index, window_size):
"""get_slope get slope of a smoothed curve at a given index
        Parameters
        ----------
        y : array-like
            Dependent values of the curve.
        x : array-like
            Independent values of the curve (same length as y).
        index : int
            Index (into the unsmoothed data) at which to evaluate the slope.
        window_size : int
            Width of the smoothing window passed to moving_average.
        Returns
        -------
        float or None
            Central-difference slope dy/dx of the smoothed curve, or None if the index
            falls too close to either end of the smoothed data.
        """
# Smooth the data
y_smooth = self.moving_average(y, window_size)
x_smooth = self.moving_average(x, window_size)
# Adjust the index for the reduced size of the smoothed data
index -= window_size // 2
if index < 1 or index >= len(y_smooth) - 1:
# Can't calculate slope at the start or end
return None
else:
dy = y_smooth[index + 1] - y_smooth[index - 1]
dx = x_smooth[index + 1] - x_smooth[index - 1]
return dy / dx
def draw_orthogonal_line(self, x, y, index, slope, length, color, ax):
# Calculate the slope of the orthogonal line
orthogonal_slope = -1.0 / slope
# Calculate the start and end points of the orthogonal line
x_start = x[index] - length / 2
x_end = x[index] + length / 2
y_start = y[index] + orthogonal_slope * (x_start - x[index])
y_end = y[index] + orthogonal_slope * (x_end - x[index])
# Plot the orthogonal line
ax.plot([x_start, x_end], [y_start, y_end], color=color)
def get_selected_cell_data_spikes(self, experiment, table_manager, assembleddata):
self.get_row_selection(table_manager)
if self.selected_index_rows is not None:
for nplots, index_row in enumerate(self.selected_index_rows):
selected = table_manager.get_table_data(index_row)
day = selected.date[:-4]
slicecell = selected.cell_id[-4:]
cell_df, cell_df_tmp = self.get_cell(experiment, assembleddata, cell=selected.cell_id)
protocols = list(cell_df["Spikes"].keys())
min_index = None
min_current = 1
V = None
min_protocol = None
spike = None
for ip, protocol in enumerate(protocols):
min_current_index, current, trace = self.find_lowest_current_trace(
cell_df["Spikes"][protocol]
)
if current < min_current:
I = current
V = trace
min_index = min_current_index
min_protocol = ip
min_current = current
spike = cell_df["Spikes"][protocol]
pp = PrettyPrinter(indent=4)
print("spike keys: ", spike["spikes"].keys())
print(
"min I : ",
I,
"min V: ",
V,
"min index: ",
min_index,
"min_current: ",
min_current,
)
pp.pprint(spike["spikes"][V][min_index])
low_spike = spike["spikes"][V][min_index]
if nplots == 0:
import matplotlib.pyplot as mpl
f, ax = mpl.subplots(1, 2, figsize=(10, 5))
vtime = (low_spike.Vtime - low_spike.peak_T) * 1e3
ax[0].plot(vtime, low_spike.V * 1e3)
ax[1].plot(low_spike.V * 1e3, low_spike.dvdt)
dvdt_ticks = np.arange(-4, 2.01, 0.1)
t_indices = np.array([np.abs(vtime - point).argmin() for point in dvdt_ticks])
thr_index = np.abs(vtime - (low_spike.AP_latency - low_spike.peak_T) * 1e3).argmin()
# Create a colormap
cmap = mpl.get_cmap("tab10")
# Create an array of colors based on the index of each point
colors = cmap(np.linspace(0, 1, len(t_indices)))
# for i in range(len(t_indices)):
# local_slope = self.get_slope(
# low_spike.V * 1e3, low_spike.dvdt, t_indices[i], 7,
# )
# if local_slope is not None:
# self.draw_orthogonal_line(
# low_spike.V * 1e3,
# low_spike.dvdt,
# index=t_indices[i],
# slope=local_slope,
# length=5.0,
# color=colors[i],
# ax=ax[1],
# )
# ax[1].scatter(
# low_spike.V[t_indices[i]] * 1e3,
# low_spike.dvdt[t_indices[i]],
# s=12,
# marker='|',
# color=colors[i],
# zorder = 10
# )
# Plot each point with a different color
# ax[1].scatter(
# low_spike.V[t_indices] * 1e3,
# low_spike.dvdt[t_indices],
# s=12,
# marker='|',
# color=colors,
# zorder = 10
# )
ax[1].scatter(
low_spike.V[thr_index] * 1e3,
low_spike.dvdt[thr_index],
s=12,
marker="o",
color="r",
zorder=12,
)
latency = (low_spike.AP_latency - low_spike.peak_T) * 1e3 # in msec
ax[0].plot(
latency,
low_spike.AP_begin_V * 1e3,
"ro",
markersize=2.5,
zorder=10,
)
ax[0].plot(
[
(low_spike.left_halfwidth_T - low_spike.peak_T - 0.0001) * 1e3,
(low_spike.right_halfwidth_T - low_spike.peak_T + 0.0001) * 1e3,
],
[ # in msec
low_spike.halfwidth_V * 1e3,
low_spike.halfwidth_V * 1e3,
],
"g-",
zorder=10,
)
# ax[0].plot(
# (low_spike.right_halfwidth_T - low_spike.peak_T)
# * 1e3, # in msec
# low_spike.halfwidth_V * 1e3,
# "co",
# )
if nplots == 0: # annotate
ax[0].set_xlabel("Time (msec), re Peak")
ax[0].set_ylabel("V (mV)")
ax[1].set_xlabel("V (mV)")
ax[1].set_ylabel("dV/dt (mV/ms)")
PH.nice_plot(ax[0])
PH.nice_plot(ax[1])
PH.talbotTicks(ax[0])
PH.talbotTicks(ax[1])
nplots += 1
if nplots > 0:
mpl.show()
return cell_df
else:
return None
def get_selected_cell_data_FI(self, experiment, table_manager, assembleddata):
self.get_row_selection(table_manager)
pp = PrettyPrinter(indent=4, width=120)
if self.selected_index_rows is not None:
for nplots, index_row in enumerate(self.selected_index_rows):
selected = table_manager.get_table_data(index_row)
day = selected.date[:-4]
slicecell = selected.cell_id[-4:]
# cell_df, _ = self.get_cell(
# experiment, assembleddata, cell=selected.cell_id
# )
fig, ax = mpl.subplots(1, 1)
self.compute_FI_Fits(
experiment, assembleddata, selected.cell_id, plot_fits=True, ax=ax
)
if nplots > 0:
mpl.show()
return self.selected_index_rows
else:
return None
def average_FI(self, FI_Data_I_, FI_Data_FR_, max_current: float = 1.0e-9):
if len(FI_Data_I_) > 0:
try:
FI_Data_I, FI_Data_FR = zip(*sorted(zip(FI_Data_I_, FI_Data_FR_)))
except:
raise ValueError("couldn't zip the data sets: ")
if len(FI_Data_I) > 0: # has data...
print("averaging FI data")
FI_Data_I_ = np.array(FI_Data_I)
FI_Data_FR_ = np.array(FI_Data_FR)
f1_index = np.where((FI_Data_I_ >= 0.0) & (FI_Data_I_ <= max_current))[
0
] # limit to 1 nA, regardless
FI_Data_I, FI_Data_FR, FI_Data_FR_Std, FI_Data_N = self.avg_group(
FI_Data_I_[f1_index], FI_Data_FR_[f1_index], ndim=FI_Data_I_.shape
)
return FI_Data_I, FI_Data_FR, FI_Data_FR_Std, FI_Data_N
def avg_group(self, x, y, ndim=2):
if ndim == 2:
x = np.array([a for b in x for a in b])
y = np.array([a for b in y for a in b])
else:
x = np.array(x)
y = np.array(y)
# x = np.ravel(x) # np.array(x)
# y = np.array(y)
xa, ind, counts = np.unique(
x, return_index=True, return_counts=True
) # find unique values in x
ya = y[ind]
ystd = np.zeros_like(ya)
yn = np.ones_like(ya)
for dupe in xa[counts > 1]: # for each duplicate value, replace with mean
# print("dupe: ", dupe)
# print(np.where(x==dupe), np.where(xa==dupe))
ya[np.where(xa == dupe)] = np.nanmean(y[np.where(x == dupe)])
ystd[np.where(xa == dupe)] = np.nanstd(y[np.where(x == dupe)])
yn[np.where(xa == dupe)] = np.count_nonzero(~np.isnan(y[np.where(x == dupe)]))
return xa, ya, ystd, yn
# get maximum slope from fit.
def hill_deriv(self, x: float, y0: float, ymax: float, m: float, n: float):
"""hill_deriv
        Analytical derivative (computed with SageMath) of the Hill growth term
        ymax / (1 + (m / x)**n) with respect to x; the offset y0 does not
        contribute to the slope.
Parameters
----------
x : float
current
y0 : float
baseline
ymax : float
maximum y value
m : float
growth rate
n : float
growth power
"""
hd = m * n * ymax
hd *= np.power(m / x, n - 1)
hd /= (x * x) * np.power((np.power(m / x, n) + 1.0), 2.0)
return hd
def fit_FI_Hill(
self,
FI_Data_I,
FI_Data_FR,
FI_Data_FR_Std,
FI_Data_I_,
FI_Data_FR_,
FI_Data_N,
hill_max_derivs,
hill_i_max_derivs,
FI_fits,
linfits,
cell: str,
celltype: str,
plot_fits=False,
ax: Union[mpl.Axes, None] = None,
):
plot_raw = False # only to plot the unaveraged points.
spanalyzer = spike_analysis.SpikeAnalysis()
spanalyzer.fitOne(
i_inj=FI_Data_I,
spike_count=FI_Data_FR,
pulse_duration=None, # protodurs[ivname],
info="",
function="Hill",
fixNonMonotonic=True,
excludeNonMonotonic=False,
max_current=None,
)
try:
fitpars = spanalyzer.analysis_summary["FI_Growth"][0]["parameters"][0]
except:
CP(
"r",
f"fitpars has no solution? : {cell!s}, {celltype:s}, {spanalyzer.analysis_summary['FI_Growth']!s}",
)
return (
hill_max_derivs,
hill_i_max_derivs,
FI_fits,
linfits,
) # no fit, return without appending a new fit
# raise ValueError("couldn't get fitpars: no solution?")
y0 = fitpars[0]
ymax = fitpars[1]
m = fitpars[2]
n = fitpars[3]
xyfit = spanalyzer.analysis_summary["FI_Growth"][0]["fit"]
i_range = np.linspace(1e-12, np.max(xyfit[0]), 1000)
# print(f"fitpars: y0={y0:.3f}, ymax={ymax:.3f}, m={m*1e9:.3f}, n={n:.3f}")
deriv_hill = [self.hill_deriv(x=x, y0=y0, ymax=ymax, m=m, n=n) for x in i_range]
deriv_hill = np.array(deriv_hill) * 1e-9 # convert to sp/nA
max_deriv = np.max(deriv_hill)
arg_max_deriv = np.argmax(deriv_hill)
i_max_deriv = i_range[arg_max_deriv] * (1e12)
hill_max_derivs.append(max_deriv)
hill_i_max_derivs.append(i_max_deriv)
# print(f"max deriv: {max_deriv:.3f} sp/nA at {i_max_deriv:.1f} pA")
# print(xyfit[1])
if len(spanalyzer.analysis_summary["FI_Growth"]) > 0:
FI_fits["fits"].append(spanalyzer.analysis_summary["FI_Growth"][0]["fit"])
FI_fits["pars"].append(spanalyzer.analysis_summary["FI_Growth"][0]["parameters"])
linfit = spanalyzer.getFISlope(
i_inj=FI_Data_I,
spike_count=FI_Data_FR,
pulse_duration=None, # FR is already duration
min_current=0e-12,
max_current=300e-12,
)
linfits.append(linfit)
linx = np.arange(0, 300e-12, 10e-12)
liny = linfit.slope * linx + linfit.intercept
if plot_fits:
if ax is None:
fig, ax = mpl.subplots(1, 1)
fig.suptitle(f"{celltype:s} {cell:s}")
line_FI = ax.errorbar(
np.array(FI_Data_I) * 1e9,
FI_Data_FR,
yerr=FI_Data_FR_Std,
marker="o",
color="k",
linestyle=None,
)
# ax[1].plot(FI_Data_I * 1e12, FI_Data_N, marker="s")
if plot_raw:
for i, d in enumerate(FI_Data_I_): # plot the raw points before combining
ax.plot(np.array(FI_Data_I_[i]) * 1e9, FI_Data_FR_[i], "x", color="k")
# print("fit x * 1e9: ", spanalyzer.analysis_summary['FI_Growth'][0]['fit'][0]*1e9)
# print("fit y * 1: ", spanalyzer.analysis_summary['FI_Growth'][0]['fit'][1])
# ax[0].plot(linx * 1e12, liny, color="c", linestyle="dashdot")
celln = Path(cell).name
if len(spanalyzer.analysis_summary["FI_Growth"]) >= 0:
line_fit = ax.plot(
spanalyzer.analysis_summary["FI_Growth"][0]["fit"][0][0] * 1e9,
spanalyzer.analysis_summary["FI_Growth"][0]["fit"][1][0],
color="r",
linestyle="-",
zorder=100,
)
# derivative (in blue)
line_deriv = ax.plot(
i_range * 1e9, deriv_hill, color="b", linestyle="--", zorder=100
)
d_max = np.argmax(deriv_hill)
ax2 = ax.twinx()
ax2.set_ylim(0, 500)
ax2.set_ylabel("Firing Rate Slope (sp/s/nA)")
line_drop = ax2.plot(
[i_range[d_max] * 1e9, i_range[d_max] * 1e9],
[0, 1.1 * deriv_hill[d_max]],
color="b",
zorder=100,
)
ax.set_xlabel("Current (nA)")
ax.set_ylabel("Firing Rate (sp/s)")
# turn off top box
for loc, spine in ax.spines.items():
if loc in ["left", "bottom"]:
spine.set_visible(True)
elif loc in ["right", "top"]:
spine.set_visible(False)
for loc, spine in ax2.spines.items():
if loc in ["right", "bottom"]:
spine.set_visible(True)
elif loc in ["left", "top"]:
spine.set_visible(False)
# spine.set_color('none')
# do not draw the spine
# spine.set_color('none')
# do not draw the spine
PH.talbotTicks(ax, density=[2.0, 2.0])
PH.talbotTicks(ax2, density=[2.0, 2.0])
ax.legend(
[line_FI, line_fit[0], line_deriv[0], line_drop[0]],
["Firing Rate", "Hill Fit", "Derivative", "Max Derivative"],
loc="best",
frameon=False,
)
mpl.show()
return hill_max_derivs, hill_i_max_derivs, FI_fits, linfits
def check_excluded_dataset(self, day_slice_cell, experiment, protocol):
exclude_flag = day_slice_cell in experiment["excludeIVs"]
print(" IV is in exclusion table: ", exclude_flag)
if exclude_flag:
exclude_table = experiment["excludeIVs"][day_slice_cell]
print(" excluded table data: ", exclude_table)
print(" testing protocol: ", protocol)
proto = Path(protocol).name # passed protocol has day/slice/cell/protocol
if proto in exclude_table["protocols"] or exclude_table["protocols"] == ["all"]:
CP(
"y",
f"Excluded cell/protocol: {day_slice_cell:s}, {proto:s} because: {exclude_table['reason']:s}",
)
Logger.info(
f"Excluded cell: {day_slice_cell:s}, {proto:s} because: {exclude_table['reason']:s}"
)
return True
print(" Protocol passed: ", protocol)
return False
def compute_FI_Fits(
self,
experiment,
df: pd.DataFrame,
cell: str,
protodurs: list = [1.0],
plot_fits: bool = False,
ax: Union[mpl.Axes, None] = None,
):
CP("g", f"\n{'='*80:s}\nCell: {cell!s}, {df[df.cell_id==cell].cell_type.values[0]:s}")
df_cell, df_tmp = self.get_cell(experiment, df, cell)
if df_cell is None:
return None
print(" df_tmp group>>: ", df_tmp.Group.values)
print(" df_cell group>>: ", df_cell.keys())
protocols = list(df_cell.Spikes.keys())
spike_keys = list(df_cell.Spikes[protocols[0]].keys())
iv_keys = list(df_cell.IV[protocols[0]].keys())
srs = {}
dur = {}
important = {}
# for each CCIV type of protocol that was run:
for nprot, protocol in enumerate(protocols):
if protocol.endswith("0000"): # bad protocol name
continue
day_slice_cell = str(Path(df_cell.date, df_cell.slice_slice, df_cell.cell_cell))
CP("m", f"day_slice_cell: {day_slice_cell:s}, protocol: {protocol:s}")
if self.check_excluded_dataset(day_slice_cell, experiment, protocol):
continue
fullpath = Path(experiment["rawdatapath"], experiment["directory"], protocol)
with DR.acq4_reader.acq4_reader(fullpath, "MultiClamp1.ma") as AR:
try:
AR.getData(fullpath)
sample_rate = AR.sample_rate[0]
duration = AR.tend - AR.tstart
srs[protocol] = sample_rate
dur[protocol] = duration
important[protocol] = AR.checkProtocolImportant(fullpath)
CP("g", f" Protocol {protocol:s} has sample rate of {sample_rate:e}")
except ValueError:
CP("r", f"Acq4Read failed to read data file: {str(fullpath):s}")
raise ValueError(f"Acq4Read failed to read data file: {str(fullpath):s}")
protocols = list(srs.keys()) # only count valid protocols
CP("c", f"Valid Protocols: {protocols!s}")
if len(protocols) > 1:
protname = "combined"
elif len(protocols) == 1:
protname = protocols[0]
else:
return None
# parse group correctly.
# the first point in the Group column is likely a nan.
# if it is, then use the next point.
print("Group: ", df_tmp.Group, "protoname: ", protname)
group = df_tmp.Group.values[0]
datadict = {
"ID": str(df_tmp.cell_id.values[0]),
"Subject": str(df_tmp.cell_id.values[0]),
"cell_id": cell,
"Group": group,
"Date": str(df_tmp.Date.values[0]),
"age": str(df_tmp.age.values[0]),
"weight": str(df_tmp.weight.values[0]),
"sex": str(df_tmp.sex.values[0]),
"cell_type": df_tmp.cell_type.values[0],
"protocol": protname,
"important": important,
"protocols": list(df_cell.IV),
"sample_rate": srs,
"duration": dur,
}
# get the measures for the fixed values from the measure list
for measure in datacols:
datadict = self.get_measure(df_cell, measure, datadict, protocols, threshold_slope=experiment["AP_threshold_dvdt"])
# now combine the FI data across protocols for this cell
        FI_Data_I1_: list = []
        FI_Data_FR1_: list = []  # firing rate
        FI_Data_I4_: list = []
        FI_Data_FR4_: list = []  # firing rate
FI_fits:dict = {"fits": [], "pars": [], "names": []}
linfits:list = []
hill_max_derivs:list = []
hill_i_max_derivs:list = []
protofails = 0
for protocol in protocols:
if protocol.endswith("0000"): # bad protocol name
continue
# check if duration is acceptable:
if protodurs is not None:
durflag = False
for d in protodurs:
if not np.isclose(dur[protocol], d):
durflag = True
if durflag:
CP("y", f" >>>> Protocol {protocol:s} has duration of {dur[protocol]:e}")
CP("y", f" This is not in accepted limits of: {protodurs!s}")
continue
else:
CP("g", f" >>>> Protocol {protocol:s} has acceptable duration of {dur[protocol]:e}")
# print("protocol: ", protocol, "spikes: ", df_cell.Spikes[protocol]['spikes'])
if len(df_cell.Spikes[protocol]["spikes"]) == 0:
CP("y", f" >>>> Skipping protocol with no spikes: {protocol:s}")
continue
else:
CP("g", f" >>>> Analyzing FI for protocol: {protocol:s}")
try:
fidata = df_cell.Spikes[protocol]["FI_Curve"]
except KeyError:
print("FI curve not found for protocol: ", protocol, "for cell: ", cell)
# print(df_cell.Spikes[protocol])
protofails += 1
if protofails > 4:
raise ValueError(
"FI curve data not found for protocol: ",
protocol,
"for cell: ",
cell,
)
else:
continue
if np.max(fidata[0]) > 1.01e-9: # accumulate high-current protocols
FI_Data_I4_.extend(fidata[0])
FI_Data_FR4_.extend(fidata[1] / dur[protocol])
else: # accumulate other protocols <= 1 nA
FI_Data_I1_.extend(fidata[0])
FI_Data_FR1_.extend(fidata[1] / dur[protocol])
FI_Data_I1 = []
FI_Data_FR1 = []
FI_Data_I4 = []
FI_Data_FR4 = []
if len(FI_Data_I1_) > 0:
FI_Data_I1, FI_Data_FR1, FI_Data_FR1_Std, FI_Data_N1 = self.average_FI(
FI_Data_I1_, FI_Data_FR1_, 1e-9
)
if len(FI_Data_I4_) > 0:
FI_Data_I4, FI_Data_FR4, FI_Data_FR4_Std, FI_Data_N1 = self.average_FI(
FI_Data_I4_, FI_Data_FR4_, 4e-9
)
if len(FI_Data_I1) > 0:
# do a curve fit on the first 1 nA of the protocol
hill_max_derivs, hill_i_max_derivs, FI_fits, linfits = self.fit_FI_Hill(
FI_Data_I=FI_Data_I1,
FI_Data_FR=FI_Data_FR1,
FI_Data_I_=FI_Data_I1_,
FI_Data_FR_=FI_Data_FR1_,
FI_Data_FR_Std=FI_Data_FR1_Std,
FI_Data_N=FI_Data_N1,
hill_max_derivs=hill_max_derivs,
hill_i_max_derivs=hill_i_max_derivs,
FI_fits=FI_fits,
linfits=linfits,
cell=cell,
celltype=df_tmp.cell_type.values[0],
plot_fits=plot_fits,
ax=ax,
)
# save the results
datadict["FI_Curve"] = [FI_Data_I1, FI_Data_FR1]
datadict["FI_Curve4"] = [FI_Data_I4, FI_Data_FR4]
datadict["current"] = FI_Data_I1
datadict["spsec"] = FI_Data_FR1
# datadict["Subject"] = df_tmp.cell_id.values[0]
# datadict["Group"] = df_tmp.Group.values[0]
# datadict["sex"] = df_tmp.sex.values[0]
# datadict["celltype"] = df_tmp.cell_type.values[0]
datadict["pars"] = [FI_fits["pars"]]
datadict["names"] = []
datadict["fit"] = [FI_fits["fits"]]
datadict["F1amp"] = np.nan
datadict["F2amp"] = np.nan
datadict["Irate"] = np.nan
datadict["maxHillSlope"] = np.nan
datadict["maxHillSlope_SD"] = np.nan
datadict["I_maxHillSlope"] = np.nan
datadict["I_maxHillSlope_SD"] = np.nan
if len(linfits) > 0:
datadict["FISlope"] = np.mean([s.slope for s in linfits])
else:
datadict["FISlope"] = np.nan
if len(hill_max_derivs) > 0:
datadict["maxHillSlope"] = np.mean(hill_max_derivs)
datadict["maxHillSlope_SD"] = np.std(hill_max_derivs)
datadict["I_maxHillSlope"] = np.mean(hill_i_max_derivs)
datadict["I_maxHillSlope_SD"] = np.std(hill_i_max_derivs)
if len(FI_Data_I1) > 0:
i_one = np.where(FI_Data_I1 <= 1.01e-9)[0]
datadict["FIMax_1"] = np.nanmax(FI_Data_FR1[i_one])
if len(FI_Data_I4) > 0:
i_four = np.where(FI_Data_I4 <= 4.01e-9)[0]
datadict["FIMax_4"] = np.nanmax(FI_Data_FR4[i_four])
return datadict
def get_cell(self, experiment, df: pd.DataFrame, cell: str):
df_tmp = df[df.cell_id == cell] # df.copy() # .dropna(subset=["Date"])
print("\nGet_cell:: df_tmp head: \n", "Groups: ", df_tmp["Group"].unique(), "\n len df_tmp: ", len(df_tmp))
if len(df_tmp) == 0:
return None, None
try:
celltype = df_tmp.cell_type.values[0]
except ValueError:
celltype = df_tmp.cell_type
celltype = str(celltype).replace("\n", "")
if celltype == " ": # no cell type
celltype = "unknown"
CP("m", f"get cell: df_tmp cell type: {celltype:s}")
# look for original PKL file for cell in the dataset
# if it exists, use it to get the FI curve
# base_cellname = str(Path(cell)).split("_")
# print("base_cellname: ", base_cellname)
# sn = int(base_cellname[-1][1])
# cn = int(base_cellname[-1][3])
# different way from cell_id:
# The cell name may be a path, or just the cell name.
# we have to handle both cases.
parent = Path(cell).parent
if parent == ".": # just cell, not path
cell_parts = str(cell).split("_")
re_parse = re.compile("([Ss]{1})(\d{1,3})([Cc]{1})(\d{1,3})")
cnp = re_parse.match(cell_parts[-1]).group(2)
cn = int(cnp)
snp = re_parse.match(cell_parts[-1]).group(4)
sn = int(snp)
cell_day_name = cell_parts[-3].split("_")[0]
dir_path = None
else:
cell = Path(cell).name # just get the name here
cell_parts = cell.split("_")
re_parse = re.compile("([Ss]{1})(\d{1,3})([Cc]{1})(\d{1,3})")
# print("cell_parts: ", cell_parts[-1])
snp = re_parse.match(cell_parts[-1]).group(2)
sn = int(snp)
cnp = re_parse.match(cell_parts[-1]).group(4)
cn = int(cnp)
cell_day_name = cell_parts[0]
dir_path = parent
# print("Cell name, slice, cell: ", cell_parts, sn, cn)
# if cell_parts != ['2019.02.22', '000', 'S0C0']:
# return None, None
cname2 = f"{cell_day_name.replace('.', '_'):s}_S{snp:s}C{cnp:s}_{celltype:s}_IVs.pkl"
datapath2 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname2)
# cname2 = f"{cell_day_name.replace('.', '_'):s}_S{sn:02d}C{cn:02d}_{celltype:s}_IVs.pkl"
# datapath2 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname2)
# cname1 = f"{cell_day_name.replace('.', '_'):s}_S{sn:01d}C{cn:01d}_{celltype:s}_IVs.pkl"
# datapath1 = Path(experiment["analyzeddatapath"], experiment["directory"], celltype, cname1)
# print(datapath)
# if datapath1.is_file():
# CP("c", f"... {datapath1!s} is OK")
# datapath = datapath1
if datapath2.is_file():
CP("c", f"... {datapath2!s} is OK")
datapath = datapath2
else:
CP("r", f"no file: matching: {datapath2!s}, \n") # or: {datapath2!s}\n")
print("cell type: ", celltype)
raise ValueError
return None, None
try:
df_cell = pd.read_pickle(datapath, compression="gzip")
except ValueError:
try:
df_cell = pd.read_pickle(datapath) # try with no compression
except ValueError:
CP("r", f"Could not read {datapath!s}")
raise ValueError("Failed to read compressed pickle file")
if "Spikes" not in df_cell.keys() or df_cell.Spikes is None:
CP(
"r",
f"df_cell: {df_cell.age!s}, {df_cell.cell_type!s}, No spike protos:",
)
return None, None
# print(
# "df_cell: ",
# df_cell.age,
# df_cell.cell_type,
# "N spike protos: ",
# len(df_cell.Spikes),
# "\n",
# df_tmp['Group'],
# )
return df_cell, df_tmp
def get_lowest_current_spike(self, row, SP):
measured_first_spike = False
dvdts = []
for tr in SP.spikeShapes: # for each trace
if len(SP.spikeShapes[tr]) > 1: # if there is a spike
spk = SP.spikeShapes[tr][0] # get the first spike in the trace
dvdts.append(spk) # accumulate first spike info
if len(dvdts) > 0:
currents = []
for d in dvdts: # for each first spike, make a list of the currents
currents.append(d.current)
min_current = np.argmin(currents) # find spike elicited by the minimum current
row.dvdt_rising = dvdts[min_current].dvdt_rising
row.dvdt_falling = dvdts[min_current].dvdt_falling
row.dvdt_current = currents[min_current] * 1e12 # put in pA
row.AP_thr_V = 1e3 * dvdts[min_current].AP_begin_V
if dvdts[min_current].halfwidth_interpolated is not None:
row.AP_HW = dvdts[min_current].halfwidth_interpolated * 1e3
row.AP_begin_V = 1e3 * dvdts[min_current].AP_begin_V
CP(
"y",
f"I={currents[min_current]*1e12:6.1f} pA, dvdtRise={row.dvdt_rising:6.1f}, dvdtFall={row.dvdt_falling:6.1f}, APthr={row.AP_thr_V:6.1f} mV, HW={row.AP_HW*1e3:6.1f} usec",
)
return row
def find_lowest_current_trace(self, spikes):
current = []
trace = []
for sweep in spikes["spikes"]:
for spike in spikes["spikes"][sweep]:
this_spike = spikes["spikes"][sweep][spike]
current.append(this_spike.current)
trace.append(this_spike.trace)
break # only get the first one
# now find the index of the lowest current
if len(current) == 0:
return np.nan, np.nan, np.nan
min_current_index = np.argmin(current)
# print("current: ", current, "traces: ", trace)
# print(current[min_current_index], trace[min_current_index])
return min_current_index, current[min_current_index], trace[min_current_index]
def convert_FI_array(self, FI_values):
"""convert_FI_array Take a potential string representing the FI_data,
and convert it to a numpy array
Parameters
----------
FI_values : str or numpy array
data to be converted
Returns
-------
numpy array
converted data from FI_values
"""
if isinstance(FI_values, str):
fistring = FI_values.replace("[", "").replace("]", "").replace("\n", "")
fistring = fistring.split(" ")
FI_data = np.array([float(s) for s in fistring if len(s) > 0])
FI_data = FI_data.reshape(2, int(FI_data.shape[0] / 2))
else:
FI_data = FI_values
FI_data = np.array(FI_data)
return FI_data
    def get_measure(self, df_cell, measure, datadict, protocols, threshold_slope: float = 20.0):
        """get_measure : for the given cell, get the named measure from its protocols
        Parameters
        ----------
        df_cell : pandas.Series
            Analyzed cell record holding the per-protocol IV and Spikes dictionaries.
        measure : str
            Name of the measure to extract (one of the entries in datacols).
        datadict : dict
            Summary dictionary being assembled for the cell; updated in place.
        protocols : list
            Protocol names to pull the measure from.
        threshold_slope : float, optional
            dV/dt threshold (from the configuration file) used when re-finding the AP threshold.
        Returns
        -------
        dict
            datadict with datadict[measure] set to the across-protocol mean (NaN if no valid data).
        """
m = []
if measure in iv_keys:
for protocol in protocols:
if measure in df_cell.IV[protocol].keys():
m.append(df_cell.IV[protocol][measure])
elif measure in iv_mapper.keys() and iv_mapper[measure] in iv_keys:
for protocol in protocols:
if iv_mapper[measure] in df_cell.IV[protocol].keys():
m.append(df_cell.IV[protocol][iv_mapper[measure]])
elif measure in spike_keys:
maxadapt = 0
for protocol in protocols:
# print("p: ", p)
if measure == "AdaptRatio":
if df_cell.Spikes[protocol][mapper1[measure]] > 8.0:
continue
# print("\nprot, measure: ", protocol, measure, df_cell.Spikes[protocol][mapper1[measure]])
# print(df_cell.Spikes[protocol].keys())
# maxadapt = np.max([maxadapt, df_cell.Spikes[protocol][mapper1['AdaptRatio']]])
if measure in df_cell.Spikes[protocol].keys():
m.append(df_cell.Spikes[protocol][measure])
# if maxadapt > 8:
# exit()
elif measure in mapper1.keys() and mapper1[measure] in spike_keys:
for protocol in protocols:
if mapper1[measure] in df_cell.Spikes[protocol].keys():
m.append(df_cell.Spikes[protocol][mapper1[measure]])
elif measure == "current":
for protocol in protocols: # for all protocols with spike analysis data for this cell
if "spikes" not in df_cell.Spikes[protocol].keys():
continue
# we need to get the first spike evoked by the lowest current level ...
min_current_index, current, trace = self.find_lowest_current_trace(
df_cell.Spikes[protocol]
)
if not np.isnan(min_current_index):
m.append(current)
else:
m.append(np.nan)
else:
for protocol in protocols: # for all protocols with spike analysis data for this cell
# we need to get the first spike evoked by the lowest current level ...
prot_spike_count = 0
if "spikes" not in df_cell.Spikes[protocol].keys():
continue
spike_data = df_cell.Spikes[protocol]["spikes"]
if measure in [
"dvdt_rising",
"dvdt_falling",
"AP_HW",
"AHP_trough_V",
"AHP_depth_V",
]: # use lowest current spike
min_current_index, current, trace = self.find_lowest_current_trace(
df_cell.Spikes[protocol]
)
if not np.isnan(min_current_index):
spike_data = df_cell.Spikes[protocol]["spikes"][trace][0].__dict__
# print("spike data ", spike_data['dvdt_rising'])
m.append(spike_data[mapper[measure]])
else:
m.append(np.nan)
# print("spike data: ", spike_data.keys())
elif measure == "AP_thr_V": # have to try two variants. Note that threshold slope is defined in config file
min_current_index, current, trace = self.find_lowest_current_trace(
df_cell.Spikes[protocol]
)
if not np.isnan(min_current_index):
spike_data = df_cell.Spikes[protocol]["spikes"][trace][0].__dict__
# CP("c", "Check AP_thr_V")
Vthr, Vthr_time = UTIL.find_threshold(
spike_data["V"],
np.mean(np.diff(spike_data["Vtime"])),
threshold_slope=threshold_slope,
)
m.append(Vthr)
else:
m.append(np.nan)
elif (
measure in mapper.keys() and mapper[measure] in spike_data.keys()
): # if the measure exists for this sweep
m.append(spike_data[mapper[measure]])
else:
# print(measure in mapper.keys())
# print(spike_data.keys())
CP(
"r",
f"measure not found in spike_data, either: <{measure:s}>, {mapper.keys()!s}",
)
CP(
"r",
f"\n or mapped in {mapper[measure]!s} to {spike_data.keys()!s}",
)
raise ValueError()
exit()
prot_spike_count += 1
# CP("c", f"measure: {measure!s} : {m!s}")
# else:
# print(
# f"measure {measure:s} not found in either IV or Spikes keys. Skipping"
# )
# raise ValueError(f"measure {measure:s} not found in either IV or Spikes keys. Skipping")
for i, xm in enumerate(m):
if xm is None:
m[i] = np.nan
# m = [u for u in m if u is not None else np.nan] # sanitize data
N = np.count_nonzero(~np.isnan(m))
# print("N: ", N)
if N > 0:
datadict[measure] = np.nanmean(m)
else:
datadict[measure] = np.nan
return datadict
def textbox_setup(self, textbox):
self.textbox = textbox
def textclear(self):
if self.textbox is None:
raise ValueError("datatables - functions - textbox has not been set up")
if self is None: # or self.in_Parallel:
return
else:
self.textbox.clear()
def text_get(self):
if self.textbox is None:
raise ValueError("datatables - functions - textbox has not been set up")
return self.textbox.toPlainText()
def textappend(self, text, color="white"):
if self.textbox is None:
raise ValueError("datatables - functions - textbox has not been set up")
colormap = {
"[31m": "red",
"[48:5:208:0m": "orange",
"[33m": "yellow",
"[32m": "limegreen",
"[34m": "pink",
"[35m": "magenta",
"[36m": "cyan",
"[30m": "black",
"[37m": "white",
"[0m": "white",
"[100m": "backgray",
}
if self is None:
CP(color, text) # just go straight to the terminal
else:
text = "".join(text)
text = text.split("\n")
for textl in text:
# print(f"text: <{textl:s}>")
if len(textl) > 0 and textl[0] == "\x1b":
textl = textl[1:] # clip the escape sequence
for k in colormap.keys():
if textl.startswith(k): # skip the escape sequence
textl = textl[len(k) :]
textl = textl.replace("[0m", "")
color = colormap[k]
self.textbox.setTextColor(QtGui.QColor(color))
break
textl = textl.replace("[0m", "")
self.textbox.append(textl)
self.textbox.setTextColor(QtGui.QColor("white"))
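
# --- Hedged numerical check (editor's addition, not part of the original module) ---
# hill_deriv() above is the analytical derivative of the Hill curve
# y(x) = y0 + ymax / (1 + (m / x)**n); this small self-test compares it against a
# central finite difference. The parameter values are made up for illustration.
def _hill_deriv_demo():
    fn = Functions()
    y0, ymax, m, n = 0.0, 200.0, 0.2e-9, 2.0   # ymax in sp/s, m in A
    x = 0.3e-9                                 # current at which to evaluate the slope
    hill = lambda xx: y0 + ymax / (1.0 + (m / xx) ** n)
    dx = 1e-15
    numeric = (hill(x + dx) - hill(x - dx)) / (2.0 * dx)
    analytic = fn.hill_deriv(x=x, y0=y0, ymax=ymax, m=m, n=n)
    assert abs(numeric - analytic) / abs(numeric) < 1e-4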
|
marsiwiec/ephys
|
ephys/gui/data_table_functions.py
|
data_table_functions.py
|
py
| 51,215
|
python
|
en
|
code
| null |
github-code
|
6
|
27001984731
|
import random
def structDataSampling(**kwargs):
"""
    :param num: number of random records to generate
    :param struct: mapping of field name -> {"datatype": ..., "datarange": ..., optional "len"}
    :return: list of generated records, one list of field values per record
"""
result = []
for index in range(0, kwargs['num']):
element = []
for key, value in kwargs['struct'].items():
if value['datatype'] == "int":
it = iter(value['datarange'])
tmp = random.randint(next(it), next(it))
elif value['datatype'] == "float":
it = iter(value['datarange'])
tmp = random.uniform(next(it), next(it))
elif value['datatype'] == "str":
tmp = ''.join(random.SystemRandom().choice(value['datarange']) for _ in range(value['len']))
else:
break
element.append(tmp)
result.append(element)
return result
def structDataSamplinga(**kwargs):
"""
    :param num: number of random records to generate
    :param struct: mapping of datatype name ("int", "float", "str") -> {"datarange": ..., optional "len"}
    :return: list of generated records, one list of field values per record
"""
result = list()
for index in range(0, kwargs['num']):
element = list()
for key, value in kwargs['struct'].items():
if key == "int":
it = iter(value['datarange'])
tmp = random.randint(next(it), next(it))
elif key == "float":
it = iter(value['datarange'])
tmp = random.uniform(next(it), next(it))
elif key == "str":
tmp = ''.join(random.SystemRandom().choice(value['datarange']) for _ in range(value['len']))
else:
break;
element.append(tmp)
result.append(element)
return result
def out_1():
para = {"num": 4, "struct": {"int": {"datarange": (0, 100)},
"float": {"datarange": (0, 1000)}}}
t = structDataSamplinga(**para)
print(t)
parb = {"num": 4, "struct": {"int": {"datarange": (0, 90)}, "str": {"datarange": "ABCD", "len": 2},
"float": {"datarange": (0, 100)}}}
m = structDataSamplinga(**parb)
print(m)
parc = {"num": 3, "struct": {
"field1": {"datatype": 'int', "datarange": [100, 140]},
"field2": {"datatype": 'float', "datarange": [100, 150]},
"field3": {"datatype": 'str', "datarange": "ABCDEFG", "len": 3}
}}
n = structDataSampling(**parc)
print(n)
if __name__ == '__main__':
out_1()
|
wanghan79/2023_Python
|
2021013675 刘蓝营/work1/work1.py
|
work1.py
|
py
| 2,428
|
python
|
en
|
code
| 8
|
github-code
|
6
|
1422010768
|
#Dilation and Erosion
import cv2
import matplotlib.pyplot as plt
import numpy as np
#-----------------------------------Dilation------------------------------
# Reads in a binary image
img = cv2.imread('j.png',0)
# Create a 5x5 kernel of ones
Kernel = np.ones((5,5), np.uint8)
'''
To dilate an image in OpenCV, you can use the dilate function and three inputs:
an original binary image, a kernel that determines the size of the dilation (None will
result in a default size), and a number of iterations to perform the dilation (typically = 1).
In the example below, we have a 5x5 kernel of ones, which moves over the image like a filter
and turns a pixel white if any of its surrounding pixels are white in a 5x5 window! We’ll
use a simple image of the cursive letter “j” as an example.
'''
dilation = cv2.dilate(img, Kernel, iterations = 1)
plt.imshow(dilation, cmap = 'gray')
#-----------------------------------Erosion--------------------------------
erosion = cv2.erode(img, Kernel, iterations = 1)
plt.imshow(erosion, cmap = 'gray')
#----------------------------------Opening------------------------------
'''
As mentioned, above, these operations are often combined for desired results! One such combination
is called opening, which is erosion followed by dilation. This is useful in noise reduction
in which erosion first gets rid of noise (and shrinks the object) then dilation enlarges the
object again, but the noise will have disappeared from the previous erosion!
To implement this in OpenCV, we use the function morphologyEx with our original image,
the operation we want to perform, and our kernel passed in.
'''
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, Kernel)
plt.imshow(opening, cmap = 'gray')
#----------------------------------Closing------------------------------
'''
Closing is the reverse combination of opening: dilation followed by erosion. It is useful
for closing small holes or dark spots inside the foreground objects.
'''
closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, Kernel)
plt.imshow(closing, cmap = 'gray')
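
# Editor's note (hedged addition): when this file is run as a plain script rather than in an
# interactive session, matplotlib only renders the figures once show() is called.
plt.show()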
|
haderalim/Computer-Vision
|
Types of features and Image segmentation/Dilation- Erosion- Opeining and Closing/test.py
|
test.py
|
py
| 2,245
|
python
|
en
|
code
| 1
|
github-code
|
6
|
7436815802
|
from pathlib import Path
from zoneinfo import ZoneInfo
import datetime
import sys
TIME_ZONE = ZoneInfo('US/Eastern')
def main():
station_name = sys.argv[1]
dir_path = Path(sys.argv[2])
file_paths = sorted(dir_path.glob('*.WAV'))
for file_path in file_paths:
move_file(file_path, station_name)
def move_file(file_path, station_name):
recorder_name = file_path.parent.parent.name
file_name = file_path.name
start_time = parse_file_name(file_name)
night = get_night(start_time)
night_dir_name = night.strftime('%Y-%m-%d')
start_time_string = start_time.strftime('%Y-%m-%d_%H.%M.%S_Z')
new_file_name = f'{station_name}_{recorder_name}_{start_time_string}.wav'
night_dir_path = file_path.parent / night_dir_name
night_dir_path.mkdir(mode=0o755, parents=True, exist_ok=True)
new_file_path = night_dir_path / new_file_name
file_path.rename(new_file_path)
print(f'{start_time} {night_dir_path} {new_file_path}')
def parse_file_name(file_name):
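    # AudioMoth file names encode the recording start time in UTC as YYYYMMDD_HHMMSS.WAV.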
start_time = datetime.datetime.strptime(file_name, '%Y%m%d_%H%M%S.WAV')
return start_time.replace(tzinfo=ZoneInfo('UTC'))
def get_night(dt):
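    # Recordings made from local noon onward belong to that date's night;
    # earlier recordings are assigned to the previous night.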
dt = dt.astimezone(TIME_ZONE)
date = dt.date()
hour = dt.hour
if hour >= 12:
return date
else:
return datetime.date.fromordinal(dt.toordinal() - 1)
if __name__ == '__main__':
main()
|
HaroldMills/Vesper
|
scripts/organize_audiomoth_wav_files_by_night.py
|
organize_audiomoth_wav_files_by_night.py
|
py
| 1,464
|
python
|
en
|
code
| 47
|
github-code
|
6
|
3929101533
|
from sqlalchemy import Column, INTEGER, Identity, String
from src.data_access.database.models.base_entity import InoversityLibraryBase
__all__ = [
"StaffEntity"
]
class StaffEntity(InoversityLibraryBase):
user_id = Column("id", INTEGER, Identity(), primary_key=True, index=True)
role_level = Column("roleLevel", String(256), nullable=False, index=True)
staff_number = Column("staffNumber", String(20), nullable=False, unique=True)
department = Column("department", String(100), nullable=False)
job_title = Column("jobTitle", String(100), nullable=False)
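# Minimal usage sketch (assumes a hypothetical SQLAlchemy engine/session set up elsewhere):
#     from sqlalchemy.orm import Session
#     staff = StaffEntity(role_level="librarian", staff_number="S-0001",
#                         department="Circulation", job_title="Desk Clerk")
#     with Session(engine) as session:
#         session.add(staff)
#         session.commit()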
|
mariusvrstr/PythonMicroservice
|
src/data_access/database/models/staff_entity.py
|
staff_entity.py
|
py
| 582
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16543455789
|
from ai_chatbot.scripts import REDataHeader as Header
import dateparser
import datetime
def printData(data):
print('Station from : {0}'.format(data[Header.STATIONFROM]))
print('Station to : {0}'.format(data[Header.STATIONTO]))
print('departure date : {0}'.format(data[Header.DEPARTDATE]))
print('departure time : {0}'.format(data[Header.DEPARTTIME]))
def datecheck(date):
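    # returns 0 if the parsed date lies in the future, 1 otherwise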
now = datetime.datetime.now()
dateobject = dateparser.parse(date)
if dateobject > now:
return 0
else:
return 1
def returndatecheck(date, returndate):
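    # returns 0 if the return date is later than the departure date, 1 otherwise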
dateobject = dateparser.parse(date)
returndateobject = dateparser.parse(returndate)
if dateobject < returndateobject:
return 0
else:
return 1
def timecheck(date, time):
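    # returns 0 if the combined departure date and time lie in the future, 1 otherwise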
now = datetime.datetime.now()
dateobject = dateparser.parse(date)
timeobject = dateparser.parse(time)
fullobject = datetime.datetime.combine(dateobject.date(), timeobject.time())
if fullobject > now:
return 0
else:
return 1
def missingDataCheck(data):
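    # returns the header constant of the first missing or invalid field, or 0 if the booking data is complete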
if data[Header.STATIONFROM] == '':
return Header.STATIONFROM
elif data[Header.STATIONTO] == '':
return Header.STATIONTO
elif data[Header.DEPARTDATE] == '':
return Header.DEPARTDATE
elif datecheck(data[Header.DEPARTDATE]) == 1:
data[Header.DEPARTDATE] = ''
return Header.BADDATE
elif data[Header.DEPARTTIME] == '':
return Header.DEPARTTIME
elif timecheck(data[Header.DEPARTDATE], data[Header.DEPARTTIME]) == 1:
data[Header.DEPARTDATE] = ''
data[Header.DEPARTTIME] = ''
return Header.BADTIME
elif data[Header.SINGLERETURN].lower() == '':
return Header.SINGLERETURN
elif data[Header.SINGLERETURN].lower() == 'return':
if data[Header.RETURNDATE] == '':
return Header.RETURNDATE
elif returndatecheck(data[Header.DEPARTDATE], data[Header.RETURNDATE]) == 1:
data[Header.RETURNDATE] = ''
return Header.BADDATE
elif data[Header.RETURNTIME] == '':
return Header.RETURNTIME
return 0
else:
return 0
def verificationCheck(data):
if data[Header.CONFIRMED] == 'true':
return 0
return 1
def getURL(data):
# call function in scraper/scraper.py
print('Getting URL for...')
    print('\t {0} --> {1}'.format(data[Header.STATIONFROM], data[Header.STATIONTO]))
    print('\t Departure date : {0}'.format(data[Header.DEPARTDATE]))
    print('\t Departure time : {0}'.format(data[Header.DEPARTTIME]))
|
Grimmii/TrainChatBot
|
src/ai_chatbot/scripts/RE_function_booking.py
|
RE_function_booking.py
|
py
| 2,542
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38217506704
|
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.animation as animation
import matplotlib.patches as mpatches
from matplotlib import ticker
from matplotlib import cm
from matplotlib.ticker import FuncFormatter
import numpy as np
from utils.occ_map_utils import load_map, display_occ_map, plot_grid_map_hmm, show_traj, \
black_cm, green_cm, red_cm, blue_cm, greens_cm, greys_cm
from utils.occ_map_utils import show_map
from utils.plot_utils import plot_4d_tensor
# key-press handler: the space bar pauses/resumes the tracking animation
def on_press(event, animation):
if event.key == ' ':
if animation.pause:
animation.event_source.stop()
else:
animation.event_source.start()
animation.pause ^= True
def onclick(event, anim):
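    # Mouse-click handler: highlight the clicked grid cell and show its motion-pattern
    # kernel, network output and velocity distributions in the accessory figures.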
models = anim.models
ix, iy = event.xdata, event.ydata
coords = np.floor(np.array([ix, iy]) / models[0].map_res).astype(int)
print("Click at coordinates: {}".format(coords))
all_axes = [plot.axes for plot in anim.plots]
for i, ax in enumerate(all_axes):
# For infomation, print which axes the click was in
if ax == event.inaxes:
#print "Click is at filter {}".format(anim.models[i].name)
break
clicked = np.zeros_like(models[0].map)
x, y = coords[0], coords[1]
clicked[x, y] = 1
for plot in anim.plots:
plot.set_axes_data("occupancy_axes", clicked)
anim.fig.canvas.draw()
accessories_figures = [anim.nn_output_fig, anim.kernel_fig]
for fig in accessories_figures:
if fig is not None:
fig.clear()
anim.nn_output_fig.suptitle('Network Output')
anim.kernel_fig.suptitle('Motion pattern')
if models[i].kernels.ndim == 6:
kernel = models[i].kernels[x, y]
condi_prob = models[i].nn_probs[x, y]
else:
kernel = models[i].kernels
condi_prob = None
plot_4d_tensor(kernel, fig=anim.kernel_fig)
if condi_prob is not None:
plot_4d_tensor(condi_prob, fig=anim.nn_output_fig)
anim.set_axis_ticks(models[i].extent)
anim.accessories_plots['ma_plot'].set_axes_data("occupancy_axes", np.ones_like(models[i].ma_vel))
anim.accessories_plots['vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt_pred[x, y])
anim.accessories_plots['merge_vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt_merged[x, y])
anim.accessories_plots['final_vel_plot'].set_axes_data("occupancy_axes", models[i].P_Vt[x, y])
accessories_figures += [anim.vel_fig]
for fig in accessories_figures:
if fig is not None:
fig.canvas.draw()
for k, plot in anim.accessories_plots.items():
plot.refresh_colorbar()
model_names = map(lambda model: model.name, models)
occs = map(lambda model: model.P_Ot[x, y], models)
for name, occ in zip(model_names, occs):
print("loc ({}, {}) of model {} has occupancy of {}".format(x, y, name, occ))
class Plot(object):
def __init__(self, axes, map, res, plot_map=True, plot_seen=False, show_text=True, colorbar_on=None, title=None):
self.axes = axes
self.map = map
self.res = res
self.plot_seen = plot_seen
self.plot_map = plot_map
self.map_axes = None
self.occupancy_axes = None
self.ground_truth_axes = None
self.seen_axes = None
self.colorbars = []
self.show_text = show_text
if title is None:
title = 'Measurements'
self.axes.set_title(title)
if show_text:
self.text = self.axes.text(0.92, 0.92, "", bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 5},
transform=self.axes.transAxes, ha="right", color='white', zorder=14)
self.add_images()
self.add_colorbar(colorbar_on)
def add_images(self):
"""Add AxesImages for showing map, occupancy and seen."""
occupancy = np.zeros(self.map.shape, dtype=float)
self.occupancy_axes = show_map(occupancy, self.res, cmap=red_cm, ax=self.axes, zorder=11)
# initialize plots with map
map_ = self.map if self.plot_map else np.zeros_like(self.map)
self.map_axes = show_map(map_, self.res, cmap=black_cm, ax=self.axes, zorder=12)
if self.plot_seen:
# add seen image
self.seen_axes = show_map(occupancy, self.res, cmap=black_cm, alpha=0.2, ax=self.axes)
def set_axes_data(self, axes_name, data, vmin=None, vmax=None):
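        # rotate the array 90 degrees for display and rescale the image color limits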
image_ax = getattr(self, axes_name)
image_ax.set_data(np.rot90(data))
vmin = vmin if vmin is not None else data.min()
vmax = vmax if vmax is not None else data.max()
image_ax.set_clim([vmin, vmax])
def add_custom_image(self, axes_name, cmap=None, image=None, **kwargs):
if image is None:
image = np.zeros(self.map.shape, dtype=float)
image_ax = show_map(image, self.res, cmap=cmap, ax=self.axes, **kwargs)
setattr(self, axes_name, image_ax)
def add_colorbar(self, colorbar_on):
if colorbar_on is None:
return
image_axes = getattr(self, colorbar_on)
if image_axes is not None:
cb = plt.colorbar(image_axes, ax=self.axes, fraction=0.046, pad=0.04)
tick_locator = ticker.MaxNLocator(nbins=5)
cb.locator = tick_locator
self.colorbars.append(cb)
def set_ylabel(self, text='', **kwargs):
self.axes.set_ylabel(text, **kwargs)
def refresh_colorbar(self):
for cb in self.colorbars:
cb.update_ticks()
def add_traj_line(self, num_targets=1):
""" Add 2D Lines for showing trajectories."""
colors = cm.Dark2(np.linspace(0, 1, num_targets))
# add lines for showing trajectories
        self.lines = [self.axes.add_line(Line2D([], [], zorder=14, color='grey')) for _ in range(num_targets)]
def set_title(self, title):
self.axes.set_title(title)
def set_text(self, text):
self.text.set_text(text)
class TrackingAnimation(animation.TimedAnimation):
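    # Animates tracking: each frame runs one tracking step for every model and redraws
    # its predicted occupancy side by side; the space bar pauses the animation and a
    # mouse click on a grid cell opens the accessory figures for that cell.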
def __init__(self, models, num_steps, simulated_data, plot_seen=False, plot_map=True, show_text=True, accessories=None):
self.num_models = len(models)
self.models = models
self.map = models[0].map
self.res = models[0].map_res
self.num_steps = num_steps
self.simulated_data = simulated_data
self.show_map = plot_map
self.show_seen = plot_seen
self.show_text = show_text
self.nn_output_fig = None
self.kernel_fig = None
self.vel_fig = None
self.accessories_plots = None
self.accessories = accessories
self.initialize_figure()
self.initialize_models()
self.initialize_accessories()
print(self.accessories_plots)
self.fig.canvas.mpl_connect('key_press_event', lambda event: on_press(event, self))
self.fig.canvas.mpl_connect('button_press_event', lambda event: onclick(event, self))
animation.TimedAnimation.__init__(self, self.fig, interval=500, blit=True, repeat=True, repeat_delay=1000)
def initialize_figure(self):
fig_size = (5 * self.num_models, 5)
self.fig = plt.figure(figsize=fig_size)
self.pause = True
# bind key press event to pause animation
self.plots = []
for i in range(self.num_models):
axes = self.fig.add_subplot(1, self.num_models, i + 1)
title = self.models[i].name
colorbar_on = "occupancy_axes" if self.simulated_data else None
plot = Plot(axes, self.map, self.res, self.show_map, self.show_seen, self.show_text, colorbar_on, title=title)
self.add_custom_element(plot)
self.plots.append(plot)
self.fig_title_axes = self.fig.add_axes([.4, .9, .2, .05])
self.fig_title_axes.set_axis_off()
self.fig_title = self.fig.text(.49, .9, "", transform=self.fig_title_axes.transAxes, fontsize=15, color='r', ha='center')
if not self.simulated_data:
self.add_legend()
def initialize_accessories(self):
if "motion_pattern" in self.accessories:
self.nn_output_fig = plt.figure(figsize=(5, 5))
self.nn_output_fig.suptitle('Network Output')
self.kernel_fig = plt.figure(figsize=(5, 5))
self.kernel_fig.suptitle('Motion pattern')
if "velocities" in self.accessories:
self.vel_fig = plt.figure(figsize=(12, 3))
ma_ax = self.vel_fig.add_subplot(141)
ma_plot = Plot(ma_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{ma})$')
vel_ax = self.vel_fig.add_subplot(142)
vel_plot = Plot(vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{pred})$')
merge_vel_ax = self.vel_fig.add_subplot(143)
merge_vel_plot = Plot(merge_vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title=r'$P(V_{merge})$')
final_vel_ax = self.vel_fig.add_subplot(144)
final_vel_plot = Plot(final_vel_ax, self.models[0].ma_vel, 1, False, False, False, colorbar_on=None, title='$P(V)$')
self.accessories_plots = dict(ma_plot=ma_plot, vel_plot=vel_plot,
merge_vel_plot=merge_vel_plot, final_vel_plot=final_vel_plot)
def set_axis_ticks(self, extent):
if self.vel_fig is not None:
xlabels = (np.arange(extent) + np.array([-(extent // 2)])).tolist()
ylabels = xlabels
def format_fn_x(tick_val, tick_pos):
if int(tick_val) in range(7):
return xlabels[int(tick_val)]
else:
return ''
def format_fn_y(tick_val, tick_pos):
if int(tick_val) in range(7):
return ylabels[int(tick_val)]
else:
return ''
ax = self.vel_fig.get_axes()[0]
max_extent = float(extent)
ax.set_xticks(np.arange(.5, max_extent, 1.0))
ax.set_yticks(np.arange(0.5, max_extent, 1.0))
ax.xaxis.set_major_formatter(FuncFormatter(format_fn_x))
ax.yaxis.set_major_formatter(FuncFormatter(format_fn_y))
ylabel = ax.set_ylabel(r'$V_y$', color='darkred', fontsize=12)
ylabel.set_rotation(0)
ax.yaxis.set_label_coords(-0.06, .95)
ax.set_xlabel(r'$V_x$', color='darkred', fontsize=12)
ax.xaxis.set_label_coords(1.05, -0.025)
for ax in self.vel_fig.get_axes()[1:]:
ax.set_xticks([])
ax.set_yticks([])
def add_custom_element(self, plot):
"""Add extra elements to plot. This method has to be overwritten by subclasses. """
pass
def update_custom_element(self, idx):
""" Update custom elements on animation. This method has to be overwritten by subclasses. """
def initialize_models(self):
"""Initialize BOFUM models. This method has to be overwritten by subclasses."""
pass
def _draw_frame(self, framedata):
t = self.models[0].t
t_count = "frame = " + str(t)
print(t_count)
self.fig_title.set_text(t_count)
measurement = self.models[0].measurement_at()
for model in self.models:
model.tracking_step(measurement=measurement)
# plot new occupancy
Ot_max = max(map(lambda model: model.P_Ot.max(), self.models))
Ot_min = min(map(lambda model: model.P_Ot.min(), self.models))
for i, model in enumerate(self.models):
self.plots[i].set_axes_data("occupancy_axes", model.P_Ot, Ot_min, Ot_max)
if self.show_seen:
seen = model.evaluate_loc_at(t)
self.plots[i].set_axes_data("seen_axes", seen)
if self.plots[i].show_text:
x_ent = model.calc_cross_entropy()
f1_score = model.calc_f1_score()
average_precision = model.calc_average_precision()
self.plots[i].text.set_text("x_ent: {:.3f}, f1: {:.3f}, ap: {:.3f}".format(x_ent, f1_score, average_precision))
self.update_custom_element(i)
# if i == self.num_models-1:
# self.add_legend()
self.plots[i].refresh_colorbar()
# repeat tracking
if framedata == self.num_steps-1:
for model in self.models:
model.reset()
def new_frame_seq(self):
return iter(range(self.num_steps))
def _init_draw(self):
pass
class TrackingAnimSimulation(TrackingAnimation):
def __init__(self, models, num_steps, num_targets=1, diagonal=False, plot_map=True, **kwargs):
self.num_targets = num_targets
self.diagonal = diagonal
self.trajs = None
self.distances = None
super(TrackingAnimSimulation, self).__init__(models, num_steps, True, plot_map=plot_map, **kwargs)
def add_custom_element(self, plot):
plot.add_traj_line(self.num_targets)
def update_custom_element(self, idx):
# add trajectory lines
truncated_trajs = self.models[0].traversed_traj_at()
for idx_, line in enumerate(self.plots[idx].lines):
xs, ys = truncated_trajs[idx_].T[0][-5:], truncated_trajs[idx_].T[1][-5:]
line.set_data(xs, ys)
def initialize_models(self):
        self.distances, self.trajs = self.models[0].initialize(self.num_targets, self.num_steps)
        # initialize the remaining models with the same distances and trajectories
        for model in self.models[1:]:
            model.initialize(self.num_targets, self.num_steps,
                             distances=self.distances, trajectories=self.trajs)
class TrackingAnimRealdata(TrackingAnimation):
def __init__(self, models, num_steps, scene, plot_map=True,plot_seen=False, simulated_scenes=False, **kwargs):
self.scene = scene
self.simulated_scenes = simulated_scenes
super(TrackingAnimRealdata, self).__init__(models, num_steps, False,
plot_seen=plot_seen, plot_map=plot_map, **kwargs)
def update(self, scene, update_num_steps=True):
self.scene = scene
        for plot in self.plots:
            plot.set_axes_data("map_axes", self.scene.static_map)
        for model in self.models:
            model.update(scene)
if update_num_steps:
self.num_steps = len(scene.hits)
self.frame_seq = self.new_frame_seq()
def initialize_models(self):
        for model in self.models:
            model.initialize(self.scene, not self.simulated_scenes)
def add_custom_element(self, plot):
# add false negative axes
# it shows locations where ground truth is occupied
# but BOFUM fails to track
plot.add_custom_image("fn_axes", blue_cm)
# add true positive axes
# it shows locations where ground truth is occupied
# and BOFUM predicts occupancy prob higher than 0
plot.add_custom_image("tp_axes", greens_cm)
plot.add_colorbar("tp_axes")
# add false positive axes
plot.add_custom_image("fp_axes", red_cm)
def add_legend(self):
g_patch = mpatches.Patch(color='g', label='True positive')
b_patch = mpatches.Patch(color='b', label='False negative')
o_patch = mpatches.Patch(color='orange', label='False positive')
plt.legend(handles=[g_patch, b_patch, o_patch], bbox_to_anchor=(1, 1),
bbox_transform=self.fig.transFigure)
def update_custom_element(self, idx):
t = self.models[0].t - 1
model = self.models[idx]
plot = self.plots[idx]
occupancy_prob = model.P_Ot
h_max = occupancy_prob.max()/2
#occupancy_prob = np.where(occupancy_prob>h_max, occupancy_prob, 0)
ground_truth = model.ground_truth_at(t)
overlap = np.logical_and(occupancy_prob, ground_truth)
        # if predicted occupancy on a ground-truth location is higher than 0.1,
        # it is not counted as a false negative
occupancy_temp = np.where(overlap, occupancy_prob, 0)
#predicted = np.where(occupancy_temp>0.1, 1, 0)
false_negative = np.where(occupancy_temp>0.1, 0, ground_truth)
        # if the model predicts occupancy higher than 0 on ground-truth locations,
        # it is counted as a true positive
true_positive = np.where(overlap, occupancy_prob, 0)
        # if the model predicts occupancy higher than 0 on non-ground-truth locations,
        # it is counted as a false positive
false_positive = occupancy_prob.copy()
false_positive[overlap] = 0
        # only show false positives higher than 1/4 of the highest occupancy
h_max = false_positive.max() / 4
false_positive = np.where(false_positive>h_max, false_positive, 0)
Ot_max = max(map(lambda model: model.P_Ot.max(), self.models))
plot.set_axes_data("fn_axes", false_negative, 0, 1)
plot.set_axes_data("tp_axes", true_positive, 0, Ot_max)
plot.set_axes_data("fp_axes", false_positive, 0, Ot_max)
plot.set_axes_data("occupancy_axes", np.zeros_like(occupancy_prob))
|
stomachacheGE/bofmp
|
tracking/animation.py
|
animation.py
|
py
| 17,318
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33359791664
|
import sys
import unittest
import psycopg2
sys.path.insert(0, '../src')
from src.utils import daily_reports_return_json, daily_reports_return_csv, time_series_return_csv, check_query_data_active, check_request
from src.config import connect_database
# import copy
class TestUtils(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestUtils, self).__init__(*args, **kwargs)
self.conn = connect_database()
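        # NOTE: these tests expect a reachable PostgreSQL instance as configured in src.config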
def test_daily_reports_return_json(self):
try:
# conn = connect_database()
cur = self.conn.cursor()
# create a test table in the database with the table format of a daily report
cur.execute("DROP TABLE IF EXISTS test;")
self.conn.commit()
cur.execute("CREATE TABLE test (state VARCHAR(5), region VARCHAR(5), last_update VARCHAR(20), "
"confirmed INTEGER, deaths INTEGER, recovered INTEGER, active INTEGER, combined_key VARCHAR("
"5));")
self.conn.commit()
cur.execute("INSERT INTO test VALUES ('a', 'a', '2021-01-02 05:22:33', 10, 5, 0, 5, 'a, a'), "
"(null, 'b', '2021-01-02 05:22:33', 1, 0, 0, 1, 'b'), "
"('b', 'b', '2021-01-02 05:22:33', 4, 3, 0, 1, 'b, b');"
)
self.conn.commit()
date = "01/01/21"
types = ["Confirmed", "Deaths", "Recovered", "Active"]
locations = [{"Country/Region": "b"},
{"Country/Region": "a", "Province/State": "a", "Combined_Key": "a, a"}
]
result = daily_reports_return_json(cur, date, locations, types, 'test')
expected = {
"Date": date,
"Reports": [
{
"Active": 2,
"Confirmed": 5,
"Country/Region": "b",
"Deaths": 3,
"Recovered": 0
},
{
"Active": 5,
"Confirmed": 10,
"Country/Region": "a",
"Deaths": 5,
"Province/State": "a",
"Combined_Key": "a, a",
"Recovered": 0
}
]
}
self.assertEqual(result, expected)
except psycopg2.Error:
assert False, "Database Error"
def test_daily_reports_return_csv(self):
json_data = {
"Date": "01/01/21",
"Reports": [
{
"Active": 2,
"Confirmed": 5,
"Country/Region": "b",
"Deaths": 3,
"Recovered": 0
},
{
"Active": 5,
"Confirmed": 10,
"Country/Region": "a",
"Deaths": 5,
"Province/State": "a",
"Combined_Key": "a, a",
"Recovered": 0
}
]
}
result = daily_reports_return_csv(json_data, ["Confirmed", "Deaths", "Recovered", "Active"])
expected = "Date,Province/State,Country/Region,Combined_Key,Confirmed,Deaths,Recovered,Active" \
"\n01/01/21,,b,,5,3,0,2\n01/01/21,a,a,a, a,10,5,0,5"
self.assertEqual(result, expected)
def test_time_series_return_csv(self):
json_data = {"01/26/20": [{"Active": 0, "Confirmed": 0, "Country/Region": "Albania"}]}
expected = "Date,Province/State,Country/Region,Confirmed\n01/26/20,,Albania,0"
result = time_series_return_csv(json_data, ["01/26/20"], ["Confirmed"])
self.assertEqual(result, expected)
def test_check_query_data_active(self):
try:
# conn = connect_database()
cur = self.conn.cursor()
# create a test table in the database with the table format of a daily report
cur.execute("DROP TABLE IF EXISTS test;")
self.conn.commit()
self.assertEqual(check_query_data_active(cur, ["test"]), False)
except psycopg2.Error:
assert False, "Database Error"
def test_check_request(self):
result = check_request(['test'], {})
self.assertEqual(result[0], 'test')
if __name__ == '__main__':
unittest.main()
|
shin19991207/CSC301-A2
|
tests/test_utils.py
|
test_utils.py
|
py
| 4,518
|
python
|
en
|
code
| 0
|
github-code
|
6
|