max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/tests_pipeline/test_pipeline_helper.py | Anbang-Hu/shrike | 0 | 12765651 | <gh_stars>0
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Unit tests for pipeline_helper"""
from functools import lru_cache
import pytest
import sys
from omegaconf import OmegaConf
from pathlib import Path
from shrike.pipeline import AMLPipelineHelper
import yaml
import os
import shutil
from shrike._core import stream_handler
@lru_cache(maxsize=None)
def pipeline_helper():
    """Build (once) and return an ``AMLPipelineHelper`` wired to the test config.

    The result is memoized via ``lru_cache`` so that every test in this module
    shares a single connected helper instance instead of re-reading the YAML
    configuration and reconnecting for each test.
    """
    test_config = OmegaConf.load(Path(__file__).parent / "data/test_configuration.yaml")
    helper = AMLPipelineHelper(config=test_config)
    helper.connect()
    return helper
# Microsoft AAD tenant id used as the key of the tenant-override mapping
# throughout the override tests below.
tenant_id = "72f988bf-86f1-41af-91ab-2d7cd011db47"
def test_validate_experiment_name():
    """validate_experiment_name rejects malformed names and accepts valid ones."""
    # Empty names, leading underscores, and '.'-containing names must raise.
    for bad_name in ("", "_exp-name", "wront.period"):
        with pytest.raises(ValueError):
            AMLPipelineHelper.validate_experiment_name(bad_name)
    # Alphanumerics, dashes and trailing underscores are all accepted.
    for good_name in ("Correct-NAME_", "ALLARELETTERS", "12344523790"):
        assert AMLPipelineHelper.validate_experiment_name(good_name)
def test_get_component_name_from_instance():
    """_get_component_name_from_instance recovers the key a component was loaded under."""
    helper = pipeline_helper()
    step = helper.component_load(component_key="dummy_key")()
    assert helper._get_component_name_from_instance(step) == "dummy_key"
def test_parse_pipeline_tags():
    """_parse_pipeline_tags parses valid JSON tags and only logs on invalid JSON."""
    helper = pipeline_helper()
    assert helper._parse_pipeline_tags() == {"test_key": "test_value"}
    # A malformed JSON string must not raise; a warning is logged instead.
    helper.config.run.tags = '{"WRONG_JSON": 1'
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        helper._parse_pipeline_tags()
    assert (
        'The pipeline tags {"WRONG_JSON": 1 is not a valid json-style string.'
        in str(handler)
    )
    # Restore the shared helper's config for subsequent tests.
    helper.config.run.tags = '{"test_key": "test_value"}'
@pytest.mark.parametrize(
    "windows,gpu", [(True, True), (True, False), (False, True), (False, False)]
)
def test_apply_parallel_runsettings(windows, gpu):
    """Unit tests for _apply_parallel_runsettings()."""
    helper = pipeline_helper()
    # Instantiate the parallel-run-step test component.
    component = helper.component_load(component_key="prscomponentlinux")
    step = component(input_dir="foo")
    if windows and gpu:
        # Windows + GPU is an unsupported combination and must raise.
        with pytest.raises(ValueError):
            helper._apply_parallel_runsettings(
                module_name="prscomponentlinux",
                module_instance=step,
                windows=windows,
                gpu=gpu,
            )
        return
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        helper._apply_parallel_runsettings(
            module_name="prscomponentlinux",
            module_instance=step,
            windows=windows,
            gpu=gpu,
        )
    # The log should mention the chosen compute target and the module name.
    log = str(handler)
    assert "Using parallelrunstep compute target" in log
    assert f"to run {step.name}" in log
    # Parallel run settings applied by the helper.
    parallel = step.runsettings.parallel
    assert parallel.error_threshold == -1
    assert parallel.mini_batch_size == "1"
    assert parallel.node_count == 10
    assert parallel.process_count_per_node is None
    assert parallel.run_invocation_timeout == 10800
    assert parallel.run_max_try == 3
    # The compute target depends on the OS/GPU combination.
    if windows:
        assert step.runsettings.target == "cpu-dc-win"
    elif gpu:
        assert step.runsettings.target == "gpu-cluster"
    else:
        assert step.runsettings.target == "cpu-cluster"
def test_apply_scope_runsettings():
    """_apply_scope_runsettings forwards all scope settings onto the step."""
    helper = pipeline_helper()
    step = helper.component_load("convert2ss")(TextData="foo", ExtractionClause="foo")
    expected = {
        "adla_account_name": "office-adhoc-c14",
        "custom_job_name_suffix": "test",
        "scope_param": "-tokens 50",
    }
    helper._apply_scope_runsettings("convert2ss", step, **expected)
    # Every setting must land verbatim on step.runsettings.scope.
    for setting, value in expected.items():
        assert getattr(step.runsettings.scope, setting) == value
def test_apply_datatransfer_runsettings():
    """_apply_datatransfer_runsettings targets the data-factory compute."""
    step = pipeline_helper().component_load("data_transfer")(source_data="foo")
    # NOTE(review): the "data_Transfer" casing mirrors the original call —
    # presumably the helper treats the name case-insensitively; confirm.
    pipeline_helper()._apply_datatransfer_runsettings("data_Transfer", step)
    assert step.runsettings.target == "data-factory"
def test_apply_sweep_runsettings():
    """_apply_sweep_runsettings with an early-termination slack_factor."""
    helper = pipeline_helper()
    step = helper.component_load("sweep_component")()
    # Important tip: slack_factor and slack_amount are mutually exclusive;
    # this test uses slack_factor (see test2 for slack_amount).
    # Sweep component docs:
    # https://componentsdk.azurewebsites.net/components/sweep_component.html
    settings = {
        "algorithm": "random",
        "primary_metric": "result",
        "goal": "maximize",
        "policy_type": "median_stopping",
        "evaluation_interval": 1,
        "delay_evaluation": 1,
        "slack_factor": 0.1,
        "truncation_percentage": 10,
        "max_total_trials": 5,
        "max_concurrent_trials": 4,
        "timeout_minutes": 30,
    }
    helper._apply_sweep_runsettings("sweep_component", step, **settings)
    sweep = step.runsettings.sweep
    assert sweep.algorithm == settings["algorithm"]
    assert sweep.objective.primary_metric == settings["primary_metric"]
    assert sweep.objective.goal == settings["goal"]
    early = sweep.early_termination
    assert early.policy_type == settings["policy_type"]
    assert early.evaluation_interval == settings["evaluation_interval"]
    assert early.delay_evaluation == settings["delay_evaluation"]
    assert early.slack_factor == settings["slack_factor"]
    assert early.truncation_percentage == settings["truncation_percentage"]
    limits = sweep.limits
    assert limits.max_total_trials == settings["max_total_trials"]
    assert limits.max_concurrent_trials == settings["max_concurrent_trials"]
    assert limits.timeout_minutes == settings["timeout_minutes"]
def test2_apply_sweep_runsettings():
    """_apply_sweep_runsettings with an early-termination slack_amount."""
    helper = pipeline_helper()
    step = helper.component_load("sweep_component")()
    # Note: slack_factor and slack_amount are mutually exclusive; this test
    # uses slack_amount (the previous test covers slack_factor).
    # Sweep component docs:
    # https://componentsdk.azurewebsites.net/components/sweep_component.html
    settings = {
        "algorithm": "random",
        "primary_metric": "result",
        "goal": "maximize",
        "policy_type": "median_stopping",
        "evaluation_interval": 1,
        "delay_evaluation": 1,
        "slack_amount": 0.2,
        "truncation_percentage": 10,
        "max_total_trials": 5,
        "max_concurrent_trials": 4,
        "timeout_minutes": 30,
    }
    helper._apply_sweep_runsettings("sweep_component", step, **settings)
    sweep = step.runsettings.sweep
    assert sweep.algorithm == settings["algorithm"]
    assert sweep.objective.primary_metric == settings["primary_metric"]
    assert sweep.objective.goal == settings["goal"]
    early = sweep.early_termination
    assert early.policy_type == settings["policy_type"]
    assert early.evaluation_interval == settings["evaluation_interval"]
    assert early.delay_evaluation == settings["delay_evaluation"]
    assert early.slack_amount == settings["slack_amount"]
    assert early.truncation_percentage == settings["truncation_percentage"]
    limits = sweep.limits
    assert limits.max_total_trials == settings["max_total_trials"]
    assert limits.max_concurrent_trials == settings["max_concurrent_trials"]
    assert limits.timeout_minutes == settings["timeout_minutes"]
@pytest.mark.parametrize(
    "compliant,datastore",
    [(True, "fake_compliant_datastore"), (False, "workspaceblobstore")],
)
def test_apply_recommended_runsettings_datatransfer_datastore(compliant, datastore):
    """Data-transfer output lands on the datastore matching the compliance mode."""
    step = pipeline_helper().component_load("data_transfer")(source_data="foo")
    pipeline_helper().apply_recommended_runsettings(
        "data_transfer", step, datatransfer=True, compliant=compliant
    )
    assert step.outputs.destination_data.datastore == datastore
@pytest.mark.parametrize("mpi", [True, False])
def test_apply_windows_runsettings(mpi):
    """Unit tests for _apply_windows_runsettings()."""
    # Create a module instance (MPI or plain Windows variant).
    module_name = (
        "stats_passthrough_windows_mpi" if mpi else "stats_passthrough_windows"
    )
    module_instance_fun = pipeline_helper().component_load(component_key=module_name)
    module_instance = module_instance_fun(input_path="foo")
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        pipeline_helper()._apply_windows_runsettings(
            module_name=module_name,
            module_instance=module_instance,
            mpi=mpi,
            node_count=2,
            process_count_per_node=3,
        )
    # Testing the logged output.
    out = str(handler)
    assert (
        f"Using windows compute target cpu-dc-win to run {module_name} from pipeline class AMLPipelineHelper"
        in out
    )
    assert f"to run {module_instance.name}" in out
    # Testing MPI runsetting parameter configuration.
    if mpi:
        assert module_instance.runsettings.resource_layout.node_count == 2
        # BUG FIX: the original used "is 3", an identity comparison with an
        # int literal that relies on CPython small-int interning and raises
        # SyntaxWarning on Python >= 3.8; use value equality instead.
        assert module_instance.runsettings.resource_layout.process_count_per_node == 3
    # Testing compute target configuration.
    assert module_instance.runsettings.target == "cpu-dc-win"
    # Testing input and output mode.
    assert module_instance.inputs.input_path.mode == "download"
    assert module_instance.outputs.output_path.mode == "upload"
def test_apply_hdi_runsettings():
    """Unit tests for _apply_hdi_runsettings()."""
    helper = pipeline_helper()
    module_name = "SparkHelloWorld"
    step = helper.component_load(component_key=module_name)(input_path="foo")
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        helper._apply_hdi_runsettings(
            module_name=module_name,
            module_instance=step,
            conf='{"spark.yarn.maxAppAttempts": 1, "spark.sql.shuffle.partitions": 3000}',
        )
    # The log should name the compute target and the module.
    assert (
        "Using HDI compute target cpu-cluster to run SparkHelloWorld from pipeline class AMLPipelineHelper"
        in str(handler)
    )
    # HDI runsetting parameter configuration.
    hdi = step.runsettings.hdinsight
    assert hdi.driver_memory == "2g"
    assert hdi.driver_cores == 2
    assert hdi.executor_memory == "2g"
    assert hdi.executor_cores == 2
    assert hdi.number_executors == 2
    # The user-provided conf entries are merged with the shrike defaults.
    expected_conf = {
        "spark.yarn.appMasterEnv.DOTNET_ASSEMBLY_SEARCH_PATHS": "./udfs",
        "spark.yarn.maxAppAttempts": 1,
        "spark.yarn.appMasterEnv.PYSPARK_PYTHON": "/usr/bin/anaconda/envs/py37/bin/python3",
        "spark.yarn.appMasterEnv.PYSPARK_DRIVER_PYTHON": "/usr/bin/anaconda/envs/py37/bin/python3",
        "spark.sql.shuffle.partitions": 3000,
    }
    for key, value in expected_conf.items():
        assert hdi.conf[key] == value
    # Compute target configuration.
    assert step.runsettings.target == "cpu-cluster"
    # Input and output modes are left unset for HDI steps.
    assert step.inputs.input_path.mode is None
    assert step.outputs.output_path.mode is None
@pytest.mark.parametrize(
    "mpi,gpu", [(True, True), (True, False), (False, True), (False, False)]
)
def test_apply_linux_runsettings(mpi, gpu):
    """Unit tests for _apply_linux_runsettings()."""
    helper = pipeline_helper()
    module_name = "stats_passthrough_mpi" if mpi else "stats_passthrough"
    step = helper.component_load(component_key=module_name)(input_path="foo")
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        helper._apply_linux_runsettings(
            module_name=module_name,
            module_instance=step,
            mpi=mpi,
            gpu=gpu,
            node_count=2 if mpi else None,
            process_count_per_node=3 if mpi else None,
        )
    out = str(handler)
    sys.stdout.write(out)
    # MPI runsetting parameter configuration.
    if mpi:
        assert step.runsettings.resource_layout.node_count == 2
        assert step.runsettings.resource_layout.process_count_per_node == 3
    # Compute target configuration; note the two log messages are worded
    # differently ("local code GPU module" vs "local CPU module").
    if gpu:
        assert step.runsettings.target == "gpu-cluster"
        assert (
            f"Using target gpu-cluster for local code GPU module {module_name} from pipeline class AMLPipelineHelper"
            in out
        )
    else:
        assert step.runsettings.target == "cpu-cluster"
        assert (
            f"Using target cpu-cluster for local CPU module {module_name} from pipeline class AMLPipelineHelper"
            in out
        )
    # Input and output modes.
    assert step.inputs.input_path.mode == "download"
    assert step.outputs.output_path.mode == "upload"
@pytest.mark.parametrize(
    "module_name,expected_stdout",
    [
        ("MultiNodeTrainer", "Module MultiNodeTrainer detected as MPI: True"),
        ("SparkHelloWorld", "Module SparkHelloWorld detected as HDI: True"),
        ("stats_passthrough", ""),
        (
            "stats_passthrough_windows",
            "Module stats_passthrough_windows detected as WINDOWS: True",
        ),
        (
            "stats_passthrough_windows_mpi",
            "Module stats_passthrough_windows_mpi detected as WINDOWS: True",
        ),
        (
            "stats_passthrough_windows_mpi",
            "Module stats_passthrough_windows_mpi detected as MPI: True",
        ),
        ("stats_passthrough_mpi", "Module stats_passthrough_mpi detected as MPI: True"),
        ("convert2ss", "Module convert2ss detected as SCOPE: True"),
        ("prscomponentlinux", "Module prscomponentlinux detected as PARALLEL: True"),
        ("dummy_key", ""),
        ("data_transfer", "Module data_transfer detected as DATATRANSFER: True"),
    ],
)
def test_apply_recommended_runsettings(module_name, expected_stdout):
    """Unit tests for apply_recommended_runsettings()."""
    # IDIOM: replaced the long if/elif instantiation chain with a dispatch
    # dict mapping each component to the inputs it needs; components missing
    # from the map take the common input_path default.
    component_inputs = {
        "convert2ss": {"TextData": "AnyFile", "ExtractionClause": "foo"},
        "data_transfer": {"source_data": "foo", "source_type": "foo"},
        "dummy_key": {},
        "MultiNodeTrainer": {
            "vocab_file": "foo",
            "train_file": "foo",
            "validation_file": "foo",
        },
        "prscomponentlinux": {"input_dir": "foo"},
        "SparkHelloWorld": {
            "input_path": "foo",
            "in_file_type": "csv",
            "percent_take": 1,
            "out_file_type": "csv",
        },
    }
    kwargs = component_inputs.get(module_name, {"input_path": "foo"})
    module_instance_fun = pipeline_helper().component_load(component_key=module_name)
    module_instance = module_instance_fun(**kwargs)
    with stream_handler("shrike.pipeline.pipeline_helper", level="INFO") as handler:
        pipeline_helper().apply_recommended_runsettings(
            module_name=module_name,
            module_instance=module_instance,
        )
    # The expected detection message must appear in the log (empty string
    # matches trivially for components with no special runtime).
    assert expected_stdout in str(handler)
def test_check_if_spec_yaml_override_is_needed_allow_override_false():
    """No override is reported when allow_override is disabled."""
    helper = pipeline_helper()
    helper.config.tenant_overrides.allow_override = False
    try:
        override, _ = helper._check_if_spec_yaml_override_is_needed()
        assert override == False  # noqa: E712 - keep the original strict check
    finally:
        # ROBUSTNESS FIX: restore the shared (lru_cache'd) helper's config even
        # when the assertion fails, so later tests are not poisoned.
        helper.config.tenant_overrides.allow_override = True
def test_check_if_spec_yaml_override_is_needed_given_tenant_id():
    """The override mapping is returned when keyed by the current tenant id."""
    override, mapping = pipeline_helper()._check_if_spec_yaml_override_is_needed()
    assert override == True
    expected = {
        "environment.docker.image": {
            "polymerprod.azurecr.io/training/pytorch:scpilot-rc2": "mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04"
        },
        "tags": {"Office": "aml-ds"},
        "remove_polymer_pkg_idx": True,
    }
    assert mapping == expected
def test_check_if_spec_yaml_override_is_needed_given_config_filename():
    """Override lookup also works when the mapping is keyed by config name."""
    config_path = Path(__file__).parent / "data/test_configuration.yaml"
    with open(config_path, "r") as file:
        spec = yaml.safe_load(file)
    # Re-key the tenant mapping under the config name "amlds" instead of the
    # tenant id, and add a description so we can see it flow through.
    mapping = spec["tenant_overrides"]["mapping"].pop(tenant_id)
    mapping["description"] = "test"
    spec["tenant_overrides"]["mapping"]["amlds"] = mapping
    test_pipeline_helper = AMLPipelineHelper(config=OmegaConf.create(spec))
    test_pipeline_helper.connect()
    test_pipeline_helper.config.run.config_dir = os.path.join(
        Path(__file__).parent / "sample/conf"
    )
    assert "amlds" in test_pipeline_helper.config.tenant_overrides.mapping
    assert tenant_id not in test_pipeline_helper.config.tenant_overrides.mapping
    override, mapping = test_pipeline_helper._check_if_spec_yaml_override_is_needed()
    assert override == True
    assert mapping == {
        "environment.docker.image": {
            "polymerprod.azurecr.io/training/pytorch:scpilot-rc2": "mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04"
        },
        "tags": {"Office": "aml-ds"},
        "description": "test",
        "remove_polymer_pkg_idx": True,
    }
def test_recover_spec_yaml_with_keeping_modified_files():
    """_recover_spec_yaml restores originals and keeps tenant-specific copies."""
    file_list = [
        "tmp/spec_path.not_used",
        "tmp/spec_path.yaml",
        "tmp/env_path.not_used",
        "tmp/env_path_" + tenant_id + ".yaml",
    ]
    os.makedirs("tmp")
    try:
        # Touch each fixture file; contents are irrelevant for recovery.
        for path in file_list:
            open(path, "w").close()
        pipeline_helper()._recover_spec_yaml([file_list], True)
        assert os.path.exists("tmp/spec_path.yaml")
        assert os.path.exists("tmp/env_path.yaml")
        assert os.path.exists("tmp/spec_path_" + tenant_id + ".yaml")
        assert os.path.exists("tmp/env_path_" + tenant_id + ".yaml")
        assert len(os.listdir("tmp")) == 4
    finally:
        # ROBUSTNESS FIX: clean up even when an assertion fails, so later
        # tests that recreate "tmp" do not crash on makedirs.
        shutil.rmtree("tmp")
def test_recover_spec_yaml_without_keeping_modified_files():
    """_recover_spec_yaml restores originals and drops tenant-specific copies."""
    file_list = [
        "tmp/spec_path.not_used",
        "tmp/spec_path.yaml",
        "tmp/env_path.not_used",
        "tmp/env_path_" + tenant_id + ".yaml",
    ]
    os.makedirs("tmp")
    try:
        # Touch each fixture file; contents are irrelevant for recovery.
        for path in file_list:
            open(path, "w").close()
        pipeline_helper()._recover_spec_yaml([file_list], False)
        assert os.path.exists("tmp/spec_path.yaml")
        assert os.path.exists("tmp/env_path.yaml")
        assert not os.path.exists("tmp/spec_path_" + tenant_id + ".yaml")
        assert not os.path.exists("tmp/env_path_" + tenant_id + ".yaml")
        assert len(os.listdir("tmp")) == 2
    finally:
        # ROBUSTNESS FIX: clean up even when an assertion fails, so later
        # tests that recreate "tmp" do not crash on makedirs.
        shutil.rmtree("tmp")
def test_update_value_given_flattened_key():
    """_update_value_given_flattened_key sets nested values via dotted keys."""
    helper = pipeline_helper()
    nested = {"a": {"b": {"c": 1}}}
    helper._update_value_given_flattened_key(nested, "a.b.c", 2)
    assert nested["a"]["b"]["c"] == 2
    # Keys that walk past a leaf or through a missing branch must raise.
    for bad_key in ("a.b.c.d", "a.c.d"):
        with pytest.raises(KeyError):
            helper._update_value_given_flattened_key(nested, bad_key, 2)
def test_override_spec_yaml():
    """_override_spec_yaml rewrites one spec per manifest module, reversibly."""
    helper = pipeline_helper()
    spec_mapping = helper.config.tenant_overrides["mapping"][tenant_id]
    yaml_to_be_recovered = helper._override_spec_yaml(spec_mapping)
    # One 4-tuple of (possibly empty) paths per module in the manifest.
    assert len(yaml_to_be_recovered) == len(helper.module_loader.modules_manifest)
    for entry in yaml_to_be_recovered:
        assert len(entry) == 4
        for file_path in entry:
            if file_path:
                assert os.path.exists(file_path)
    # Undo the overrides so other tests see pristine spec files.
    helper._recover_spec_yaml(yaml_to_be_recovered, False)
def test_override_single_spec_yaml_without_environment_override():
    """_override_single_spec_yaml rewrites image/tags but not the environment."""
    sample_steps = Path(__file__).parent / "sample/steps"
    folder_path = os.path.join(sample_steps, "multinode_trainer_copy")
    # Work on a throwaway copy so the sample spec stays untouched.
    shutil.copytree(os.path.join(sample_steps, "multinode_trainer"), folder_path)
    spec_path = os.path.join(folder_path, "module_spec.yaml")

    def load_spec():
        with open(spec_path, "r") as file:
            return yaml.safe_load(file)

    # Before the override: Polymer image, no Office tag.
    spec = load_spec()
    assert (
        spec["environment"]["docker"]["image"]
        == "polymerprod.azurecr.io/training/pytorch:scpilot-rc2"
    )
    assert not spec["tags"]["Office"]
    spec_mapping = pipeline_helper().config.tenant_overrides["mapping"][tenant_id]
    (
        old_spec_path,
        old_env_file_path,
        new_env_file_path,
    ) = pipeline_helper()._override_single_spec_yaml(spec_path, spec_mapping, False)
    # Without environment override only the spec backup path is produced.
    assert old_spec_path == os.path.join(folder_path, "module_spec.not_used")
    assert not old_env_file_path
    assert not new_env_file_path
    # After the override: MCR image and Office tag applied.
    spec = load_spec()
    assert (
        spec["environment"]["docker"]["image"]
        == "mcr.microsoft.com/azureml/base-gpu:openmpi3.1.2-cuda10.1-cudnn7-ubuntu18.04"
    )
    assert spec["tags"]["Office"] == "aml-ds"
    shutil.rmtree(folder_path)
def test_override_single_spec_yaml_with_environment_override():
    """_override_single_spec_yaml strips the Polymer index from the new env file."""
    sample_steps = Path(__file__).parent / "sample/steps"
    folder_path = os.path.join(sample_steps, "multinode_trainer_copy")
    # Work on a throwaway copy so the sample spec stays untouched.
    shutil.copytree(os.path.join(sample_steps, "multinode_trainer"), folder_path)
    spec_path = os.path.join(folder_path, "module_spec.yaml")
    spec_mapping = pipeline_helper().config.tenant_overrides["mapping"][tenant_id]
    (
        old_spec_path,
        old_env_file_path,
        new_env_file_path,
    ) = pipeline_helper()._override_single_spec_yaml(spec_path, spec_mapping, True)
    polymer_index = "--index-url https://o365exchange.pkgs.visualstudio.com/_packaging/PolymerPythonPackages/pypi/simple/"
    # The preserved original env file still references the Polymer index...
    with open(old_env_file_path, "r") as file:
        assert polymer_index in " ".join(file.readlines())
    # ...while the rewritten env file no longer does.
    with open(new_env_file_path, "r") as file:
        assert polymer_index not in " ".join(file.readlines())
    shutil.rmtree(folder_path)
def test_remove_polymer_pkg_idx_if_exists_and_save_new():
    """_remove_polymer_pkg_idx_if_exists_and_save_new: hit and miss cases."""
    polymer_index = "--index-url https://o365exchange.pkgs.visualstudio.com/_packaging/PolymerPythonPackages/pypi/simple/"
    # Case 1: the file contains the Polymer index line -> it is rewritten.
    os.makedirs("tmp")
    with open("tmp/test.txt", "w") as file:
        file.writelines(["- " + polymer_index, "foo"])
    (
        found_index_url,
        new_file,
        new_file_path,
        old_file_path,
    ) = pipeline_helper()._remove_polymer_pkg_idx_if_exists_and_save_new(
        "tmp",
        "test.txt",
        polymer_index,
    )
    assert found_index_url
    assert new_file == "test_" + tenant_id + ".txt"
    assert new_file_path == os.path.join("tmp", new_file)
    assert old_file_path == os.path.join("tmp", "test.not_used")
    shutil.rmtree("tmp")
    # Case 2: an empty file -> nothing is found and no paths are produced.
    os.makedirs("tmp")
    open("tmp/test.txt", "w").close()
    (
        found_index_url,
        new_file,
        new_file_path,
        old_file_path,
    ) = pipeline_helper()._remove_polymer_pkg_idx_if_exists_and_save_new(
        "tmp",
        "test.txt",
        polymer_index,
    )
    assert not found_index_url
    assert not new_file
    assert not new_file_path
    assert not old_file_path
    shutil.rmtree("tmp")
| 2.140625 | 2 |
import os
from datetime import datetime, timedelta

#
# Airflow root directory
#
# Three levels up from this file (dags/config/settings.py -> project root).
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

#
# Dates
#
# BUG FIX: the original called datetime.now() twice (once for the start-of-day
# value and once for end-of-day), so a run straddling midnight could compute
# the two bounds for *different* days. Compute the base timestamp once.
# NOTE(review): these are naive local datetimes suffixed with a literal 'Z',
# which claims UTC — confirm downstream consumers really expect local time.
_yesterday = datetime.now() - timedelta(days=1)

# yesterday at beginning of day
yesterday_start = (
    _yesterday.replace(hour=0, minute=0, second=0, microsecond=0).isoformat() + 'Z'
)

# yesterday at end of day
yesterday_end = (
    _yesterday.replace(hour=23, minute=59, second=59, microsecond=999999).isoformat() + 'Z'
)
| 2.546875 | 3 |
src/network.py | KratosOmega/NAF-tensorflow | 0 | 12765653 | <filename>src/network.py
from logging import getLogger
logger = getLogger(__name__)
import tensorflow as tf
from tensorflow.contrib.framework import get_variables
from .ops import *
class Network:
    """Normalized Advantage Function (NAF) Q-network (TF1-style graph).

    Builds a state-value head V(x), a greedy-action head mu(x), and an
    advantage head A(x, u) = -1/2 (u - mu)^T P (u - mu), where P = L L^T is
    kept positive definite by exponentiating the diagonal of the learned
    lower-triangular matrix L. The Q-value is Q(x, u) = V(x) + A(x, u).
    """

    def __init__(self, sess, input_shape, action_size, hidden_dims,
                 use_batch_norm, use_seperate_networks,
                 hidden_w, action_w, hidden_fn, action_fn, w_reg,
                 scope='NAF'):
        """Construct the NAF graph under the given variable scope.

        Args:
            sess: TensorFlow session used for all graph executions.
            input_shape: observation shape (without the batch dimension).
            action_size: dimensionality of the continuous action space.
            hidden_dims: sizes of the fully connected hidden layers.
            use_batch_norm: batch-normalize the input / hidden layers.
            use_seperate_networks: build independent towers for v, l and mu
                instead of a single shared tower. (Spelling kept for API
                compatibility.)
            hidden_w: weight initializer for hidden layers.
            action_w: weight initializer for the action (mu) layer.
            hidden_fn: activation function for hidden layers.
            action_fn: activation function for the mu layer.
            w_reg: weight regularizer applied to hidden layers.
            scope: variable scope name under which all variables are created.
        """
        self.sess = sess
        with tf.compat.v1.variable_scope(scope):
            x = tf.compat.v1.placeholder(
                tf.float32, (None,) + tuple(input_shape), name='observations')
            u = tf.compat.v1.placeholder(
                tf.float32, (None, action_size), name='actions')
            is_train = tf.compat.v1.placeholder(tf.bool, name='is_train')
            hid_outs = {}
            with tf.name_scope('hidden'):
                if use_seperate_networks:
                    logger.info("Creating seperate networks for v, l, and mu")
                    # BUG FIX: the loop variable used to be named `scope`,
                    # shadowing the constructor argument; get_variables(scope)
                    # below then filtered on 'mu' instead of the real scope and
                    # collected nothing when separate networks were enabled.
                    for head_name in ['v', 'l', 'mu']:
                        with tf.compat.v1.variable_scope(head_name):
                            if use_batch_norm:
                                h = batch_norm(x, is_training=is_train)
                            else:
                                h = x
                            for idx, hidden_dim in enumerate(hidden_dims):
                                h = fc(h, hidden_dim, is_train, hidden_w, weight_reg=w_reg,
                                       activation_fn=hidden_fn, use_batch_norm=use_batch_norm,
                                       scope='hid%d' % idx)
                            hid_outs[head_name] = h
                else:
                    logger.info("Creating shared networks for v, l, and mu")
                    if use_batch_norm:
                        h = batch_norm(x, is_training=is_train)
                    else:
                        h = x
                    for idx, hidden_dim in enumerate(hidden_dims):
                        h = fc(h, hidden_dim, is_train, hidden_w, weight_reg=w_reg,
                               activation_fn=hidden_fn, use_batch_norm=use_batch_norm,
                               scope='hid%d' % idx)
                    hid_outs['v'], hid_outs['l'], hid_outs['mu'] = h, h, h
            with tf.name_scope('value'):
                V = fc(hid_outs['v'], 1, is_train,
                       hidden_w, use_batch_norm=use_batch_norm, scope='V')
            with tf.name_scope('advantage'):
                # l: packed entries of the lower-triangular matrix L;
                # mu: the greedy (argmax-Q) action for each state.
                l = fc(hid_outs['l'], int((action_size * (action_size + 1)) / 2),
                       is_train, hidden_w, use_batch_norm=use_batch_norm, scope='l')
                mu = fc(hid_outs['mu'], action_size, is_train, action_w,
                        activation_fn=action_fn, use_batch_norm=use_batch_norm, scope='mu')
                # Unpack L row by row; exponentiating the diagonal keeps
                # P = L L^T positive definite.
                pivot = 0
                rows = []
                for idx in range(action_size):
                    count = action_size - idx
                    diag_elem = tf.exp(tf.slice(l, (0, pivot), (-1, 1)))
                    non_diag_elems = tf.slice(l, (0, pivot + 1), (-1, count - 1))
                    row = tf.pad(tf.concat((diag_elem, non_diag_elems), 1), ((0, 0), (idx, 0)))
                    rows.append(row)
                    pivot += count
                L = tf.transpose(tf.stack(rows, axis=1), (0, 2, 1))
                P = tf.matmul(L, tf.transpose(L, (0, 2, 1)))
                # A(x, u) = -1/2 (u - mu)^T P (u - mu), reshaped to (batch, 1).
                tmp = tf.expand_dims(u - mu, -1)
                A = -tf.matmul(tf.transpose(tmp, [0, 2, 1]), tf.matmul(P, tmp)) / 2
                A = tf.reshape(A, [-1, 1])
            with tf.name_scope('Q'):
                Q = A + V
            with tf.name_scope('optimization'):
                self.target_y = tf.compat.v1.placeholder(tf.float32, [None], name='target_y')
                self.loss = tf.reduce_mean(
                    tf.compat.v1.squared_difference(self.target_y, tf.squeeze(Q)), name='loss')
            self.is_train = is_train
            self.variables = get_variables(scope)
            self.x, self.u, self.mu, self.V, self.Q, self.P, self.A = x, u, mu, V, Q, P, A

    def predict_v(self, x, u):
        """Return V(x) in inference mode (is_train=False)."""
        return self.sess.run(self.V, {
            self.x: x, self.u: u, self.is_train: False,
        })

    def predict(self, state):
        """Return the greedy action mu(state) in inference mode."""
        return self.sess.run(self.mu, {
            self.x: state, self.is_train: False
        })

    def update(self, optim, target_v, x_t, u_t):
        """Run one optimization step and return (Q, V, A, loss)."""
        _, q, v, a, loss = self.sess.run([
            optim, self.Q, self.V, self.A, self.loss
        ], {
            self.target_y: target_v,
            self.x: x_t,
            self.u: u_t,
            self.is_train: True,
        })
        return q, v, a, loss

    def make_soft_update_from(self, network, tau):
        """Create assign ops blending `network`'s weights into this one.

        BatchNorm statistics are copied verbatim; all other variables use the
        Polyak average tau * source + (1 - tau) * target.
        """
        logger.info("Creating ops for soft target update...")
        assert len(network.variables) == len(self.variables), \
            "target and prediction network should have same # of variables"
        self.assign_op = {}
        for from_, to_ in zip(network.variables, self.variables):
            if 'BatchNorm' in to_.name:
                self.assign_op[to_.name] = to_.assign(from_)
            else:
                self.assign_op[to_.name] = to_.assign(tau * from_ + (1 - tau) * to_)

    def hard_copy_from(self, network):
        """Copy all of `network`'s variables into this network immediately."""
        logger.info("Creating ops for hard target update...")
        assert len(network.variables) == len(self.variables), \
            "target and prediction network should have same # of variables"
        for from_, to_ in zip(network.variables, self.variables):
            self.sess.run(to_.assign(from_))

    def soft_update_from(self, network):
        """Run the assign ops created by make_soft_update_from."""
        for variable in self.variables:
            self.sess.run(self.assign_op[variable.name])
        return True
| 2.46875 | 2 |
simulation.py | Artisan-Lab/SMTimer | 5 | 12765654 | import time
import argparse
import numpy as np
import json
import os
import sys
# from matplotlib import pyplot
from torch.utils.data import DataLoader
from preprocessing import Constants
from util import construct_data_from_json
from dgl_treelstm.KNN import KNN
from dgl_treelstm.nn_models import *
from dgl_treelstm.metric import *
from preprocessing import Vocab
from preprocessing import varTree
from dgl_treelstm.dgl_dataset import dgl_dataset
from check_time import process_data
from train import pad_feature_batcher, batcher
from preprocessing.Vector_Dataset import Vector_Dataset
from preprocessing.Tree_Dataset import Tree_Dataset
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, plot_roc_curve, roc_curve, precision_recall_curve
import warnings
from dataset_filename_seperation import get_dataset_seperation
warnings.filterwarnings('ignore')
# simulation for different models
class Simulation:
def __init__(self, model, time_selection="adjust", threshold=200):
self.model = model
# this threshold can be adaptive, which is updated in the simulation to find a better border for timeout
self.threshold = threshold
# save data for adaptive threshold
self.time_record = {"timeout":[], "solvable":[]}
# this is the actual time setting, which is fixed for comparision the ground truth
self.time_out_setting = 200
self.time_selection = time_selection
if isinstance(self.model, TreeLSTM):
self.model_type = "TreeLSTM"
self.preprocess = Tree_Dataset.generate_feature_dataset
elif isinstance(self.model, KNN):
self.model_type = "KNN"
self.preprocess = Vector_Dataset.generate_feature_dataset
elif isinstance(self.model, LSTM):
self.model_type = "LSTM"
self.preprocess = Vector_Dataset.generate_feature_dataset
def load_model(self, input):
raise NotImplementedError
if self.model_type == "KNN":
dataset = th.load("data/gnucore/fv2/gnucore_train")
x = [i.feature for i in dataset]
y = [1 if i.gettime("adjust") > 300 else 0 for i in dataset]
self.model.fit(x, y)
elif self.model_type == "LSTM":
model = th.load("checkpoints/gnucore/pad_feature_l_z.pkl")["model"]
self.model.load_state_dict(model)
elif self.model_type == "TreeLSTM":
model = th.load("checkpoints/g_tree_feature_t_z_r_200.pkl")["model"]
self.model.load_state_dict(model)
def script_to_feature(self, data):
raise NotImplementedError
# feature = self.preprocess(script)
if isinstance(data, varTree):
dataloader = dgl_dataset([data], None)
iterator = iter(dataloader)
data = next(iterator)
feature = data.logic_tree
solve_time = data.gettime("original")
elif self.model_type == "LSTM":
dataloader = DataLoader(dataset=[data], batch_size=1, collate_fn=pad_feature_batcher('cpu', 'original'),
shuffle=False, num_workers=0)
iterator = iter(dataloader)
data = next(iterator)
feature = rnn_utils.pack_padded_sequence(data.feature, data.data_len, enforce_sorted=False,
batch_first=True)
solve_time = data.label
else:
feature, solve_time = data.logic_tree, data.gettime("original")
return feature, solve_time
def predict(self, feature, truth):
raise NotImplementedError
if self.model_type == "KNN":
predict_result = self.model.incremental_predict(feature, truth)
skip = predict_result
elif self.model_type == "LSTM":
self.model.eval()
with th.no_grad():
predict_result = self.model(feature)
skip = predict_result > self.threshold
else:
self.model.eval()
with th.no_grad():
h = th.zeros((1, 150))
c = th.zeros((1, 150))
predict_result = self.model(feature, h, c)
skip = predict_result > self.threshold
return predict_result, skip
def modify_threshold(self, result, truth):
if self.model_type == "KNN":
return
if result < self.threshold and truth > self.time_out_setting:
self.time_record["timeout"].append(result)
elif result < self.threshold and truth < self.time_out_setting:
self.time_record["solvable"].append(result)
if result < self.threshold and truth > self.time_out_setting:
self.dynamic_threshold()
print("decrease threshold to ", str(self.threshold))
return
    def dynamic_threshold(self):
        """Lower the classification threshold based on recorded history.

        Heuristic: find the smallest under-predicted timeout score that is
        still above what solvable queries typically score, then move the
        threshold just below it (bounded so it never drops more than 50 at
        once and never goes below 60). Swallows IndexError/ValueError from
        empty history silently (best-effort tuning).
        """
        timeout_list = np.array(self.time_record["timeout"])
        solvable_list = self.time_record["solvable"]
        try:
            # Upper bound of "normal" solvable scores: 95th percentile, mean,
            # or the floor of 60 — whichever is largest.
            solvable_limit = max(np.percentile(solvable_list, 95), np.mean(solvable_list), 60)
            suitable_timeout = list(filter(lambda x: x > solvable_limit, timeout_list))
            if len(suitable_timeout) == 0:
                suitable_timeout = [solvable_limit]
            suitable_min_timeout = min(suitable_timeout)
            # Never raise the threshold, only lower it.
            suitable_min_timeout = min(suitable_min_timeout, self.threshold)
            if isinstance(suitable_min_timeout, th.Tensor):
                suitable_min_timeout = suitable_min_timeout.item()
            # Largest solvable score that is still under the candidate threshold.
            max_solvable = max(filter(lambda x:x <= suitable_min_timeout, self.time_record["solvable"]))
            if isinstance(max_solvable, th.Tensor):
                max_solvable = max_solvable.item()
            # New threshold: just below the missed timeout, clamped by a max
            # step of 50 and an absolute floor of 60.
            self.threshold = max(suitable_min_timeout - 1, (suitable_min_timeout + max_solvable) / 2,
                                 self.threshold - 50, 60)
        except (IndexError,ValueError):
            pass
class KNN_Simulation(Simulation):
    """Simulation driver for the adaptive KNN timeout classifier."""

    def __init__(self, model, time_selection="adjust", threshold=200):
        super(KNN_Simulation, self).__init__(model, time_selection, threshold)
        self.model_type = "KNN"
        self.preprocess = Vector_Dataset.generate_feature_dataset
        # Flips to True once the test program's samples have been removed
        # from the fitted model (done lazily on the first sample seen).
        self.separate_test = False

    def load_model(self, input):
        """Fit the KNN from a JSON feature dump at *input*.

        Labels are 1 (timeout) when the selected time exceeds the timeout
        setting, else 0. For smt-comp dumps the benchmark name is the part
        of the filename before the first underscore.
        """
        dataset = construct_data_from_json(input)
        features = [entry.feature for entry in dataset]
        if "smt-comp" in input:
            filenames = [entry.filename.split("_")[0] for entry in dataset]
        else:
            filenames = [entry.filename for entry in dataset]
        labels = [
            1 if entry.gettime(self.time_selection) > self.time_out_setting else 0
            for entry in dataset
        ]
        self.model.fit(features, labels)
        self.model.filename = np.array(filenames)

    def script_to_feature(self, data):
        """Return (feature, solve_time); on first call, drop the test program
        from the fitted model so it never predicts from its own data."""
        if not self.separate_test:
            if ".smt2" in data.filename:
                program = data.filename.split("_")[0]
            else:
                program = data.filename
            self.model.remove_test(program)
            self.separate_test = True
        return data.feature, data.gettime(self.time_selection)

    def predict(self, feature, truth):
        """Classify one sample; the prediction doubles as the skip decision."""
        prediction = self.model.incremental_predict(feature, truth)
        return prediction, prediction
class LSTM_Simulation(Simulation):
    """Simulation driver for the sequence (LSTM) regressor."""

    def __init__(self, model, time_selection="adjust", threshold=200):
        super(LSTM_Simulation, self).__init__(model, time_selection, threshold)
        self.model_type = "LSTM"
        self.preprocess = Vector_Dataset.generate_feature_dataset

    def load_model(self, input):
        """Load trained LSTM weights from checkpoint *input* onto the CPU."""
        checkpoint = th.load(input, map_location='cpu')["model"]
        self.model.load_state_dict(checkpoint)

    def script_to_feature(self, data):
        """Pack one sample into a padded single-element batch.

        Returns (packed_sequence, solve_time).
        """
        loader = DataLoader(
            dataset=[data],
            batch_size=1,
            collate_fn=pad_feature_batcher('cpu', self.time_selection),
            shuffle=False,
            num_workers=0,
        )
        batch = next(iter(loader))
        packed = rnn_utils.pack_padded_sequence(
            batch.feature, batch.data_len, enforce_sorted=False, batch_first=True)
        return packed, batch.label.item()

    def predict(self, feature, truth):
        """Run inference; skip when the predicted time exceeds the threshold."""
        self.model.eval()
        with th.no_grad():
            prediction = self.model(feature)
            skip = prediction > self.threshold
        return prediction, skip
class TreeLSTM_Simulation(Simulation):
    """Simulation driver for the Tree-LSTM model over SMT syntax trees."""

    def __init__(self, model, time_selection="adjust", threshold=200):
        super(TreeLSTM_Simulation, self).__init__(model, time_selection, threshold)
        self.model_type = "TreeLSTM"
        self.preprocess = Tree_Dataset.generate_feature_dataset

    def load_model(self, input):
        """Load trained Tree-LSTM weights from checkpoint *input* onto the CPU."""
        model = th.load(input, map_location='cpu')["model"]
        # model = th.load("checkpoints/g_tree+feature_t_z_r_200.pkl")["model"]
        self.model.load_state_dict(model)

    def script_to_feature(self, data):
        """Batch one tree sample through the dgl pipeline.

        Returns (batch, solve_time). The whole batch object (not only its
        graph) is returned because predict() needs both the graph and the
        attached node features.
        """
        smt_vocab_file = './data/gnucore/smt.vocab'
        smt_vocab = Vocab(filename=smt_vocab_file,
                          data=[Constants.UNK_WORD])
        data = dgl_dataset([data], None, vocab=smt_vocab, time_selection=self.time_selection, time_threshold=self.threshold)
        dataloader = DataLoader(dataset=data, batch_size=1, collate_fn=batcher("cpu"),
                                shuffle=False, num_workers=0)
        iterator = iter(dataloader)
        data = next(iterator)
        solve_time = data.label[0].item()
        return data, solve_time

    def predict(self, feature, truth):
        """Run inference on a batched tree; skip on predicted timeout.

        Zero-initialized hidden/cell states are created per graph node.
        """
        self.model.eval()
        n = feature.graph.number_of_nodes()
        with th.no_grad():
            h = th.zeros((n, 150))
            c = th.zeros((n, 150))
            predict_result = self.model(feature, h, c)
        skip = predict_result[0] > self.threshold
        return predict_result[0], skip
# result saving structure
class Evalution:
    """Accumulates predictions and ground truth, then scores classification.

    (Name kept as in the original codebase for compatibility.)
    """

    def __init__(self, pred=np.array([]), truth=np.array([]), time_out_setting=200):
        self.pred = self.get_numpy(pred)
        self.truth = self.get_numpy(truth)
        self.classify_result = np.array([])
        # Solving times above this value count as the positive (timeout) class.
        self.time_out_setting = time_out_setting

    def get_numpy(self, data):
        """Return *data* as a NumPy-compatible value (torch tensors are
        moved to CPU and converted); anything else passes through."""
        return data.cpu().numpy() if isinstance(data, th.Tensor) else data

    def add(self, pred, truth, classify_result):
        """Append one sample's prediction, true time, and binary decision."""
        self.pred = np.append(self.pred, self.get_numpy(pred))
        self.truth = np.append(self.truth, self.get_numpy(truth))
        self.classify_result = np.append(
            self.classify_result, self.get_numpy(classify_result))

    def score(self):
        """Return (accuracy, precision, recall, f1) against binarized truth."""
        binary_truth = [1 if value > self.time_out_setting else 0 for value in self.truth]
        return (
            accuracy_score(binary_truth, self.classify_result),
            precision_score(binary_truth, self.classify_result),
            recall_score(binary_truth, self.classify_result),
            f1_score(binary_truth, self.classify_result),
        )
# time calculation
class Time_Section:
    """Tracks cumulative solving time with and without the predictor.

    ``final_time`` is the simulated total when predicted-timeout queries
    are skipped; every query additionally pays a fixed 1s "first solving
    phase" that is added manually.
    """

    def __init__(self):
        self.original_time = 0
        self.predict_time = 0
        self.final_time = 0
        self.preprocessing_time = 0

    def update(self, predict_result, solve_time):
        """Account for one query given its skip decision and true time."""
        self.original_time += solve_time
        # Fixed 1s first solving phase, paid regardless of the decision.
        self.final_time += 1
        if not predict_result:
            # Predicted solvable: the full solving time is actually spent.
            self.final_time += solve_time

    def add_prediction_time(self, predict_used_time, preprocessing_time):
        """Fold model inference and feature-extraction time into the total."""
        self.preprocessing_time = preprocessing_time
        self.predict_time = predict_used_time
        self.final_time += predict_used_time + preprocessing_time
# load the test data, script to feature just like the training, we do not saving the result because the program number
# we also want to include the processing time into final time
def load_data(model, input):
    """Extract features for every SMT script under *input* for *model*.

    Returns (feature_list, input_path); the path is rewritten for the
    smt-comp layout (benchmark name is re-appended after extraction).

    NOTE(review): builds the klee cache path from the module-level
    ``model_name`` global rather than the *model* parameter — confirm
    this is intended.
    """
    dataset = None
    if model == "Tree-LSTM":
        dataset = Tree_Dataset(treeforassert=True, feature_number_limit=100)
    elif model == "lstm":
        dataset = Vector_Dataset(feature_number_limit=50)
    elif model == "KNN":
        dataset = Vector_Dataset(feature_number_limit=2)
    else:
        dataset = Tree_Dataset(feature_number_limit=100)
    if "smt-comp" in input:
        # smt-comp benchmarks share one directory; the benchmark name is the
        # last path component and is used as a filename-prefix filter.
        test_filename = input.split("/")[-1]
        input = "/".join(input.split("/")[:-1])
        dataset.fs_list = dataset.generate_feature_dataset(input, fileprefix=test_filename)
        if len(dataset.fs_list) == 0:
            print("smt-comp file are not separated with filename, but please use the similar structure, more information in simulation_smt-comp.md")
        # test_filename1 = [x.filename for x in dataset.fs_list]
        # test_file = list(filter(lambda x:x.split("_")[0] == test_filename, test_filename1))
        # dataset.fs_list = dataset.split_with_filename(test_file)[1]
        input = input + "/" + test_filename
    else:
        if "klee" in input:
            # the klee processing is time-consuming because of the SMT scripts structure, so we saved the result for next time
            # for other dataset we extract feature every time it simulates.
            data_input = "data/klee/" + input.split("/")[-1] + model_name
            try:
                if model == "KNN":
                    dataset = construct_data_from_json(data_input)
                else:
                    dataset = th.load(data_input)
            except (TypeError,FileNotFoundError):
                # Cache miss: extract features and (for non-KNN) persist them.
                dataset.generate_feature_dataset(input)
                if model != "KNN":
                    th.save(dataset, data_input)
        else:
            dataset.generate_feature_dataset(input)
    return dataset.fs_list, input
# mainly for cross dataset prediction for adaptive KNN model, rely on my model naming pattern
def identify_dataset(input, dataset):
    """Infer the dataset name from a path or checkpoint name.

    Returns one of "gnucore", "busybox", "smt-comp", "klee". Relies on the
    project's naming pattern: an explicit dataset directory in the path, or
    the g_/b_/s_/k_ checkpoint prefix. Defaults to "gnucore". The *dataset*
    argument is unused and kept for interface compatibility.
    """
    for name in ("busybox", "smt-comp", "klee"):
        if name in input:
            return name
    markers = (
        ("g_", "gnucore"),
        ("gnucore/", "gnucore"),
        ("b_", "busybox"),
        ("s_", "smt-comp"),
        ("k_", "klee"),
    )
    for marker, name in markers:
        if marker in input:
            return name
    return "gnucore"
# our baseline result, not usable without result from PCC
def make_PCC_output(input, output_result):
    """Score the PCC baseline results per program and optionally save JSON.

    Reads the PCC result file at *input* (or, if missing, the three
    cross-validation pieces ``<input>_1..3.json``), groups results by
    program-name prefix, computes metrics for each, and writes a JSON
    summary per program when *output_result* is true.

    NOTE(review): in the fallback branch, ``data`` ends up holding the last
    piece loaded and is still used below for ``time_limit_setting`` —
    assumes all pieces share the same setting; confirm.
    """
    if os.path.exists(input):
        with open(input, "r") as f:
            data = json.load(f)
            serial_result = sorted(data["result"], key=lambda x:(len(x[0]), x[0]))
    else:
        # Result file split into three cross-validation pieces.
        serial_result = []
        for i in range(1,4):
            with open(input[:-5] + "_" + str(i) + ".json", "r") as f:
                data = json.load(f)
                serial_result.extend(sorted(data["result"], key=lambda x: (len(x[0]), x[0])))
    od = serial_result
    # Iterate over every known program name and evaluate its slice.
    # NOTE(review): the inner ``for i in range(len(truth))`` below shadows
    # this loop variable; harmless as written, but fragile.
    for i in ["arch", "chgrp", "csplit", "dirname", "fmt", "id", "md5sum", "mv", "pinky", "readlink", "seq",
              "sleep", "tac", "tsort", "uptime", "base64", "chmod", "cut", "du", "fold", "join", "mkdir",
              "nice", "pr", "rm", "setuidgid", "sort", "tail", "tty", "users", "basename", "chroot", "date", "expand", "ginstall",
              "link", "mkfifo", "nl", "printenv", "rmdir", "sha1sum", "split", "test", "uname", "vdir",
              "cat", "comm", "df", "expr", "head", "ln", "mknod", "od", "printf", "runcon", "shred", "stat", "touch", "unexpand", "wc",
              "chcon", "cp", "dir", "factor", "hostname", "ls", "mktemp", "pathchk", "ptx", "shuf", "su",
              "tr", "unlink", "who", "ifconfig", "rpm", "Sage2", "klogd", "mcm", "lfsr"]:
        serial_result = list(filter(lambda x: x[0].startswith(i), od))
        if len(serial_result) == 0:
            continue
        print(i)
        truth = [x[2] for x in serial_result]
        if isinstance(truth[0], list):
            # Truth recorded as [solved?]; map to 0s (solved) / 300s (timeout).
            truth = list(map(lambda x:0 if x[0] else 300, truth))
        pred = [x[1] for x in serial_result]
        # A bare Simulation carries the adaptive-threshold machinery.
        dt_simulation = Simulation(None)
        dt_simulation.model_type = "DNN"
        if isinstance(pred[0], int):
            # Already binary classifications.
            classify_result = pred
        else:
            # Regression scores: replay them through the adaptive threshold.
            threshold_list = []
            for i in range(len(truth)):
                dt_simulation.modify_threshold(pred[i], truth[i])
                threshold_list.append(dt_simulation.threshold)
            classify_result = [1.0 if pred[i] > threshold_list[i] else 0.0 for i in range(len(pred))]
        # classify_result = [1.0 if x > data["time_limit_setting"] else 0.0 for x in pred]
        original_time = sum(truth)
        pred_truth_tuple = list(
            zip(range(len(pred)), pred, truth, classify_result))
        pred_truth_diff_tuple = list(filter(lambda a: a[3] != (a[2] > data["time_limit_setting"]), pred_truth_tuple))
        # Keep only queries classified as timeout; their true time is saved.
        pred_truth_tuple = list(filter(lambda a: a[3] != 0, pred_truth_tuple))
        final_time = original_time - sum([x[2] for x in pred_truth_tuple])
        truth = [1 if x > data["time_limit_setting"] else 0 for x in truth]
        acc = accuracy_score(truth, classify_result)
        pre = precision_score(truth, classify_result)
        rec = recall_score(truth, classify_result)
        f1 = f1_score(truth, classify_result)
        print_output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "pred_truth_diff_tuple": pred_truth_diff_tuple,
                        "original_time": original_time,
                        "total_time": final_time, "input": input, "pos_num":sum(truth), "tp": sum(truth)*rec,
                        "fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
        print(print_output)
        output = {"train_dataset": "gnucore", "test_dataset": "gnucore", "predicted_result": pred,
                  "acutal_solving_time": truth, "original_time": original_time, "total_time": final_time,
                  "metrics": {"acc": acc, "pre": pre, "rec": rec, "f1": f1, "pos_num":sum(truth), "tp": sum(truth)*rec,
                              "fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec},
                  "time_out_setting": data["time_limit_setting"],
                  "model": "PCC", "input": input}
        output = json.dumps(output, indent=4)
        # print(print_output)
        print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
        # fpr, tpr, thresholds = roc_curve(truth, pred)
        # pyplot.plot(fpr, tpr, lw=1, label="lstm")
        # # print(fpr, tpr, thresholds)
        # pyplot.xlim([0.00, 1.0])
        # pyplot.ylim([0.00, 1.0])
        # pyplot.xlabel("False Positive Rate")
        # pyplot.ylabel("True Positive Rate")
        # pyplot.title("ROC")
        # pyplot.legend(loc="lower right")
        # pyplot.savefig(r"./ROC.png")
        # pyplot.show()
        if output_result:
            try:
                outpur_path = "_".join(["gnucore", input.split("/")[-1], "DNN"]) + ".json"
                with open("simulation_result/" + outpur_path, "w")as f:
                    f.write(output)
            except:
                # Fallback for unwritable/odd paths.
                with open("simulation_result/output.json", "w")as f:
                    f.write(output)
# output the result for a single program
# to do: not support for adaptive threshold for regression simulation
def make_output(dsn1, dsn2, input, simulation, result, time_section, output_result=True, plot_picture=True):
    """Print and optionally save a JSON report for one simulated program.

    *dsn1*/*dsn2* are the train/test dataset names, *result* an Evalution,
    *time_section* a Time_Section. *plot_picture* is currently unused
    (plotting code is commented out).

    NOTE(review): reads the module-level ``args`` global below; also,
    ``args.classification`` is not defined by parse_arg(), so that branch
    raises AttributeError and the bare except falls back to output.json —
    confirm whether this is intended.
    """
    pred_truth_tuple = list(zip(range(len(result.pred)), result.pred.tolist(), result.truth.tolist(), result.classify_result))
    # Keep only misclassified queries for the printed diff.
    pred_truth_tuple = list(filter(lambda a:a[3] != (a[2] > simulation.time_out_setting), pred_truth_tuple))
    truth = [1 if x > simulation.time_out_setting else 0 for x in result.truth]
    acc = accuracy_score(truth, result.classify_result)
    pre = precision_score(truth, result.classify_result)
    rec = recall_score(truth, result.classify_result)
    f1 = f1_score(truth, result.classify_result)
    # 2x2 confusion matrix: rows = truth, cols = prediction.
    confusion_matrix = np.zeros((2, 2))
    for t, p in zip(truth, result.classify_result):
        confusion_matrix[t][int(p)] += 1
    # print_output = {"train_dataset": dsn1, "test_dataset": dsn2, "pred_truth_diff_tuple": pred_truth_tuple,
    #                 "original_time": time_section.original_time,
    #                 "predict_time":time_section.predict_time + time_section.preprocessing_time,
    #                 "total_time": time_section.final_time, "input":input, "pos_num":sum(truth), "tp": sum(truth)*rec,
    #                 "fn": sum(truth)*(1 - rec), "fp": sum(truth)*rec/pre - sum(truth)*rec}
    print_output = {"timeout_query_num":sum(truth), "true-positive number": confusion_matrix[1][1],
                    "false-negative number": confusion_matrix[1][0], "false-positive number": confusion_matrix[0][1]}
    output = {"train_dataset": dsn1, "test_dataset": dsn2, "predicted_result": result.pred.tolist(),
              "acutal_solving_time": result.truth.tolist(), "original_time": time_section.original_time, "predict_time":
              time_section.predict_time + time_section.preprocessing_time, "total_time": time_section.final_time,
              "metrics":{"acc": acc, "pre": pre, "rec": rec, "f1": f1}, "time_out_setting": simulation.time_out_setting,
              "model":simulation.model_type, "input":input, "pos_num":sum(truth), "tp": confusion_matrix[1][1],
              "fn": confusion_matrix[1][0], "fp": confusion_matrix[0][1]}
    # Nothing was simulated: bail out before printing/saving.
    if not len(result.truth):
        return
    output = json.dumps(output, indent=4)
    print("train dataset:" + dsn1)
    # print("test dataset:" + dsn2)
    print("test program:" + input)
    print("prediction truth difference tuple(index, predicted result, truth, classification result):")
    print(pred_truth_tuple)
    print("original solving time:" + str(int(time_section.original_time)) + "s")
    print("prediction time:" + str(int(time_section.predict_time + time_section.preprocessing_time)) + "s")
    print("solving time with the predictor:" + str(int(time_section.final_time)) + "s")
    print(print_output)
    print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
    # if simulation.model_type != 'KNN':
    #     fpr, tpr, thresholds = roc_curve(result.truth > simulation.time_out_setting, result.pred)
    #     pyplot.plot(fpr, tpr, lw=1, label=simulation.model_type)
    #     # print(fpr, tpr, thresholds)
    #     pyplot.xlim([0.00, 1.0])
    #     pyplot.ylim([0.00, 1.0])
    #     pyplot.xlabel("False Positive Rate")
    #     pyplot.ylabel("True Positive Rate")
    #     pyplot.title("ROC")
    #     pyplot.legend(loc="lower right")
    #     pyplot.savefig(r"./ROC.png")
    #     pyplot.show()
    if output_result:
        try:
            # Output-file suffix encodes the run mode: classification (_c),
            # adaptive (_m) or plain regression (_r); none for KNN.
            if args.model_name == "KNN":
                identify = ""
            elif args.classification:
                identify = "_c"
            elif args.adapt:
                identify = "_m"
            else:
                identify = "_r"
            outpur_path = "_".join([dsn1, input.split("/")[-1], simulation.model_type]) + identify + ".json"
            with open("simulation_result/" + outpur_path, "w")as f:
                f.write(output)
        except:
            with open("simulation_result/output.json", "w")as f:
                f.write(output)
# automatic partition selection since we use cross validation to generate three piece of result for a model
# used for the hardcoded switch
def choose_input(dataset, input, load_path):
    """Pick the cross-validation checkpoint that excludes the test program.

    The project trains three folds per model; this maps the test program
    name (last path component; for smt-comp, the part before the first
    underscore) to the fold file ``<base>_<k>.<ext>``. If *load_path*
    already exists on disk it is used directly; if the program matches no
    fold, an empty string is returned.
    """
    fold_members = get_dataset_seperation(dataset)
    program = input.split("/")[-1]
    if dataset == "smt-comp":
        program = program.split("_")[0]
    if os.path.exists(load_path):
        return load_path
    base = load_path.split(".")[0]
    ext = load_path.split(".")[1]
    for fold_index in range(3):
        if program in fold_members[fold_index]:
            return ".".join([base + "_" + str(fold_index), ext])
    return ""
# simulate the solving in real order, in the simulation, the predicted timeout solving would be skipped,
# the time different is taken as the time saved.
# the simulation may not reflect the real situation since wrongly skip path means the change of path selection, but if
# you give it a low priority, then these paths are just deferred, you may execute more paths in the same time budget.
def simulation_for_single_program(test_directory, args):
    """Replay recorded solving times for one program through a predictor.

    Builds the model selected by the module-level ``model_name``, loads the
    matching checkpoint, converts each recorded query to a feature, predicts
    skip/solve, and accumulates time savings and metrics. Skipping a query
    that would have timed out saves its recorded solving time; no actual
    solving happens.

    NOTE(review): relies on the module-level ``input_list`` and
    ``model_name`` globals set in the ``__main__`` block.
    """
    s = time.time()
    input_index = args.input_index
    load_path = args.load_file
    # some setting process since all simulation use one entry
    if not args.regression:
        regression = False
    else:
        # Regression mode reuses the classification checkpoint name pattern.
        input_list[int(input_index)] = input_list[int(input_index)].replace("_r_", "_c_")
        regression = True
    if model_name == "KNN":
        knn = KNN()
        simulation = KNN_Simulation(knn, time_selection=args.time_selection)
        if not input_index:
            input_index = 8
    elif model_name == "lstm":
        lstm = LSTM(150, regression, False)
        simulation = LSTM_Simulation(lstm, time_selection=args.time_selection)
        if not input_index:
            input_index = 0
    else:
        tree_lstm = TreeLSTM(133, 150, 150, 1, 0.5, regression, False, cell_type='childsum', pretrained_emb=None)
        simulation = TreeLSTM_Simulation(tree_lstm, time_selection=args.time_selection)
        if not input_index:
            input_index = 2
    # setting timeout threshold
    # for original time, we collect the data with timeout with 100s, larger than it would be useless
    simulation.time_out_setting = args.threshold
    if test_directory == None:
        test_directory = input_list[int(input_index)]
    serial_data, test_input = load_data(model_name, test_directory)
    time_section = Time_Section()
    result = Evalution(time_out_setting=args.threshold)
    # for cross project, identify dataset name
    dsn1 = identify_dataset(input_list[int(input_index)], None)
    dsn2 = identify_dataset(test_input, serial_data)
    # load the model for different approach
    if load_path == None:
        load_path = input_list[int(input_index)]
        if model_name != "KNN":
            # Pick the cross-validation fold that excludes this test program.
            load_path = choose_input(dsn1, test_input, load_path)
    simulation.load_model(load_path)
    s1 = time.time()
    aindex = 0
    # simulation system, but not actual solving since the solving time is consuming, and time may be different
    for data in serial_data:
        data_index = len(result.truth)
        feature, solve_time = simulation.script_to_feature(data)
        predict_result, skip = simulation.predict(feature, 1 if solve_time > simulation.time_out_setting else 0)
        # Progress marker every 500 queries.
        if len(result.pred) % 500 == 0:
            print(len(result.pred))
        if model_name != "KNN" and regression and args.adapt:
            # Adaptive threshold replay (the ``pass`` is a no-op leftover).
            pass
            simulation.modify_threshold(predict_result, solve_time)
        if model_name != "KNN" and not regression:
            # Classification head: argmax over log-softmax logits.
            pred = th.argmax(F.log_softmax(predict_result), 1)
            skip = pred == 1
            predict_result = 1 if skip else 0
        time_section.update(skip, solve_time)
        result.add(predict_result, solve_time, skip)
        aindex += 1
    e = time.time()
    time_section.add_prediction_time(e - s1, s1 - s)
    make_output(dsn1, dsn2, test_directory, simulation, result, time_section, True, True)
def parse_arg():
    """Build the simulation CLI parser, parse argv, and return the namespace.

    Also prints a startup banner with the parsed arguments.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--model_name', default="KNN",
                     help="model type, allow 'lstm', 'tree-lstm', 'KNN'")
    cli.add_argument('--test_directory', default=None,
                     help="the script saving directory for test program")
    cli.add_argument('--load_file', default=None,
                     help="the path to model for evaluation")
    cli.add_argument('--input_index', type=int, default=8,
                     help="short-way for switch evaluation model,hardcoded, not recommanded to change for use")
    cli.add_argument('--time_selection', default='original',
                     help="the time label you want to use, allow 'original', 'adjust', the 'adjust' stand for 'z3' by now, modify when you experiment with other solver")
    cli.add_argument('--regression', action='store_true',
                     help="used for time prediction(regression),not use for timeout constraint classification(classification)")
    cli.add_argument('--adapt', action='store_true',
                     help="an adaptive time threshold for neural network models used for regression, because the predicted timeout threshold varies for different programs")
    cli.add_argument('--threshold', type=int, default=200,
                     help="the timeout threshold for solving")
    cli.add_argument('--batch-size', type=int, default=64,
                     help="some lstm setting in case you change the model")
    cli.add_argument('--x-size', type=int, default=300)
    cli.add_argument('--h-size', type=int, default=150)
    cli.add_argument('--epochs', type=int, default=40)
    cli.add_argument('--num_classes', type=float, default=2)
    parsed = cli.parse_args()
    print()
    print("Simulation start:")
    print(parsed)
    return parsed
if __name__ == '__main__':
    # Entry point: parse CLI arguments, resolve the hard-coded checkpoint
    # shortcuts, and run the simulation for one program directory.
    args = parse_arg()
    model_name = args.model_name
    input_index = args.input_index
    # hardcoded short-way for switch evaluation model
    input_list = ["checkpoints/simulation/g_serial_pad_feature_l_z_r_200.pkl",#0
                  "checkpoints/simulation/g_serial_tree_feature_t_z_r_200.pkl",#1
                  "checkpoints/simulation/g_tree+feature_t_z_r_200.pkl",#2
                  "checkpoints/simulation/b_serial_pad_feature_l_z_r_200.pkl",#3
                  "checkpoints/simulation/b_serial_tree_feature_t_z_r_200.pkl",#4
                  "checkpoints/simulation/b_tree+feature_t_z_r_200.pkl",#5
                  "checkpoints/simulation/s_serial_pad_feature_l_z_r_200.pkl",#6
                  "checkpoints/simulation/s_tree_feature_t_z_r_200.pkl",#7
                  "data/gnucore/fv2_serial/train",#8
                  "data/busybox/fv2_serial/train",#9
                  "data/smt-comp/fv2_serial/train",#10
                  "data/klee/fv2_serial/train",#11
                  "checkpoints/simulation/k_serial_pad_feature_l_z_r_200.pkl",#12
                  "checkpoints/simulation/k_serial_tree_feature_l_z_r_200.pkl"]#13
    # Reject out-of-range shortcut indices when no explicit model file is given.
    if args.load_file == None and (args.input_index > 13 or args.input_index < 0):
        print("these paths are hardcoded shortway for specific directory name")
        print(input_list)
        exit(0)
    # test for all programs in a dataset, the home directory is "data/gnucore/single_test"
    # test_input_list = []
    # for root, dir, files in os.walk("data/gnucore/single_test"):
    #     if not root.endswith("single_test"):
    #         test_input_list.append(root)
    # for i in test_input_list:
    #     input = i
    #     simulation_for_single_program(test_directory, input_index)
    if args.test_directory:
        test_directory = args.test_directory
    else:
        # Default example program when no directory is supplied.
        test_directory = "data/example/arch"
    # some test
    # test_directory = "data/smt-comp/QF_BV/Sage"
    # test_directory = "data/klee/arch-43200/solver-queries.smt2"
    simulation_for_single_program(test_directory, args)
    # make_PCC_output("data/PCC_result/mcm_c.json", False)
    # regression simulation, not remember much, different time threshold
    # input = "checkpoints/smt-comp/serial_pad_feature_evaluation_c.pkl"
    # if os.path.exists(input):
    #     serial_result = th.load(input)["result"]
    # else:
    #     serial_result = []
    #     for i in range(1, 4):
    #         a = th.load(input[:-4] + "_" + str(i) + ".pkl")["result"]
    #         serial_result.extend(a)
    # result = serial_result
    # pred = np.array(list(map(lambda x:x[0], result)))
    # truth = np.array(list(map(lambda x:x[1], result)))
    # for a in [40,50,60,100,150,200,250]:
    #     if truth.dtype == "int64":
    #         t, p = truth, pred
    #     else:
    #         t, p = truth > a, pred > a
    #     print("threshold", a)
    #     acc = accuracy_score(t, p)
    #     pre = precision_score(t, p)
    #     rec = recall_score(t, p)
    #     f1 = f1_score(t, p)
    #     print('test accuracy: {:.3}, precision: {:.3}, recall: {:.3}, f1 score: {:.3}'.format(acc, pre, rec, f1))
    #     if truth.dtype == "int64":
    #         break
    #     try:
    #         fpr, tpr, thresholds = precision_recall_curve(truth > a, pred)
    #         pyplot.plot(tpr, fpr, lw=1, label="lstm")
    #         # print(fpr)
    #         # print(tpr)
    #         # print(thresholds)
    #         i = np.searchsorted(thresholds, a)
    #         print(fpr[i], tpr[i], thresholds[i])
    #         pyplot.xlim([0.00, 1.0])
    #         pyplot.ylim([0.00, 1.0])
    #         pyplot.xlabel("False Positive Rate")
    #         pyplot.ylabel("True Positive Rate")
    #         pyplot.title("ROC")
    #         pyplot.legend(loc="lower right")
    #         pyplot.savefig(r"./ROC.png")
    #         pyplot.show()
    #     except (IndexError, ValueError):
    #         pass
from setuptools import setup
# Package metadata for the ``integrity`` distribution.
setup(
    name="integrity",
    version="0.1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=["integrity"],
    # Installs an ``integrity`` console command mapped to integrity.__main__:main.
    entry_points={"console_scripts": ["integrity = integrity.__main__:main"]},
)
| 1.132813 | 1 |
from SingleLog.log import Logger
from . import data_type
from . import i18n
from . import connect_core
from . import screens
from . import exceptions
from . import command
from . import _api_util
def get_bottom_post_list(api, board):
    """Collect the pinned ("bottom", ★-marked) posts of a PTT board.

    Navigates to the end of *board*, counts the ★ rows on the final screen,
    then walks upward one post at a time, querying each post. Returns the
    posts oldest-first (the walk itself is newest-first, hence the final
    reversal). Returns an empty list when the board has no pinned posts.
    """
    api._goto_board(board, end=True)
    logger = Logger('get_bottom_post_list', Logger.INFO)
    last_screen = api.connect_core.get_screen_queue()[-1]
    # Pinned posts are marked with ★ in the leftmost columns of the listing.
    bottom_screen = [line for line in last_screen.split('\n') if '★' in line[:8]]
    bottom_length = len(bottom_screen)
    # bottom_screen = '\n'.join(bottom_screen)
    # print(bottom_screen)
    if bottom_length == 0:
        logger.info(i18n.catch_bottom_post_success)
        return list()
    # First command: just query the currently selected (last) post.
    cmd_list = list()
    cmd_list.append(command.query_post)
    cmd = ''.join(cmd_list)
    target_list = [
        connect_core.TargetUnit(
            i18n.catch_post_success,
            screens.Target.QueryPost,
            break_detect=True,
            refresh=False,
            log_level=Logger.DEBUG),
        connect_core.TargetUnit(
            i18n.post_deleted,
            screens.Target.InBoard,
            break_detect=True,
            log_level=Logger.DEBUG),
        connect_core.TargetUnit(
            i18n.no_such_board,
            screens.Target.MainMenu_Exiting,
            exceptions_=exceptions.NoSuchBoard(api.config, board)
        ),
    ]
    result = list()
    for _ in range(0, bottom_length):
        api.connect_core.send(cmd, target_list)
        last_screen = api.connect_core.get_screen_queue()[-1]
        # Parse the query screen to recover the post's AID for a full fetch.
        lock_post, post_author, post_title, post_aid, post_web, post_money, list_date, push_number, post_index = \
            _api_util.parse_query_post(
                api,
                last_screen)
        current_post = api.get_post(board, aid=post_aid, query=True)
        # print(current_post.aid)
        # print(current_post.title)
        # print('==========================')
        result.append(current_post)
        # Subsequent commands: leave the query, move up one post, query again.
        cmd_list = list()
        cmd_list.append(command.enter)
        cmd_list.append(command.up)
        cmd_list.append(command.query_post)
        cmd = ''.join(cmd_list)
    logger.info(i18n.catch_bottom_post_success)
    return list(reversed(result))
| 2.25 | 2 |
test/core/test_shellcode.py | LuDubies/flopz | 7 | 12765657 | <gh_stars>1-10
from flopz.core.shellcode import Shellcode
from flopz.arch.ppc.vle.e200z0 import E200Z0
from flopz.arch.ppc.vle.instructions import *
def test_basic_shellcode():
    """Exercise the basic Shellcode contract: addressable, holds
    instructions, assembles to bytes, and is iterable."""
    # it should be an AddressableObject
    s = Shellcode()
    assert(s.object_addr == 0)
    # it should keep a list of instructions
    assert(len(s.instructions) == 0)
    assert(s.get_instructions() == s.instructions)
    # it provides a helper method for assembling its instructions
    assert(s.bytes() == b'')
    # it provides a method to iterate over all instructions
    arch = E200Z0()
    s = Shellcode(instructions=[
        SeAdd(arch.r0, arch.r1),
        SeB(0),
    ])
    # Iterating yields each stored Instruction in order.
    assert(any(s))
    assert(all([isinstance(ins, Instruction) for ins in s]))
| 2.46875 | 2 |
"""
Set of bot commands designed for Maths Challenges.
"""
from io import BytesIO
import aiohttp
import dateutil.parser
import httpx
from discord import Colour, Embed, File
from discord.ext import tasks
from discord.ext.commands import Bot, Cog, Context, command
from html2markdown import convert
from cdbot.constants import Maths as constants
async def get_challenges(
    client: httpx.AsyncClient, page_index: int = 0, page_size: int = 999
):
    """Get challenges, given the relevant parameters.

    Queries the King's Maths School content API for published ``mathsQuiz``
    entries, newest-first (ordered by descending answer date), and returns
    the raw ``items`` list from the JSON response.
    """
    return (
        await client.post(
            constants.Challenges.URL,
            headers=dict(accessToken=constants.Challenges.TOKEN),
            json={
                "pageIndex": page_index,
                "pageSize": page_size,
                "orderBy": [{"desc": "answerDate"}],
                "where": [
                    {"field": "sys.versionStatus", "equalTo": "published"},
                    {"field": "sys.contentTypeId", "in": ["mathsQuiz"]},
                ],
                "fields": ["entryTitle", "category", "sys", "description", "answer"],
            },
        )
    ).json()["items"]
async def get_challenge(number: int) -> dict:
    """Fetch challenge *number* (1 = most recent) and normalize it to a dict.

    Makes two API calls: one to page to the Nth newest challenge, and one to
    fetch its question body by slug. Returns a dict with title, publish date
    (dd/mm/YYYY), category, the question text converted to Markdown, an
    optional image URL, description, and slug.
    """
    async with httpx.AsyncClient() as client:
        # page_index = number - 1 with page_size 1 selects the Nth newest entry.
        challenge, *_ = await get_challenges(client, page_index=number - 1, page_size=1)
        question = (
            await client.post(
                constants.Challenges.URL,
                headers=dict(accessToken=constants.Challenges.TOKEN),
                json={
                    "pageIndex": 0,
                    "pageSize": 1,
                    "where": [
                        {"field": "sys.slug", "equalTo": challenge["sys"]["slug"]},
                        {"field": "sys.versionStatus", "equalTo": "published"},
                    ],
                },
            )
        ).json()["items"][0]["question"]
    # The second question segment, when present, carries an image asset.
    asset = question[1]["value"]["asset"]["sys"] if len(question) > 1 else None
    return {
        "title": challenge["entryTitle"],
        "published": dateutil.parser.isoparse(
            challenge["sys"]["version"]["created"]
        ).strftime("%d/%m/%Y"),
        "category": challenge["category"][0]["entryTitle"],
        # Convert the HTML question body to Markdown; strip non-breaking
        # spaces and the trailing character.
        "challenge": convert(question[0]["value"]).replace(" ", "")[:-1],
        "image": (
            (
                "https://www.kingsmathsschool.com"
                "".join(
                    asset["uri"].rpartition("/")[:2] + (asset["properties"]["filename"],)
                )
            )
            if asset
            else ""
        ),
        "description": challenge["description"],
        "slug": challenge["sys"]["slug"],
    }
class Maths(Cog):
    """Maths-related commands."""

    def __init__(self, bot: Bot):
        self.bot = bot
        # Start the periodic check for new King's Maths School challenges.
        self.update_challenge.start()

    @tasks.loop(minutes=1)
    async def update_challenge(self):
        """Check the Kings site for the latest challenges.

        The number of the last posted challenge is stored in the challenge
        channel's topic; any newer challenges are posted and the topic is
        updated to the newest title.
        """
        print("Updating maths challenges...")
        # Fallback sentinel in case the topic parse below fails to assign.
        latest_challenge = float("inf")
        latest_challenge = int(
            self.channel.topic.split("Nerds, the lot of you | Challenge ")[1].split(
                " "
            )[0][:-1]
        )
        async with httpx.AsyncClient() as client:
            challenges = await get_challenges(client)
        # Iterate oldest-to-newest so challenges are posted in order.
        for number, challenge in enumerate(challenges[::-1], 1):
            title = challenge["entryTitle"]
            if number > latest_challenge:
                await self.challenge(self.channel, len(challenges) - number + 1)
                await self.channel.edit(topic=constants.Challenges.TOPIC.format(title))
        print("Maths challenges successfully updated.")

    @update_challenge.before_loop
    async def wait_until_ready(self):
        """Wait for bot to become ready."""
        await self.bot.wait_until_ready()
        self.channel = self.bot.get_channel(constants.Challenges.CHANNEL)

    @Cog.listener()
    async def on_message(self, message):
        """Check if the message contains inline LaTeX."""
        # Cap the number processed in a single message to 3 for now, to reduce spam.
        for expression in constants.LATEX_RE.findall(message.content)[:3]:
            await self.latex(message.channel, expression)

    @command()
    async def challenge(self, ctx: Context, number: int = 1):
        """Show the provided challenge number."""
        challenge = await get_challenge(number)
        description = challenge["challenge"]
        # Discord embed descriptions are capped at 2048 characters.
        if len(description) > 2048:
            description = description[:2045] + "..."
        embed = Embed(
            title=challenge["title"],
            colour=Colour(0xE5E242),
            url=f"https://www.kingsmathsschool.com/weekly-maths-challenge/{challenge['slug']}",
            description=description,
        )

        embed.set_image(url=challenge["image"])
        embed.set_thumbnail(
            url="https://pbs.twimg.com/profile_images/502115424121528320/hTQzj_-R.png"
        )
        embed.set_author(name="King's Maths School")
        embed.set_footer(
            text=f"Challenge Released: {challenge['published']} | Category: {challenge['category']}"
        )
        return await ctx.send(embed=embed)

    @command()
    async def latex(self, ctx: Context, expression: str):
        """Render a LaTeX expression."""
        # May be invoked from on_message with a raw channel instead of a Context.
        channel = ctx.channel.id if type(ctx) is Context else ctx.id
        if channel in constants.BLOCKED_CHANNELS:
            return await ctx.send(
                "\N{NO ENTRY SIGN} You cannot use this command in this channel!", delete_after=10
            )
        options = {
            "auth": {"user": "guest", "password": "<PASSWORD>"},
            "latex": expression,
            "resolution": 900,
            "color": "969696",
        }
        # Render remotely via latex2png, then fetch and repost the image.
        async with aiohttp.ClientSession() as session:
            async with session.post(
                "http://latex2png.com/api/convert", json=options
            ) as response:
                result = await response.json()
            if result.get('url'):
                async with session.get("http://latex2png.com" + result["url"]) as response:
                    content = await response.content.read()
            else:
                return
        await ctx.send(file=File(BytesIO(content), filename="result.png"))
def setup(bot):
    """Required boilerplate for adding functionality of cog to bot."""
    bot.add_cog(Maths(bot))
| 2.75 | 3 |
pyalign/tests/__init__.py | poke1024/pyalign | 21 | 12765659 | from collections.abc import Sequence
import numpy as np
import unittest
def to_tuple(x):
    """Recursively convert nested sequences into nested tuples.

    Strings and bytes are treated as atomic values: although they are
    registered as ``Sequence``, recursing into them character-by-character
    would never terminate (each character is itself a length-1 sequence).
    """
    if isinstance(x, Sequence) and not isinstance(x, (str, bytes)):
        return tuple(map(to_tuple, x))
    return x
class TestCase(unittest.TestCase):
    def _problems(self, alphabet, sim, **kwargs):
        """Yield the same alignment problem in every available encoding."""
        from ..problems import general, alphabetic, binary

        yield general(sim, **kwargs)
        yield alphabetic(alphabet, sim, **kwargs)

        binary_values = sim.binary_similarity_values
        if binary_values is not None:
            yield binary(*binary_values, **kwargs)

    def _check_alignments(self, alignments, score, *edges, places=7):
        """Assert each alignment has `score` and the expected edge sets."""
        seen_edges = []
        for alignment in alignments:
            self.assertAlmostEqual(alignment.score, score, places=places)
            seen_edges.append(alignment.edges.tolist())

        self.assertEqual(
            sorted(to_tuple(edges)),
            sorted(to_tuple(seen_edges)),
        )
| 2.921875 | 3 |
data_analyse_nba.py | xingangzhang/PythonTest | 0 | 12765660 | #!/usr/bin/python
# -*-coding:utf-8-*-
"""
@author: smartgang
@contact: <EMAIL>
@file: data_analyse_nba.py
@time: 2017/12/5 11:02
""" | 1.085938 | 1 |
web/javarailsite/learn/migrations/0013_remove_challenge_has_indent.py | wilfp/JavaRail | 2 | 12765661 | # Generated by Django 2.1.3 on 2019-03-09 13:08
from django.db import migrations
class Migration(migrations.Migration):
    # Reverts migration 0012 by dropping the `has_indent` column from the
    # `challenge` model (auto-generated by Django's makemigrations).

    dependencies = [
        ('learn', '0012_challenge_has_indent'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='challenge',
            name='has_indent',
        ),
    ]
| 1.523438 | 2 |
smartsheet/models/project_settings.py | Funtimes-Smarts/Python-import-Smart | 0 | 12765662 | <reponame>Funtimes-Smarts/Python-import-Smart
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from ..util import prep
from ..types import TypedList
from datetime import date
from dateutil.parser import parse
import six
import json
class ProjectSettings(object):
    """Smartsheet ProjectSettings data model.

    Holds a sheet's project settings: the working days of the week, explicit
    non-working dates, and the length (in hours) of a working day.
    """

    def __init__(self, props=None, base_obj=None):
        """Initialize the ProjectSettings model.

        Args:
            props (dict): raw values (camelCase from the API, or snake_case)
                used to pre-populate the model.
            base_obj: parent object reference, kept for error reporting.
        """
        self._base = None
        if base_obj is not None:
            self._base = base_obj

        self._working_days = TypedList(six.string_types)
        self._non_working_days = TypedList(date)
        self._length_of_day = None

        if props:
            # account for alternate variable names from raw API response
            if 'workingDays' in props:
                self.working_days = props['workingDays']
            if 'working_days' in props:
                self.working_days = props['working_days']
            if 'nonWorkingDays' in props:
                self.non_working_days = props['nonWorkingDays']
            if 'non_working_days' in props:
                self.non_working_days = props['non_working_days']
            if 'lengthOfDay' in props:
                self.length_of_day = props['lengthOfDay']
            if 'length_of_day' in props:
                self.length_of_day = props['length_of_day']
        self.__initialized = True

    @property
    def working_days(self):
        """TypedList(str): names of the working days of the week."""
        return self._working_days

    @working_days.setter
    def working_days(self, value):
        if isinstance(value, list):
            self._working_days.purge()
            # Bug fix: the original called six.string_types(x, self._base),
            # but six.string_types is a tuple of types, not a constructor,
            # so any non-string item raised TypeError. Coerce with str().
            self._working_days.extend([
                (x if isinstance(x, six.string_types) else str(x))
                for x in value
            ])
        elif isinstance(value, TypedList):
            self._working_days.purge()
            self._working_days = value.to_list()
        elif isinstance(value, six.string_types):
            self._working_days.purge()
            self._working_days.append(value)

    @property
    def non_working_days(self):
        """TypedList(date): dates that are explicitly non-working."""
        return self._non_working_days

    @non_working_days.setter
    def non_working_days(self, value):
        if isinstance(value, list):
            self._non_working_days.purge()
            for x in value:
                # ISO date strings are parsed; non-date, non-string items
                # are silently skipped (original behavior).
                if isinstance(x, six.string_types):
                    x = parse(x).date()
                if isinstance(x, date):
                    self._non_working_days.extend([x])
        elif isinstance(value, TypedList):
            self._non_working_days.purge()
            self._non_working_days = value.to_list()
        elif isinstance(value, date):
            self._non_working_days.purge()
            self._non_working_days.append(value)
        elif isinstance(value, six.string_types):
            value = parse(value).date()
            self._non_working_days.purge()
            self._non_working_days.append(value)

    @property
    def length_of_day(self):
        """int | float: length of a working day in hours."""
        return self._length_of_day

    @length_of_day.setter
    def length_of_day(self, value):
        # Non-numeric values are silently ignored (original behavior).
        if isinstance(value, (six.integer_types, float)):
            self._length_of_day = value

    def to_dict(self, op_id=None, method=None):
        """Serialize to a camelCase dict; op_id/method are unused."""
        obj = {
            'workingDays': prep(self._working_days),
            'nonWorkingDays': prep(self._non_working_days),
            'lengthOfDay': prep(self._length_of_day)}
        return obj

    def to_json(self):
        """Serialize to a pretty-printed JSON string."""
        return json.dumps(self.to_dict(), indent=2)

    def __str__(self):
        return json.dumps(self.to_dict())
python3/leetcodepy/n_queens_ii.py | qianbinbin/leetcode | 4 | 12765663 | <gh_stars>1-10
"""
The n-queens puzzle is the problem of placing n queens on an n x n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example 1:
Input: n = 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown.
Example 2:
Input: n = 1
Output: 1
Constraints:
1 <= n <= 9
"""
from typing import List
class Solution1:
    def totalNQueens(self, n: int) -> int:
        """Count distinct placements of n mutually non-attacking queens."""
        cols = [False] * n
        diagonals = [False] * (2 * n - 1)
        anti_diagonals = [False] * (2 * n - 1)
        count = 0

        def place(row: int) -> None:
            nonlocal count
            if row == n:
                count += 1
                return
            for col in range(n):
                d, a = n - 1 - row + col, row + col
                if cols[col] or diagonals[d] or anti_diagonals[a]:
                    continue
                cols[col] = diagonals[d] = anti_diagonals[a] = True
                place(row + 1)
                cols[col] = diagonals[d] = anti_diagonals[a] = False

        place(0)
        return count
| 3.9375 | 4 |
Practice/Regex/Applications/Split_the_Phone_Numbers.py | alexanderbauer89/HackerRank | 1 | 12765664 | import re
def split_phone_numbers(s):
    """Split a phone number on single space or hyphen delimiters."""
    # Equivalent to re.split(r'[ -]', s): normalise hyphens to spaces, then
    # split on every single space (empty fields are kept, as re.split does).
    return s.replace('-', ' ').split(' ')
# Read the number of test cases, then reformat each phone number line.
for i in range(int(input())):
    match = split_phone_numbers(input())
    print('CountryCode=' + match[0] + ',LocalAreaCode=' + match[1] + ',Number=' + match[2])
| 3.828125 | 4 |
safe_relay_service/relay/tests/test_safe_creation_tx.py | vaporyorg/safe-relay-service | 5 | 12765665 | <gh_stars>1-10
from gnosis.safe.tests.test_safe_creation_tx import \
TestSafeCreationTx # noqa # pylint: disable=unused-import
| 1 | 1 |
services/core-api/app/api/mines/government_agencies/models/government_agency_type.py | bcgov/mds | 25 | 12765666 | from sqlalchemy.schema import FetchedValue
from app.api.utils.models_mixins import AuditMixin, Base
from app.extensions import db
class GovernmentAgencyType(AuditMixin, Base):
    """Lookup table of government agency types (code, description, active flag)."""
    __tablename__ = 'government_agency_type'

    # Short code identifying the agency type (primary key).
    government_agency_type_code = db.Column(db.String, primary_key=True)
    # Human-readable label for the agency type.
    description = db.Column(db.String, nullable=False)
    # Soft-delete flag; default value comes from the database (FetchedValue).
    active_ind = db.Column(db.Boolean, nullable=False, server_default=FetchedValue())

    def __repr__(self):
        """Return a debug representation containing the type code."""
        return f'<{self.__class__.__name__} {self.government_agency_type_code}>'

    @classmethod
    def get_all(cls):
        """Return every agency type row."""
        return cls.query.all()
MapsManipulations/TileSelection.py | clavlav12/RayCasting3D | 1 | 12765667 | <reponame>clavlav12/RayCasting3D
import sys
import pygame
from pygame.locals import *
from typing import List
from math import sqrt
def tile_selection_window(tiles_list: List[pygame.Surface], walls_selected=None, sprites_selected=None):
    """Open a window that lets the user toggle wall/sprite tiles.

    Left click toggles a tile's membership in `walls_selected` (red tint),
    right click toggles `sprites_selected` (blue tint). Returns the two sets
    when the window is closed.

    Args:
        tiles_list: square tile surfaces, all the same size.
        walls_selected: optional initial set of wall tile indices.
        sprites_selected: optional initial set of sprite tile indices.
    """
    if walls_selected is None:
        walls_selected = set()
    if sprites_selected is None:
        sprites_selected = set()

    tile_size = tiles_list[0].get_width()
    length = len(tiles_list)

    # Find the largest divisor of `length` not exceeding sqrt(length) so the
    # grid is as close to square as possible.
    # Bug fix: the original range stopped at int(sqrt(length)) *exclusive*,
    # so perfect squares (e.g. 16 tiles) missed their square-root divisor
    # and produced an unnecessarily elongated grid.
    greatest_divisor = -1
    for i in range(1, int(sqrt(length)) + 1):
        if length % i == 0:
            greatest_divisor = max(greatest_divisor, i)
    dimensions = (greatest_divisor, length // greatest_divisor)

    pygame.init()
    fps = 60
    fpsClock = pygame.time.Clock()
    width, height = (d * tile_size for d in dimensions)
    scale = 3
    actual_screen = pygame.display.set_mode((width * scale, height * scale))
    screen = pygame.Surface((width, height))
    screen.convert()

    # Tint surfaces multiplied over selected tiles.
    walls_selected_mark = pygame.Surface((tile_size, tile_size)).convert()
    walls_selected_mark.fill((200, 130, 130))
    sprites_selected_mark = pygame.Surface((tile_size, tile_size)).convert()
    sprites_selected_mark.fill((130, 130, 255))

    # Game loop.
    running = 1
    while running:
        screen.fill((0, 0, 0))
        for event in pygame.event.get():
            if event.type == QUIT:
                running = 0
            elif event.type == pygame.MOUSEBUTTONDOWN:
                pos = event.pos
                # Convert window coordinates to a flat tile index.
                tile = int(pos[0] / scale // tile_size + pos[1] / scale // tile_size * dimensions[0])
                if event.button == 1:  # left click
                    if tile in walls_selected:
                        walls_selected.remove(tile)
                    else:
                        walls_selected.add(tile)
                elif event.button == 3:  # right click
                    if tile in sprites_selected:
                        sprites_selected.remove(tile)
                    else:
                        sprites_selected.add(tile)

        # Draw the tile grid, then the selection tints.
        for x in range(0, dimensions[0]):
            for y in range(0, dimensions[1]):
                screen.blit(tiles_list[x + y * dimensions[0]], (x * tile_size, y * tile_size))
        for index in walls_selected:
            screen.blit(walls_selected_mark, (index % dimensions[0] * tile_size, index // dimensions[0] * tile_size), special_flags=pygame.BLEND_MULT)
        for index in sprites_selected:
            screen.blit(sprites_selected_mark, (index % dimensions[0] * tile_size, index // dimensions[0] * tile_size), special_flags=pygame.BLEND_MULT)

        # Scale the working surface up to the visible window.
        pygame.transform.scale(screen, (actual_screen.get_size()), actual_screen)
        pygame.display.flip()
        fpsClock.tick(fps)
    pygame.quit()
    return walls_selected, sprites_selected
| 2.890625 | 3 |
tests/morse_video_threshold_decoder/test_morse_decode.py | AndrewWasHere/video-morse-decoder | 0 | 12765668 | <reponame>AndrewWasHere/video-morse-decoder<filename>tests/morse_video_threshold_decoder/test_morse_decode.py
#
# Copyright 2018 <NAME>. All rights reserved.
#
# This software is released under the BSD 3-clause license. See LICENSE.txt or
# https://opensource.org/licenses/BSD-3-Clause for more information.
#
import numpy as np
from morse_video_decoder.morse_video_threshold_decoder import \
MorseVideoThresholdDecoder
def test_decode_empty(extract_edges):
    """`morse_decode` of an empty array should return an empty string."""
    edges = np.zeros(0, dtype=int)
    edges_idx = np.zeros(0, dtype=int)
    decoded = MorseVideoThresholdDecoder.morse_decode(edges, edges_idx, 2, 7)
    assert decoded == ''
def test_decode_no_transitions():
    """`morse_decode` an array with no transitions."""
    edges = np.zeros(10, dtype=int)
    edges_idx = np.zeros(0, dtype=int)
    decoded = MorseVideoThresholdDecoder.morse_decode(edges, edges_idx, 2, 7)
    assert decoded == ''
def test_decode_one_transition(extract_edges):
    """`morse_decode` an array with only one transition."""
    signal = np.array([1] * 5 + [0] * 3, dtype=int)
    edges, edges_idx = extract_edges(signal)
    assert MorseVideoThresholdDecoder.morse_decode(edges, edges_idx, 2, 7) == ''
def test_so(extract_edges):
    """`morse_decode` SO."""
    # "SO" in Morse: dot dot dot (S), letter gap, dash dash dash (O).
    bits = [0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1]
    edges, edges_idx = extract_edges(np.array(bits, dtype=int))
    assert MorseVideoThresholdDecoder.morse_decode(edges, edges_idx, 2, 7) == 'SO'
| 2.5 | 2 |
2016/18/part1.py | timofurrer/aoc-2020 | 0 | 12765669 | <reponame>timofurrer/aoc-2020
from pathlib import Path
puzzle_input_raw = (Path(__file__).parent / "input.txt").read_text()
# Bug fix: strip the trailing newline from the file -- without this the
# "\n" character was scored as an extra (trap) tile in row 0, corrupting
# every derived row.
rows = [[x == "." for x in puzzle_input_raw.strip()]]


def is_safe(left, center, right):
    """Return True when the child tile is safe (True = safe tile).

    The original lambda was named `is_trap` but actually computed "not a
    trap". The puzzle's four trap rules reduce to `left != right`; the
    center parent never matters, so safe is simply `left == right`.
    """
    return left == right


# Generate the remaining 39 rows; pad both ends with implicit safe tiles.
for _ in range(40 - 1):
    padded = [True] + rows[-1] + [True]
    rows.append([is_safe(*parents) for parents in zip(padded, padded[1:], padded[2:])])

# True = safe, so this counts safe tiles across all 40 rows.
print(sum(sum(row) for row in rows))
database/mongoDB.py | hero0926/PythonPlayground | 0 | 12765670 | # 몽고디비
from distutils.log import warn as printf
from random import randrange as rand
from pymongo import Connection, errors
from ushuffle_dbU import DBNAME, randName, FIELDS, tformat, cformat
COLLECTION = 'users'
class MongoTest(object):
    """Exercise basic CRUD operations against a MongoDB 'users' collection."""

    def __init__(self):
        """Connect to MongoDB; raise RuntimeError if the server is down."""
        try:
            cxn = Connection()
        except errors.AutoReconnect:
            raise RuntimeError()
        self.db = cxn[DBNAME]
        self.users = self.db[COLLECTION]

    def insert(self):
        """Insert a batch of randomly generated users.

        Bug fix: the original passed a generator expression directly inside
        dict(...) keyword arguments, which is a SyntaxError. Build a list of
        documents instead.
        """
        self.users.insert([
            dict(login=who, userid=uid, projid=rand(1, 5))
            for who, uid in randName()
        ])

    def update(self):
        """Move every user from one random project to another.

        Returns:
            (from_project, to_project, rows_updated)
        """
        fr = rand(1, 5)
        to = rand(1, 5)
        i = -1
        for i, user in enumerate(self.users.find({'projid': fr})):
            self.users.update(user, {'$set': {'projid': to}})
        return fr, to, i + 1

    def delete(self):
        """Remove every user in a randomly chosen project.

        Returns:
            (project, rows_removed)
        """
        rm = rand(1, 5)
        i = -1
        for i, user in enumerate(self.users.find({'projid': rm})):
            self.users.remove(user)
        return rm, i + 1

    def dbDump(self):
        """Print every user row, one formatted line per user."""
        for user in self.users.find():
            # Bug fix: the loop variable is `k`, but the original indexed
            # with the undefined name `l` (NameError at runtime).
            printf(''.join(map(tformat, (user[k] for k in FIELDS))))

    def finish(self):
        """Close the connection to the server."""
        self.db.connection.disconnect()
def main():
    """Drive the demo: insert, update twice, delete, then clean up."""
    try:
        mongo = MongoTest()
    except RuntimeError:
        print("MONGODB SERVER ERROR")
        return

    mongo.insert()
    mongo.dbDump()

    # Two successive random project reassignments, then dump the result.
    for _ in range(2):
        fr, to, num = mongo.update()
    mongo.dbDump()

    rm, num = mongo.delete()
    mongo.dbDump()

    mongo.db.drop_collection(COLLECTION)
    mongo.finish()
| 2.40625 | 2 |
realestate/listing/templatetags/extra_functions.py | jinrith27/Realestate | 42 | 12765671 | <reponame>jinrith27/Realestate
from django.contrib.humanize.templatetags.humanize import intcomma
from django import template
from realestate.listing.models import Listing
register = template.Library()
@register.filter
def currency(dollars):
    """Format a numeric value as a whole-dollar string, e.g. "$1,234"."""
    try:
        amount = int(float(dollars))
    except (ValueError, TypeError):
        # Anything non-numeric renders as zero dollars.
        return '$0'
    return "$%s" % intcomma(amount, False)
@register.inclusion_tag('home/featured.html')
def get_featured(limit=5):
    """Provide up to `limit` featured listings to the featured template."""
    featured = Listing.objects.featured()
    return {'listings': featured[:limit]}
fibber/metrics/fluency/gpt2_perplexity_metric.py | stungkit/fibber | 0 | 12765672 | <reponame>stungkit/fibber<filename>fibber/metrics/fluency/gpt2_perplexity_metric.py
"""This metric computes the perplexity ratio ppl(paraphrase) / ppl(original text).
The perplexity is estimated using GPT2 model. This metric can reveal the meaningfulness of a
sentence.
"""
import numpy as np
import torch
import torch.nn.functional as F
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from fibber import log, resources
from fibber.metrics.metric_base import MetricBase
logger = log.setup_custom_logger(__name__)
def make_input_output_pair(tokenizer, x):
    """Tokenize the text, then construct the (input, target) pair for GPT2.

    The input is the target shifted right by one position, prefixed with the
    beginning-of-sequence token.
    """
    target = tokenizer.encode(x, add_special_tokens=True)
    shifted = [tokenizer.bos_token_id] + target[:-1]
    return shifted, target
def make_batch(toks_list):
    """Right-pad multiple token lists into (ids, mask) integer arrays."""
    batch_size = len(toks_list)
    width = max(len(toks) for toks in toks_list)

    ids = np.zeros((batch_size, width), dtype='int')
    mask = np.zeros((batch_size, width), dtype='int')
    for row, toks in enumerate(toks_list):
        n_toks = len(toks)
        ids[row, :n_toks] = toks
        mask[row, :n_toks] = 1
    return ids, mask
class GPT2PerplexityMetric(MetricBase):
    """This metric computes the perplexity of paraphrased text divided by the perplexity of
    original text. The perplexity is measured using GPT2 model.
    """

    def __init__(self, gpt2_pretrained_model="gpt2-medium", gpt2_gpu_id=-1, **kargs):
        """Initialize GPT2 model.

        Args:
            gpt2_pretrained_model (str): pretrained GPT2 model name.
            gpt2_gpu_id (int): GPU id to run on, or -1 for CPU.
        """
        super(GPT2PerplexityMetric, self).__init__(**kargs)
        logger.info("load gpt2 model.")
        self._tokenizer = GPT2TokenizerFast.from_pretrained(
            resources.get_transformers(gpt2_pretrained_model))
        if gpt2_gpu_id == -1:
            logger.warning("GPT2 metric is running on CPU.")
            self._device = torch.device("cpu")
        else:
            logger.info("GPT2 metric is running on GPU %d.", gpt2_gpu_id)
            self._device = torch.device("cuda:%d" % gpt2_gpu_id)
        self._model = GPT2LMHeadModel.from_pretrained(
            resources.get_transformers(gpt2_pretrained_model)).to(
            self._device)

    def _get_ppl(self, sentences):
        """Compute the perplexity of each sentence; returns a numpy array."""
        input_output = [make_input_output_pair(self._tokenizer, x) for x in sentences]
        input, output = zip(*input_output)

        toks_input, mask = make_batch(input)
        toks_output, _ = make_batch(output)

        mask = torch.tensor(mask).to(self._device)
        toks_input = torch.tensor(toks_input).to(self._device)
        toks_output = torch.tensor(toks_output).to(self._device)

        with torch.no_grad():
            logits = self._model(toks_input, attention_mask=mask)[0]
            # Gather log p(token) at each position, mask out padding, then
            # exponentiate the mean negative log-likelihood per sentence.
            logpw = torch.gather(F.log_softmax(logits, dim=-1), dim=-1,
                                 index=toks_output.unsqueeze(dim=2)).squeeze(dim=2)
            ppl = torch.exp(-(logpw * mask).sum(dim=1) / mask.sum(dim=1))

        ppl = ppl.detach().cpu().numpy()
        return ppl

    def measure_batch(self, origin, paraphrase_list, data_record=None, use_ratio=False):
        """Measure the metric on a batch of paraphrase_list.

        Args:
            origin (str): the original text.
            paraphrase_list (list): a set of paraphrase_list.
            data_record (dict): the corresponding data record of original text.
            use_ratio (bool): returns the perplexity ratio.

        Returns:
            (list): the perplexity (or perplexity ratio) for each paraphrase.
        """
        # Doc fix: the original docstring claimed this returns the "USE
        # similarity metric" -- a copy-paste from another metric class.
        if use_ratio:
            ppls = self._get_ppl([origin] + paraphrase_list)
            res = ppls[1:] / ppls[0]
        else:
            res = self._get_ppl(paraphrase_list)
        return [float(x) for x in res]

    def measure_multiple_examples(self, origin_list, paraphrase_list,
                                  data_record_list=None, use_ratio=False):
        """Measure the metric on paired lists of originals and paraphrases."""
        assert len(origin_list) == len(paraphrase_list)
        if use_ratio:
            ppls = self._get_ppl(origin_list + paraphrase_list)
            res = ppls[len(origin_list):] / ppls[:len(origin_list)]
        else:
            res = self._get_ppl(paraphrase_list)
        # Bug fix: removed a stray debug `print(res)` left in this method.
        return [float(x) for x in res]

    def measure_example(self, origin, paraphrase, data_record=None, use_ratio=False):
        """Compute the perplexity (or perplexity ratio) for a single pair.

        Args:
            origin (str): original text.
            paraphrase (str): paraphrased text.
            data_record: ignored.
            use_ratio (bool): returns the perplexity ratio.
        """
        if use_ratio:
            ppl = self._get_ppl([origin, paraphrase])
            res = float(ppl[1] / ppl[0])
        else:
            res = float(self._get_ppl([paraphrase])[0])
        return res
| 2.9375 | 3 |
src/captcha/tests/urls.py | daniel-werner/stelagifts | 108 | 12765673 | <reponame>daniel-werner/stelagifts
from django.conf.urls.defaults import *
# Django <= 1.7 style URLconf: views referenced by dotted string path via
# the deprecated patterns() helper.
urlpatterns = patterns('',
    url(r'test/$','captcha.tests.views.test',name='captcha-test'),
    url(r'test2/$','captcha.tests.views.test_custom_error_message',name='captcha-test-custom-error-message'),
    url(r'test3/$','captcha.tests.views.test_per_form_format', name='test_per_form_format'),
    url(r'',include('captcha.urls')),
)
| 1.664063 | 2 |
custom_components/nam/const.py | bieniu/ha-nettigo-air-monitor | 4 | 12765674 | <filename>custom_components/nam/const.py
"""Constants for Nettigo Air Monitor integration."""
from datetime import timedelta
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
DEFAULT_NAME = "Nettigo Air Monitor"
# Polling cadence for the integration's update coordinator.
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=6)
DOMAIN = "nam"
MANUFACTURER = "Nettigo"

# Maps sensor-key prefix to the air-quality sensor model name.
AIR_QUALITY_SENSORS = {"sds": "SDS011", "sps30": "SPS30"}

# Sensor definitions keyed by API field name. Each value is a tuple of:
# (entity name, unit of measurement, device class, icon, enabled-by-default).
# NOTE(review): the fourth field looks like an MDI icon identifier used when
# the device class is None -- confirm against the sensor platform consuming
# this mapping.
SENSORS = {
    "bme280_humidity": (
        f"{DEFAULT_NAME} BME280 Humidity",
        PERCENTAGE,
        DEVICE_CLASS_HUMIDITY,
        None,
        True,
    ),
    "bme280_pressure": (
        f"{DEFAULT_NAME} BME280 Pressure",
        PRESSURE_HPA,
        DEVICE_CLASS_PRESSURE,
        None,
        True,
    ),
    "bme280_temperature": (
        f"{DEFAULT_NAME} BME280 Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        None,
        True,
    ),
    "bmp280_pressure": (
        f"{DEFAULT_NAME} BMP280 Pressure",
        PRESSURE_HPA,
        DEVICE_CLASS_PRESSURE,
        None,
        True,
    ),
    "bmp280_temperature": (
        f"{DEFAULT_NAME} BMP280 Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        None,
        True,
    ),
    "heca_humidity": (
        f"{DEFAULT_NAME} HECA Humidity",
        PERCENTAGE,
        DEVICE_CLASS_HUMIDITY,
        None,
        True,
    ),
    "heca_temperature": (
        f"{DEFAULT_NAME} HECA Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        None,
        True,
    ),
    "sht3x_humidity": (
        f"{DEFAULT_NAME} SHT3X Humidity",
        PERCENTAGE,
        DEVICE_CLASS_HUMIDITY,
        None,
        True,
    ),
    "sht3x_temperature": (
        f"{DEFAULT_NAME} SHT3X Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        None,
        True,
    ),
    "sps30_p0": (
        f"{DEFAULT_NAME} SPS30 Particulate Matter 1.0",
        CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        None,
        "mdi:blur",
        True,
    ),
    "sps30_p4": (
        f"{DEFAULT_NAME} SPS30 Particulate Matter 4.0",
        CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
        None,
        "mdi:blur",
        True,
    ),
    "humidity": (
        f"{DEFAULT_NAME} DHT22 Humidity",
        PERCENTAGE,
        DEVICE_CLASS_HUMIDITY,
        None,
        True,
    ),
    "signal": (
        f"{DEFAULT_NAME} Signal Strength",
        SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
        DEVICE_CLASS_SIGNAL_STRENGTH,
        None,
        False,
    ),
    "temperature": (
        f"{DEFAULT_NAME} DHT22 Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        None,
        True,
    ),
    "uptime": (
        f"{DEFAULT_NAME} Uptime",
        None,
        DEVICE_CLASS_TIMESTAMP,
        None,
        False,
    ),
}
| 1.953125 | 2 |
auth-api/src/auth_api/services/user.py | kvangorp/sbc-auth | 0 | 12765675 | <reponame>kvangorp/sbc-auth<gh_stars>0
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The User service.
This module manages the User Information.
"""
from sbc_common_components.tracing.service_tracing import ServiceTracing
from auth_api.models import User as UserModel
@ServiceTracing.trace(ServiceTracing.enable_tracing, ServiceTracing.should_be_tracing)
class User:  # pylint: disable=too-many-instance-attributes
    """Manages all aspects of the User Entity.

    This manages storing the User in the cache,
    ensuring that the local cache is up to date,
    submitting changes back to all storage systems as needed.
    """

    def __init__(self):
        """Return a User Service object."""
        self.__dao = None
        self._username: str = None
        self._roles: str = None
        self._keycloak_guid: str = None

    @classmethod
    def _from_dao(cls, user_dao):
        """Wrap a UserModel DAO in a service object; returns None for a falsy DAO.

        Factored out of the three classmethods below, which previously
        duplicated this wrapping logic.
        """
        if not user_dao:
            return None
        user = cls()
        user._dao = user_dao  # pylint: disable=protected-access
        return user

    @property
    def _dao(self):
        # Lazily create an empty DAO so property setters always have a target.
        if not self.__dao:
            self.__dao = UserModel()
        return self.__dao

    @_dao.setter
    def _dao(self, value):
        # NOTE(review): only username and roles are synced from the DAO here;
        # keycloak_guid is not -- confirm whether that is intentional.
        self.__dao = value
        self.username = self._dao.username
        self.roles = self._dao.roles

    @property
    def username(self):
        """Return the User username."""
        return self._username

    @username.setter
    def username(self, value: str):
        """Set the User username."""
        self._username = value
        self._dao.username = value

    @property
    def roles(self):
        """Return the User roles."""
        return self._roles

    @roles.setter
    def roles(self, value: str):
        """Set the User roles."""
        self._roles = value
        self._dao.roles = value

    @property
    def keycloak_guid(self):
        """Return the keycloak GUID."""
        return self._keycloak_guid

    @keycloak_guid.setter
    def keycloak_guid(self, value: str):
        """Set the keycloak GUID."""
        self._keycloak_guid = value
        self._dao.keycloak_guid = value

    @ServiceTracing.disable_tracing
    def asdict(self):
        """Return the User as a python dict.

        None fields are not included in the dict.
        """
        d = {'username': self.username}
        if self.roles:
            d['roles'] = self.roles
        return d

    def save(self):
        """Save the User information to the local cache."""
        self._dao.save()

    @classmethod
    def save_from_jwt_token(cls, token: dict = None):
        """Save user to database."""
        if not token:
            return None
        return cls._from_dao(UserModel.create_from_jwt_token(token))

    @classmethod
    def find_by_jwt_token(cls, token: dict = None):
        """Find user from database by user token."""
        if not token:
            return None
        return cls._from_dao(UserModel.find_by_jwt_token(token))

    @classmethod
    def find_by_username(cls, username: str = None):
        """Given a username, this will return an Active User or None."""
        if not username:
            return None
        # find locally
        return cls._from_dao(UserModel.find_by_username(username))
| 1.960938 | 2 |
Region/utils.py | kirilenkobm/bioinf_stuff | 0 | 12765676 | from os import stat
class Color:
    """RGB color with named-color, "r,g,b" string, and "#RRGGBB" hex parsing."""

    color_name_dict = {
        "white": (255, 255, 255),
        "black": (0, 0, 0),
        "grey": (128, 128, 128),
        "red": (255, 0, 0),
        "blue": (0, 0, 255),
        "green": (0, 255, 0)
    }  # TODO: continue this / maybe better colors

    def __init__(self, r, g, b):
        """Init class with raw int r, g, b values (each in 0..255)."""
        self.r = r
        self.g = g
        self.b = b
        self.__check_255()

    # Bug fix: the alternate constructors below were written as classmethods
    # (they take `cls`) but were missing the @classmethod decorator, so they
    # could not be called as Color.from_comma_sep_string(...) etc.
    @classmethod
    def from_comma_sep_string(cls, cs_string):
        """Parse from a comma-separated string such as "255,0,128"."""
        fields = [int(x) for x in cs_string.split(",") if x != ""]
        if len(fields) != 3:
            raise ValueError(
                f"Expected 3 comma-separated color components, got {cs_string!r}"
            )
        # Going through __init__ also runs range validation; the original
        # bypassed __init__ via __new__ and then called the check incorrectly
        # on the class instead of the instance.
        return cls(*fields)

    @classmethod
    def magic_parse(cls, color_name):
        """Parse a color from a name, "#RRGGBB" hex code, or "r,g,b" string."""
        if color_name in cls.color_name_dict:
            return cls(*cls.color_name_dict[color_name])
        elif color_name.startswith("#"):
            # hex code read
            return cls.read_from_hex(color_name)
        elif len(color_name.split(",")) == 3:
            # comma-separated color like in ucsc bed file
            return cls.from_comma_sep_string(color_name)
        # TODO: can be additional options
        else:
            # Bug fix: the original created the ValueError without raising it.
            raise ValueError(f"Cannot parse {color_name}")

    @classmethod
    def black(cls):
        """Create a black (0, 0, 0) color object."""
        return cls(0, 0, 0)

    @classmethod
    def read_from_hex(cls, hex_str):
        """Read from a string like #000000 (previously an unimplemented stub)."""
        digits = hex_str.lstrip("#")
        if len(digits) != 6:
            raise ValueError(f"Cannot parse hex color {hex_str!r}")
        return cls(*(int(digits[i:i + 2], 16) for i in (0, 2, 4)))

    def __check_255(self):
        """Validate that r, g, b are ints in the range 0..255.

        Bug fixes vs. the original: the isinstance assertion was inverted
        (it required the values NOT to be ints), and asserts are replaced
        with explicit exceptions so validation survives `python -O`.
        """
        vals = (self.r, self.g, self.b)
        if not all(isinstance(v, int) for v in vals):
            raise TypeError(f"RGB components must be ints, got {vals!r}")
        if not all(0 <= v <= 255 for v in vals):
            raise ValueError(f"RGB components must be in 0..255, got {vals!r}")
| 3.484375 | 3 |
flask_server.py | joshzarrabi/covid-publishing-api | 12 | 12765677 | """This file is the main module which contains the app.
"""
from app import create_app, db
from app.auth.auth_cli import getToken
from decouple import config
from flask.cli import AppGroup
import click
import config as configs
# Figure out which config we want based on the `ENV` env variable, default to local
from app.utils.backfill import backfill

env_config = config("ENV", cast=str, default="localpsql")
# Map each ENV value to its config class; the selected class is instantiated
# below and handed to the application factory.
config_dict = {
    'production': configs.Production,
    'localpsql': configs.LocalPSQLConfig,
    'develop': configs.Develop,
    'testing': configs.Testing,
}
app = create_app(config_dict[env_config]())

# for production, require a real SECRET_KEY to be set
if env_config == 'production':
    assert app.config['SECRET_KEY'] != "12345", "You must set a secure SECRET_KEY"
# register a custom command to get authentication tokens
auth_cli = AppGroup('auth')

@auth_cli.command("getToken")
@click.argument('name')
def getToken_cli(name):
    """Print an authentication token for NAME (wraps auth_cli.getToken)."""
    click.echo(getToken(name))

app.cli.add_command(auth_cli)
@app.cli.command()
def deploy():
    """Run deployment tasks (currently a no-op placeholder)."""
    # e.g. this _used_ to be where a database migration would run via `upgrade()`
    pass
# Utility commands, e.g. backfilling data from a file.
utils_cli = AppGroup('utils')

@utils_cli.command("backfill")
@click.argument('input_file')
def backfill_cli(input_file):
    """Backfill data from INPUT_FILE (wraps app.utils.backfill.backfill)."""
    backfill(input_file)

app.cli.add_command(utils_cli)
| 2.46875 | 2 |
post/models.py | duhee77/Your-class-Backend | 1 | 12765678 | <gh_stars>1-10
from django.db import models
from django.conf import settings
from accounts.models import CustomUser
from subject.models import Subject
# Create your models here.
class Post(models.Model):
    """A notice or discussion post belonging to a subject."""
    # True when the post is a notice (the default) rather than a regular post.
    isNotice = models.BooleanField(default=True)
    # Set once when the row is created (auto_now_add).
    postUpdateDate = models.DateTimeField(auto_now_add=True)
    # Author; deleting the user deletes their posts.
    postUserId = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
    postDetail = models.TextField()
    postName = models.CharField(max_length=255)
    # Subject the post belongs to; deleting the subject deletes its posts.
    postSubjectId = models.ForeignKey(Subject, on_delete=models.CASCADE)

    class Meta:
        db_table = 'Post'
class Comment(models.Model):
    """A comment attached to a Post."""
    # Parent post; deleting the post deletes its comments.
    commentPostId = models.ForeignKey(Post, on_delete=models.CASCADE)
    commentDetail = models.TextField()
    # Comment author; deleting the user deletes their comments.
    commentUserId = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
    # Set once when the row is created (auto_now_add).
    commentUpdateDate = models.DateTimeField(auto_now_add=True)

    class Meta:
        db_table = 'Comment'
pbpstats/data_loader/stats_nba/shots/file.py | bahalbach/pbpstats | 54 | 12765679 | import json
from pathlib import Path
from pbpstats.data_loader.abs_data_loader import check_file_directory
from pbpstats.data_loader.stats_nba.file_loader import StatsNbaFileLoader
class StatsNbaShotsFileLoader(StatsNbaFileLoader):
    """
    A ``StatsNbaShotsFileLoader`` object should be instantiated and passed into ``StatsNbaShotsLoader`` when loading data from file

    :param str file_directory:
        Directory in which data should be loaded from.
        The specific file location will be `stats_home_shots_<game_id>.json`
        and `stats_away_shots_<game_id>.json` in the `/game_details` subdirectory.
    """

    def __init__(self, file_directory):
        self.file_directory = file_directory

    @check_file_directory
    def load_data(self, game_id):
        """Load home and away shot data for ``game_id`` from disk.

        Caches both payloads on the instance and returns them as a
        ``(home_source_data, away_source_data)`` tuple. Raises if either
        file is missing.
        """
        self.game_id = game_id
        self.home_file_path = (
            f"{self.file_directory}/game_details/stats_home_shots_{self.game_id}.json"
        )
        self.away_file_path = (
            f"{self.file_directory}/game_details/stats_away_shots_{self.game_id}.json"
        )
        # The home/away branches previously duplicated the exists-check and
        # JSON-parse logic; factored into _read_json.
        self.home_source_data = self._read_json(self.home_file_path)
        self.away_source_data = self._read_json(self.away_file_path)
        return self.home_source_data, self.away_source_data

    @staticmethod
    def _read_json(file_path):
        """Parse ``file_path`` as JSON, raising if the file does not exist."""
        if not Path(file_path).is_file():
            raise Exception(f"{file_path} does not exist")
        with open(file_path) as json_data:
            return json.load(json_data)
| 3.265625 | 3 |
tests/resources/wallet/test_send_tokens.py | mbnunes/hathor-core | 51 | 12765680 | <reponame>mbnunes/hathor-core
import base64
from twisted.internet.defer import inlineCallbacks
from hathor.daa import TestMode, _set_test_mode
from hathor.p2p.resources import MiningResource
from hathor.wallet.resources import BalanceResource, HistoryResource, SendTokensResource
from tests import unittest
from tests.resources.base_resource import StubSite, TestDummyRequest, _BaseResourceTest
from tests.utils import add_blocks_unlock_reward, add_new_blocks, resolve_block_bytes
class BaseSendTokensTest(_BaseResourceTest._ResourceTest):
__test__ = False
    def setUp(self):
        """Build stub web resources for each send-tokens-related API endpoint."""
        super().setUp()
        self.web = StubSite(SendTokensResource(self.manager))
        self.web_mining = StubSite(MiningResource(self.manager))
        self.web_balance = StubSite(BalanceResource(self.manager))
        self.web_history = StubSite(HistoryResource(self.manager))
@inlineCallbacks
def test_post(self):
# Mining new block
response_mining = yield self.web_mining.get("mining")
data_mining = response_mining.json_value()
block_bytes = resolve_block_bytes(block_bytes=data_mining['block_bytes'])
yield self.web_mining.post("mining", {'block_bytes': base64.b64encode(block_bytes).decode('utf-8')})
add_blocks_unlock_reward(self.manager)
self.reactor.advance(10)
# Unlocking wallet
self.manager.wallet.unlock(b"MYPASS")
# Sending token to random address without input
# Options
yield self.web.options("wallet/send_tokens")
data_json = {"outputs": [{"address": self.get_address(0), "value": 505}], "inputs": []}
response = yield self.web.post("wallet/send_tokens", {'data': data_json})
data = response.json_value()
self.assertTrue(data['success'])
self.reactor.advance(10)
# Asserting new balance
response_balance = yield self.web_balance.get("wallet/balance")
data_balance = response_balance.json_value()
tokens_per_block = self.manager.get_tokens_issued_per_block(1)
self.assertEqual(data_balance['balance'], {'available': tokens_per_block - 505, 'locked': 0})
# Getting history, so we can get the input
response_history = yield self.web_history.get("wallet/history", {b'page': 1, b'count': 10})
data_history = response_history.json_value()
input_hash = data_history['history'][0]['tx_id']
# Sending token to random address with input wrong amount
data_json = {
"outputs": [{
"address": self.get_address(0),
"value": 500
}],
"inputs": [{
"tx_id": input_hash,
"index": 0
}]
}
response2 = yield self.web.post("wallet/send_tokens", {'data': data_json})
data2 = response2.json_value()
self.assertFalse(data2['success'])
self.reactor.advance(10)
# Sending duplicate input
data_json_duplicate = {
"outputs": [{
"address": self.get_address(0),
"value": 19000
}],
"inputs": [{
"tx_id": input_hash,
"index": 0
}, {
"tx_id": input_hash,
"index": 0
}]
}
response_duplicate = yield self.web.post("wallet/send_tokens", {'data': data_json_duplicate})
data_duplicate = response_duplicate.json_value()
self.assertFalse(data_duplicate['success'])
# Sending token to random address with input right amount
data_json2 = {
"outputs": [{
"address": self.get_address(0),
"value": self.manager.get_tokens_issued_per_block(1) - 505
}],
"inputs": [{
"tx_id": input_hash,
"index": 0
}]
}
response3 = yield self.web.post("wallet/send_tokens", {'data': data_json2})
data3 = response3.json_value()
self.assertTrue(data3['success'])
# Sending token to invalid addresses
data_json3 = {"outputs": [{"address": self.get_address(1), "value": 500}], "inputs": []}
response_error1 = yield self.web.post("wallet/send_tokens", {'data': data_json3})
data_error1 = response_error1.json_value()
self.assertFalse(data_error1['success'])
data_json4 = {"outputs": [{"address": "1234", "value": 500}], "inputs": []}
response_error2 = yield self.web.post("wallet/send_tokens", {'data': data_json4})
data_error2 = response_error2.json_value()
self.assertFalse(data_error2['success'])
# Error insuficient funds
data_json5 = {"outputs": [{"address": self.get_address(0), "value": 5000000}], "inputs": []}
response_error3 = yield self.web.post("wallet/send_tokens", {'data': data_json5})
data_error3 = response_error3.json_value()
self.assertFalse(data_error3['success'])
add_new_blocks(self.manager, 1, advance_clock=1)
add_new_blocks(self.manager, 1, advance_clock=1) # XXX: adding extra block, not sure why this is needed
add_blocks_unlock_reward(self.manager)
# Sending token with timelock
data_timelock = {
"outputs": [{
"address": self.get_address(0),
"value": 505,
"timelock": 1542995660
}],
"inputs": []
}
response_timelock = yield self.web.post("wallet/send_tokens", {'data': data_timelock})
data_response_timelock = response_timelock.json_value()
self.assertTrue(data_response_timelock['success'])
self.reactor.advance(5)
# Sending token with timestamp
data_timestamp = {
"outputs": [{
"address": self.get_address(0),
"value": 5
}],
"inputs": [],
"timestamp": int(self.reactor.seconds())
}
response_timestamp = yield self.web.post("wallet/send_tokens", {'data': data_timestamp})
data_response_timestamp = response_timestamp.json_value()
self.assertTrue(data_response_timestamp['success'])
self.reactor.advance(5)
# Sending token with timestamp=0
data_timestamp = {
"outputs": [{
"address": self.get_address(0),
"value": 5
}],
"inputs": [],
"timestamp": 0
}
response_timestamp = yield self.web.post("wallet/send_tokens", {'data': data_timestamp})
data_response_timestamp = response_timestamp.json_value()
self.assertTrue(data_response_timestamp['success'])
@inlineCallbacks
def test_tx_weight(self):
_set_test_mode(TestMode.DISABLED)
add_new_blocks(self.manager, 3, advance_clock=1)
add_blocks_unlock_reward(self.manager)
self.reactor.advance(3)
# Unlocking wallet
self.manager.wallet.unlock(b"MYPASS")
data_json = {
"outputs": [{
"address": self.get_address(0),
"value": 505
}],
"inputs": [],
"weight": 1
}
response = yield self.web.post("wallet/send_tokens", {'data': data_json})
data = response.json_value()
self.assertFalse(data['success'])
def test_error_request(self):
resource = SendTokensResource(self.manager)
request = TestDummyRequest('POST', 'wallet/send_tokens', {})
self.assertIsNotNone(request._finishedDeferreds)
resource._err_tx_resolve('Error', request)
self.assertIsNone(request._finishedDeferreds)
# Concrete test class running the base suite under the sync-v1 protocol.
class SyncV1SendTokensTest(unittest.SyncV1Params, BaseSendTokensTest):
__test__ = True
# Concrete test class running the base suite under the sync-v2 protocol.
class SyncV2SendTokensTest(unittest.SyncV2Params, BaseSendTokensTest):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeSendTokensTest(unittest.SyncBridgeParams, SyncV2SendTokensTest):
pass
| 1.78125 | 2 |
server/app/services/content_element_service.py | bibbox/app-video-score | 2 | 12765681 | # -*- coding: utf-8 -*-
"""
MovieService class - This class holds the method related to User manipulations.
"""
from server.app.models.content_element import ContentElement
from server.app.services import SQLAlchemyService
from server.app import db
class ContentElementService(SQLAlchemyService):
"""Service layer exposing database operations for ContentElement rows."""
__model__ = ContentElement
__db__ = db
def __init__(self):
# Creating a parent class ref to access parent class methods.
# NOTE(review): the trailing comma below makes parentClassRef a 1-tuple
# (super(...),) rather than the super proxy itself -- likely a typo or a
# truncated line; confirm intent before relying on this attribute.
self.parentClassRef = super(ContentElementService, self),
.github/rename.py | potassco/gringo | 0 | 12765682 | <gh_stars>0
'''
Simple helper module to be used in scripts building pip packages that depend on
clingo.
'''
import re
from os.path import isfile
def rename_clingo_cffi():
    '''
    Replace all occurrences of the 'clingo' package by 'clingo-cffi' in the
    pip package files 'setup.py' and 'pyproject.toml' of the current
    directory. Files that do not exist are skipped.

    Only lines declaring the package name (``name = '...'``) or its
    requirements (``install_requires``/``requires`` lists) are rewritten.
    '''
    for file_name in ('setup.py', 'pyproject.toml'):
        if not isfile(file_name):
            continue
        with open(file_name, 'r+') as f:
            # Collect rewritten lines and join once instead of building the
            # content with repeated string concatenation.
            lines = []
            for line in f:
                if re.match(r'^ *(install_requires|requires) *= *\[', line) or re.match(r'''^ *name *= *['"]''', line):
                    lines.append(re.sub(r'''(['"])clingo(['"])''', r'\1clingo-cffi\2', line))
                else:
                    lines.append(line)
            # Rewrite the file in place with the updated content.
            f.seek(0)
            f.write(''.join(lines))
            f.truncate()
# Allow running this helper directly as a script.
if __name__ == '__main__':
rename_clingo_cffi()
| 2.546875 | 3 |
test/test_vision/test_cutout.py | Fragile-azalea/homura | 1 | 12765683 | <filename>test/test_vision/test_cutout.py
import torch
from homura.vision import CutOut
def test_cutout():
    """Smoke test: CutOut with a 16-pixel mask runs on a 3x32x32 tensor."""
    sample = torch.randn(3, 32, 32)
    cutout_transform = CutOut(16)
    cutout_transform(sample)
| 1.695313 | 2 |
Notebooks/Assignment 3/.ipynb_checkpoints/generate_data-checkpoint.py | JRMfer/ABM_individual | 1 | 12765684 | import numpy as np
import pandas as pd
students = 250  # number of synthetic student records to generate
nr_to_label = {0: 'bike', 1: 'car', 2: 'bus 40', 3: 'bus 240'}  # code -> transport label
label_to_nr = {v: k for k, v in nr_to_label.items()}  # inverse mapping: label -> code
def choice(income, distance, lazy):
    """Pick a transport code from income, distance and laziness.

    Returns one of the numeric codes from ``label_to_nr``.
    """
    if income < 500:
        # Low income: bike when the trip is short and laziness allows it,
        # otherwise one of the two bus options depending on income.
        if distance < 8 and distance * lazy * lazy < 120:
            return label_to_nr['bike']
        if income > 350:
            return label_to_nr['bus 40']
        return label_to_nr['bus 240']
    # Higher income: the less lazy still take the bus, the rest drive.
    if lazy < 3:
        return label_to_nr['bus 40']
    return label_to_nr['car']
# generate some random numbers
# columns: income (clipped normal), distance (Poisson), lazy (uniform 1-9)
idc = np.array([np.round(np.random.normal(300, 200, size=students).clip(min=0)),
np.random.poisson(8, size=students),
np.random.randint(1, 10, size=students)]).T
# get their favourite mode of transport
idct = np.hstack((idc, np.array([[choice(*row) for row in idc]]).T))
# add some randomness by shuffling some labels
# (~15% of the rows get a uniformly random transport code instead)
replace = np.where(np.random.random(size=students) < 0.15)[0]
idct[replace, 3] = np.random.randint(0, 4, size=replace.size)
# store result
df = pd.DataFrame(idct, columns=['income', 'distance', 'lazy', 'transport'])
df['transport'] = df['transport'].map(nr_to_label)
df.to_csv('transport.csv', sep=';', encoding='utf-8')
| 3.34375 | 3 |
dataset/lfw.py | Vicent-xd/Residual_autoencoding | 432 | 12765685 | <filename>dataset/lfw.py
import os
import numpy as np
import joblib
from skimage import transform
import deeppy as dp
from .augment import (img_augment, sample_img_augment_params, AugmentedFeed,
SupervisedAugmentedFeed)
from .util import img_transform
cachedir = os.getenv('CACHE_HOME', './cache')
mem = joblib.Memory(cachedir=os.path.join(cachedir, 'lfw'))
@mem.cache
def lfw_imgs(alignment):
"""Load LFW images for the given alignment variant.

For 'landmarks', each original image is warped by a similarity transform
estimated between its 68 facial landmarks and the dataset-mean landmarks;
any other value is passed straight to dp.dataset.LFW. Results are cached
on disk by joblib.
"""
if alignment == 'landmarks':
dataset = dp.dataset.LFW('original')
imgs = dataset.imgs
landmarks = dataset.landmarks('68')
n_landmarks = 68
# Mean landmark positions, split into x and y coordinate rows.
landmarks_mean = np.mean(landmarks, axis=0)
landmarks_mean = np.array([landmarks_mean[:n_landmarks],
landmarks_mean[n_landmarks:]])
aligned_imgs = []
for img, points in zip(imgs, landmarks):
points = np.array([points[:n_landmarks], points[n_landmarks:]])
# Similarity transform mapping the mean landmarks onto this face.
transf = transform.estimate_transform('similarity',
landmarks_mean.T, points.T)
# Warp in float [0, 1] space, then convert back to uint8.
img = img / 255.
img = transform.warp(img, transf, order=3)
img = np.round(img*255).astype(np.uint8)
aligned_imgs.append(img)
imgs = np.array(aligned_imgs)
else:
dataset = dp.dataset.LFW(alignment)
imgs = dataset.imgs
return imgs
def lfw_imgs_split(alignment, split_name, with_attributes=True, test_fold=0):
    """Return LFW images (and optionally attribute vectors) for a split.

    :param alignment: alignment variant forwarded to lfw_imgs().
    :param split_name: one of 'testtrain', 'valtrain', 'val' or 'test'.
    :param with_attributes: if True, also return attribute vectors and drop
        images without attribute annotations.
    :param test_fold: fold index used by the 'testtrain'/'test' splits.
    """
    imgs = lfw_imgs(alignment)
    dataset = dp.dataset.LFW()
    if split_name == 'testtrain':
        # Train on everyone except the people in the chosen test fold.
        all_persons = list(dataset.index.keys())
        test_persons = dataset.people_splits['test'][test_fold]
        persons = [p for p in all_persons if p not in test_persons]
    elif split_name == 'valtrain':
        # BUG FIX: the original assigned this list to `test_persons`,
        # leaving `persons` undefined and raising NameError for this split.
        persons = dataset.people_splits['train']
    elif split_name == 'val':
        persons = dataset.people_splits[split_name]
    elif split_name == 'test':
        persons = dataset.people_splits[split_name][test_fold]
    else:
        raise ValueError('invalid split_name: %s' % split_name)
    if not with_attributes:
        new_imgs = []
        for person_id in persons:
            for img_idx in dataset.index[person_id]:
                new_imgs.append(imgs[img_idx])
        imgs = np.array(new_imgs)
        return imgs
    # Extract attributes vectors and discard images without attributes
    new_imgs = []
    attrs = []
    for person_id in persons:
        if person_id in dataset.attributes:
            # Attribute image numbers are 1-based; index entries are 0-based.
            for img_no in range(1, len(dataset.index[person_id])+1):
                if img_no in dataset.attributes[person_id]:
                    new_imgs.append(imgs[dataset.index[person_id][img_no-1]])
                    attrs.append(dataset.attributes[person_id][img_no])
    imgs = np.array(new_imgs)
    attrs = np.array(attrs).astype(dp.float_)
    return imgs, attrs
def _resize(args):
    """Center-crop an image to crop_size and rescale it to rescale_size.

    *args* is a (img, crop_size, rescale_size) tuple so the function can be
    used with joblib.delayed. Returns a uint8 RGB image.
    """
    img, crop_size, rescale_size = args
    margin = (img.shape[0] - crop_size) // 2
    cropped = img[margin:-margin, margin:-margin]
    resized = transform.resize(cropped, (rescale_size, rescale_size, 3), order=3)
    return (resized * 255).astype(np.uint8)
def _resize_augment(args):
    """Randomly augment an image, then center-crop and rescale it.

    Same tuple argument convention as _resize; augmentation parameters are
    drawn fresh on every call.
    """
    img, crop_size, rescale_size = args
    params = sample_img_augment_params(
        translation_sigma=1.0, scale_sigma=0.01, rotation_sigma=0.01,
        gamma_sigma=0.07, contrast_sigma=0.07, hue_sigma=0.0125
    )
    augmented = img_augment(img, *params)
    return _resize((augmented, crop_size, rescale_size))
@mem.cache
def resize_imgs(imgs, crop_size, rescale_size, n_augment=0):
    """Crop/rescale all images in parallel, optionally with augmentation.

    With n_augment == 0 every image is deterministically center-cropped and
    rescaled; otherwise n_augment augmented samples are produced by cycling
    through the input images.
    """
    if n_augment == 0:
        preprocess_fun = _resize
        n_imgs = len(imgs)
    else:
        preprocess_fun = _resize_augment
        n_imgs = n_augment

    def img_iter():
        # Cycle through the source images until n_imgs have been yielded.
        for idx in range(n_imgs):
            yield imgs[idx % len(imgs)]

    with joblib.Parallel(n_jobs=-2) as parallel:
        processed = parallel(
            joblib.delayed(preprocess_fun)((img, crop_size, rescale_size))
            for img in img_iter())
    return np.array(processed)
@mem.cache
def feeds(alignment, crop_size, rescale_size, batch_size, epoch_size,
n_augment=int(1e5), with_attributes=False, split='val'):
"""Build (train_feed, test_feed) for the requested LFW split.

NOTE(review): any split other than 'val'/'test' leaves train_split
undefined and raises NameError below -- confirm whether an explicit
error is wanted here.
"""
if split == 'val':
train_split = 'valtrain'
test_split = 'val'
elif split == 'test':
train_split = 'testtrain'
test_split = 'test'
x_train, y_train = lfw_imgs_split(alignment, train_split)
# Shuffle training images
idxs = np.random.permutation(len(x_train))
x_train = x_train[idxs]
y_train = y_train[idxs]
if n_augment > 0:
# Repeat labels to match the n_augment augmented images produced below.
y_train = y_train[np.arange(n_augment) % len(x_train)]
x_train = resize_imgs(x_train, crop_size, rescale_size, n_augment)
# Reorder to (batch, channel, height, width).
x_train = np.transpose(x_train, (0, 3, 1, 2))
x_test, y_test = lfw_imgs_split(alignment, test_split)
x_test = resize_imgs(x_test, crop_size, rescale_size)
x_test = img_transform(x_test, to_bc01=True)
if with_attributes:
train_feed = SupervisedAugmentedFeed(
x_train, y_train, batch_size=batch_size, epoch_size=epoch_size
)
test_feed = dp.SupervisedFeed(
x_test, y_test, batch_size=batch_size
)
else:
train_feed = AugmentedFeed(x_train, batch_size, epoch_size)
test_feed = dp.Feed(x_test, batch_size)
return train_feed, test_feed
| 2.328125 | 2 |
core/migrations/0005_auto_20210422_2040.py | Neelamegam2000/QRcode-for-license | 0 | 12765686 | # Generated by Django 3.1.7 on 2021-04-22 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
"""Add Document.password and widen Document.file_url to 255 chars."""
dependencies = [
('core', '0004_auto_20210414_1640'),
]
operations = [
migrations.AddField(
model_name='document',
name='password',
field=models.CharField(blank=True, max_length=16),
),
migrations.AlterField(
model_name='document',
name='file_url',
field=models.CharField(blank=True, max_length=255),
),
]
| 1.585938 | 2 |
Publish_Files_In_Google_Drive/uploadFiles.py | OseiasBeu/PYTHON_PROJECTS | 0 | 12765687 | from __future__ import print_function
import os
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient.http import MediaFileUpload
import pandas as pd
import sys
def upload_files():
    """Upload the configured FILES to a Google Drive folder.

    Removes any stale ``fileIds.csv``, authenticates against the Drive API
    (running the OAuth flow if the stored credentials are missing/invalid),
    uploads each file with its MIME type, and writes the resulting Drive
    file ids to ``fileIds.csv``.
    """
    try:
        import argparse
        flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
    except ImportError:
        flags = None

    # Remove the previous run's output; narrow the former bare `except:` to
    # OSError so real errors (KeyboardInterrupt, NameError, ...) propagate.
    try:
        os.remove("fileIds.csv")
        print('Arquivo fileIds removido com sucesso!')
    except OSError:
        print("Oops!", sys.exc_info()[0], "occurred.")
        print('Arquivo fileIds não encontrado!')

    SCOPES = 'PATH/drive.file'
    store = file.Storage('storage.json')
    creds = store.get()
    if not creds or creds.invalid:
        # No valid cached credentials: run the OAuth browser flow.
        flow = client.flow_from_clientsecrets(
            'client_secret.json', scope=SCOPES)
        creds = tools.run_flow(flow, store, flags) \
            if flags else tools.run(flow, store)
    DRIVE = build('drive', 'v3', http=creds.authorize(Http()))

    # (local file name, MIME type) pairs to upload.
    FILES = (
        ('YOUR_FILE.csv','(application/vnd.ms-excel)'),
        ('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
        ('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
        ('YOUR_FILE.xlsx','(application/vnd.openxmlformats-officedocument.spreadsheetml.sheet)'),
    )
    folder_id = 'YOUR FOLDER ID'
    filesIds = []
    for filename, mimeType in FILES:
        metadata = {'name': filename,
                    'parents': [folder_id],
                    'resumable': True,}
        if mimeType:
            metadata['mimeType'] = mimeType
        res = DRIVE.files().create(body=metadata, media_body=filename).execute()
        filesIds.append(res.get('id'))
        if res:
            print('Upload "%s" (%s): ' %(filename,res['mimeType']))
            print('File ID: %s' %(res.get('id')))

    # Persist the uploaded file ids for later runs/consumers.
    df = pd.DataFrame(filesIds)
    df.to_csv('fileIds.csv', sep=';', index=False, header=None)
src/command_modules/azure-cli-interactive/azclishell/_dump_commands.py | v-Ajnava/azure-cli | 0 | 12765688 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from importlib import import_module
import json
import os
import pkgutil
import yaml
from azure.cli.core.application import APPLICATION, Configuration
from azure.cli.core.commands import _update_command_definitions, BLACKLISTED_MODS
from azure.cli.core.help_files import helps
from azure.cli.core.commands.arm import add_id_parameters
import azclishell.configuration as config
class LoadFreshTable(object):
"""
this class generates and dumps the fresh command table into a file
as well as installs all the modules
"""
def __init__(self):
# Populated by dump_command_table(); None until then.
self.command_table = None
def install_modules(self):
"""Import every non-blacklisted command module and load its commands."""
installed_command_modules = []
for cmd in self.command_table:
try:
self.command_table[cmd].load_arguments()
except (ImportError, ValueError):
pass
mods_ns_pkg = import_module('azure.cli.command_modules')
for _, modname, _ in pkgutil.iter_modules(mods_ns_pkg.__path__):
if modname not in BLACKLISTED_MODS:
installed_command_modules.append(modname)
for mod in installed_command_modules:
try:
mod = import_module('azure.cli.command_modules.' + mod)
mod.load_params(mod)
mod.load_commands()
except Exception:  # pylint: disable=broad-except
print("Error loading: {}".format(mod))
_update_command_definitions(self.command_table)
def load_help_files(self, data):
""" loads all the extra information from help files """
for cmd in helps:
# NOTE(review): yaml.load without an explicit Loader is unsafe on
# untrusted input -- confirm the helps content is trusted.
diction_help = yaml.load(helps[cmd])
# extra descriptions
if "short-summary" in diction_help:
if cmd in data:
data[cmd]['help'] = diction_help["short-summary"]
else:
data[cmd] = {
'help': diction_help["short-summary"],
'parameters': {}
}
if callable(data[cmd]['help']):
data[cmd]['help'] = data[cmd]['help']()
# if there is extra help for this command but it's not reflected in the command table
if cmd not in data:
print("Command: {} not in Command Table".format(cmd))
continue
# extra parameters
if "parameters" in diction_help:
for param in diction_help["parameters"]:
if param["name"].split()[0] not in data[cmd]['parameters']:
options = {
'name': [],
'required': '',
'help': ''
}
# NOTE(review): this replaces the whole parameters dict with a
# single entry rather than adding to it -- confirm intent.
data[cmd]['parameters'] = {
param["name"].split()[0]: options
}
if "short-summary" in param:
data[cmd]['parameters'][param["name"].split()[0]]['help']\
= param["short-summary"]
# extra examples
if "examples" in diction_help:
examples = []
for example in diction_help["examples"]:
examples.append([example['name'], example['text']])
data[cmd]['examples'] = examples
def dump_command_table(self):
""" dumps the command table """
self.command_table = APPLICATION.configuration.get_command_table()
command_file = config.CONFIGURATION.get_help_files()
self.install_modules()
add_id_parameters(self.command_table)
data = {}
for cmd in self.command_table:
com_descrip = {}  # commands to their descriptions, examples, and parameter info
param_descrip = {}  # parameters to their aliases, required, and descriptions
try:
command_description = self.command_table[cmd].description
if callable(command_description):
command_description = command_description()
com_descrip['help'] = command_description
com_descrip['examples'] = ""
# checking all the parameters for a single command
for key in self.command_table[cmd].arguments:
required = ""
help_desc = ""
if self.command_table[cmd].arguments[key].type.settings.get('required'):
required = "[REQUIRED]"
if self.command_table[cmd].arguments[key].type.settings.get('help'):
help_desc = self.command_table[cmd].arguments[key].type.settings.get('help')
# checking aliasing
name_options = []
for name in self.command_table[cmd].arguments[key].options_list:
name_options.append(name)
options = {
'name': name_options,
'required': required,
'help': help_desc
}
# the key is the first alias option
param_descrip[self.command_table[cmd].arguments[key].options_list[0]] = options
com_descrip['parameters'] = param_descrip
data[cmd] = com_descrip
except (ImportError, ValueError):
pass
self.load_help_files(data)
# dump into the cache file
with open(os.path.join(get_cache_dir(), command_file), 'w') as help_file:
json.dump(data, help_file)
json.dump(data, help_file)
def get_cache_dir():
    """Return the shell's cache directory, creating it if necessary.

    The directory lives at '<azure config dir>/cache'.
    """
    azure_folder = config.get_config_dir()
    cache_path = os.path.join(azure_folder, 'cache')
    try:
        # makedirs is recursive, so this also creates azure_folder if needed.
        # EAFP avoids the check-then-create race of the original code while
        # remaining Python 2 compatible (no exist_ok).
        os.makedirs(cache_path)
    except OSError:
        if not os.path.isdir(cache_path):
            raise
    return cache_path
FRESH_TABLE = LoadFreshTable()
| 2.078125 | 2 |
examples/team-fullmetal/dockerComp/src/server/config.py | connectthefuture/docker-hacks | 0 | 12765689 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Flask debug flag; keep False outside local development.
DEBUG = False
import os
# Absolute path of the directory containing this config module.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
THREADS_PER_PAGE = 2
# NOTE(review): SECRET_KEY is blank here -- it must be set to a real secret
# before any production deployment; confirm where it is injected.
SECRET_KEY = ""
# Dictionary that holds all the template configuration
TEMPLATE_CONFIGURATION = {
"title" : "dockerComp DDSC (Dockerized Distributed Scientific Computing)",
"header_text" : "dockerComp",
}
# Bind address and port for the development server.
HOST = "0.0.0.0"
PORT = 5000
| 1.5 | 2 |
bridges/api/endpoints/votes.py | pegasystems/building-bridges | 20 | 12765690 | <reponame>pegasystems/building-bridges<gh_stars>10-100
import logging
from http import HTTPStatus
from typing import Dict, Tuple
from flask import request
from flask_restx import Resource
from bridges.api.endpoints.surveys import survey_api
from bridges.api.parse_args import put_question_parser
from bridges.api import logic
from bridges.api.restplus import api
from bridges.database.objects.survey import Survey
log = logging.getLogger(__name__)
ns = api.namespace('surveys', description='Operations related to surveys')
@ns.route('/<string:survey_url>/questions/<string:question_id>/vote')
class VotesCollection(Resource):
"""
Api points that operates on votes collection
in one question.
"""
@api.expect(put_question_parser, validate=True)
@survey_api.voting_enabled
def put(self, question_id: str) -> Tuple[Dict, int]:
"""
Add new vote
"""
logic.add_vote(
question_id=question_id,
user=request.user,
is_upvote=put_question_parser.parse_args(request)['type'] == 'up')
return None, HTTPStatus.CREATED
@survey_api.voting_enabled
def delete(self, question_id: str) -> Tuple[Dict, int]:
"""
Delete vote
"""
logic.delete_vote(question_id=question_id, user=request.user)
return None, HTTPStatus.NO_CONTENT
| 2.484375 | 2 |
tests/frontend/test_lib.py | CNR-ITTIG/plasodfaxp | 1 | 12765691 | <reponame>CNR-ITTIG/plasodfaxp
# -*- coding: utf-8 -*-
"""Front-end related functions and classes for testing."""
import io
import os
import unittest
from plaso.analysis import interface as analysis_interface
class StringIOOutputWriter(object):
  """Output writer that collects everything written into an in-memory buffer."""

  def __init__(self):
    """Initialize the in-memory output writer."""
    super(StringIOOutputWriter, self).__init__()
    self._buffer = io.StringIO()
    # Lowercase alias so instances can stand in for a filehandle.
    self.write = self.Write

  def flush(self):
    """Flush the internal buffer (no-op for StringIO, kept for API parity)."""
    self._buffer.flush()

  def GetValue(self):
    """Return everything written so far as a single string."""
    return self._buffer.getvalue()

  def GetLine(self):
    """Return one line read from the current buffer position."""
    return self._buffer.readline()

  def SeekToBeginning(self):
    """Rewind the buffer so reads start from the beginning."""
    self._buffer.seek(0)

  def Write(self, string):
    """Append *string* to the buffer."""
    self._buffer.write(string)
class TestAnalysisPlugin(analysis_interface.AnalysisPlugin):
"""Test analysis plugin.

Stub implementation: both interface methods are deliberate no-ops so the
plugin can be registered in front-end tests without doing any analysis.
"""
NAME = u'test_analysis_plugin'
def CompileReport(self, unused_analysis_mediator):
"""Compiles a report of the analysis.

After the plugin has received every copy of an event to
analyze this function will be called so that the report
can be assembled.

Args:
analysis_mediator: The analysis mediator object (instance of
AnalysisMediator).

Returns:
The analysis report (instance of AnalysisReport).
"""
return
def ExamineEvent(
self, unused_analysis_mediator, unused_event_object, **unused_kwargs):
"""Analyzes an event object.

Args:
analysis_mediator: The analysis mediator object (instance of
AnalysisMediator).
event_object: An event object (instance of EventObject).
"""
return
class FrontendTestCase(unittest.TestCase):
  """Base unit test case for front-end tests."""

  # Data directories, resolved relative to the current working directory.
  _DATA_PATH = os.path.join(os.getcwd(), u'data')
  _TEST_DATA_PATH = os.path.join(os.getcwd(), u'test_data')

  # Show full diff results, part of TestCase so does not follow our naming
  # conventions.
  maxDiff = None

  def _GetTestFilePath(self, path_segments):
    """Build the path of a test file inside the test data directory.

    Args:
      path_segments: the path segments inside the test data directory.

    Returns:
      A path of the test file.
    """
    # os.path.join expects individual segments, hence the unpacking.
    return os.path.join(self._TEST_DATA_PATH, *path_segments)
| 2.34375 | 2 |
ruffus/test/test_follows_mkdir.py | pombreda/ruffus | 1 | 12765692 | <gh_stars>1-10
#!/usr/bin/env python
from __future__ import print_function
"""
test_follows_mkdir.py
"""
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
# Export ruffus's public names into this module's globals, preferring
# __all__ when the package defines it.
try:
attrlist = ruffus.__all__
except AttributeError:
attrlist = dir (ruffus)
for attr in attrlist:
if attr[0:2] != "__":
globals()[attr] = getattr (ruffus, attr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
directories = [os.path.abspath('a'), 'b']
# The task body is empty on purpose: the directories are created by the
# mkdir() indicators in the @follows decorator, not by the task itself.
@follows(mkdir(directories), mkdir('c'), mkdir('d', 'e'), mkdir('e'))
def task_which_makes_directories ():
pass
import unittest
class Test_task_mkdir(unittest.TestCase):
"""Check that @follows(mkdir(...)) creates directories a-e."""
def setUp (self):
"""
No fixture setup needed; directories are created by the pipeline run.
"""
pass
def tearDown (self):
"""
delete directories
"""
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), d)
os.rmdir(fullpath)
def test_mkdir (self):
"""Decorator-style pipeline run creates every directory."""
pipeline_run(multiprocess = 10, verbose = 0, pipeline= "main")
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), d)
self.assertTrue(os.path.exists(fullpath))
def test_newstyle_mkdir (self):
"""Object-style Pipeline API produces the same directories."""
test_pipeline = Pipeline("test")
test_pipeline.follows(task_which_makes_directories, mkdir(directories), mkdir('c'), mkdir('d', 'e'), mkdir('e'))
test_pipeline.run(multiprocess = 10, verbose = 0)
for d in 'abcde':
fullpath = os.path.join(os.path.dirname(__file__), d)
self.assertTrue(os.path.exists(fullpath))
# Run the test suite when executed directly.
if __name__ == '__main__':
unittest.main()
| 2.34375 | 2 |
parameters_9000.py | zhangjiannan/easyflow | 0 | 12765693 | password="<PASSWORD>(1<PASSWORD>,20,<PASSWORD>)$<PASSWORD>$28115a6960f48c6f11<PASSWORD>0bc838e9f2d622c0262"
| 0.8125 | 1 |
tests/limetr_gradient.py | rsoren/limetr | 0 | 12765694 | # test function gradient
def limetr_gradient():
    """Compare LimeTr's analytic gradient with its autodiff gradient.

    Builds the standard LimeTr test problem, evaluates both gradient
    implementations at a random point and returns True when they agree
    within tolerance, printing diagnostics otherwise.
    """
    import numpy as np
    from limetr.__init__ import LimeTr

    # setup test problem
    # -------------------------------------------------------------------------
    model = LimeTr.testProblem(use_trimming=True,
                               use_constraints=True,
                               use_regularizer=True,
                               use_uprior=True,
                               use_gprior=True,
                               know_obs_std=False,
                               share_obs_std=True)

    tol = 1e-6

    # evaluate both gradients at a random point with fixed gamma/delta
    # -------------------------------------------------------------------------
    x = np.random.randn(model.k)
    x[model.idx_gamma] = 0.1
    x[model.idx_delta] = 0.1

    grad_ad = model.gradient(x, use_ad=True)
    grad_an = model.gradient(x)
    err = np.linalg.norm(grad_ad - grad_an)
    ok = err < tol

    if not ok:
        print('err', err)
        print('tr_grad', grad_ad)
        print('my_grad', grad_an)

    return ok
| 2.5625 | 3 |
src/usecases/update/update_blog.py | lokaimoma/BLOGG | 13 | 12765695 | <filename>src/usecases/update/update_blog.py
from datetime import datetime
from typing import Callable
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from src.domain_logic.blog_domain import BlogDomain
from src.model import get_database_session
from src.model.blog import Blog
from src.usecases.insert.insert_blog import insert_blog
async def update_blog(blog_id: int, blog_info: BlogDomain,
                      func: Callable[[], AsyncSession] = get_database_session):
    """Update the blog identified by *blog_id* from *blog_info*.

    If no blog with that id exists, a new one is inserted instead (after
    the lookup session has been closed).
    """
    async with func() as session:
        result = await session.execute(select(Blog).filter(Blog.id == blog_id))
        existing: Blog = result.scalar_one_or_none()
        if existing:
            existing.title = blog_info.title
            existing.body = blog_info.body
            # Fall back to "now" when the domain object carries no timestamp.
            existing.last_updated = blog_info.last_updated or datetime.now()
            await session.commit()
            return
    # No matching row: create the blog via the insert use case.
    await insert_blog(blog_domain=blog_info)
| 2.40625 | 2 |
a2c/train.py | liuyuezhangadam/pyrl | 2 | 12765696 | from common.vec_env.vec_logger import VecLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
GAMMA = 0.99         # discount factor used in the return/GAE recursions below
TAU = 1.00           # GAE decay factor (commonly called lambda)
N_STEPS = 5          # rollout length collected before each parameter update
CLIP_GRAD = 50       # gradient-norm clipping threshold
COEF_VALUE = 0.5     # weight of the value loss in the total loss
COEF_ENTROPY = 0.01  # weight of the entropy bonus in the total loss
def train(args, venv, model, path, device):
"""Run A2C training with GAE on a vectorized environment.

Collects N_STEPS rollouts from `venv` with an LSTM policy, accumulates
value/policy/entropy losses over each rollout, and applies one clipped
Adam step per rollout until args.num_timesteps env steps have elapsed.
"""
N = args.num_processes
net = model(venv.observation_space.shape[0], venv.action_space.n).to(device)
net.train()
optimizer = optim.Adam(net.parameters(), lr=args.lr, amsgrad=args.amsgrad)
vlogger = VecLogger(N=N, path=path)
vlogger.add_model(net)
state = venv.reset()
state_v = torch.from_numpy(state).float().to(device)
# LSTM hidden/cell state, one row per parallel environment.
hx = torch.zeros(N, 512).to(device)
cx = torch.zeros(N, 512).to(device)
t = 0
while t < args.num_timesteps:
# Reset gradients
loss_value_v = torch.zeros(1, 1).to(device)
loss_policy_v = torch.zeros(1, 1).to(device)
loss_entropy_v = torch.zeros(1, 1).to(device)
gae_v = torch.zeros(N, 1).to(device)
# Truncate backprop at the rollout boundary.
hx.detach_()
cx.detach_()
reward_vs = []
done_vs = []
value_vs = []
log_prob_action_vs = []
entropy_vs = []
for step in range(N_STEPS):
# Perform action according to policy
value_v, logit_v, (hx, cx) = net(state_v, (hx, cx))
prob_v = F.softmax(logit_v, dim=1)
action_v = prob_v.multinomial(num_samples=1)
action = action_v.data.cpu().numpy()
log_prob_v = F.log_softmax(logit_v, dim=1)
log_prob_action_v = log_prob_v.gather(1, action_v)
entropy_v = -(log_prob_v * prob_v).sum(dim=1, keepdim=True)
# Receive reward and new state
state, reward, done, info = venv.step(action)
t += N
reward = np.expand_dims(reward, axis=1)
done = np.expand_dims(done, axis=1)
info = np.expand_dims(info, axis=1)
vlogger.log(t, reward, info)
state_v = torch.from_numpy(state).float().to(device)
reward_v = torch.from_numpy(reward).float().to(device)
done_v = torch.from_numpy(done.astype('int')).float().to(device)
reward_vs.append(reward_v)
done_vs.append(done_v)
value_vs.append(value_v)
log_prob_action_vs.append(log_prob_action_v)
entropy_vs.append(entropy_v)
# Reset the LSTM state if done
hx = (1 - done_v) * hx
cx = (1 - done_v) * cx
# R: bootstrap value of the last state, zeroed for finished episodes.
R_v = (1 - done_v) * net(state_v, (hx, cx))[0]
value_vs.append(R_v)
# Walk the rollout backwards, accumulating returns and GAE.
for i in reversed(range(len(reward_vs))):
R_v = (1 - done_vs[i]) * GAMMA * R_v + reward_vs[i]
# Accumulate gradients
adv_v = R_v.detach() - value_vs[i]
# Generalized Advantage Estimation
delta_t = reward_vs[i] + (1 - done_vs[i]) * GAMMA * value_vs[i + 1] - value_vs[i]
gae_v = gae_v * (1 - done_vs[i]) * GAMMA * TAU + delta_t
loss_value_v += (0.5 * adv_v.pow(2)).sum()
loss_policy_v -= (log_prob_action_vs[i] * gae_v.detach()).sum()  # cautious: detach()
loss_entropy_v -= (entropy_vs[i]).sum()
net.zero_grad()
loss_v = COEF_VALUE * loss_value_v + loss_policy_v + COEF_ENTROPY * loss_entropy_v
loss_v.backward()
nn.utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
optimizer.step()
venv.close()
| 2.28125 | 2 |
_assignments/pandas/dataframe/pandas_df_groupby_a.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | 0 | 12765697 | """
* Assignment: DataFrame Groupby Phones
* Complexity: easy
* Lines of code: 5 lines
* Time: 8 min
English:
1. Read data from `DATA` as `df: pd.DataFrame`
2. Give information about total number of all phone calls for each calendar month
3. Run doctests - all must succeed
Polish:
1. Wczytaj dane z `DATA` jako `df: pd.DataFrame`
2. Podaj informacje o łącznej liczbie wszystkich połączeń telefonicznych dla każdego miesiąca kalendarzowego
3. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> pd.set_option('display.width', 500)
>>> pd.set_option('display.max_columns', 10)
>>> pd.set_option('display.max_rows', 10)
>>> assert result is not Ellipsis, \
'Assign result to variable: `result`'
>>> assert type(result) is pd.Series, \
'Variable `result` must be a `pd.Series` type'
>>> result # doctest: +NORMALIZE_WHITESPACE
year month
1999 10 16309.0
11 16780.0
12 14861.0
2000 1 18705.0
2 11019.0
3 14647.0
Name: duration, dtype: float64
"""
import pandas as pd
DATA = 'https://python.astrotech.io/_static/phones-pl.csv'
result = ...
| 3.90625 | 4 |
models_nonconvex_simple2/waternd2.py | grossmann-group/pyomo-MINLP-benchmarking | 0 | 12765698 | # MINLP written by GAMS Convert at 08/20/20 01:30:51
#
# Equation counts
# Total E G L N X C B
# 250 105 0 145 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 233 161 72 0 0 0 0 0
# FX 11 11 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 1151 626 525 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# Binary selection variables b1..b72.  In the GAMS-Convert output each one
# was spelled out as a literal `m.bN = Var(within=Binary, bounds=(0, 1),
# initialize=0)` statement; generating them in a loop produces exactly the
# same model components (setattr on a Pyomo Block registers the Var under
# the given attribute name).  They act as on/off switches in the big-M
# linking constraints further down (c106 onwards).
for k in range(1, 73):
    setattr(m, "b%d" % k, Var(within=Binary, bounds=(0, 1), initialize=0))
# Continuous variables x74..x233, bounds and initial points exactly as
# emitted by GAMS Convert.  Variables whose lower and upper bounds coincide
# are the 11 fixed (FX) variables mentioned in the header statistics.
m.x74 = Var(within=Reals,bounds=(40,300),initialize=40)
m.x75 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,300),initialize=0)
# x83..x87: fixed by equal lower/upper bounds (40, 50, 60, 70, 80).
m.x83 = Var(within=Reals,bounds=(40,40),initialize=40)
m.x84 = Var(within=Reals,bounds=(50,50),initialize=50)
m.x85 = Var(within=Reals,bounds=(60,60),initialize=60)
m.x86 = Var(within=Reals,bounds=(70,70),initialize=70)
m.x87 = Var(within=Reals,bounds=(80,80),initialize=80)
m.x88 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x136 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,40),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,60),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,80),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x151 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x152 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x153 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x154 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x155 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x156 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x157 = Var(within=Reals,bounds=(0,300),initialize=0)
m.x158 = Var(within=Reals,bounds=(40,300),initialize=40)
# x159..x161: fixed at 0.
m.x159 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,50),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,25),initialize=0)
# x174..x176: fixed by equal lower/upper bounds (25, 37.5, 25).
m.x174 = Var(within=Reals,bounds=(25,25),initialize=25)
m.x175 = Var(within=Reals,bounds=(37.5,37.5),initialize=37.5)
m.x176 = Var(within=Reals,bounds=(25,25),initialize=25)
m.x177 = Var(within=Reals,bounds=(20,70),initialize=20)
m.x178 = Var(within=Reals,bounds=(20,70),initialize=20)
m.x179 = Var(within=Reals,bounds=(20,70),initialize=20)
m.x180 = Var(within=Reals,bounds=(16.6666666666667,66.6666666666667),initialize=16.6666666666667)
m.x181 = Var(within=Reals,bounds=(16.6666666666667,66.6666666666667),initialize=16.6666666666667)
m.x182 = Var(within=Reals,bounds=(16.6666666666667,66.6666666666667),initialize=16.6666666666667)
m.x183 = Var(within=Reals,bounds=(28.5714285714286,78.5714285714286),initialize=28.5714285714286)
m.x184 = Var(within=Reals,bounds=(28.5714285714286,78.5714285714286),initialize=28.5714285714286)
m.x185 = Var(within=Reals,bounds=(28.5714285714286,78.5714285714286),initialize=28.5714285714286)
m.x186 = Var(within=Reals,bounds=(12.5,37.5),initialize=12.5)
m.x187 = Var(within=Reals,bounds=(12.5,37.5),initialize=12.5)
m.x188 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,37.5),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,70),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,66.6666666666667),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,66.6666666666667),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,66.6666666666667),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,37.5),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,37.5),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,25),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,3.92857142857142),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,78.5714285714286),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,10),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,10),initialize=0)
m.obj = Objective(expr=0.1*(16800*(0.001 + m.x155)**0.7 + 9500*(0.001 + m.x156)**0.7 + 12600*(0.001 + m.x157)**0.7) +
8000*m.x155 + 320*m.x156 + 53.6*m.x157 + 0.1*(100*(0.001 + m.x75)**0.6 + 100*(0.001 + m.x76)**0.6
+ 100*(0.001 + m.x77)**0.6 + 100*(0.001 + m.x78)**0.6 + 100*(0.001 + m.x79)**0.6 + 100*(0.001 +
m.x80)**0.6 + 100*(0.001 + m.x81)**0.6 + 100*(0.001 + m.x82)**0.6 + 100*(0.001 + m.x88)**0.6 +
100*(0.001 + m.x89)**0.6 + 100*(0.001 + m.x90)**0.6 + 100*(0.001 + m.x91)**0.6 + 100*(0.001 +
m.x92)**0.6 + 100*(0.001 + m.x93)**0.6 + 100*(0.001 + m.x94)**0.6 + 100*(0.001 + m.x95)**0.6 +
100*(0.001 + m.x96)**0.6 + 100*(0.001 + m.x97)**0.6 + 100*(0.001 + m.x98)**0.6 + 100*(0.001 +
m.x99)**0.6 + 100*(0.001 + m.x100)**0.6 + 100*(0.001 + m.x101)**0.6 + 100*(0.001 + m.x102)**0.6
+ 100*(0.001 + m.x103)**0.6 + 100*(0.001 + m.x104)**0.6 + 100*(0.001 + m.x105)**0.6 + 100*(0.001
+ m.x106)**0.6 + 100*(0.001 + m.x107)**0.6 + 100*(0.001 + m.x108)**0.6 + 100*(0.001 + m.x109)**
0.6 + 100*(0.001 + m.x110)**0.6 + 100*(0.001 + m.x111)**0.6 + 100*(0.001 + m.x112)**0.6 + 100*(
0.001 + m.x113)**0.6 + 100*(0.001 + m.x114)**0.6 + 100*(0.001 + m.x115)**0.6 + 100*(0.001 +
m.x116)**0.6 + 100*(0.001 + m.x117)**0.6 + 100*(0.001 + m.x118)**0.6 + 100*(0.001 + m.x119)**0.6
+ 100*(0.001 + m.x120)**0.6 + 100*(0.001 + m.x121)**0.6 + 100*(0.001 + m.x122)**0.6 + 100*(0.001
+ m.x123)**0.6 + 100*(0.001 + m.x124)**0.6 + 100*(0.001 + m.x125)**0.6 + 100*(0.001 + m.x126)**
0.6 + 100*(0.001 + m.x127)**0.6 + 100*(0.001 + m.x128)**0.6 + 100*(0.001 + m.x129)**0.6 + 100*(
0.001 + m.x130)**0.6 + 100*(0.001 + m.x131)**0.6 + 100*(0.001 + m.x132)**0.6 + 100*(0.001 +
m.x133)**0.6 + 100*(0.001 + m.x134)**0.6 + 100*(0.001 + m.x135)**0.6 + 100*(0.001 + m.x136)**0.6
+ 100*(0.001 + m.x137)**0.6 + 100*(0.001 + m.x138)**0.6 + 100*(0.001 + m.x139)**0.6 + 100*(0.001
+ m.x140)**0.6 + 100*(0.001 + m.x141)**0.6 + 100*(0.001 + m.x142)**0.6 + 100*(0.001 + m.x149)**
0.6 + 100*(0.001 + m.x150)**0.6 + 100*(0.001 + m.x151)**0.6 + 100*(0.001 + m.x143)**0.6 + 100*(
0.001 + m.x144)**0.6 + 100*(0.001 + m.x145)**0.6 + 100*(0.001 + m.x146)**0.6 + 100*(0.001 +
m.x147)**0.6 + 100*(0.001 + m.x148)**0.6) + 48*m.x75 + 48*m.x76 + 48*m.x77 + 48*m.x78 + 48*m.x79
+ 48*m.x80 + 48*m.x81 + 48*m.x82 + 48*m.x88 + 48*m.x89 + 48*m.x90 + 48*m.x91 + 48*m.x92 + 48*
m.x93 + 48*m.x94 + 48*m.x95 + 48*m.x96 + 48*m.x97 + 48*m.x98 + 48*m.x99 + 48*m.x100 + 48*m.x101
+ 48*m.x102 + 48*m.x103 + 48*m.x104 + 48*m.x105 + 48*m.x106 + 48*m.x107 + 48*m.x108 + 48*m.x109
+ 48*m.x110 + 48*m.x111 + 48*m.x112 + 48*m.x113 + 48*m.x114 + 48*m.x115 + 48*m.x116 + 48*m.x117
+ 48*m.x118 + 48*m.x119 + 48*m.x120 + 48*m.x121 + 48*m.x122 + 48*m.x123 + 48*m.x124 + 48*m.x125
+ 48*m.x126 + 48*m.x127 + 48*m.x128 + 48*m.x129 + 48*m.x130 + 48*m.x131 + 48*m.x132 + 48*m.x133
+ 48*m.x134 + 48*m.x135 + 48*m.x136 + 48*m.x137 + 48*m.x138 + 48*m.x139 + 48*m.x140 + 48*m.x141
+ 48*m.x142 + 48*m.x143 + 48*m.x144 + 48*m.x145 + 48*m.x146 + 48*m.x147 + 48*m.x148 + 48*m.x149
+ 48*m.x150 + 48*m.x151 + 0.6*m.b1 + 0.6*m.b2 + 0.6*m.b3 + 0.6*m.b4 + 0.6*m.b5 + 0.6*m.b6
+ 0.6*m.b7 + 0.6*m.b8 + 0.6*m.b9 + 0.6*m.b10 + 0.6*m.b11 + 0.6*m.b12 + 0.6*m.b13 + 0.6*m.b14
+ 0.6*m.b15 + 0.6*m.b16 + 0.6*m.b17 + 0.6*m.b18 + 0.6*m.b19 + 0.6*m.b20 + 0.6*m.b21 + 0.6*m.b22
+ 0.6*m.b23 + 0.6*m.b24 + 0.6*m.b25 + 0.6*m.b26 + 0.6*m.b27 + 0.6*m.b28 + 0.6*m.b29 + 0.6*m.b30
+ 0.6*m.b31 + 0.6*m.b32 + 0.6*m.b33 + 0.6*m.b34 + 0.6*m.b35 + 0.6*m.b36 + 0.6*m.b37 + 0.6*m.b38
+ 0.6*m.b39 + 0.6*m.b40 + 0.6*m.b41 + 0.6*m.b42 + 0.6*m.b43 + 0.6*m.b44 + 0.6*m.b45 + 0.6*m.b46
+ 0.6*m.b47 + 0.6*m.b48 + 0.6*m.b49 + 0.6*m.b50 + 0.6*m.b51 + 0.6*m.b52 + 0.6*m.b53 + 0.6*m.b54
+ 0.6*m.b55 + 0.6*m.b56 + 0.6*m.b57 + 0.6*m.b58 + 0.6*m.b59 + 0.6*m.b60 + 0.6*m.b61 + 0.6*m.b62
+ 0.6*m.b63 + 0.6*m.b64 + 0.6*m.b65 + 0.6*m.b66 + 0.6*m.b67 + 0.6*m.b68 + 0.6*m.b69 + 0.6*m.b70
+ 0.6*m.b71 + 0.6*m.b72 + 8000*m.x74, sense=minimize)
# c2..c7: linear balance constraints for x74..x79 against the fixed demands.
m.c2 = Constraint(expr= m.x74 - m.x75 - m.x76 - m.x77 - m.x78 - m.x79 - m.x80 - m.x81 - m.x82 == 0)
m.c3 = Constraint(expr= - m.x75 - m.x92 - m.x96 - m.x100 - m.x104 - m.x128 - m.x133 - m.x138 == -40)
m.c4 = Constraint(expr= - m.x76 - m.x88 - m.x97 - m.x101 - m.x105 - m.x129 - m.x134 - m.x139 == -50)
m.c5 = Constraint(expr= - m.x77 - m.x89 - m.x93 - m.x102 - m.x106 - m.x130 - m.x135 - m.x140 == -60)
m.c6 = Constraint(expr= - m.x78 - m.x90 - m.x94 - m.x98 - m.x107 - m.x131 - m.x136 - m.x141 == -70)
m.c7 = Constraint(expr= - m.x79 - m.x91 - m.x95 - m.x99 - m.x103 - m.x132 - m.x137 - m.x142 == -80)
# c8..c22: bilinear (flow * x17x/x18x/x19x/x20x factor) balance equations —
# these products are the nonconvex core of the model.
m.c8 = Constraint(expr=-(m.x92*m.x177 + m.x96*m.x180 + m.x100*m.x183 + m.x104*m.x186 + m.x128*m.x198 + m.x133*m.x201 +
                        m.x138*m.x204) + 40*m.x159 == 0)
m.c9 = Constraint(expr=-(m.x92*m.x178 + m.x96*m.x181 + m.x100*m.x184 + m.x104*m.x187 + m.x128*m.x199 + m.x133*m.x202 +
                        m.x138*m.x205) + 40*m.x160 == 0)
m.c10 = Constraint(expr=-(m.x92*m.x179 + m.x96*m.x182 + m.x100*m.x185 + m.x104*m.x188 + m.x128*m.x200 + m.x133*m.x203 +
                         m.x138*m.x206) + 40*m.x161 == 0)
m.c11 = Constraint(expr=-(m.x88*m.x174 + m.x97*m.x180 + m.x101*m.x183 + m.x105*m.x186 + m.x129*m.x198 + m.x134*m.x201 +
                         m.x139*m.x204) + 50*m.x162 == 0)
m.c12 = Constraint(expr=-(m.x88*m.x175 + m.x97*m.x181 + m.x101*m.x184 + m.x105*m.x187 + m.x129*m.x199 + m.x134*m.x202 +
                         m.x139*m.x205) + 50*m.x163 == 0)
m.c13 = Constraint(expr=-(m.x88*m.x176 + m.x97*m.x182 + m.x101*m.x185 + m.x105*m.x188 + m.x129*m.x200 + m.x134*m.x203 +
                         m.x139*m.x206) + 50*m.x164 == 0)
m.c14 = Constraint(expr=-(m.x89*m.x174 + m.x93*m.x177 + m.x102*m.x183 + m.x106*m.x186 + m.x130*m.x198 + m.x135*m.x201 +
                         m.x140*m.x204) + 60*m.x165 == 0)
m.c15 = Constraint(expr=-(m.x89*m.x175 + m.x93*m.x178 + m.x102*m.x184 + m.x106*m.x187 + m.x130*m.x199 + m.x135*m.x202 +
                         m.x140*m.x205) + 60*m.x166 == 0)
m.c16 = Constraint(expr=-(m.x89*m.x176 + m.x93*m.x179 + m.x102*m.x185 + m.x106*m.x188 + m.x130*m.x200 + m.x135*m.x203 +
                         m.x140*m.x206) + 60*m.x167 == 0)
m.c17 = Constraint(expr=-(m.x90*m.x174 + m.x94*m.x177 + m.x98*m.x180 + m.x107*m.x186 + m.x131*m.x198 + m.x136*m.x201 +
                         m.x141*m.x204) + 70*m.x168 == 0)
m.c18 = Constraint(expr=-(m.x90*m.x175 + m.x94*m.x178 + m.x98*m.x181 + m.x107*m.x187 + m.x131*m.x199 + m.x136*m.x202 +
                         m.x141*m.x205) + 70*m.x169 == 0)
m.c19 = Constraint(expr=-(m.x90*m.x176 + m.x94*m.x179 + m.x98*m.x182 + m.x107*m.x188 + m.x131*m.x200 + m.x136*m.x203 +
                         m.x141*m.x206) + 70*m.x170 == 0)
m.c20 = Constraint(expr=-(m.x91*m.x174 + m.x95*m.x177 + m.x99*m.x180 + m.x103*m.x183 + m.x132*m.x198 + m.x137*m.x201 +
                         m.x142*m.x204) + 80*m.x171 == 0)
m.c21 = Constraint(expr=-(m.x91*m.x175 + m.x95*m.x178 + m.x99*m.x181 + m.x103*m.x184 + m.x132*m.x199 + m.x137*m.x202 +
                         m.x142*m.x205) + 80*m.x172 == 0)
m.c22 = Constraint(expr=-(m.x91*m.x176 + m.x95*m.x179 + m.x99*m.x182 + m.x103*m.x185 + m.x132*m.x200 + m.x137*m.x203 +
                         m.x142*m.x206) + 80*m.x173 == 0)
# c23..c27: pin x83..x87 (these are already FX variables; the equations are
# kept exactly as GAMS Convert emitted them).
m.c23 = Constraint(expr= - m.x83 == -40)
m.c24 = Constraint(expr= - m.x84 == -50)
m.c25 = Constraint(expr= - m.x85 == -60)
m.c26 = Constraint(expr= - m.x86 == -70)
m.c27 = Constraint(expr= - m.x87 == -80)
m.c28 = Constraint(expr=-m.x83*m.x174 + 40*m.x159 == -1000)
m.c29 = Constraint(expr=-m.x83*m.x175 + 40*m.x160 == -1500)
m.c30 = Constraint(expr=-m.x83*m.x176 + 40*m.x161 == -1000)
m.c31 = Constraint(expr=-m.x84*m.x177 + 50*m.x162 == -1000)
m.c32 = Constraint(expr=-m.x84*m.x178 + 50*m.x163 == -1000)
m.c33 = Constraint(expr=-m.x84*m.x179 + 50*m.x164 == -1000)
m.c34 = Constraint(expr=-m.x85*m.x180 + 60*m.x165 == -1000)
m.c35 = Constraint(expr=-m.x85*m.x181 + 60*m.x166 == -1000)
m.c36 = Constraint(expr=-m.x85*m.x182 + 60*m.x167 == -1000)
m.c37 = Constraint(expr=-m.x86*m.x183 + 70*m.x168 == -2000)
m.c38 = Constraint(expr=-m.x86*m.x184 + 70*m.x169 == -2000)
m.c39 = Constraint(expr=-m.x86*m.x185 + 70*m.x170 == -2000)
m.c40 = Constraint(expr=-m.x87*m.x186 + 80*m.x171 == -1000)
m.c41 = Constraint(expr=-m.x87*m.x187 + 80*m.x172 == -1000)
m.c42 = Constraint(expr=-m.x87*m.x188 + 80*m.x173 == 0)
m.c43 = Constraint(expr= m.x83 - m.x88 - m.x89 - m.x90 - m.x91 - m.x108 - m.x109 - m.x110 - m.x123 == 0)
m.c44 = Constraint(expr= m.x84 - m.x92 - m.x93 - m.x94 - m.x95 - m.x111 - m.x112 - m.x113 - m.x124 == 0)
m.c45 = Constraint(expr= m.x85 - m.x96 - m.x97 - m.x98 - m.x99 - m.x114 - m.x115 - m.x116 - m.x125 == 0)
m.c46 = Constraint(expr= m.x86 - m.x100 - m.x101 - m.x102 - m.x103 - m.x117 - m.x118 - m.x119 - m.x126 == 0)
m.c47 = Constraint(expr= m.x87 - m.x104 - m.x105 - m.x106 - m.x107 - m.x120 - m.x121 - m.x122 - m.x127 == 0)
# c48..c62: simple aliasing equalities (x207..x221 mirror x174..x188).
m.c48 = Constraint(expr= - m.x174 + m.x207 == 0)
m.c49 = Constraint(expr= - m.x175 + m.x208 == 0)
m.c50 = Constraint(expr= - m.x176 + m.x209 == 0)
m.c51 = Constraint(expr= - m.x177 + m.x210 == 0)
m.c52 = Constraint(expr= - m.x178 + m.x211 == 0)
m.c53 = Constraint(expr= - m.x179 + m.x212 == 0)
m.c54 = Constraint(expr= - m.x180 + m.x213 == 0)
m.c55 = Constraint(expr= - m.x181 + m.x214 == 0)
m.c56 = Constraint(expr= - m.x182 + m.x215 == 0)
m.c57 = Constraint(expr= - m.x183 + m.x216 == 0)
m.c58 = Constraint(expr= - m.x184 + m.x217 == 0)
m.c59 = Constraint(expr= - m.x185 + m.x218 == 0)
m.c60 = Constraint(expr= - m.x186 + m.x219 == 0)
m.c61 = Constraint(expr= - m.x187 + m.x220 == 0)
m.c62 = Constraint(expr= - m.x188 + m.x221 == 0)
m.c63 = Constraint(expr= - m.x80 - m.x108 - m.x111 - m.x114 - m.x117 - m.x120 - m.x145 - m.x147 + m.x152 == 0)
m.c64 = Constraint(expr= - m.x81 - m.x109 - m.x112 - m.x115 - m.x118 - m.x121 - m.x143 - m.x148 + m.x153 == 0)
m.c65 = Constraint(expr= - m.x82 - m.x110 - m.x113 - m.x116 - m.x119 - m.x122 - m.x144 - m.x146 + m.x154 == 0)
# c66..c74: bilinear mixing balances for x152..x154.
m.c66 = Constraint(expr=m.x152*m.x189 - (m.x145*m.x225 + m.x147*m.x228 + m.x108*m.x207 + m.x111*m.x210 + m.x114*m.x213
                        + m.x117*m.x216 + m.x120*m.x219) == 0)
m.c67 = Constraint(expr=m.x152*m.x190 - (m.x145*m.x226 + m.x147*m.x229 + m.x108*m.x208 + m.x111*m.x211 + m.x114*m.x214
                        + m.x117*m.x217 + m.x120*m.x220) == 0)
m.c68 = Constraint(expr=m.x152*m.x191 - (m.x145*m.x227 + m.x147*m.x230 + m.x108*m.x209 + m.x111*m.x212 + m.x114*m.x215
                        + m.x117*m.x218 + m.x120*m.x221) == 0)
m.c69 = Constraint(expr=m.x153*m.x192 - (m.x143*m.x222 + m.x148*m.x228 + m.x109*m.x207 + m.x112*m.x210 + m.x115*m.x213
                        + m.x118*m.x216 + m.x121*m.x219) == 0)
m.c70 = Constraint(expr=m.x153*m.x193 - (m.x143*m.x223 + m.x148*m.x229 + m.x109*m.x208 + m.x112*m.x211 + m.x115*m.x214
                        + m.x118*m.x217 + m.x121*m.x220) == 0)
m.c71 = Constraint(expr=m.x153*m.x194 - (m.x143*m.x224 + m.x148*m.x230 + m.x109*m.x209 + m.x112*m.x212 + m.x115*m.x215
                        + m.x118*m.x218 + m.x121*m.x221) == 0)
m.c72 = Constraint(expr=m.x154*m.x195 - (m.x144*m.x222 + m.x146*m.x225 + m.x110*m.x207 + m.x113*m.x210 + m.x116*m.x213
                        + m.x119*m.x216 + m.x122*m.x219) == 0)
m.c73 = Constraint(expr=m.x154*m.x196 - (m.x144*m.x223 + m.x146*m.x226 + m.x110*m.x208 + m.x113*m.x211 + m.x116*m.x214
                        + m.x119*m.x217 + m.x122*m.x220) == 0)
m.c74 = Constraint(expr=m.x154*m.x197 - (m.x144*m.x224 + m.x146*m.x227 + m.x110*m.x209 + m.x113*m.x212 + m.x116*m.x215
                        + m.x119*m.x218 + m.x122*m.x221) == 0)
m.c75 = Constraint(expr= m.x152 - m.x155 == 0)
m.c76 = Constraint(expr= m.x153 - m.x156 == 0)
m.c77 = Constraint(expr= m.x154 - m.x157 == 0)
# c78..c86: x198..x206 are scaled/aliased copies of x189..x197 (the 0.05
# factor appears as 0.0499999999999999 in the converted data).
m.c78 = Constraint(expr= - 0.0499999999999999*m.x189 + m.x198 == 0)
m.c79 = Constraint(expr= - m.x190 + m.x199 == 0)
m.c80 = Constraint(expr= - m.x191 + m.x200 == 0)
m.c81 = Constraint(expr= - m.x192 + m.x201 == 0)
m.c82 = Constraint(expr= - m.x193 + m.x202 == 0)
m.c83 = Constraint(expr= - 0.0499999999999999*m.x194 + m.x203 == 0)
m.c84 = Constraint(expr= - m.x195 + m.x204 == 0)
m.c85 = Constraint(expr= - 0.0499999999999999*m.x196 + m.x205 == 0)
m.c86 = Constraint(expr= - m.x197 + m.x206 == 0)
m.c87 = Constraint(expr= - m.x128 - m.x129 - m.x130 - m.x131 - m.x132 - m.x143 - m.x144 - m.x149 + m.x155 == 0)
m.c88 = Constraint(expr= - m.x133 - m.x134 - m.x135 - m.x136 - m.x137 - m.x145 - m.x146 - m.x150 + m.x156 == 0)
m.c89 = Constraint(expr= - m.x138 - m.x139 - m.x140 - m.x141 - m.x142 - m.x147 - m.x148 - m.x151 + m.x157 == 0)
m.c90 = Constraint(expr= - m.x198 + m.x222 == 0)
m.c91 = Constraint(expr= - m.x199 + m.x223 == 0)
m.c92 = Constraint(expr= - m.x200 + m.x224 == 0)
m.c93 = Constraint(expr= - m.x201 + m.x225 == 0)
m.c94 = Constraint(expr= - m.x202 + m.x226 == 0)
m.c95 = Constraint(expr= - m.x203 + m.x227 == 0)
m.c96 = Constraint(expr= - m.x204 + m.x228 == 0)
m.c97 = Constraint(expr= - m.x205 + m.x229 == 0)
m.c98 = Constraint(expr= - m.x206 + m.x230 == 0)
m.c99 = Constraint(expr= - m.x123 - m.x124 - m.x125 - m.x126 - m.x127 - m.x149 - m.x150 - m.x151 + m.x158 == 0)
m.c100 = Constraint(expr=m.x158*m.x231 - (m.x123*m.x207 + m.x124*m.x210 + m.x125*m.x213 + m.x126*m.x216 + m.x127*m.x219
                         + m.x149*m.x222 + m.x150*m.x225 + m.x151*m.x228) == 0)
m.c101 = Constraint(expr=m.x158*m.x232 - (m.x123*m.x208 + m.x124*m.x211 + m.x125*m.x214 + m.x126*m.x217 + m.x127*m.x220
                         + m.x149*m.x223 + m.x150*m.x226 + m.x151*m.x229) == 0)
m.c102 = Constraint(expr=m.x158*m.x233 - (m.x123*m.x209 + m.x124*m.x212 + m.x125*m.x215 + m.x126*m.x218 + m.x127*m.x221
                         + m.x149*m.x224 + m.x150*m.x227 + m.x151*m.x230) == 0)
m.c103 = Constraint(expr=-(0.95*m.x152*m.x189 + m.x158*m.x231) == -6000)
m.c104 = Constraint(expr=-(0.95*m.x154*m.x196 + m.x158*m.x232) == -6500)
m.c105 = Constraint(expr=-(0.95*m.x153*m.x194 + m.x158*m.x233) == -5000)
# c106 onwards: big-M on/off linking — each flow variable is bounded above
# by (its own upper bound) * (its binary switch), paired with a redundant
# nonnegativity inequality emitted by the converter.
m.c106 = Constraint(expr= - 40*m.b58 + m.x128 <= 0)
m.c107 = Constraint(expr= - 50*m.b59 + m.x129 <= 0)
m.c108 = Constraint(expr= - 60*m.b60 + m.x130 <= 0)
m.c109 = Constraint(expr= - 70*m.b61 + m.x131 <= 0)
m.c110 = Constraint(expr= - 80*m.b62 + m.x132 <= 0)
m.c111 = Constraint(expr= - 40*m.b63 + m.x133 <= 0)
m.c112 = Constraint(expr= - 50*m.b64 + m.x134 <= 0)
m.c113 = Constraint(expr= - 60*m.b65 + m.x135 <= 0)
m.c114 = Constraint(expr= - 70*m.b66 + m.x136 <= 0)
m.c115 = Constraint(expr= - 80*m.b67 + m.x137 <= 0)
m.c116 = Constraint(expr= - 40*m.b68 + m.x138 <= 0)
m.c117 = Constraint(expr= - 50*m.b69 + m.x139 <= 0)
m.c118 = Constraint(expr= - 60*m.b70 + m.x140 <= 0)
m.c119 = Constraint(expr= - 70*m.b71 + m.x141 <= 0)
m.c120 = Constraint(expr= - 80*m.b72 + m.x142 <= 0)
m.c121 = Constraint(expr= - m.x128 <= 0)
m.c122 = Constraint(expr= - m.x129 <= 0)
m.c123 = Constraint(expr= - m.x130 <= 0)
m.c124 = Constraint(expr= - m.x131 <= 0)
m.c125 = Constraint(expr= - m.x132 <= 0)
m.c126 = Constraint(expr= - m.x133 <= 0)
m.c127 = Constraint(expr= - m.x134 <= 0)
m.c128 = Constraint(expr= - m.x135 <= 0)
m.c129 = Constraint(expr= - m.x136 <= 0)
m.c130 = Constraint(expr= - m.x137 <= 0)
m.c131 = Constraint(expr= - m.x138 <= 0)
m.c132 = Constraint(expr= - m.x139 <= 0)
m.c133 = Constraint(expr= - m.x140 <= 0)
m.c134 = Constraint(expr= - m.x141 <= 0)
m.c135 = Constraint(expr= - m.x142 <= 0)
m.c136 = Constraint(expr= - 40*m.b9 + m.x88 <= 0)
m.c137 = Constraint(expr= - 40*m.b10 + m.x89 <= 0)
m.c138 = Constraint(expr= - 40*m.b11 + m.x90 <= 0)
m.c139 = Constraint(expr= - 40*m.b12 + m.x91 <= 0)
m.c140 = Constraint(expr= - 40*m.b13 + m.x92 <= 0)
m.c141 = Constraint(expr= - 50*m.b14 + m.x93 <= 0)
m.c142 = Constraint(expr= - 50*m.b15 + m.x94 <= 0)
m.c143 = Constraint(expr= - 50*m.b16 + m.x95 <= 0)
m.c144 = Constraint(expr= - 40*m.b17 + m.x96 <= 0)
m.c145 = Constraint(expr= - 50*m.b18 + m.x97 <= 0)
m.c146 = Constraint(expr= - 60*m.b19 + m.x98 <= 0)
m.c147 = Constraint(expr= - 60*m.b20 + m.x99 <= 0)
m.c148 = Constraint(expr= - 40*m.b21 + m.x100 <= 0)
m.c149 = Constraint(expr= - 50*m.b22 + m.x101 <= 0)
m.c150 = Constraint(expr= - 60*m.b23 + m.x102 <= 0)
m.c151 = Constraint(expr= - 70*m.b24 + m.x103 <= 0)
m.c152 = Constraint(expr= - 40*m.b25 + m.x104 <= 0)
m.c153 = Constraint(expr= - 50*m.b26 + m.x105 <= 0)
m.c154 = Constraint(expr= - 60*m.b27 + m.x106 <= 0)
m.c155 = Constraint(expr= - 70*m.b28 + m.x107 <= 0)
m.c156 = Constraint(expr= - m.x88 <= 0)
m.c157 = Constraint(expr= - m.x89 <= 0)
m.c158 = Constraint(expr= - m.x90 <= 0)
m.c159 = Constraint(expr= - m.x91 <= 0)
m.c160 = Constraint(expr= - m.x92 <= 0)
m.c161 = Constraint(expr= - m.x93 <= 0)
m.c162 = Constraint(expr= - m.x94 <= 0)
m.c163 = Constraint(expr= - m.x95 <= 0)
m.c164 = Constraint(expr= - m.x96 <= 0)
m.c165 = Constraint(expr= - m.x97 <= 0)
m.c166 = Constraint(expr= - m.x98 <= 0)
m.c167 = Constraint(expr= - m.x99 <= 0)
m.c168 = Constraint(expr= - m.x100 <= 0)
m.c169 = Constraint(expr= - m.x101 <= 0)
m.c170 = Constraint(expr= - m.x102 <= 0)
m.c171 = Constraint(expr= - m.x103 <= 0)
m.c172 = Constraint(expr= - m.x104 <= 0)
m.c173 = Constraint(expr= - m.x105 <= 0)
m.c174 = Constraint(expr= - m.x106 <= 0)
m.c175 = Constraint(expr= - m.x107 <= 0)
m.c176 = Constraint(expr= - 40*m.b29 + m.x108 <= 0)
m.c177 = Constraint(expr= - 40*m.b30 + m.x109 <= 0)
m.c178 = Constraint(expr= - 40*m.b31 + m.x110 <= 0)
m.c179 = Constraint(expr= - 50*m.b32 + m.x111 <= 0)
m.c180 = Constraint(expr= - 50*m.b33 + m.x112 <= 0)
m.c181 = Constraint(expr= - 50*m.b34 + m.x113 <= 0)
m.c182 = Constraint(expr= - 60*m.b35 + m.x114 <= 0)
m.c183 = Constraint(expr= - 60*m.b36 + m.x115 <= 0)
m.c184 = Constraint(expr= - 60*m.b37 + m.x116 <= 0)
m.c185 = Constraint(expr= - 70*m.b38 + m.x117 <= 0)
m.c186 = Constraint(expr= - 70*m.b39 + m.x118 <= 0)
m.c187 = Constraint(expr= - 70*m.b40 + m.x119 <= 0)
m.c188 = Constraint(expr= - 80*m.b41 + m.x120 <= 0)
m.c189 = Constraint(expr= - 80*m.b42 + m.x121 <= 0)
m.c190 = Constraint(expr= - 80*m.b43 + m.x122 <= 0)
m.c191 = Constraint(expr= - m.x108 <= 0)
m.c192 = Constraint(expr= - m.x109 <= 0)
m.c193 = Constraint(expr= - m.x110 <= 0)
m.c194 = Constraint(expr= - m.x111 <= 0)
m.c195 = Constraint(expr= - m.x112 <= 0)
m.c196 = Constraint(expr= - m.x113 <= 0)
m.c197 = Constraint(expr= - m.x114 <= 0)
m.c198 = Constraint(expr= - m.x115 <= 0)
m.c199 = Constraint(expr= - m.x116 <= 0)
m.c200 = Constraint(expr= - m.x117 <= 0)
m.c201 = Constraint(expr= - m.x118 <= 0)
m.c202 = Constraint(expr= - m.x119 <= 0)
m.c203 = Constraint(expr= - m.x120 <= 0)
m.c204 = Constraint(expr= - m.x121 <= 0)
m.c205 = Constraint(expr= - m.x122 <= 0)
m.c206 = Constraint(expr= - 40*m.b44 + m.x123 <= 0)
m.c207 = Constraint(expr= - 50*m.b45 + m.x124 <= 0)
m.c208 = Constraint(expr= - 60*m.b46 + m.x125 <= 0)
m.c209 = Constraint(expr= - 70*m.b47 + m.x126 <= 0)
m.c210 = Constraint(expr= - 80*m.b48 + m.x127 <= 0)
m.c211 = Constraint(expr= - m.x123 <= 0)
m.c212 = Constraint(expr= - m.x124 <= 0)
m.c213 = Constraint(expr= - m.x125 <= 0)
m.c214 = Constraint(expr= - m.x126 <= 0)
m.c215 = Constraint(expr= - m.x127 <= 0)
m.c216 = Constraint(expr= - 300*m.b49 + m.x149 <= 0)
m.c217 = Constraint(expr= - 300*m.b50 + m.x150 <= 0)
m.c218 = Constraint(expr= - 300*m.b51 + m.x151 <= 0)
m.c219 = Constraint(expr= - m.x149 <= 0)
m.c220 = Constraint(expr= - m.x150 <= 0)
m.c221 = Constraint(expr= - m.x151 <= 0)
m.c222 = Constraint(expr= - 300*m.b52 + m.x143 <= 0)
m.c223 = Constraint(expr= - 300*m.b53 + m.x144 <= 0)
m.c224 = Constraint(expr= - 300*m.b54 + m.x145 <= 0)
m.c225 = Constraint(expr= - 300*m.b55 + m.x146 <= 0)
m.c226 = Constraint(expr= - 300*m.b56 + m.x147 <= 0)
m.c227 = Constraint(expr= - 300*m.b57 + m.x148 <= 0)
m.c228 = Constraint(expr= - m.x143 <= 0)
m.c229 = Constraint(expr= - m.x144 <= 0)
m.c230 = Constraint(expr= - m.x145 <= 0)
m.c231 = Constraint(expr= - m.x146 <= 0)
m.c232 = Constraint(expr= - m.x147 <= 0)
m.c233 = Constraint(expr= - m.x148 <= 0)
m.c234 = Constraint(expr= - 40*m.b1 + m.x75 <= 0)
m.c235 = Constraint(expr= - 50*m.b2 + m.x76 <= 0)
m.c236 = Constraint(expr= - 60*m.b3 + m.x77 <= 0)
m.c237 = Constraint(expr= - 70*m.b4 + m.x78 <= 0)
m.c238 = Constraint(expr= - 80*m.b5 + m.x79 <= 0)
m.c239 = Constraint(expr= - m.x75 <= 0)
m.c240 = Constraint(expr= - m.x76 <= 0)
m.c241 = Constraint(expr= - m.x77 <= 0)
m.c242 = Constraint(expr= - m.x78 <= 0)
m.c243 = Constraint(expr= - m.x79 <= 0)
m.c244 = Constraint(expr= - 300*m.b6 + m.x80 <= 0)
m.c245 = Constraint(expr= - 300*m.b7 + m.x81 <= 0)
m.c246 = Constraint(expr= - 300*m.b8 + m.x82 <= 0)
m.c247 = Constraint(expr= - m.x80 <= 0)
m.c248 = Constraint(expr= - m.x81 <= 0)
m.c249 = Constraint(expr= - m.x82 <= 0)
# c250: cardinality bound on the binaries (<= 72 is trivially satisfied by
# 72 binaries; kept verbatim from the converted model).
m.c250 = Constraint(expr= m.b1 + m.b2 + m.b3 + m.b4 + m.b5 + m.b6 + m.b7 + m.b8 + m.b9 + m.b10 + m.b11 + m.b12 + m.b13
                         + m.b14 + m.b15 + m.b16 + m.b17 + m.b18 + m.b19 + m.b20 + m.b21 + m.b22 + m.b23 + m.b24
                         + m.b25 + m.b26 + m.b27 + m.b28 + m.b29 + m.b30 + m.b31 + m.b32 + m.b33 + m.b34 + m.b35
                         + m.b36 + m.b37 + m.b38 + m.b39 + m.b40 + m.b41 + m.b42 + m.b43 + m.b44 + m.b45 + m.b46
                         + m.b47 + m.b48 + m.b49 + m.b50 + m.b51 + m.b52 + m.b53 + m.b54 + m.b55 + m.b56 + m.b57
                         + m.b58 + m.b59 + m.b60 + m.b61 + m.b62 + m.b63 + m.b64 + m.b65 + m.b66 + m.b67 + m.b68
                         + m.b69 + m.b70 + m.b71 + m.b72 <= 72)
| 1.664063 | 2 |
layers/poky/bitbake/lib/toaster/orm/migrations/0012_use_release_instead_of_up_branch.py | dtischler/px30-test | 53 | 12765699 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def branch_to_release(apps, schema_editor):
    """Forward data migration: point each Layer_Version at the Release that
    corresponds to its legacy ``up_branch`` (any HEAD branch maps to the
    "local" release). Rows that cannot be matched are left untouched and
    reported for manual fix-up.
    """
    Layer_Version = apps.get_model('orm', 'Layer_Version')
    Release = apps.get_model('orm', 'Release')

    print("Converting all layer version up_branches to releases")
    # Find all the layer versions which have an upbranch and convert them to
    # the release that they're for.
    for layer_version in Layer_Version.objects.filter(
            Q(release=None) & ~Q(up_branch=None)):
        try:
            # HEAD and local are equivalent
            if "HEAD" in layer_version.up_branch.name:
                release = Release.objects.get(name="local")
                layer_version.commit = "HEAD"
                layer_version.branch = "HEAD"
            else:
                # Otherwise the up_branch name matches a Release name directly.
                release = Release.objects.get(
                    name=layer_version.up_branch.name)

            layer_version.release = release
            layer_version.save()
        except Exception as e:
            # Swallow per-row failures so one bad row does not abort the
            # whole migration.
            # NOTE(review): "user the django admin" in this message looks
            # like a typo for "use the django admin".
            print("Couldn't work out an appropriate release for %s "
                  "the up_branch was %s "
                  "user the django admin interface to correct it" %
                  (layer_version.layer.name, layer_version.up_branch.name))
            print(e)
            continue
class Migration(migrations.Migration):
    # Replaces the legacy up_branch FK on Layer_Version with a Release FK,
    # migrating existing rows via branch_to_release (reverse is a no-op),
    # then drops the now-unused Branch model.
    dependencies = [
        ('orm', '0011_delete_layersource'),
    ]

    operations = [
        migrations.AddField(
            model_name='layer_version',
            name='release',
            field=models.ForeignKey(to='orm.Release', default=None, null=True),
        ),
        migrations.RunPython(branch_to_release,
                             reverse_code=migrations.RunPython.noop),
        migrations.RemoveField(
            model_name='layer_version',
            name='up_branch',
        ),
        migrations.DeleteModel(
            name='Branch',
        ),
    ]
| 2.03125 | 2 |
python/deprecated/mcpat_autogen/cache.py | JimmyZhang12/predict-T | 1 | 12765700 | # Copyright (c) 2020 University of Illinois
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
#
# cache.py
#
# Create and populate a cache class
from xml.etree import ElementTree
from xml.dom import minidom
class Cache:
  """Emit a McPAT <component> entry for a generic (e.g. L2/L3) cache.

  Translates gem5-style statistics (`stat_dict`), configuration
  (`config_dict`) and simulation settings (`sim_dict`) into the
  parameter/stat tables McPAT expects; `xml()` renders them as an
  ElementTree element.
  """
  def __init__(self, component_id, component_name, \
    stat_dict, config_dict, sim_dict, ruby=False):
    # Each table maps name -> [value, description]; the description is
    # emitted next to the element as an XML comment.
    self.parameters = \
    {
      "config" : \
        ["0,1,2,3,4,5,6,7","Cache Capacity, Block Width, Associativity,"\
        " Bank, Throughput w.r.t. core clock, Latency w.r.t. core clock," \
        " Output Width, Cache Policy: 0 no write or write-though with" \
        " non-write allocate; 1 write-back with write-allocate"],
      "buffer_sizes" : \
        ["0,1,2,3","Cache controller buffer sizes: miss_buffer_size" \
        "(MSHR), fill_buffer_size, prefetch_buffer_size, wb_buffer_size"],
      "clockrate" : \
        ["1000","Clock rate in MHz"],
      "vdd" : \
        ["1.2","Voltage"],
      "power_gating_vcc" : \
        ["-1","-1 means default power gating"],
      "ports" : \
        ["1,1,1","Number of R, W, RW ports"],
      "device_type" : \
        ["0","0: HP, 1: LP"]
    }
    self.stats = \
    {
      "duty_cycle" : \
        ["1.0",""],
      "read_accesses" : \
        ["0", "Cache Read Accesses Total"],
      "read_misses" : \
        ["0", "Cache Read Req Misses Total"],
      "write_accesses" : \
        ["0", "Cache Write Accesses Total"],
      "write_misses" : \
        ["0", "Cache Write Req Misses Total"],
      "conflicts" : \
        ["0", "Cache Replacements"]
    }
    self.name = component_name
    self.id = component_id

    # Fill in the parameters and stats from the gem5 dumps.
    # BUG FIX: the original branched on `ruby` but both branches were
    # character-for-character identical; the flag is kept only for
    # interface compatibility and a single code path is used.
    self.parameters["config"][0]= \
      ",".join([config_dict["size"],config_dict["tags.block_size"], \
      config_dict["assoc"],"1","1",config_dict["response_latency"], \
      config_dict["tags.block_size"],"0"])
    self.parameters["buffer_sizes"][0]= \
      ",".join([config_dict["mshrs"],config_dict["mshrs"], \
      config_dict["mshrs"],config_dict["mshrs"]])
    # Converts the clock period to MHz (assumes the gem5 "clock" value is a
    # period in picoseconds -- TODO confirm against the gem5 config dump).
    self.parameters["clockrate"][0]= \
      str((1.0e-6/float(config_dict["clock"]))*1.0e12)
    self.parameters["vdd"][0]= \
      str(float(sim_dict["voltage"]))
    self.stats["read_accesses"][0]= \
      str(int(stat_dict["ReadExReq_accesses::total"][1]) \
      +int(stat_dict["ReadCleanReq_accesses::total"][1]) \
      +int(stat_dict["ReadSharedReq_accesses::total"][1]))
    self.stats["read_misses"][0]= \
      str(int(stat_dict["ReadCleanReq_misses::total"][1]) \
      +int(stat_dict["ReadExReq_misses::total"][1]))
    self.stats["write_accesses"][0]= \
      str(int(stat_dict["WritebackDirty_accesses::total"][1]) \
      +int(stat_dict["WritebackClean_accesses::total"][1]))
    # NOTE(review): this counts WritebackClean accesses twice and subtracts
    # WritebackDirty hits twice; presumably it was meant to be
    # (Dirty + Clean accesses) - (Dirty + Clean hits). Behavior preserved
    # pending confirmation.
    self.stats["write_misses"][0]= \
      str(int(stat_dict["WritebackClean_accesses::total"][1]) \
      +int(stat_dict["WritebackClean_accesses::total"][1]) \
      -int(stat_dict["WritebackDirty_hits::total"][1]) \
      -int(stat_dict["WritebackDirty_hits::total"][1]))
    self.stats["conflicts"][0]= \
      str(int(stat_dict["replacements"][1]))

  def xml(self):
    """ Build an XML Tree from the parameters, stats, and subcomponents """
    top = ElementTree.Element('component', id=self.id, name=self.name)
    for key in sorted(self.parameters):
      top.append(ElementTree.Comment( \
        ", ".join(['param', key, self.parameters[key][1]])))
      top.append(ElementTree.Element( \
        'param', name=key, value=self.parameters[key][0]))
    for key in sorted(self.stats):
      top.append(ElementTree.Comment( \
        ", ".join(['stat', key, self.stats[key][1]])))
      top.append(ElementTree.Element( \
        'stat', name=key, value=self.stats[key][0]))
    return top
class ICache:
  """Emit a McPAT <component> entry for an L1 instruction cache."""
  def __init__(self, component_id, component_name, \
    stat_dict, config_dict, sim_dict):
    self.name = component_name
    self.id = component_id
    # Geometry string: capacity, block width, associativity, banks,
    # throughput, latency, output width, write policy.
    cache_geometry = ",".join([config_dict["size"],config_dict["tags.block_size"], \
      config_dict["assoc"],"1","1",config_dict["response_latency"], \
      config_dict["tags.block_size"],"0"])
    mshrs = config_dict["mshrs"]
    controller_buffers = ",".join([mshrs, mshrs, mshrs, mshrs])
    # name -> [value, description]; descriptions become XML comments.
    self.parameters = \
    {
      "icache_config" : \
        [cache_geometry,"Cache Capacity, Block Width,"
        "Associativity, Bank, Throughput w.r.t. core clock, Latency"
        "w.r.t. core clock, Output Width, Cache Policy: 0 no write or"
        "write-though with non-write allocate; 1 write-back with"
        "write-allocate"],
      "buffer_sizes" : \
        [controller_buffers,"Cache controller buffer sizes:"
        "miss_buffer_size(MSHR), fill_buffer_size,"
        "prefetch_buffer_size, wb_buffer_size"]
    }
    self.stats = \
    {
      "read_accesses" : [str(int(stat_dict["ReadReq_accesses::total"][1])), "Cache Read Accesses Total"],
      "read_misses" : [str(int(stat_dict["ReadReq_misses::total"][1])), "Cache Read Req Misses Total"],
      "conflicts" : [str(int(stat_dict["replacements"][1])), "Cache Replacements"]
    }

  def xml(self):
    """ Build an XML Tree from the parameters, stats, and
    subcomponents """
    top = ElementTree.Element('component', id=self.id, name=self.name)
    # Emit all params first, then all stats, each preceded by its
    # description comment (same order the original produced).
    for kind, table in (('param', self.parameters), ('stat', self.stats)):
      for key in sorted(table):
        top.append(ElementTree.Comment(", ".join([kind, key, table[key][1]])))
        top.append(ElementTree.Element(kind, name=key, value=table[key][0]))
    return top
class DCache:
  """Emit a McPAT <component> entry for an L1 data cache."""
  def __init__(self, component_id, component_name, \
    stat_dict, config_dict, sim_dict):
    self.name = component_name
    self.id = component_id
    # Geometry string: capacity, block width, associativity, banks,
    # throughput, latency, output width, write policy.
    cache_geometry = ",".join([config_dict["size"],config_dict["tags.block_size"], \
      config_dict["assoc"],"1","1",config_dict["response_latency"], \
      config_dict["tags.block_size"],"0"])
    mshrs = config_dict["mshrs"]
    controller_buffers = ",".join([mshrs, mshrs, mshrs, mshrs])
    # name -> [value, description]; descriptions become XML comments.
    self.parameters = \
    {
      "dcache_config" :
        [cache_geometry,"Cache Capacity, Block Width,"
        "Associativity, Bank, Throughput w.r.t. core clock, Latency"
        "w.r.t. core clock, Output Width, Cache Policy: 0 no write or"
        "write-though with non-write allocate; 1 write-back with"
        "write-allocate"],
      "buffer_sizes" :
        [controller_buffers,"Cache controller buffer sizes:"
        "miss_buffer_size(MSHR), fill_buffer_size,"
        "prefetch_buffer_size, wb_buffer_size"]
    }
    self.stats = \
    {
      "read_accesses" : [str(int(stat_dict["ReadReq_accesses::total"][1])), "Cache Read Accesses Total"],
      "read_misses" : [str(int(stat_dict["ReadReq_misses::total"][1])), "Cache Read Req Misses Total"],
      "write_accesses" : [str(int(stat_dict["WriteReq_accesses::total"][1])), "Cache Write Accesses Total"],
      "write_misses" : [str(int(stat_dict["WriteReq_misses::total"][1])), "Cache Write Req Misses Total"],
      "conflicts" : [str(int(stat_dict["replacements"][1])), "Cache Replacements"]
    }

  def xml(self):
    """ Build an XML Tree from the parameters, stats, and
    subcomponents """
    top = ElementTree.Element('component', id=self.id, name=self.name)
    # Emit all params first, then all stats, each preceded by its
    # description comment (same order the original produced).
    for kind, table in (('param', self.parameters), ('stat', self.stats)):
      for key in sorted(table):
        top.append(ElementTree.Comment(", ".join([kind, key, table[key][1]])))
        top.append(ElementTree.Element(kind, name=key, value=table[key][0]))
    return top
| 1.390625 | 1 |
db.py | Hack-OR/hackor-teams | 0 | 12765701 | <filename>db.py<gh_stars>0
#!/usr/bin/env python3
# XXX: this file is kind of a hack, but this discord bot is only gonna be used
# once, so I don't think it matters much.
import yaml
import os
# Default in-memory schema; replaced by read() and persisted by write().
db = {
    'users': {}
}
def read() -> None:
    """Load db.yml (if present) into the module-level ``db``.

    A missing file is ignored (it will be created on the next write).
    """
    global db
    try:
        with open('db.yml', 'r') as f:
            loaded = yaml.safe_load(f)
        # BUG FIX: yaml.safe_load returns None for an empty file, which
        # would have replaced db with None; keep the default schema then.
        if loaded is not None:
            db = loaded
    except FileNotFoundError:
        # the file will be created next write
        pass
    print('[ ] read db size: %d' % len(str(db)))
def write() -> None:
    """Persist the module-level ``db`` to db.yml atomically."""
    global db
    print('[ ] write db size: %d' % len(str(db)))
    # write to different file THEN move to avoid potential race condition
    # between opening and writing to files
    serialized = yaml.dump(db)
    with open('db.yml.tmp', 'w') as f:
        f.write(serialized)
    os.rename('db.yml.tmp', 'db.yml')
| 2.703125 | 3 |
phase2/phase2.py | somayeh-rzie/Search-Engine | 0 | 12765702 | import math
from heapq import heappop, heappush
import pickle
from itertools import islice
# Number of top-ranked documents to return from k_scores().
k=5
# Index-elimination IDF cutoff (NOTE(review): "threshhold" is a typo for
# "threshold"; renaming would touch commented-out callers, so only flagged).
threshhold = 10
# Shared mutable state populated at query time.
doc_length = {}
champion_list = {}
scores = {}
# Feature toggles: 1 enables heap-based top-k, champion lists and
# index elimination respectively.
heap_flag = 1
champion_flag = 1
index_elimination_flag = 1
# set into a list
def convert(set):
    """Return the elements of `set` as a sorted list.

    NOTE(review): the parameter shadows the builtin ``set``; renaming it
    would change the keyword-call interface, so it is only flagged here.
    """
    return sorted(set)
# sort a mapping by value, descending
def sort(list):
    """Return the (key, value) pairs of `list` ordered by value, descending."""
    pairs = list.items()
    return sorted(pairs, key=lambda pair: pair[1], reverse=True)
# exponentiation helper
def power(b , p):
    """Return `b` raised to `p` as a float (math.pow semantics)."""
    result = math.pow(b , p)
    return result
# logarithm helper
def logarithm(number , b):
    """Return the base-`b` logarithm of `number`."""
    value = math.log(number , b)
    return value
def one_word(query):
    """Single-term lookup: return (doc_id, {doc_id: url, ...}) for `query`.

    NOTE(review): `d` is the loop variable after the loop ends, so the
    first element of the returned tuple is the *last* matching document,
    which looks accidental. On a miss a plain string is returned instead
    of a tuple, so callers must handle both shapes -- TODO confirm the
    intended contract.
    """
    docs_res = {}
    if query in inverted_index.keys():
        docs = inverted_index[query]
        for d in docs :
            docs_res[d] = URLs[d]
        return d , docs_res
    else :
        return 'Sorry! The Query Does Not Found'
def multi_words(tokens):
    """Multi-term lookup: return (doc_id, url) of the document matching the
    most query terms, or a miss message when no term matches.

    BUG FIX: the original did ``relation[i] += 1`` on a missing key
    (KeyError on the very first hit) and then called ``.keys()`` on the
    list returned by ``sorted`` (AttributeError).
    """
    relation = {}
    for token in tokens.split():
        if token in inverted_index.keys():
            for i in inverted_index[token]:
                relation[i] = relation.get(i, 0) + 1
    if not relation:
        # Mirror the miss behavior of one_word().
        return 'Sorry! The Query Does Not Found'
    ranked = sorted(relation.items(), key=lambda x: x[1], reverse=True)
    index = ranked[0][0]
    return index, URLs[index]
# def one_word(query):
# docs_res = {}
# if query in inverted_index.keys():
# docs = inverted_index[query]
# for d in docs :
# docs_res[d] = URLs[d]
# return d , docs_res
# else :
# return 'Sorry! The Query Does Not Found'
# def multi_words(tokens):
# token_relevance = {}
# for token in tokens.split():
# if((index_elimination_flag and calculate_idf(token)<threshhold) or (not index_elimination_flag)):
# if (token in inverted_index.keys()):
# for i in inverted_index[token]:
# token_relevance[i] += 1
# token_relevance = sort(token_relevance)
# d = list(token_relevance.keys())[0]
# return d,URLs[d]
def C_list(tokens):
    """Build the champion list for every token: postings sorted by
    length-normalized weight, highest first.

    BUG FIX: the original tested ``t in inverted_index.values()`` -- tokens
    are *keys* of the inverted index, so the test never matched and every
    champion list stayed empty.
    """
    for t in tokens:
        champion_list[t] = {}
        if t in inverted_index.keys():
            for id in inverted_index[t].keys():
                champion_list[t][id] = inverted_index[t][id] / calculate_doc_length(id)
            champion_list[t] = sort(champion_list[t])
    return champion_list
def calculate_doc_length(id):
    """Euclidean (L2) length of document `id`'s term vector.

    NOTE(review): the inner loop rebinds `id` and enumerates the posting
    container, squaring the *enumeration values* rather than stored
    frequencies -- presumably it should iterate frequencies; confirm the
    structure of inverted_index entries before relying on this.
    """
    l = 0
    for token in contents[id].split():
        # print(inverted_index[token])
        # print('\n\n\n')
        if token in inverted_index.keys():
            for id , num in enumerate(inverted_index[token]):
                l += power(num , 2)
    l= power(l , 1.0/2)
    return l
def calculate_tf(token , id):
    """Log-scaled term frequency: 1 + log10(tf) when tf > 0, else 0."""
    tf = token_frequency_inDoc[token][id]
    if tf <= 0:
        return 0
    return 1 + logarithm(tf , 10)
def calculate_idf(token):
    """Inverse document frequency of `token`: log10(N / df), where N is the
    collection size and df the number of documents containing the token."""
    N = len(contents)
    df = len(inverted_index[token])
    idf = logarithm((N / df) , 10)
    return idf
def tf_idf(token, id):
    """TF-IDF weight of `token` in document `id`."""
    tf_weight = calculate_tf(token, id)
    idf_weight = calculate_idf(token)
    return tf_weight * idf_weight
def cosine_similarity(query, id):
    """Score document `id` against `query`: sum of query-side tf-idf times
    the stored document weight, normalized by the document length.

    NOTE(review): relies on the module-level `doc_length` dict being
    populated for `id` -- it is initialized empty at import time; confirm
    it is filled before scoring or this raises KeyError.
    """
    result = 0
    for token in query.split():
        if token in contents[id].split():
            result += tf_idf(token, id) * inverted_index[token][id]
    result /= doc_length[id]
    return result
def calculate_score(query , champion_list):
    """Accumulate cosine-similarity scores for every document that appears
    in the postings (or champion lists) of the query terms.

    BUG FIXES versus the original:
    * ``scores[id] = 0`` was executed before every accumulation, wiping
      contributions from earlier terms; scores now accumulate properly.
    * The champion-list branch iterated only when the list was *empty*
      (``not champion_list[token]``), so nothing was ever scored.
    Debug prints removed.
    """
    for token in query.split():
        if champion_flag == 1:
            if token in champion_list and champion_list[token]:
                for j in champion_list[token]:
                    id = j[0]
                    scores[id] = scores.get(id, 0) + cosine_similarity(token, id)
        else:
            for j in inverted_index[token]:
                id = j[0]
                scores[id] = scores.get(id, 0) + cosine_similarity(token, id)
    return scores
def bubble_sort(my_list):
    """In-place bubble sort of (key, value) pairs by value, descending.

    Returns the same list for convenience; stops early once a full pass
    makes no swaps.
    """
    for upper in range(len(my_list) - 1, -1, -1):
        any_swap = False
        for pos in range(upper):
            if my_list[pos][1] < my_list[pos + 1][1]:
                my_list[pos], my_list[pos + 1] = my_list[pos + 1], my_list[pos]
                any_swap = True
        if not any_swap:
            break
    return my_list
def k_scores(query , champion_list):
    """Return the ids of the (at most) k highest-scoring documents for
    `query`.

    BUG FIXES versus the original:
    * an unreachable second ``return`` was removed;
    * the heap branch popped exactly k entries and crashed when fewer
      than k documents matched (now bounded by the heap size);
    * the non-heap branch passed a dict to bubble_sort, which indexes
      positionally and would raise (now sorts the (id, score) items).
    """
    scores = calculate_score(query , champion_list)
    best_scores = {}
    if (heap_flag == 1):
        heap = []
        for id in scores.keys():
            # Negate so the min-heap yields the largest score first.
            heappush(heap, (-scores[id], id))
        for i in range(min(k, len(heap))):
            neg_score, id = heappop(heap)
            best_scores[id] = -neg_score
    else:
        best_scores = dict(bubble_sort(list(scores.items())))
    # Dicts preserve insertion order, so the first k keys are the top-k ids.
    return list(islice(best_scores, k))
def main():
    """Load the pickled index artifacts, build the champion lists and run a
    single interactive query (single- or multi-word).

    NOTE(review): ``pickle.load`` executes arbitrary code from the input
    files -- only run against trusted artifacts.
    """
    global inverted_index , contents , URLs , token_frequency_inDoc , champion_list
    with open("inverted_index.txt", "rb+") as fp1: # Unpickling
        inverted_index = pickle.load(fp1)
    with open("contents.txt", "rb+") as fp2:
        contents = pickle.load(fp2)
    # print(contents[1])
    # print(len(contents))
    # print('\n\n\n')
    with open("URLS.txt", "rb+") as fp3:
        URLs = pickle.load(fp3)
    with open("token_frequency_inDoc.txt", "rb+") as fp4:
        token_frequency_inDoc = pickle.load(fp4)
    with open("tokens.txt", "rb+") as fp5:
        tokens = pickle.load(fp5)
    champion_list = C_list(tokens)
    # print('champion list : ' , champion_list)
    number = input("1:Single Query\n2:Multi Query\n")
    query = input("Please Enter Your Query: ")
    if (int(number)==1):
        print(one_word(query))
    elif(int(number)==2):
        print(multi_words(query))
    else :
        print('invalid input')
    # else :
    #     print('invalid input')

if __name__ == "__main__":
    main()
2017/quals/2017-re-food/generate_flag.py | tonghuaroot/google-ctf | 2,757 | 12765703 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
def KSA(key):
    """RC4 key-scheduling algorithm: derive the initial 256-entry
    permutation from `key` (a sequence of small ints)."""
    keylength = len(key)

    # BUG FIX: list(range(256)) instead of range(256) -- a range object is
    # immutable on Python 3, so the in-place swaps below would fail; the
    # explicit list works identically on Python 2 and 3.
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key[i % keylength]) % 256
        S[i], S[j] = S[j], S[i]  # swap
    return S
def PRGA(S):
    """RC4 pseudo-random generation: yield keystream bytes, mutating the
    state permutation `S` in place."""
    i = j = 0
    while True:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]  # swap
        yield S[(S[i] + S[j]) % 256]
def RC4(key):
    """Return an RC4 keystream generator seeded with `key`."""
    return PRGA(KSA(key))
def sig(v):
    """Reinterpret the byte `v` (0..255) as a signed 8-bit value."""
    return v - 0x100 if v & 0x80 else v
# Encrypt the flag with RC4 under a random 8-byte key, then print the key,
# the signed ciphertext bytes, and an XOR split of the key (Python 2 syntax).
flag = 'CTF{bacon_lettuce_tomato_lobster_soul}'
key = [random.choice(range(20)) for x in range(8)]
print 'key is', key
ks = RC4(key)
# Ciphertext bytes are printed as signed ints via sig().
print 'flag is', [sig(ord(x) ^ y) for (x, y) in zip(flag, ks)]
# Split the key into two XOR shares so neither half reveals it alone.
xor = [random.choice(range(20)) for x in range(8)]
print 'xor 1', xor
print 'xor 2', [x ^ y for (x, y) in zip(key, xor)]
| 3.296875 | 3 |
clinvar/migrations/0007_alter_clinvar_details.py | snesic/varfish-server | 14 | 12765704 | # Generated by Django 3.2.9 on 2021-11-29 14:43
from django.db import migrations
import varfish.utils
class Migration(migrations.Migration):
    # Switches Clinvar.details to the project's JSONField wrapper.
    dependencies = [
        ("clinvar", "0006_clinvarpathogenicgenes"),
    ]

    operations = [
        migrations.AlterField(
            model_name="clinvar", name="details", field=varfish.utils.JSONField(),
        ),
    ]
| 1.453125 | 1 |
gsa_framework/models/__init__.py | aleksandra-kim/gsa_framework | 2 | 12765705 | from .life_cycle_assessment import LCAModel
from .test_functions import (
Morris,
Morris4,
Borehole,
Wingweight,
OTLcircuit,
Piston,
Moon,
SobolLevitan,
SobolGstar,
SobolG,
)
from .model_base import ModelBase
from .test_functions import Morris4
| 1.132813 | 1 |
src/Modulos/dicti.py | Project-Neon/NeonSeeker | 0 | 12765706 | import pickle
# Load the persisted dictionary from disk and display it.
# NOTE(review): `dict` shadows the builtin, and unpickling an untrusted
# memoria.p file can execute arbitrary code -- confirm the file is trusted.
dict=pickle.load(open("memoria.p","rb"))
print (dict)
| 2.171875 | 2 |
resource_emulation.py | sogeti-esec-lab/LKD | 102 | 12765707 | import ctypes
import itertools
import windows
import windows.hooks
from windows.generated_def.winstructs import *
class Ressource(object):
    """An emulated Win32 resource whose payload is lazily read from a file.

    Instances are registered in ``resource_list`` and served through the
    FindResource/SizeofResource/LoadResource hooks below.
    """
    def __init__(self, filename, lpName, lpType):
        self.filename = filename
        self.lpName = lpName
        self.lpType = lpType
        self.driver_data = None        # cached file contents (bytes)
        self.loaded_ressource = None   # anchor for the ctypes buffer

    def match(self, hModule, lpName, lpType):
        """True when a FindResource call with a NULL module targets us."""
        return (not hModule) and self.lpName == lpName and self.lpType == lpType

    def get_driver_data(self):
        """Return the payload, reading and caching it on first use."""
        if self.driver_data is None:
            # BUG FIX: the original left the file handle open; a context
            # manager releases it deterministically.
            with open(self.filename, 'rb') as f:
                self.driver_data = f.read()
        return self.driver_data

    def load_resource(self):
        """Return the address of an in-memory copy of the payload."""
        driver_data = self.get_driver_data()
        char_p = ctypes.c_char_p(driver_data)
        # BUG FIX: keep a reference on the instance (the `loaded_ressource`
        # attribute existed but was never set) so the buffer the returned
        # address points into cannot be garbage collected.
        self.loaded_ressource = char_p
        return ctypes.cast(char_p, ctypes.c_void_p).value

    def resource_len(self):
        """Size of the payload in bytes."""
        return len(self.get_driver_data())
# Registered emulated resources, the fake HRSRC handles handed out for them,
# and the counter that generates those handles.
resource_list = []
HRSRC_dict = {}
# NOTE(review): "attibution" looks like a typo for "attribution"; renaming
# would touch every hook below, so it is only flagged here.
HRSRC_attibution = itertools.count(0x42424242)
# Hook for FindResourceW: hand out a fresh fake HRSRC for any registered
# emulated resource that matches; defer to the real API otherwise.
@windows.hooks.Callback(PVOID, PVOID, PVOID, PVOID)
def FindResourceWHook(hModule, lpName, lpType, real_function):
    for res in resource_list:
        if res.match(hModule, lpName, lpType):
            HRSRC = next(HRSRC_attibution)
            HRSRC_dict[HRSRC] = res
            return HRSRC
    return real_function()
# Hook for SizeofResource: report the emulated payload size for fake handles,
# real sizes otherwise.
@windows.hooks.SizeofResourceCallback
def SizeofResourceHook(hModule, hResInfo, real_function):
    if hResInfo in HRSRC_dict:
        return HRSRC_dict[hResInfo].resource_len()
    return real_function()
# Hook for LoadResource: return the address of the emulated payload for fake
# handles, the real resource otherwise.
@windows.hooks.LoadResourceCallback
def LoadResourceHook(hModule, hResInfo, real_function):
    if hResInfo in HRSRC_dict:
        return HRSRC_dict[hResInfo].load_resource()
    return real_function()
# Hook for LockResource: pure pass-through to the real API.
# NOTE(review): fake handles are not intercepted here -- LoadResourceHook
# already hands back a raw pointer, which LockResource presumably tolerates;
# confirm on a real Windows target.
@windows.hooks.LockResourceCallback
def LockResourceHook(hResData, real_function):
    x = real_function()
    return x
| 2.09375 | 2 |
models/augmentation/__init__.py | zijian-hu/SimPLE | 36 | 12765708 | import torch
from torch import nn
from kornia import augmentation as K
from kornia import filters as F
from torchvision import transforms
from .augmenter import RandomAugmentation
from .randaugment import RandAugmentNS
# for type hint
from typing import List, Tuple, Union, Callable
from torch import Tensor
from torch.nn import Module
from PIL.Image import Image as PILImage
# Shorthand aliases used by get_augmenter() below.
DatasetStatType = List[float]          # per-channel statistics (mean/std)
ImageSizeType = Tuple[int, int]        # (height, width)
PaddingInputType = Union[float, Tuple[float, float], Tuple[float, float, float, float]]
ImageType = Union[Tensor, PILImage]
def get_augmenter(augmenter_type: str,
                  image_size: ImageSizeType,
                  dataset_mean: DatasetStatType,
                  dataset_std: DatasetStatType,
                  padding: PaddingInputType = 1. / 8.,
                  pad_if_needed: bool = False,
                  subset_size: int = 2) -> Union[Module, Callable]:
    """
    Build a Kornia augmentation pipeline for the requested augmenter type.

    Args:
        augmenter_type: augmenter type (one of "simple", "fixed",
            "validation", "test", "randaugment"; case-insensitive)
        image_size: (height, width) image size
        dataset_mean: dataset mean value in CHW
        dataset_std: dataset standard deviation in CHW
        padding: percent of image size to pad on each border of the image. If a sequence of length 4 is provided,
            it is used to pad left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is
            used to pad left/right, top/bottom borders, respectively.
        pad_if_needed: bool flag for RandomCrop "pad_if_needed" option
        subset_size: number of augmentations used in subset

    Returns: nn.Module for Kornia augmentation or Callable for torchvision transform

    Raises:
        NotImplementedError: if `augmenter_type` is not one of the
            supported names listed above.
    """
    # Normalize a scalar padding fraction to the 4-tuple form.
    if not isinstance(padding, tuple):
        assert isinstance(padding, float)
        padding = (padding, padding, padding, padding)

    assert len(padding) == 2 or len(padding) == 4
    if len(padding) == 2:
        # padding of length 2 is used to pad left/right, top/bottom borders, respectively
        # padding of length 4 is used to pad left, top, right, bottom borders respectively
        padding = (padding[0], padding[1], padding[0], padding[1])

    # image_size is of shape (h,w); padding values is [left, top, right, bottom] borders
    # Convert fractional padding into absolute pixel counts.
    padding = (
        int(image_size[1] * padding[0]),
        int(image_size[0] * padding[1]),
        int(image_size[1] * padding[2]),
        int(image_size[0] * padding[3])
    )

    augmenter_type = augmenter_type.strip().lower()

    if augmenter_type == "simple":
        # Light augmentation: pad+crop, horizontal flip, normalize.
        return nn.Sequential(
            K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
                         padding_mode='reflect'),
            K.RandomHorizontalFlip(p=0.5),
            K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
                        std=torch.tensor(dataset_std, dtype=torch.float32)),
        )

    elif augmenter_type == "fixed":
        # Heavier fixed recipe: flips, resized crop, blur, color jitter,
        # random erasing and affine distortion, then normalization.
        return nn.Sequential(
            K.RandomHorizontalFlip(p=0.5),
            # K.RandomVerticalFlip(p=0.2),
            K.RandomResizedCrop(size=image_size, scale=(0.8, 1.0), ratio=(1., 1.)),
            RandomAugmentation(
                p=0.5,
                augmentation=F.GaussianBlur2d(
                    kernel_size=(3, 3),
                    sigma=(1.5, 1.5),
                    border_type='constant'
                )
            ),
            K.ColorJitter(contrast=(0.75, 1.5)),
            # additive Gaussian noise
            K.RandomErasing(p=0.1),
            # Multiply
            K.RandomAffine(
                degrees=(-25., 25.),
                translate=(0.2, 0.2),
                scale=(0.8, 1.2),
                shear=(-8., 8.)
            ),
            K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
                        std=torch.tensor(dataset_std, dtype=torch.float32)),
        )

    elif augmenter_type in ["validation", "test"]:
        # Evaluation path: normalization only, no randomness.
        return nn.Sequential(
            K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
                        std=torch.tensor(dataset_std, dtype=torch.float32)),
        )

    elif augmenter_type == "randaugment":
        # RandAugment with `subset_size` ops at fixed magnitude 10.
        return nn.Sequential(
            K.RandomCrop(size=image_size, padding=padding, pad_if_needed=pad_if_needed,
                         padding_mode='reflect'),
            K.RandomHorizontalFlip(p=0.5),
            RandAugmentNS(n=subset_size, m=10),
            K.Normalize(mean=torch.tensor(dataset_mean, dtype=torch.float32),
                        std=torch.tensor(dataset_std, dtype=torch.float32)),
        )

    else:
        raise NotImplementedError(f"\"{augmenter_type}\" is not a supported augmenter type")
# Public API of this package.
__all__ = [
    # modules

    # classes

    # functions
    "get_augmenter",
]
| 2.703125 | 3 |
pymoo/operators/crossover/util.py | gabicavalcante/pymoo | 11 | 12765709 | <filename>pymoo/operators/crossover/util.py
import numpy as np
def crossover_mask(X, M):
    """Exchange the genes selected by boolean mask `M` between the two
    parent rows of `X`, returning new offspring (X is left untouched)."""
    offspring = np.copy(X)
    offspring[0][M], offspring[1][M] = X[1][M], X[0][M]
    return offspring
| 3.046875 | 3 |
authors/apps/articles/apps.py | andela/ah-django-unchained | 0 | 12765710 | <filename>authors/apps/articles/apps.py
from django.apps import AppConfig
class ArticlesConfig(AppConfig):
    # Django app configuration for the articles app.
    name = 'authors.apps.articles'

    def ready(self):
        # Import the notification signal handlers once the app registry is
        # ready, so their signal connections are registered as a side effect.
        from authors.apps.usernotifications import handlers
| 1.46875 | 1 |
depthCompletion/utils/download_kitti_raw.py | abdo-eldesokey/evaluating_bdl | 0 | 12765711 | # code-checked
# server-checked
import os
# NOTE! NOTE! NOTE! make sure you run this code inside the kitti_raw directory (/root/data/kitti_raw)
# NOTE! NOTE! NOTE! make sure you run this code inside the kitti_raw
# directory (/root/data/kitti_raw)
kitti_depth_path = "/root/data/kitti_depth"
rgb_depth_path = "/root/data/kitti_rgb"

train_dirs = os.listdir(kitti_depth_path + "/train") # (contains "2011_09_26_drive_0001_sync" and so on)
val_dirs = os.listdir(kitti_depth_path + "/val")

# Create "train" and "val" dir for RGB
rgb_train_dir = os.path.join(rgb_depth_path, "train")
rgb_val_dir = os.path.join(rgb_depth_path, "val")
os.system("mkdir %s" % rgb_train_dir)
os.system("mkdir %s" % rgb_val_dir)

print ("num train dirs: %d" % len(train_dirs))
print ("num val dirs: %d" % len(val_dirs))

def _download_split(dir_names, split_label, dest_dir):
    """Download, unzip and move every KITTI raw drive in `dir_names` into
    `dest_dir`, printing progress labelled with `split_label`."""
    for step, dir_name in enumerate(dir_names):
        print("########################### %s #########################################" % split_label)
        print("step %d/%d" % (step+1, len(dir_names)))
        print(dir_name)

        # "2011_09_26_drive_0001_sync" -> "2011_09_26_drive_0001"
        dir_name_no_sync = dir_name.split("_sync")[0]

        # download the zip file:
        os.system("wget https://s3.eu-central-1.amazonaws.com/avg-kitti/raw_data/%s/%s.zip" % (dir_name_no_sync, dir_name))

        # unzip:
        os.system("unzip %s.zip" % dir_name)

        # move to the rgb dir (the archive unzips under its date prefix,
        # e.g. "2011_09_26/<dir_name>")
        zip_dir = dir_name.split('_drive')[0]
        os.system("mv %s %s" % (os.path.join(zip_dir, dir_name), dest_dir))

# The two loops in the original were identical except for the directory list
# and destination; the validation loop also printed "Training set" by mistake.
_download_split(train_dirs, "Training set", rgb_train_dir)
_download_split(val_dirs, "Validation set", rgb_val_dir)
| 2.484375 | 2 |
tests/__init__.py | sizumita/dpybrew | 1 | 12765712 | """Unit test package for dpybrew."""
| 0.898438 | 1 |
src/symmetry.py | Antoinehoff/Project_II | 0 | 12765713 | # -*- coding: utf-8 -*-
# Third party libs
import numpy
import nlopt
# Local libs
import images
from common import ProblemType
from filter import Filter
from topopt import TopoptProblem
from appearance import AppearanceCL
import solver
class Symmetric_Solver(solver.Solver):
    """Solver variant that merges design variables to enforce symmetry.

    BUG FIX: the base class was referenced as a bare ``Solver``, but this
    module only does ``import solver`` -- the name was undefined and the
    class definition raised NameError at import time.
    """
    def design_variable_merge(self, x, nelx, nely):
        """
        Implementation of the algorithm from Kosaka and Swan 1999
        """
        # TODO(review): currently an identity pass-through -- the merge
        # from the paper is not implemented yet.
        return x
def index_to_position(index, nelx, nely):
    """
    Convert the flat index of an element to its (index % nely,
    index // nelx) position on the grid, as a numpy array.

    BUG FIX: the original used ``np.array`` but this module imports the
    package as ``numpy`` (``import numpy``), so the call raised NameError.
    """
    return numpy.array([index % nely, int(index / nelx)])
| 2.421875 | 2 |
echopype/echodata/combine.py | gavinmacaulay/echopype | 0 | 12765714 | import warnings
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List
import xarray as xr
from _echopype_version import version as ECHOPYPE_VERSION
from ..core import SONAR_MODELS
from ..qc import coerce_increasing_time, exist_reversed_time
from .echodata import EchoData
def union_attrs(datasets: List[xr.Dataset]) -> Dict[str, Any]:
    """
    Merge the ``attrs`` dictionaries of all given datasets into one.

    Later datasets take priority when keys collide.
    """
    merged = dict()
    for dataset in datasets:
        merged.update(dataset.attrs)
    return merged
def assemble_combined_provenance(input_paths):
    """Build the Provenance dataset for a combined EchoData object.

    Records the source file names along with the echopype version and the
    (UTC) conversion timestamp as attributes.
    """
    return xr.Dataset(
        data_vars={
            "src_filenames": ("file", input_paths),
        },
        attrs={
            "conversion_software_name": "echopype",
            "conversion_software_version": ECHOPYPE_VERSION,
            "conversion_time": datetime.utcnow().isoformat(timespec="seconds")
            + "Z",  # use UTC time
        },
    )
def combine_echodata(echodatas: List[EchoData], combine_attrs="override") -> EchoData:
    """
    Combines multiple `EchoData` objects into a single `EchoData` object.

    Parameters
    ----------
    echodatas: List[EchoData]
        The list of `EchoData` objects to be combined.
    combine_attrs: { "override", "drop", "identical", "no_conflicts", "overwrite_conflicts" }
        String indicating how to combine attrs of the `EchoData` objects being merged.
        This parameter matches the identically named xarray parameter
        (see https://xarray.pydata.org/en/latest/generated/xarray.combine_nested.html)
        with the exception of the "overwrite_conflicts" value.

        * "override": Default. skip comparing and copy attrs from the first `EchoData`
          object to the result.
        * "drop": empty attrs on returned `EchoData` object.
        * "identical": all attrs must be the same on every object.
        * "no_conflicts": attrs from all objects are combined,
          any that have the same name must also have the same value.
        * "overwrite_conflicts": attrs from all `EchoData` objects are combined,
          attrs with conflicting keys will be overwritten by later `EchoData` objects.

    Returns
    -------
    EchoData
        An `EchoData` object with all of the data from the input `EchoData` objects combined.

    Raises
    ------
    ValueError
        If `echodatas` contains `EchoData` objects with different or `None` `sonar_model` values
        (i.e., all `EchoData` objects must have the same non-None `sonar_model` value).
    ValueError
        If EchoData objects have conflicting source file names.

    Warns
    -----
    UserWarning
        If the `sonar_model` of the input `EchoData` objects is `"EK60"` and any `EchoData` objects
        have non-monotonically increasing `ping_time`, `location_time` or `mru_time` values,
        the corresponding values in the output `EchoData` object will be increased starting at the
        timestamp where the reversal occurs such that all values in the output are monotonically
        increasing. Additionally, the original `ping_time`, `location_time` or `mru_time` values
        will be stored in the `Provenance` group, although this behavior may change in future
        versions.

    Warnings
    --------
    Changes in parameters between `EchoData` objects are not currently checked;
    however, they may raise an error in future versions.

    Notes
    -----
    * `EchoData` objects are combined by combining their groups individually.
    * Attributes from all groups before the combination will be stored in the provenance group,
      although this behavior may change in future versions.
    * The `source_file` and `converted_raw_path` attributes will be copied from the first
      `EchoData` object in the given list, but this may change in future versions.

    Examples
    --------
    >>> ed1 = echopype.open_converted("file1.nc")
    >>> ed2 = echopype.open_converted("file2.zarr")
    >>> combined = echopype.combine_echodata([ed1, ed2])
    """
    result = EchoData()
    if len(echodatas) == 0:
        return result
    # Source metadata is taken from the first input (see Notes above).
    result.source_file = echodatas[0].source_file
    result.converted_raw_path = echodatas[0].converted_raw_path
    # All inputs must share one non-None sonar_model.
    sonar_model = None
    for echodata in echodatas:
        if echodata.sonar_model is None:
            raise ValueError(
                "all EchoData objects must have non-None sonar_model values"
            )
        elif sonar_model is None:
            sonar_model = echodata.sonar_model
        elif echodata.sonar_model != sonar_model:
            raise ValueError(
                "all EchoData objects must have the same sonar_model value"
            )
    # ping time before reversal correction
    old_ping_time = None
    # ping time after reversal correction
    new_ping_time = None
    # location time before reversal correction
    old_location_time = None
    # location time after reversal correction
    new_location_time = None
    # mru time before reversal correction
    old_mru_time = None
    # mru time after reversal correction
    new_mru_time = None
    # all attributes before combination
    # { group1: [echodata1 attrs, echodata2 attrs, ...], ... }
    old_attrs: Dict[str, List[Dict[str, Any]]] = dict()
    # Combine each EchoData group individually.
    for group in EchoData.group_map:
        group_datasets = [
            getattr(echodata, group)
            for echodata in echodatas
            if getattr(echodata, group) is not None
        ]
        if group in ("top", "sonar"):
            # These groups are taken verbatim from the first input.
            combined_group = getattr(echodatas[0], group)
        elif group == "provenance":
            combined_group = assemble_combined_provenance(
                [
                    echodata.source_file
                    if echodata.source_file is not None
                    else echodata.converted_raw_path
                    for echodata in echodatas
                ]
            )
        else:
            if len(group_datasets) == 0:
                setattr(result, group, None)
                continue
            # Concat dimension/data_vars strategy is sonar-model specific,
            # with per-group overrides falling back to the model default.
            concat_dim = SONAR_MODELS[sonar_model]["concat_dims"].get(
                group, SONAR_MODELS[sonar_model]["concat_dims"]["default"]
            )
            concat_data_vars = SONAR_MODELS[sonar_model]["concat_data_vars"].get(
                group, SONAR_MODELS[sonar_model]["concat_data_vars"]["default"]
            )
            combined_group = xr.combine_nested(
                group_datasets,
                [concat_dim],
                data_vars=concat_data_vars,
                coords="minimal",
                combine_attrs="drop"
                if combine_attrs == "overwrite_conflicts"
                else combine_attrs,
            )
            if combine_attrs == "overwrite_conflicts":
                # Emulate "overwrite_conflicts": later datasets' attrs win.
                combined_group.attrs.update(union_attrs(group_datasets))
            if group == "beam":
                # Force fixed-width unicode dtypes on string variables so the
                # concatenated result has a consistent representation.
                if sonar_model == "EK80":
                    combined_group["transceiver_software_version"] = combined_group[
                        "transceiver_software_version"
                    ].astype("<U10")
                    combined_group["channel_id"] = combined_group["channel_id"].astype(
                        "<U50"
                    )
                elif sonar_model == "EK60":
                    combined_group["gpt_software_version"] = combined_group[
                        "gpt_software_version"
                    ].astype("<U10")
                    combined_group["channel_id"] = combined_group["channel_id"].astype(
                        "<U50"
                    )
            if sonar_model in ("EK60", "EK80"):
                # Correct non-monotonic times once; the correction computed for
                # the first affected group is reused for all later groups.
                if "ping_time" in combined_group and exist_reversed_time(
                    combined_group, "ping_time"
                ):
                    if old_ping_time is None:
                        warnings.warn(
                            f"{sonar_model} ping_time reversal detected; the ping times will be corrected"  # noqa
                            " (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
                        )
                        old_ping_time = combined_group["ping_time"]
                        coerce_increasing_time(combined_group, time_name="ping_time")
                        new_ping_time = combined_group["ping_time"]
                    else:
                        combined_group["ping_time"] = new_ping_time
                if "location_time" in combined_group and exist_reversed_time(
                    combined_group, "location_time"
                ):
                    # nmea keeps its original (possibly reversed) times.
                    if group != "nmea":
                        if old_location_time is None:
                            warnings.warn(
                                f"{sonar_model} location_time reversal detected; the location times will be corrected"  # noqa
                                " (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
                            )
                            old_location_time = combined_group["location_time"]
                            coerce_increasing_time(
                                combined_group, time_name="location_time"
                            )
                            new_location_time = combined_group["location_time"]
                        else:
                            combined_group["location_time"] = new_location_time
                if sonar_model == "EK80":
                    if "mru_time" in combined_group and exist_reversed_time(
                        combined_group, "mru_time"
                    ):
                        if old_mru_time is None:
                            warnings.warn(
                                f"{sonar_model} mru_time reversal detected; the mru times will be corrected"  # noqa
                                " (see https://github.com/OSOceanAcoustics/echopype/pull/297)"
                            )
                            old_mru_time = combined_group["mru_time"]
                            coerce_increasing_time(combined_group, time_name="mru_time")
                            new_mru_time = combined_group["mru_time"]
                        else:
                            combined_group["mru_time"] = new_mru_time
        if len(group_datasets) > 1:
            # Remember pre-combination attrs so they can be archived below.
            old_attrs[group] = [group_dataset.attrs for group_dataset in group_datasets]
        if combined_group is not None:
            # xarray inserts this dimension when concating along multiple dimensions
            combined_group = combined_group.drop_dims("concat_dim", errors="ignore")
        setattr(result, group, combined_group)
    # save ping time before reversal correction
    if old_ping_time is not None:
        result.provenance["old_ping_time"] = old_ping_time
        result.provenance.attrs["reversed_ping_times"] = 1
    # save location time before reversal correction
    if old_location_time is not None:
        result.provenance["old_location_time"] = old_location_time
        result.provenance.attrs["reversed_ping_times"] = 1
    # save mru time before reversal correction
    if old_mru_time is not None:
        result.provenance["old_mru_time"] = old_mru_time
        result.provenance.attrs["reversed_ping_times"] = 1
    # TODO: possible parameter to disable original attributes and original ping_time storage
    # in provenance group?
    # save attrs from before combination
    for group in old_attrs:
        # Union of attr keys seen in any input for this group.
        all_group_attrs = set()
        for group_attrs in old_attrs[group]:
            for attr in group_attrs:
                all_group_attrs.add(attr)
        # File names become coordinates; duplicates are an error.
        echodata_filenames = []
        for ed in echodatas:
            if ed.source_file is not None:
                filepath = ed.source_file
            elif ed.converted_raw_path is not None:
                filepath = ed.converted_raw_path
            else:
                # unreachable
                raise ValueError("EchoData object does not have a file path")
            filename = Path(filepath).name
            if filename in echodata_filenames:
                raise ValueError("EchoData objects have conflicting filenames")
            echodata_filenames.append(filename)
        attrs = xr.DataArray(
            [
                [group_attrs.get(attr) for attr in all_group_attrs]
                for group_attrs in old_attrs[group]
            ],
            coords={
                "echodata_filename": echodata_filenames,
                f"{group}_attr_key": list(all_group_attrs),
            },
            dims=["echodata_filename", f"{group}_attr_key"],
        )
        result.provenance = result.provenance.assign({f"{group}_attrs": attrs})
    # Add back sonar model
    result.sonar_model = sonar_model
    return result
| 2.375 | 2 |
coding_intereview/1897. Redistribute Characters to Make All Strings Equal.py | Jahidul007/Python-Bootcamp | 2 | 12765715 | class Solution:
def makeEqual(self, words: List[str]) -> bool:
n = len(words)
words = ''.join(words)
letterCounter = Counter(words)
print(letterCounter )
for letter in letterCounter:
if letterCounter[letter] % n !=0:
return False
return True
| 3.53125 | 4 |
setup.py | contributor123/fuzzywuzzy | 1 | 12765716 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 SeatGeek
# This file is part of fuzzywuzzy.
from fuzzywuzzy import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
    """Open *fname* resolved relative to this setup script's directory."""
    here = os.path.dirname(__file__)
    return open(os.path.join(here, fname))
setup(
    name='fuzzywuzzy',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['fuzzywuzzy'],
    url='https://github.com/seatgeek/fuzzywuzzy',
    # Read LICENSE.txt via open_file() so the path is resolved relative to
    # this setup.py (consistent with how README.rst is read below) instead
    # of depending on the current working directory.
    license=open_file('LICENSE.txt').read(),
    classifiers=[
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3'
    ],
    description='Fuzzy string matching in python',
    long_description=open_file('README.rst').read(),
    zip_safe=True,
)
| 1.460938 | 1 |
hght/__init__.py | zephenryus/botw-hght | 0 | 12765717 | <filename>hght/__init__.py
from .read_hght import read_hght
from .write_hght import write_hght, compile_hght
from .generate_map import generate_map | 1.265625 | 1 |
src/athene/rte/utils/text_processing.py | UKPLab/fever-2018-team-athene | 41 | 12765718 | <reponame>UKPLab/fever-2018-team-athene
import gzip
import os
import pickle
import re
import nltk
import numpy as np
from common.util.log_helper import LogHelper
# import torch
np.random.seed(55)
def vocab_map(vocab):
    """Map every vocabulary entry to its index (later duplicates win)."""
    return {word: index for index, word in enumerate(vocab)}
def tokenize(sequence):
    """NLTK word-tokenize *sequence*, stripping quote artifacts from tokens."""
    cleaned_tokens = []
    for token in nltk.word_tokenize(sequence):
        if token == " ":
            continue
        cleaned_tokens.append(
            token.replace("``", '').replace("''", '').replace('"', '')
        )
    return cleaned_tokens
def clean_text(text):
    """Strip URLs, markup fragments and punctuation noise from *text*.

    Substitutions are applied in the original, order-sensitive sequence.
    """
    substitutions = (
        (r'https?:\/\/.*[\r\n]*', '', re.MULTILINE),
        (r'\<a href', ' ', 0),
        (r'&', '', 0),
        (r'[_"()|+&=*#$@\[\]/]', '', 0),
        (r'\-', ' ', 0),
        # NOTE: the '/' was already removed by the character class above,
        # so this pattern can no longer match '<br />'; kept for parity.
        (r'<br />', ' ', 0),
    )
    for pattern, replacement, flags in substitutions:
        text = re.sub(pattern, replacement, text, flags=flags)
    return text.replace("...", " ")
def load_whole_glove(glove_file):
    """Load a GloVe embedding file (plain text or gzip).

    Returns ``(vocab, embed)`` where ``vocab`` starts with '[PAD]' (all-zero
    vector, used for zero padding) and 'UNK' (all-one vector), followed by
    every word in the file, and ``embed`` holds the matching vectors as
    lists of floats.
    """
    logger = LogHelper.get_logger("load_whole_glove")
    is_gz = os.path.splitext(glove_file)[1] == '.gz'
    # Getting embedding dimension (from the first line: word + N floats)
    def _get_dim(_file):
        line = _file.readline()
        return len(line.strip().split(' ')) - 1
    if is_gz:
        with gzip.open(glove_file, 'rt') as file0:
            emb_dim = _get_dim(file0)
    else:
        with open(glove_file, 'r', encoding='utf-8') as file0:
            emb_dim = _get_dim(file0)
    # First row of embedding matrix is 0 for zero padding
    vocab = ['[PAD]']
    embed = [[0.0] * emb_dim]
    vocab.append('UNK')
    embed.append([1.0] * emb_dim)
    def _read_glove_file(_vocab, _embed, _file):
        # Append each word and its vector; suspiciously short lines are
        # logged and skipped as noise.
        for line in _file:
            items = line.replace('\r', '').replace('\n', '').split(' ')
            if len(items) < 10:
                logger.debug("exceptional line: {}".format(line))
                continue
            word = items[0]
            _vocab.append(word)
            vec = [float(i) for i in items[1:]]
            _embed.append(vec)
        return _vocab, _embed
    # Reading embedding matrix (file re-opened so the first line is included)
    if is_gz:
        with gzip.open(glove_file, 'rt') as file:
            vocab, embed = _read_glove_file(vocab, embed, file)
    else:
        with open(glove_file, 'r', encoding='utf-8') as file:
            vocab, embed = _read_glove_file(vocab, embed, file)
    logger.info('Loaded GloVe!')
    return vocab, embed
# if __name__=="__main__":
#
# text ="I don\'t think this is right..."
# text =clean_text(text)
# print(text)
| 2.40625 | 2 |
flask_viewful.py | nnewman/flask-viewful | 0 | 12765719 | from dataclasses import dataclass, field
from typing import Any, Dict, Iterable, Optional, Tuple, Union
from flask import Flask, Blueprint, request, Response
from flask.views import View
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.routing import Map, MapAdapter, Rule
@dataclass
class RouteMeta:
    # Routing metadata attached to a view function by the @route decorator.
    route_path: str  # URL rule string, relative to the view's base/prefix
    methods: Iterable[str]  # HTTP methods this rule responds to
    options: Dict = field(default_factory=dict)  # extra kwargs forwarded to werkzeug.Rule
route_keywords = {'index', 'get', 'post', 'put', 'patch', 'delete'}
def _lstrip(text: str, chars: str) -> str:
"""
Given a string `text`, remove the leading `chars` if and only if they
appear as the leading characters
"""
if chars and text[:len(chars)] == chars:
text = text[len(chars):]
return text
def route(path: str, methods: Iterable[str] = ('GET',), **options):
    """Attach routing metadata (*path*, *methods*, extra rule *options*) to
    the decorated view function; the newest registration goes first."""
    def decorator(func):
        previous = list(getattr(func, 'route_meta', []))
        meta = RouteMeta(route_path=path, methods=methods, options=options)
        func.route_meta = [meta] + previous
        return func
    return decorator
class ViewMeta(type):
    """Metaclass that builds a werkzeug ``Map`` ('url_map') for each view
    class from its @route-decorated and specially named methods."""

    def __new__(
            mcs,
            name: str,
            bases: Tuple[type, ...],
            attrs: Dict[str, Any]
    ):
        url_map = Map()
        # For bases, take the attrs that were not overridden and re-add them
        # so they get processed
        for base in bases:
            if base_map := getattr(base, 'url_map', None):  # type: Map
                for rule in base_map.iter_rules():
                    if rule.endpoint not in attrs:
                        attrs[rule.endpoint] = getattr(base, rule.endpoint)
        # Iterate over functions in the class
        # If the function has the `@route` decorator, then:
        #   For each `@route` definition:
        #     Add a rule for that `@route`
        for func_name, func in attrs.items():
            if meta_list := getattr(func, 'route_meta', None):
                for meta in meta_list:  # type: RouteMeta
                    url_map.add(Rule(
                        meta.route_path,
                        methods=meta.methods,
                        endpoint=func_name,
                        **meta.options
                    ))
            # Register specially named routes that don't have `@route`:
            # index/post map to '/', the rest to '/<id>'; 'index' answers GET.
            path = '/' if func_name in {'index', 'post'} else '/<id>'
            if func_name in route_keywords and not hasattr(func, 'route_meta'):
                url_map.add(Rule(
                    path,
                    methods=['get' if func_name == 'index' else func_name],
                    endpoint=func_name
                ))
        attrs['url_map'] = url_map
        return super().__new__(mcs, name, bases, attrs)
class GenericView(View, metaclass=ViewMeta):
    """Class-based view that dispatches requests via its own url_map built
    by ViewMeta, after stripping blueprint/class URL prefixes."""

    # URL fragment identifying this resource (e.g. '/users').
    route_base: Optional[str] = None
    # Optional fragment mounted before route_base (e.g. '/api/v1').
    route_prefix: Optional[str] = None
    _bp_prefix: Optional[str] = None  # Placeholder for prefix on the BP

    def dispatch_request(self, **kwargs):
        """Match the request's sub-path in the class url_map and call the
        corresponding method, wrapped by before/after hooks."""
        bp_prefix = self._bp_prefix or ''
        prefix = self.route_prefix or ''
        base = self.route_base or ''
        path = _lstrip(request.url_rule.rule, bp_prefix)  # strip bp_prefix
        path = _lstrip(path, prefix)  # strip class prefix
        path = _lstrip(path, base)  # strip route base
        method = request.method.lower()
        view_func, _ = self.url_map_adapter.match(path, method)
        if func := getattr(self, view_func, None):
            self.before_view_func()
            rv = func(**kwargs)
            if type(rv) != tuple:  # Flask view responses can be tuples
                rv = (rv,)
            return self.after_view_func(*rv)
        raise MethodNotAllowed()

    def before_view_func(self): pass  # hook: runs before the matched method

    def after_view_func(self, response: Response, status: int = 200):
        # Hook: post-process the view result; default adds a 200 status.
        return response, status

    @classmethod
    def register(cls, app_or_bp: Union[Blueprint, Flask]):
        """Register one URL rule per url_map entry on the app/blueprint,
        all pointing at this class's view function."""
        # If the blueprint has a url_prefix, stash it on the class
        if isinstance(app_or_bp, Blueprint):
            cls._bp_prefix = app_or_bp.url_prefix
        prefix = cls.route_prefix or ''
        base = cls.route_base or ''
        view = cls.as_view(cls.__name__)
        cls.url_map_adapter: MapAdapter = cls.url_map.bind('')
        # Rule attributes that must not be forwarded as add_url_rule options.
        _opts = {'rule', 'map', 'endpoint', 'methods', 'is_leaf', 'arguments'}
        for rule in cls.url_map.iter_rules():
            opts = {
                key: val
                for key, val in rule.__dict__.items()
                if key not in _opts and not key.startswith('_')
            }
            if 'defaults' in opts and opts['defaults'] is None:
                opts['defaults']  # NOTE(review) placeholder removed below
                del opts['defaults']
            app_or_bp.add_url_rule(
                f'{prefix}{base}{rule.rule}',
                endpoint=f'{cls.__name__}:{rule.endpoint}',
                view_func=view,
                methods=rule.methods,
                **opts
            )
| 2.546875 | 3 |
btclib/to_pubkey.py | pmazzocchi/btclib | 1 | 12765720 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
from typing import Union
from . import bip32
from .alias import Octets, Point, PubKey
from .curve import Curve
from .curves import secp256k1
from .secpoint import bytes_from_point, point_from_octets
from .utils import bytes_from_octets
def to_pubkey_tuple(P: PubKey, ec: Curve = secp256k1) -> Point:
    """Return a public key tuple from any possible representation.

    It supports:

    - BIP32 extended keys (bytes, string, or XkeyDict)
    - SEC Octets (bytes or hex-string, with 02, 03, or 04 prefix)
    - native tuple
    """
    if isinstance(P, tuple):
        # Native point: must lie on the curve and not be the point at infinity.
        if not ec.is_on_curve(P) or P[1] == 0:
            raise ValueError(f"Not a public key: {P}")
        return P
    if isinstance(P, dict):
        # XkeyDict: only compressed SEC public keys (0x02/0x03 prefix) allowed.
        if P['key'][0] not in (2, 3):
            raise ValueError(f"Not a public key: {P['key'].hex()}")
        return point_from_octets(P['key'], ec)
    # Try a serialized BIP32 extended key first; fall back to raw SEC octets.
    deserialized = None
    try:
        deserialized = bip32.deserialize(P)
    except Exception:
        pass
    if deserialized is not None:
        if deserialized['key'][0] not in (2, 3):
            raise ValueError(f"Not a public key: {deserialized['key'].hex()}")
        return point_from_octets(deserialized['key'], ec)
    return point_from_octets(P, ec)
def to_pubkey_bytes(P: PubKey, compressed: bool = True, ec: Curve = secp256k1) -> bytes:
    """Return a public key in SEC bytes format from any possible representation.

    It supports:

    - BIP32 extended keys (bytes, string, or XkeyDict)
    - SEC Octets (bytes or hex-string, with 02, 03, or 04 prefix)
    - native tuple
    """
    if isinstance(P, tuple):
        return bytes_from_point(P, compressed, ec)
    elif isinstance(P, dict):
        # BIP32 XkeyDict: the embedded key is always compressed SEC,
        # so an uncompressed result cannot be requested.
        if not compressed:
            m = "Uncompressed SEC / compressed BIP32 key mismatch"
            raise ValueError(m)
        if P['key'][0] in (2, 3):
            return P['key']
        raise ValueError(f"Not a public key: {P['key'].hex()}")
    else:
        # First try to interpret P as a serialized BIP32 extended key.
        try:
            xkey = bip32.deserialize(P)
        except Exception:
            pass
        else:
            if not compressed:
                m = "Uncompressed SEC / compressed BIP32 key mismatch"
                raise ValueError(m)
            if xkey['key'][0] in (2, 3):
                return xkey['key']
            raise ValueError(f"Not a public key: {xkey['key'].hex()}")
        # Fall back to raw SEC octets; validate length for the requested format.
        pubkey = bytes_from_octets(P)
        if not compressed and len(pubkey) != 2*ec.psize + 1:
            m = f"Wrong size ({len(pubkey)}-bytes) for uncompressed SEC key"
            raise ValueError(m)
        if compressed and len(pubkey) != ec.psize + 1:
            m = f"Wrong size ({len(pubkey)}-bytes) for compressed SEC key"
            raise ValueError(m)
        Q = point_from_octets(pubkey, ec)  # verify it is a valid point
        return bytes_from_point(Q, compressed, ec)
| 2.15625 | 2 |
mapel/voting/other/matrix2png.py | szufix/mapel | 3 | 12765721 | import mapel
import mapel.voting.elections.mallows as mallows
from PIL import Image, ImageDraw
from math import sqrt
from sys import argv
def getrgb(value, MAX):
    """Grey-scale RGB triple for *value* scaled linearly into 0..255 against *MAX*."""
    grey = int(255 * value / MAX)
    return (grey, grey, grey)
def getrgb_uniform(value, MAX):
    """Grey-scale RGB triple for a *value* already normalised to 0..1 (*MAX* unused)."""
    grey = int(255 * value)
    return (grey, grey, grey)
def getsqrtrgb(value, MAX):
    """Grey-scale RGB triple with x**0.33 compression of *value* against *MAX*."""
    grey = int(255 * (value ** 0.33) / (MAX ** 0.33))
    return (grey, grey, grey)
def getsqrtrgb_uniform(value, MAX):
    """Grey-scale RGB triple with fourth-root compression of a 0..1 *value* (*MAX* unused)."""
    grey = int(255 * (value ** 0.25))
    return (grey, grey, grey)
def matrix2png(argv):
    """Render a mapel election model's frequency matrix as a grey-scale PNG.

    argv follows the usage text below: number of candidates, model id,
    reorder target election id ('org' keeps the original order), and an
    optional model parameter (the Mallows norm-phi).
    """
    # introduce yourself
    if len(argv) < 4:
        print("Invocation:")
        print("  python3 matrix2png num_candidates model_id reorder [param1]")
        print(
            "    reorder -- election_id of the model_id to try to resemble (e.g., ID, or AN); use org to use original order")
        print("")
        exit()
    # gather arguments
    m = int(argv[1])
    n = m * m
    model = argv[2]
    tgt = argv[3]
    print("TGT:", tgt)
    if len(argv) >= 5:
        param = float(argv[4])
    else:
        param = None
    # Output file name encodes the model, (phi) parameter, size and target.
    if model != "mallows":
        name = "%s_%d_%s.png" % (model, m, tgt)
    else:
        name = "%s_phi%d_%d_%s.png" % (model, param * 100, m, tgt)
    # prepare the experiment/matrix
    experiment = mapel.prepare_experiment()
    experiment.set_default_num_candidates(m)
    experiment.set_default_num_voters(n)
    # Compass Matrices
    experiment.add_election(election_model="uniformity", election_id="UN", color=(1, 0.5, 0.5),
                            marker="X")
    experiment.add_election(election_model="identity", election_id="ID", color="red", marker="X")
    experiment.add_election(election_model="antagonism", election_id="AN", color="black",
                            marker="o")
    experiment.add_election(election_model="stratification", election_id="ST", color="black")
    # form the matrix
    if model != "mallows":
        experiment.add_election(election_model=model, election_id="M")
    else:
        experiment.add_election(election_model="norm-mallows_matrix", params={"norm-phi": param},
                                election_id="M")
    M = experiment.elections["M"].matrix
    # get the mapping to a given election
    experiment.compute_distances()
    if tgt == "org":
        match = list(range(m))
    else:
        match = experiment.matchings[tgt]["M"]
    print(match)
    # get reversed matching
    rev_match = [0] * m
    for i in range(m):
        rev_match[match[i]] = i
    print(rev_match)
    # create the image
    img = Image.new("RGB", (m, m), color="black")
    draw = ImageDraw.Draw(img)
    MAX = 0  # highest value in the matrix
    for y in range(m):
        for x in range(m):
            MAX = max(MAX, M[y][x])
    color = lambda v: getsqrtrgb_uniform(v, MAX)
    ### print columns
    print("----")
    for x in range(m):
        # NOTE(review): this prints the column *index* formatted as a float
        # (0.00 1.00 ...) -- presumably intended as a header row; confirm.
        print("%.2f" % x, end=" ")
    print()
    print("----")
    ### draw the matrix
    for y in range(m):
        for x in range(m):
            draw.point((x, y), fill=color(M[y][rev_match[x]]))
            print("%.2f" % M[y][rev_match[x]], end=" ")
        print()
    # save the image
    img.save(name)
    print("MAX value:", MAX)
| 2.9375 | 3 |
edgepipes.py | ppi-ai/edgepipes | 2 | 12765722 | #!/usr/bin/env python3
#
# Data pipelines for Edge Computing in Python.
#
# Inspired by Google Media pipelines
#
# Dataflow can be within a "process" and then hook in locally
# But can also be via a "bus" or other communication mechanism
#
# Example: Draw detections
#
# Input 1. Picture
# Input 2. Detections [...]
#
# They can come in one single combined data-packet och as a picture that should be "annotated"
# with labels
#
import cv2
import sys
import time
from calculators.image import *
from calculators.mqtt import *
from calculators.hand import *
from calculators.audio import *
from calculators.core import *
from google.protobuf import text_format
import pipeconfig_pb2
import sched
import importlib
import argparse
def _resolve_class(class_name):
"""Return a class instance based on the string representation"""
if class_name in globals():
return globals()[class_name]
class_info = class_name.rsplit('.', 1)
if len(class_info) != 2:
raise PipelineError(f"Could not resolve class name {class_name}")
try:
m = importlib.import_module(class_info[0])
try:
return getattr(m, class_info[1])
except AttributeError:
raise PipelineError(f"Class {class_name} does not exist")
except ImportError:
raise PipelineError(f"Could not find module for class {class_name}")
def _add_stream_input_node(stream_data, name, node):
if name not in stream_data:
stream_data[name] = []
stream_data[name].append((node, node.get_input_index(name)))
def _merge_options(mapoptions):
options = {**mapoptions.doubleOptions, **mapoptions.stringOptions}
return options
class PipelineError(Exception):
    """Raised when an edge pipeline cannot be configured or resolved."""

    def __init__(self, message):
        super().__init__(message)
class Pipeline:
    """Owns the calculator nodes of one data pipeline and drives their
    execution on the main thread."""

    def __init__(self):
        # Scheduler for deferred work (drained at the end of run()).
        self.scheduler = sched.scheduler(time.time, time.sleep)
        # Stream name -> list of (node, input_index) consumers.
        self.streaming_data = {}
        # All calculator nodes, in processing order.
        self.pipeline = []
        self.do_exit = False
        self.run_pipeline = False
        self.run_step = 0
        # Per-node accumulated processing time (seconds) and call count.
        self.elapsed = {}
        self.count = {}

    def add_node(self, calculator, prefix, options, input_streams, output_streams):
        """Instantiate calculator *calculator* and wire its streams."""
        print("calculator", calculator)
        node_class = _resolve_class(calculator)
        n = node_class("Node:" + prefix + ":" + calculator, self.streaming_data, options=options)
        n.set_input_names(input_streams)
        n.set_output_names(output_streams)
        for name in input_streams:
            _add_stream_input_node(self.streaming_data, name, n)
        self.pipeline.append(n)

    def get_node_by_name(self, name):
        """Return the first node with the given name, or None."""
        return next((n for n in self.pipeline if n.name == name), None)

    def get_nodes_by_type(self, node_class):
        """Return all nodes that are instances of *node_class*."""
        return [n for n in self.pipeline if isinstance(n, node_class)]

    def get_nodes(self):
        """Return a shallow copy of the node list."""
        return list(self.pipeline)

    # Setup a pipeline based on a configuration
    def setup_pipeline(self, config, options=None, prefix=""):
        """Parse a CalculatorGraphConfig text proto and build the node graph.

        *options* maps the implicit stream names ('input_video'/'input_audio')
        to option dicts for the capture nodes created here; *prefix*
        namespaces all stream names so several pipelines can coexist.
        """
        if options is None:
            options = {}
        pipe = pipeconfig_pb2.CalculatorGraphConfig()
        text_format.Parse(config, pipe)
        print("Pipe-config:")
        print(pipe)
        print("Inputs:", pipe.input_stream)
        print("Outputs:", pipe.output_stream)
        # Should check if this already exists in the config...
        # map_node_options: { key:"video"; value:"rtsp://192.168.1.237:7447/5c8d2bf990085177ff91c7a2_2" }
        if "input_video" in pipe.input_stream:
            ins = CaptureNode(prefix + "input_video", self.streaming_data, options=options.get('input_video', {}))
            ins.set_input_names([])
            ins.set_output_names([prefix + "input_video"])
            self.pipeline.append(ins)
        if "input_audio" in pipe.input_stream:
            ins = AudioCaptureNode(prefix + "input_audio", self.streaming_data, options=options.get('input_audio', {}))
            ins.set_input_names([])
            ins.set_output_names([prefix + "input_audio"])
            self.pipeline.append(ins)
        if "output_video" in pipe.output_stream:
            outs = ShowImage(prefix + "output_video", self.streaming_data)
            outs.set_input_names([prefix + "output_video"])
            outs.set_output_names([])
            _add_stream_input_node(self.streaming_data, prefix + "output_video", outs)
            self.pipeline.append(outs)
        for nr, node in enumerate(pipe.node, start=1):
            node_options = _merge_options(node.map_node_options)
            self.add_node(node.calculator, prefix, node_options, list(map(lambda x: prefix + x, node.input_stream)),
                          list(map(lambda x: prefix + x, node.output_stream)))
        # Reset the per-node timing statistics.
        for node in self.pipeline:
            self.elapsed[node.name] = 0
            self.count[node.name] = 0
        return self.streaming_data, self.pipeline

    def get_node_by_output(self, outputname):
        """Return all nodes that produce the stream *outputname*."""
        return list(filter(lambda x: outputname in x.output, self.pipeline))

    # Running with the main thread - as it make use of CV2s show image.
    def run(self):
        """Main processing loop; blocks until quit/exit or 'q' is pressed."""
        while not self.do_exit:
            if self.run_pipeline or self.run_step > 0:
                # Just process all nodes - they will produce output and process the input.
                for node in self.pipeline:
                    t0 = time.time()
                    # Count elapsed time when processed!
                    if node.process_node():
                        t1 = time.time() - t0
                        self.elapsed[node.name] += t1
                        self.count[node.name] += 1
                time.sleep(0.001)
                self.run_step -= 1
            else:
                # Nothing running at the moment...
                time.sleep(1)
            # CV2 wait-key
            if cv2.waitKey(1) & 0xFF == ord('q'):
                return
            self.scheduler.run()
        cv2.destroyAllWindows()

    def step(self):
        """Process the pipeline exactly once."""
        self.run_step = 1

    def start(self):
        """Let run() process nodes continuously."""
        self.run_pipeline = True

    def stop(self):
        """Pause continuous processing (run() keeps polling)."""
        self.run_pipeline = False

    # I always forget if it is quit or exit - so I have both...
    def quit(self):
        self.do_exit = True

    def exit(self):
        self.do_exit = True
# Either load a pbtxt file or use the default above
if __name__ == "__main__":
    pipeline = Pipeline()
    # Parse command line: pipeline config path plus optional input overrides.
    try:
        args = sys.argv[1:]
        p = argparse.ArgumentParser()
        p.add_argument('--input', dest='input_video', default=None, help='video stream input')
        p.add_argument('--input_audio', dest='input_audio', default=None, help='audio stream input')
        p.add_argument('-n', '--dry-run', dest='dry_run', action='store_true', default=False,
                       help='test pipeline setup and exit')
        p.add_argument('pipeline', nargs=1)
        conopts = p.parse_args(args)
    except Exception as e:
        sys.exit(f"Illegal arguments: {e}")
    print(f"Loading pipeline from {conopts.pipeline[0]}")
    # Read the pbtxt pipeline configuration.
    try:
        with open(conopts.pipeline[0], "r") as f:
            txt = f.read()
    except FileNotFoundError:
        sys.exit(f"Could not find the pipeline config file {conopts.pipeline[0]}")
    opts = {}
    # Numeric arguments select a local device index; otherwise treated as URL/path.
    if conopts.input_video:
        video = int(conopts.input_video) if conopts.input_video.isnumeric() else conopts.input_video
        opts['input_video'] = {'video': video}
    if conopts.input_audio:
        audio = int(conopts.input_audio) if conopts.input_audio.isnumeric() else conopts.input_audio
        opts['input_audio'] = {'audio': audio}
    pipeline.setup_pipeline(txt, options=opts)
    # A dry run only validates the setup; otherwise start processing.
    if not conopts.dry_run:
        pipeline.start()
        pipeline.run()
| 2.828125 | 3 |
tests/api/serializers.py | TralahM/drf-generators | 340 | 12765723 | <reponame>TralahM/drf-generators
from rest_framework.serializers import ModelSerializer
from api.models import Category, Post
class CategorySerializer(ModelSerializer):
    """Serializes all fields of the Category model."""

    class Meta:
        model = Category
        fields = '__all__'
class PostSerializer(ModelSerializer):
    """Serializes all fields of the Post model."""

    class Meta:
        model = Post
        fields = '__all__'
| 2.140625 | 2 |
src/linear_search/run_linear_search_comparison.py | v-hill/advanced-computational-physics | 0 | 12765724 | <gh_stars>0
"""
This script runs a comparison between three different ways of computing
nearest-neighbours:
1) Euclidean distance
2) Euclidean distance squared
3) Manhattan distance
Points are generated in 2D.
Each program computes the neighbours of every point within a given max
distance denoted 'max_neighbour_dist'.
By default there are 'num_points' points with coordiantes values inside the
square defined by 'world_size'.
"""
# Python libraries
import argparse
import time
import numpy as np
# Code from local files
from linear_search.boids import World, Boids
import linear_search.utilities as utilities
# --------------------------------- func defs ---------------------------------
def setup_boids(world, num_points, max_dist):
    """Create and populate a Boids flock of *num_points* points in *world*."""
    flock = Boids(num_points, world, max_dist)
    flock.generate_boids()
    return flock
def function_timer(method, num_points, scan=False):
    """Call *method* once and print its wall-clock run time.

    Parameters
    ----------
    method : callable
        Zero-argument callable to time.
    num_points : int
        Number of points the call operates on (echoed in scan output).
    scan : bool, optional
        If True, print CSV-style "num_points,ms"; otherwise a
        human-readable "<ms> ms" line.
    """
    # perf_counter is monotonic and high-resolution, unlike time.time(),
    # so the measurement cannot go backwards on system clock adjustments.
    start = time.perf_counter()
    method()
    elapsed = time.perf_counter() - start
    if scan:
        print(f'{num_points},{elapsed*1000:0.2f}')
    else:
        print(f'\t {elapsed*1000:0.1f} ms')
# ----------------------------------- setup -----------------------------------
num_points = 1000  # number of points to generate by default
max_neighbour_dist = 100  # max distance for two points to count as neighbours
world_size = [0, 1000, 0, 1000]  # coordinate bounds [xmin, xmax, ymin, ymax]
world = World(world_size)
# --------------------------------- func defs ---------------------------------
def main(world, num_points, max_neighbour_dist):
    """
    Run the linear-search nearest-neighbour algorithm over *num_points*
    generated points, once per distance metric, printing a label and the
    elapsed time for each.

    Parameters
    ----------
    world : linear_search.boids.World
        Defines the range of x and y coordinates the points can take
    num_points : int
        The number of points to generate and find neighbours of
    max_neighbour_dist : int
        The maximum distance a point can be away from a given test point in
        order to still be considered a neighbour of said test point
    """
    comparisons = (
        ('Euclidean distance:', 'make_neighbourhoods_1'),
        ('Euclidean squared:', 'make_neighbourhoods_2'),
        ('Manhattan distance:', 'make_neighbourhoods_3'),
    )
    # A fresh flock is generated for each metric so timings are comparable.
    for label, method_name in comparisons:
        boids = setup_boids(world, num_points, max_neighbour_dist)
        print(label)
        function_timer(getattr(boids, method_name), num_points)
def main_scan(world, max_neighbour_dist):
    """Benchmark the nearest-neighbour search over a range of point counts.

    Performs the same comparison as main() but for every value in the
    module-level ``num_pts_list`` (seven values between 100 and 10000,
    distributed logarithmically), printing one CSV-style
    ``num_points,elapsed_ms`` line per run.

    NOTE: this function reads the global ``num_pts_list``, which is only
    created inside the ``__main__`` block below; it must not be called
    before that list exists.

    Parameters
    ----------
    world : linear_search.boids.World
        Defines the range of x and y coordinates the points can take
    max_neighbour_dist : int
        The maximum distance a point can be away from a given test point in
        order to still be considered a neighbour of said test point
    """
    # One table drives all three metrics instead of three copy-pasted loops.
    metrics = (
        ('Euclidean distance:', 'make_neighbourhoods_1'),
        ('Euclidean squared:', 'make_neighbourhoods_2'),
        ('Manhattan distance:', 'make_neighbourhoods_3'),
    )
    for label, method_name in metrics:
        print(label)
        for num_pts in num_pts_list:
            boids = setup_boids(world, num_pts, max_neighbour_dist)
            function_timer(getattr(boids, method_name), num_pts, scan=True)
# ------------------------------------ main -----------------------------------
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Nearest neighbour '
                                     'search algorithm comparison')
    parser.add_argument('-n', '--num_points',
                        help='The number of points',
                        type=int,
                        default=num_points)
    parser.add_argument('-mnd', '--max_neighbour_dist',
                        help='The maximum neighbour distance',
                        type=int,
                        default=max_neighbour_dist)
    parser.add_argument('--num_points_scan',
                        default=False, action='store_true',
                        help=('Run the script multiple times using a range '
                              'of values for the number of points. Values '
                              'range from 100 to 10000 with a logarithmic '
                              'distribution'))
    parser.add_argument('-r', '--repeats',
                        help='The number of times to repeat each num_points '
                             'value',
                        type=int,
                        default=1)
    args = parser.parse_args()
    # Execute for a single value of num_points
    if not args.num_points_scan:
        print('Comparing optimisations of the linear search nearest '
              'neighbour algorithm')
        # Bug fix: report the value actually used -- this previously printed
        # the module-level default and so ignored a -n/--num_points override.
        print(f'Finding the neighbours of {args.num_points} generated points...')
        main(world, args.num_points, args.max_neighbour_dist)
    # Execute multiple times for a range of num_points values
    else:
        # Exponents 2 .. 4 in steps of a third of a decade -> 7 point counts
        # from 10**2 to 10**4.
        steps = np.arange(2, 4.1, 1/3)
        num_pts_list = utilities.make_num_pts_list(steps, base=10,
                                                   repeats=args.repeats)
        main_scan(world, args.max_neighbour_dist)
| 3.671875 | 4 |
tests/test_deduplication.py | jhnphm/boar | 0 | 12765725 | # -*- coding: utf-8 -*-
# Copyright 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, unittest, shutil
import sqlite3
# Allow CI environments to opt out of this (comparatively slow) suite.
if os.getenv("BOAR_SKIP_DEDUP_TESTS") == "1":
    print "Skipping test_deduplication.py due to BOAR_SKIP_DEDUP_TESTS"
    sys.exit(0)
# When run directly, make the project root importable.
if __name__ == '__main__':
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import workdir
from blobrepo import repository
# Tiny block size so test strings like "aaa" form exactly one dedup block.
repository.DEDUP_BLOCK_SIZE = 3 # Make deduplication cases more manageable
from common import get_tree, my_relpath, convert_win_path_to_unix, md5sum, DevNull
from boar_exceptions import UserError, SoftCorruptionError
from front import Front, verify_repo
from wdtools import read_tree, write_tree, WorkdirHelper, boar_dirs, write_file
from deduplication import print_recipe
from deduplication import RecipeFinder
from deduplication import BlocksDB
from cdedup import IntegerSet, calc_rolling
class FakePieceHandler:
    """Minimal stand-in for a piece handler used by RecipeFinder tests.

    All piece callbacks are no-ops; every piece resolves to a fixed
    ("FAKEBLOB", 0) address so recipes are easy to assert against.
    """
    def init_piece(self, index):
        """Called when a new original piece starts; ignored."""
    def add_piece_data(self, index, data):
        """Receives raw data for an original piece; ignored."""
    def end_piece(self, index):
        """Called when an original piece is complete; ignored."""
    def close(self):
        """Finalize the handler; nothing to clean up."""
    def get_piece_address(self, index):
        """Return a fixed fake (blob, offset) address for any piece."""
        return ("FAKEBLOB", 0)
class TestRecipeFinder(unittest.TestCase, WorkdirHelper):
    """Exercises RecipeFinder's block matching in isolation (no repository)."""
    def setUp(self):
        self.remove_at_teardown = []
        self.dbname = self.createTmpName()
        # Block size 3 matches the DEDUP_BLOCK_SIZE forced at import time.
        self.blocksdb = BlocksDB(self.dbname, 3)
        self.piece_handler = FakePieceHandler()
        self.integer_set = IntegerSet(1)
    def testSimpleUnaligned(self):
        """A known block preceded by unknown data is found even when the
        input is fed in chunks that do not line up with block boundaries."""
        self.integer_set.add(3298534883712) # "aaa"
        recipe_finder = RecipeFinder(self.blocksdb, 3, self.integer_set, None, original_piece_handler = self.piece_handler)
        self.blocksdb.begin()
        self.blocksdb.add_block("47bce5c74f589f4867dbd57e9ca9f808", 0, "47bce5c74f589f4867dbd57e9ca9f808")
        self.blocksdb.commit()
        # Feed "XXXaaa" as 2-byte chunks; "aaa" straddles chunk edges.
        recipe_finder.feed("XX")
        recipe_finder.feed("Xa")
        recipe_finder.feed("aa")
        recipe_finder.close()
        recipe = recipe_finder.get_recipe()
        # Expect one original piece ("XXX" via the fake handler) followed by
        # one deduplicated piece pointing at the known "aaa" block.
        self.assertEquals(recipe, {'md5sum': '5afc35e6684b843ceb498f5031f22660',
                                   'method': 'concat', 'size': 6,
                                   'pieces': [{'source': 'FAKEBLOB', 'size': 3L,
                                               'original': True, 'repeat': 1, 'offset': 0},
                                              {'source': u'47bce5c74f589f4867dbd57e9ca9f808', 'size': 3,
                                               'original': False, 'repeat': 1, 'offset': 0}]
                                   })
class TestConcurrentCommit(unittest.TestCase, WorkdirHelper):
    """Tests deduplication correctness when two workdirs commit to the same
    repository and their commits interleave.

    The interleaving is simulated by stubbing out wd2's front.commit so
    wd2's checkin stops just before the final commit step, letting wd1
    commit in between, and then resuming wd2's commit manually.
    """
    def setUp(self):
        # Two independent workdirs (and sessions) over one dedup-enabled repo.
        self.remove_at_teardown = []
        self.workdir1 = self.createTmpName()
        self.workdir2 = self.createTmpName()
        self.repopath = self.createTmpName()
        repository.create_repository(self.repopath, enable_deduplication = True)
        os.mkdir(self.workdir1)
        self.wd1 = workdir.Workdir(self.repopath, u"TestSession1", u"", None, self.workdir1)
        self.wd1.setLogOutput(DevNull())
        self.wd1.use_progress_printer(False)
        self.wd1.get_front().mksession(u"TestSession1")
        os.mkdir(self.workdir2)
        self.wd2 = workdir.Workdir(self.repopath, u"TestSession2", u"", None, self.workdir2)
        self.wd2.setLogOutput(DevNull())
        self.wd2.use_progress_printer(False)
        self.wd2.get_front().mksession(u"TestSession2")
    def testIdenticalCommits(self):
        """Both workdirs commit the same new content concurrently."""
        write_file(self.workdir1, "a.txt", "aaa")
        self.wd1.checkin()
        write_file(self.workdir2, "b2.txt", "aaaaaa")
        write_file(self.workdir1, "b1.txt", "aaaaaa")
        # Make the checkin() go just almost all the way...
        wd2_commit = self.wd2.front.commit
        self.wd2.front.commit = lambda session_name, log_message, progress_callback: None
        self.wd2.checkin() # Will not complete
        self.wd1.checkin()
        wd2_commit(u"TestSession2", None) # Resume the commit
    def testIdenticalNewBlob(self):
        """Two interleaved commits each introduce the same new tail blob."""
        write_file(self.workdir1, "a.txt", "aaa")
        write_file(self.workdir1, "b.txt", "bbb")
        self.wd1.checkin()
        write_file(self.workdir1, "c1.txt", "aaaccc")
        write_file(self.workdir2, "c2.txt", "bbbccc")
        # Make the checkin() go just almost all the way...
        wd2_commit = self.wd2.front.commit
        self.wd2.front.commit = lambda session_name, log_message, progress_callback: None
        self.wd2.checkin() # Will not complete
        self.wd1.checkin()
        wd2_commit(u"TestSession2", None) # Resume the commit
        # The shared "ccc" fragment must be retrievable afterwards.
        self.assertEquals("ccc", self.wd1.front.get_blob("9df62e693988eb4e1e1444ece0578579").read())
    def testRedundantNewBlob(self):
        """A paused commit's new blob becomes redundant once wd1 commits the
        same pieces; no orphan blobs may remain."""
        # aaa 47bce5c74f589f4867dbd57e9ca9f808
        # bbb 08f8e0260c64418510cefb2b06eee5cd
        # ccc 9df62e693988eb4e1e1444ece0578579
        # bbbccc 71d27475242f6b50db02ddf1476107ee
        write_file(self.workdir1, "a.txt", "aaa")
        self.wd1.checkin()
        write_file(self.workdir2, "b.txt", "aaabbbccc")
        # b.txt is deduplicated to aaa + bbbccc
        # Make the checkin() go just almost all the way...
        wd2_commit = self.wd2.front.commit
        self.wd2.front.commit = lambda session_name, log_message, progress_callback: None
        self.wd2.checkin() # Will not complete
        # Is deduplicated to aaa + bbb
        write_file(self.workdir1, "b.txt", "aaabbb")
        self.wd1.checkin()
        # Is deduplicated to aaa + bbb + ccc
        write_file(self.workdir1, "b.txt", "aaabbbccc")
        self.wd1.checkin()
        wd2_commit(u"TestSession2", None) # Resume the commit
        self.assertEquals(set(), self.wd1.front.repo.get_orphan_blobs())
    def testThatTrimmedBlobsAreRemovedFromDb(self):
        """A blob turned into a recipe mid-commit must not linger in the
        blocks database, or later deduplication against it would fail."""
        # aaa 47bce5c74f589f4867dbd57e9ca9f808
        # bbb 08f8e0260c64418510cefb2b06eee5cd
        # ccc 9df62e693988eb4e1e1444ece0578579
        # bbbccc 71d27475242f6b50db02ddf1476107ee
        write_file(self.workdir2, "bbbccc.txt", "bbbccc")
        # Make the checkin() go just almost all the way...
        wd2_commit = self.wd2.front.commit
        self.wd2.front.commit = lambda session_name, log_message, progress_callback: None
        self.wd2.checkin() # Will not complete
        # Now, make sure bbbccc exists as a recipe
        write_file(self.workdir1, "bbb.txt", "bbb")
        write_file(self.workdir1, "ccc.txt", "ccc")
        self.wd1.checkin()
        write_file(self.workdir1, "bbbccc.txt", "bbbccc")
        self.wd1.checkin()
        wd2_commit(u"TestSession2", None) # Resume the commit
        #####################
        # Is deduplicated to bbbccc + X
        # If bbbccc is stored in the blocksdb, the commit will fail
        write_file(self.workdir1, "b.txt", "bbbcccX")
        self.wd1.checkin()
        self.assertEquals(set(), self.wd1.front.repo.get_orphan_blobs())
    def testIdenticalBlocksOnlyAddedOnce(self):
        """Committing the same block from two workdirs yields exactly one
        blocksdb entry (no duplicate rows)."""
        write_file(self.workdir1, "a.txt", "aaa")
        write_file(self.workdir2, "b.txt", "aaa")
        # Make the checkin() go just almost all the way...
        wd2_commit = self.wd2.front.commit
        self.wd2.front.commit = lambda session_name, log_message, progress_callback: None
        self.wd2.checkin() # Will not complete
        self.wd1.checkin()
        wd2_commit(u"TestSession2", None) # Resume the commit
        for blockdb in (self.wd1.front.repo.blocksdb, self.wd2.front.repo.blocksdb):
            self.assertEquals(blockdb.get_all_rolling(), [3298534883712])
            self.assertTrue(blockdb.has_block("47bce5c74f589f4867dbd57e9ca9f808"))
            block_locations = blockdb.get_block_locations("47bce5c74f589f4867dbd57e9ca9f808")
            self.assertEquals(list(block_locations), [('47bce5c74f589f4867dbd57e9ca9f808', 0)])
    def tearDown(self):
        # Every test must leave a verifiable repository behind.
        verify_repo(self.wd1.get_front())
        for d in self.remove_at_teardown:
            shutil.rmtree(d, ignore_errors = True)
class TestDeduplicationWorkdir(unittest.TestCase, WorkdirHelper):
    """End-to-end deduplication tests through a single workdir: commits
    content, then inspects the recipes the repository produced and verifies
    that blobs can be rebuilt from them (DEDUP_BLOCK_SIZE is 3)."""
    def setUp(self):
        self.remove_at_teardown = []
        self.workdir = self.createTmpName()
        self.repopath = self.createTmpName()
        repository.create_repository(self.repopath, enable_deduplication = True)
        os.mkdir(self.workdir)
        self.wd = workdir.Workdir(self.repopath, u"TestSession", u"", None, self.workdir)
        self.wd.setLogOutput(DevNull())
        self.wd.use_progress_printer(False)
        self.repo = self.wd.front.repo
        id = self.wd.get_front().mksession(u"TestSession")
        assert id == 1
    def testThatRepeatedHitsAreFound(self):
        """A blob that is the same known block repeated collapses to one piece."""
        self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "aaaaaa")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        self.assertEquals(len(recipe['pieces']), 1)
        rebuilt_content = self.wd.front.get_blob("0b4e7a0e5fe84ad35fb5f95b9ceeac79").read()
        self.assertEquals(md5sum(rebuilt_content), "0b4e7a0e5fe84ad35fb5f95b9ceeac79")
    def testDeduplicationWithinCommit(self):
        """Blobs sharing a block within one commit deduplicate against each other."""
        blob_a = self.addWorkdirFile("a.txt", "aaaX")
        blob_b = self.addWorkdirFile("b.txt", "aaaY")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob_a)
        self.assertTrue(self.repo.get_recipe(blob_a) or self.repo.get_recipe(blob_b))
    def testThatNonalignedEndingsAreDeduplicated(self):
        """A known block found at a non-block-aligned offset is still matched."""
        self.addWorkdirFile("a.txt", "aaab")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "Xaaab")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        print_recipe(recipe)
        self.assertEquals(len(recipe['pieces']), 2)
        rebuilt_content = self.wd.front.get_blob("1446f760b3a89d261a13d8b37c20ef11").read()
        self.assertEquals(md5sum(rebuilt_content), "1446f760b3a89d261a13d8b37c20ef11")
    def testTailMatchTooShort(self):
        """A partial tail match shorter than a block cannot be reused."""
        self.addWorkdirFile("a.txt", "aaab")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "Xaaabb")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        self.assertEquals(len(recipe['pieces']), 3)
    def testTailMatchFalsePositive(self):
        """A tail that resembles a known block but differs must not corrupt
        the rebuilt content."""
        self.addWorkdirFile("a.txt", "aaabc")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "Xaaabb")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        rebuilt_content = self.wd.front.get_blob("acd3e6fdfcd9e03e3f941c0ed516be81").read()
        self.assertEquals(md5sum(rebuilt_content), "acd3e6fdfcd9e03e3f941c0ed516be81")
    def testMultiplePossibleHits1(self):
        """When several block positions could match, a single long match
        covering the whole known blob is preferred."""
        self.addWorkdirFile("a.txt", "aaabbbcccaaabbbaaabbbaaabbb")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "Xaaabbbcccaaabbbaaabbbaaabbb")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        self.assertEquals(len(recipe['pieces']), 2)
        self.assertEquals(recipe['pieces'][0], {
            'source': '02129bb861061d1a052c592e2dc6b383',
            'repeat': 1, 'original': True, 'offset': 0, 'size': 1})
        self.assertEquals(recipe['pieces'][1], {
            'source': '00312b74e44d0712882387b8e0f0a57e',
            'repeat': 1, 'original': False, 'offset': 0, 'size': 27})
        rebuilt_content = self.wd.front.get_blob(blob).read()
        self.assertEquals(md5sum(rebuilt_content), "407badd3ba116d47c556d1366343048c")
    def testMultiplePossibleHits2(self):
        """A short blob matching several positions in a longer known blob is
        expressed as one piece at the correct offset."""
        first_blob = self.addWorkdirFile("a.txt", "aaabbbaaabbbaaabbbaaabbbccc")
        self.wd.checkin()
        blob = self.addWorkdirFile("b.txt", "aaabbbccc")
        self.wd.checkin()
        recipe = self.repo.get_recipe(blob)
        #print_recipe(recipe)
        self.assertEquals(len(recipe['pieces']), 1)
        self.assertEquals(recipe['pieces'][0], {
            'source': first_blob,
            'repeat': 1, 'original': False, 'offset': 18, 'size': 9})
        rebuilt_content = self.wd.front.get_blob(blob).read()
        self.assertEquals(md5sum(rebuilt_content), "d1aaf4767a3c10a473407a4e47b02da6")
    def testSplitMatch(self):
        """A blob equal to the concatenation of two known blobs becomes a
        two-piece recipe referencing both."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        b_blob = self.addWorkdirFile("b.txt", "bbb")
        self.wd.checkin()
        c_blob = self.addWorkdirFile("c.txt", "aaabbb")
        self.wd.checkin()
        recipe = self.repo.get_recipe(c_blob)
        #print_recipe(recipe)
        self.assertEquals(len(recipe['pieces']), 2)
        self.assertEquals(recipe['pieces'][0], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        self.assertEquals(recipe['pieces'][1], {
            'source': b_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        rebuilt_content = self.wd.front.get_blob(c_blob).read()
        self.assertEquals(md5sum(rebuilt_content), "6547436690a26a399603a7096e876a2d")
    def testConcatenatedFragments(self):
        """Original fragments of one commit are joined into a new raw blob
        that later commits can deduplicate against."""
        self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        self.addWorkdirFile("a.txt", "aaabbbaaaXaaaccc")
        self.wd.checkin()
        # Should now exist due to concatenation of original parts in previous commit
        self.assertTrue(md5sum("bbbXccc") in self.wd.get_front().get_all_raw_blobs())
        self.addWorkdirFile("a.txt", "cccbbb")
        self.wd.checkin()
        recipe = self.repo.get_recipe(md5sum("cccbbb"))
        self.assertEquals(len(recipe['pieces']), 2)
        self.assertEquals(recipe['pieces'][0], {
            'source': md5sum("bbbXccc"),
            'repeat': 1, 'original': False, 'offset': 4, 'size': 3})
        self.assertEquals(recipe['pieces'][1], {
            'source': md5sum("bbbXccc"),
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        self.assertEquals(self.wd.front.get_blob(md5sum("bbbXccc")).read(), "bbbXccc")
    def testInterleavedHit1(self):
        """Known blocks separated by single original bytes produce an
        alternating original/deduplicated recipe."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        b_blob = self.addWorkdirFile("b.txt", "XaaaXaaaX")
        self.wd.checkin()
        x_blob = md5sum("X")
        xxx_blob = md5sum("XXX") # All the original pieces joined
        recipe = self.repo.get_recipe(b_blob)
        #print_recipe(recipe)
        self.assertEquals(len(recipe['pieces']), 5)
        self.assertEquals(recipe['pieces'][0], {
            'source': xxx_blob,
            'repeat': 1, 'original': True, 'offset': 0, 'size': 1})
        self.assertEquals(recipe['pieces'][1], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        self.assertEquals(recipe['pieces'][2], {
            'source': xxx_blob,
            'repeat': 1, 'original': True, 'offset': 1, 'size': 1})
        self.assertEquals(recipe['pieces'][3], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        self.assertEquals(recipe['pieces'][4], {
            'source': xxx_blob,
            'repeat': 1, 'original': True, 'offset': 2, 'size': 1})
        rebuilt_content = self.wd.front.get_blob(b_blob).read()
        self.assertEquals(md5sum(rebuilt_content), "e18585992d1ea79a30a34e015c49719e")
    def testInterleavedHit2(self):
        """Two hits on the same known blob with one original byte between."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        b_blob = self.addWorkdirFile("b.txt", "aaaXaaa")
        self.wd.checkin()
        x_blob = md5sum("X")
        recipe = self.repo.get_recipe(b_blob)
        #print_recipe(recipe)
        self.assertEquals(len(recipe['pieces']), 3)
        self.assertEquals(recipe['pieces'][0], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        self.assertEquals(recipe['pieces'][1], {
            'source': x_blob,
            'repeat': 1, 'original': True, 'offset': 0, 'size': 1})
        self.assertEquals(recipe['pieces'][2], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        rebuilt_content = self.wd.front.get_blob(b_blob).read()
        self.assertEquals(md5sum(rebuilt_content), "78c011eeafaad0783eb1d90392e08b46")
    def testAmbigousHit(self):
        """A blob matching more than one offset of a known blob still gets a
        single, correct piece."""
        a_blob = self.addWorkdirFile("a.txt", "aaaaaa")
        self.wd.checkin()
        b_blob = self.addWorkdirFile("b.txt", "aaa")
        self.wd.checkin()
        recipe = self.repo.get_recipe(b_blob)
        self.assertEquals(len(recipe['pieces']), 1)
        self.assertEquals(recipe['pieces'][0], {
            'source': a_blob,
            'repeat': 1, 'original': False, 'offset': 0, 'size': 3})
        rebuilt_content = self.wd.front.get_blob(b_blob).read()
        self.assertEquals(rebuilt_content, "aaa")
        #print_recipe(recipe)
    def testRepeatedHit(self):
        """Smoke test: the same known block hit repeatedly must not crash."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        b_blob = self.addWorkdirFile("b.txt", "XXXaaaXXXaaaXXX")
        self.wd.checkin()
        x_blob = md5sum("X")
        recipe = self.repo.get_recipe(b_blob)
        #print_recipe(recipe)
    def testManyFragments(self):
        # Make sure we don't get a "OSError: [Errno 24] Too many open files"
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        b_data = "aaa".join(map(str, range(0,10000)))
        b_blob = self.addWorkdirFile("b.txt", b_data)
        self.wd.checkin()
    def testSameRecipeTwice(self):
        """Two identical files in one commit produce the same blob/recipe
        without conflict."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        b_blob = self.addWorkdirFile("b.txt", "aaaccc")
        c_blob = self.addWorkdirFile("c.txt", "aaaccc")
        self.wd.checkin()
        #print_recipe(recipe)
    def testEmptyFile(self):
        """An empty file is stored as the well-known empty-string raw blob."""
        a_blob = self.addWorkdirFile("empty.txt", "")
        self.wd.checkin()
        self.assertTrue("d41d8cd98f00b204e9800998ecf8427e" in self.wd.get_front().get_all_raw_blobs())
        #print_recipe(recipe)
    def testPartialRecipeReads(self):
        """Ranged and chunked reads of a recipe-backed blob behave exactly
        like reads of a raw blob (offsets, sizes, sequential read(n))."""
        a_blob = self.addWorkdirFile("a.txt", "aaa")
        self.wd.checkin()
        #                                      000000000011
        #                                      012345678901
        b_blob = self.addWorkdirFile("b.txt", "XaaaYaaaZaaa")
        self.wd.checkin()
        recipe = self.repo.get_recipe(b_blob)
        self.assertEquals(len(recipe['pieces']), 6)
        self.assertEquals(self.wd.front.get_blob(b_blob, 0, 12).read(), "XaaaYaaaZaaa")
        self.assertEquals(self.wd.front.get_blob(b_blob, 0, 1).read(), "X")
        self.assertEquals(self.wd.front.get_blob(b_blob, 0, 3).read(), "Xaa")
        self.assertEquals(self.wd.front.get_blob(b_blob, 0, 5).read(), "XaaaY")
        self.assertEquals(self.wd.front.get_blob(b_blob, 8, 3).read(), "Zaa")
        self.assertEquals(self.wd.front.get_blob(b_blob, 0, 1).read(), "X")
        self.assertEquals(self.wd.front.get_blob(b_blob, 8, 1).read(), "Z")
        self.assertEquals(self.wd.front.get_blob(b_blob, 7, 2).read(), "aZ")
        self.assertEquals(self.wd.front.get_blob(b_blob, 0).read(), "XaaaYaaaZaaa")
        self.assertEquals(self.wd.front.get_blob(b_blob, 4).read(), "YaaaZaaa")
        self.assertEquals(self.wd.front.get_blob(b_blob, 12).read(), "")
        self.assertEquals(self.wd.front.get_blob(b_blob).read(), "XaaaYaaaZaaa")
        reader = self.wd.front.get_blob(b_blob, 0, 12)
        for c in "XaaaYaaaZaaa":
            self.assertEquals(c, reader.read(1))
        self.assertEquals(reader.read(1), "");
        reader = self.wd.front.get_blob(b_blob, 0, 12)
        for cc in "Xa", "aa", "Ya", "aa", "Za", "aa":
            self.assertEquals(cc, reader.read(2))
        self.assertEquals(reader.read(2), "");
        reader = self.wd.front.get_blob(b_blob, 0, 12)
        for ccc in "Xaa", "aYa", "aaZ", "aaa":
            self.assertEquals(ccc, reader.read(3))
        self.assertEquals(reader.read(3), "");
        reader = self.wd.front.get_blob(b_blob, 0, 12)
        for ccccc in "XaaaY", "aaaZa", "aa":
            self.assertEquals(ccccc, reader.read(5))
        self.assertEquals(reader.read(5), "");
        reader = self.wd.front.get_blob(b_blob, 4)
        for cc in "Ya", "aa", "Za", "aa":
            self.assertEquals(cc, reader.read(2))
        self.assertEquals(reader.read(2), "");
    def tearDown(self):
        # Every test must leave a verifiable repository with no orphans.
        verify_repo(self.wd.get_front())
        self.assertFalse(self.wd.get_front().repo.get_orphan_blobs())
        for d in self.remove_at_teardown:
            shutil.rmtree(d, ignore_errors = True)
class TestBlockLocationsDB(unittest.TestCase, WorkdirHelper):
    """Unit tests for the sqlite-backed BlocksDB: rolling checksums, block
    locations, corruption detection and value-range limits."""
    def setUp(self):
        self.remove_at_teardown = []
        self.workdir = self.createTmpName()
        os.mkdir(self.workdir)
        self.dbfile = os.path.join(self.workdir, "database.sqlite")
        self.db = BlocksDB(self.dbfile, 2**16)
    def testCompleteCorruption(self):
        """Opening a database file full of garbage raises SoftCorruptionError."""
        del self.db
        with open(self.dbfile, "w") as f:
            f.write("X" * 100000)
        self.assertRaises(SoftCorruptionError, BlocksDB, self.dbfile, 2**16)
    def testCrcCorruption(self):
        """Tampering with a row behind BlocksDB's back trips the row checksum."""
        self.db.begin()
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 0, "00000000000000000000000000000000")
        self.db.commit()
        con = sqlite3.connect(self.dbfile)
        con.execute("UPDATE blocks SET offset = 1")
        con.commit()
        self.assertRaises(SoftCorruptionError, self.db.get_block_locations, "00000000000000000000000000000000")
    def testRollingEmpty(self):
        """A fresh database has no rolling checksums."""
        self.assertEquals(self.db.get_all_rolling(), [])
    def testRollingSimple(self):
        """A committed rolling checksum can be read back."""
        self.db.begin()
        self.db.add_rolling(17)
        self.db.commit()
        self.assertEquals(self.db.get_all_rolling(), [17])
    def testRollingDuplicate(self):
        """Adding the same rolling checksum twice stores it once."""
        self.db.begin()
        self.db.add_rolling(17)
        self.db.add_rolling(17)
        self.db.commit()
        self.assertEquals(self.db.get_all_rolling(), [17])
    def testRollingRange(self):
        """Rolling checksums accept the full unsigned 64-bit range, nothing more."""
        self.db.begin()
        self.db.add_rolling(0)
        self.db.add_rolling(2**64 - 1)
        self.db.commit()
        self.assertEquals(set(self.db.get_all_rolling()), set([0, 2**64 - 1]))
        self.db.begin()
        self.assertRaises(OverflowError, self.db.add_rolling, -1)
        self.assertRaises(OverflowError, self.db.add_rolling, 2**64)
    def testHighBlock(self):
        """Block offsets above 2**32 are stored and retrieved intact."""
        self.db.begin()
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 2**32 + 1, "00000000000000000000000000000000")
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 2**64 - 1, "00000000000000000000000000000001")
        self.db.commit()
        self.assertEquals(list(self.db.get_block_locations("00000000000000000000000000000000")),
                          [("d41d8cd98f00b204e9800998ecf8427e", 2**32 + 1)])
        self.assertEquals(list(self.db.get_block_locations("00000000000000000000000000000001")),
                          [("d41d8cd98f00b204e9800998ecf8427e", 2**64 - 1)])
    def testOffsetLimits(self):
        """Offsets outside the unsigned 64-bit range are rejected."""
        self.db.begin()
        self.assertRaises(OverflowError, self.db.add_block,
                          "d41d8cd98f00b204e9800998ecf8427e", -1, "00000000000000000000000000000002")
        self.assertRaises(OverflowError, self.db.add_block,
                          "d41d8cd98f00b204e9800998ecf8427e", 2**64, "00000000000000000000000000000002")
    def testBlockSimple(self):
        """A committed block location can be read back."""
        # blob, offset, md5
        self.db.begin()
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 0, "00000000000000000000000000000000")
        self.db.commit()
        self.assertEquals(list(self.db.get_block_locations("00000000000000000000000000000000")),
                          [("d41d8cd98f00b204e9800998ecf8427e", 0)])
    def testBlockDuplicate(self):
        """Adding the same block location twice stores it once."""
        # blob, offset, md5
        self.db.begin()
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 0, "00000000000000000000000000000000")
        self.db.add_block("d41d8cd98f00b204e9800998ecf8427e", 0, "00000000000000000000000000000000")
        self.db.commit()
        self.assertEquals(list(self.db.get_block_locations("00000000000000000000000000000000")),
                          [("d41d8cd98f00b204e9800998ecf8427e", 0)])
    def tearDown(self):
        for d in self.remove_at_teardown:
            shutil.rmtree(d, ignore_errors = True)
# Allow running this suite directly: python test_deduplication.py
if __name__ == '__main__':
    unittest.main()
| 2.109375 | 2 |
Python/redis.set.py | MarsBighead/mustang | 4 | 12765726 | <filename>Python/redis.set.py
#!/usr/bin/python
# Python 2 walkthrough of redis-py set commands (requires a local redis
# server on the default port; flushes db 0 at the end).
import redis
client = redis.Redis(host='localhost', port=6379, db=0)
if client.ping():
    print "Connect to server successfully!"
else:
    print "Connect to server failed!"
# Build a set myset (sadd),
# redis supports adding multiple members at once; this demo adds one per call
client.sadd('myset','redis')
client.sadd('myset','hello')
client.sadd('myset','bar')
# Return all members of the set ``name``,smembers(self, name)
setVal = client.smembers('myset')
print "All members in the set:", setVal
# Get number of set members (cardinality), scard(self,name)
setNum = client.scard('myset')
print "Get numbers of element in the set:",setNum
# Return the difference of sets specified by ``keys``, sdiff(self, keys *args)
client.sadd('myset1','redis')
#client.sadd('myset1','hello')
client.sadd('myset2','bar')
client.sadd('myset1','hi')
setDiff = client.sdiff(['myset','myset1','myset2'])
print "The difference of sets specified by keys:",setDiff
# Store the difference of sets specified by ``keys`` into a new
# set named ``dest``. Returns the number of keys in the new set.
# sdiffstore(self, dest, keys, *args)
client.sdiffstore('setdiff',['myset','myset1'])
setDiff = client.smembers('setdiff')
print "The difference of sets store in the new set setDiff:", setDiff
# Return the intersection of sets specified by ``keys``, sinter(self, keys, *args)
setInter = client.sinter(['myset','myset1'])
print "The intersection of sets specified by keys:",setInter
# sinterstore(self, dest, keys, *args)
# Store the intersection of sets specified by ``keys`` into a new
# set named ``dest``. Returns the number of keys in the new set.
client.sinterstore('setinter',['myset','myset1'])
setInter = client.smembers('setinter')
print "The intererence of sets store in the new set setInter:", setInter
# Judge if element belongs to the set (sismember)
judge =['redis','foo']
for ele in judge:
    boonVal = client.sismember('myset',ele)
    if boonVal:
        print "Element",ele," belongs to set myset"
    else:
        print "Element",ele," doesn't belong to set myset"
# smove(self, src, dst, value)
# Move ``value`` from set ``src`` to set ``dst`` atomically
client.sadd('myset','world')
client.smove('myset','myset1','world')
setNew = client.smembers('myset1')
print "The distination set:", setNew
# Remove and return a random member of set ``name``, spop(self, name)
spop = client.spop('myset1')
print "Get return random element:",spop, ", remaining elements:",client.smembers('myset1')
# Return a random member of set ``name``, srandmember(self, name),
# redis >= 2.6 also accepts a count of members to return
srandom = client.srandmember('myset')
print "Return random element:",srandom
# Remove ``value`` from set ``name``, srem(self, name, value)
rem = client.srem('myset','hello')
print "Get srem boolean:",rem
# sunion(self, keys, *args)
# Return the union of sets specified by ``keys``
client.sadd('myset0','special')
setUnion = client.sunion(['myset','myset0','myset1'])
print "The union of sets specified by keys:",setUnion
# sunionstore(self, dest, keys, *args)
# Store the union of sets specified by ``keys`` into a new
# set named ``dest``. Returns the number of keys in the new set.
client.sunionstore('setunion',['myset','myset1'])
setUnion = client.smembers('setunion')
print "The union of sets store in the new set setUnion:", setUnion
# no SSCAN demo (incremental set iteration not covered here)
# Empty db so the demo is repeatable
client.flushdb()
| 3.34375 | 3 |
src/views/create_chat_view.py | antkrit/chat | 0 | 12765727 | <gh_stars>0
"""Create chat view used to render index template with list of chats.
This module defines the following views:
- `create_chat_post`, POST handler for create_chat
- `create_chat_get`, GET handler for create_chat
"""
import aiohttp_jinja2
from aiohttp import web
from src.services import ChatService
from src.utils.flash import flash_get, flash_set
@aiohttp_jinja2.template('create_chat.html')
async def create_chat_post(request: web.Request):
    """Handle POST `/create` (endpoint `create_chat_post`).

    Creates a chat from the submitted form when the title is not already
    taken, stores a flash message describing the outcome, and redirects:
    to the index page on success, back to the creation form otherwise.

    :param request: `web.Request`
    :return: `web.HTTPSeeOther` redirect
    """
    async with request.app['db'].acquire() as conn:
        form = await request.post()
        title_taken = await ChatService.is_used(conn, title=form['name'])
        if title_taken:
            # Duplicate title: report and send the user back to the form.
            flash_set(request, 'flash', 'A chat with this title already exists.')
            target = request.app.router['create_chat'].url_for()
            return web.HTTPSeeOther(str(target))
        await ChatService.save_to_db(conn, form['name'], form['description'])
        flash_set(request, 'flash', 'Successfully created.')
        return web.HTTPSeeOther(str(request.app.router['index'].url_for()))
@aiohttp_jinja2.template('create_chat.html')
async def create_chat_get(request: web.Request):
    """Render `create_chat.html` for GET `/create` (endpoint `create_chat`).

    Pops the one-shot 'flash' message (set by `create_chat_post`) and
    exposes it to the template.

    :param request: `web.Request`
    :return: template context dict; contains 'flash' when a message is set
    """
    flash = flash_get(request, 'flash')
    # Always return a dict: aiohttp_jinja2's @template decorator expects a
    # template context, and the previous implicit `None` on the no-flash
    # path relied on the decorator tolerating a missing context.
    if flash:
        return {'flash': flash}
    return {}
| 2.4375 | 2 |
Beta/Return Even Whatever Youve Been Given.py | mwk0408/codewars_solutions | 6 | 12765728 | <gh_stars>1-10
def always_even(n):
    """Return `n` rounded down to the nearest even number."""
    return n // 2 * 2
stanCode_projects/hangman game/complement.py | KevinCheng713/stanCode_project | 0 | 12765729 | """
File: complement.py
Name:鄭凱元
----------------------------
This program uses string manipulation to
tackle a real world problem - finding the
complement strand of a DNA sequence.
THe program asks uses for a DNA sequence as
a python string that is case-insensitive.
Your job is to output the complement of it.
"""
def main():
    """
    Ask the user for a DNA strand, normalize it to uppercase, and print
    its complement as computed by build_complement().
    """
    dna = input('Please give me a DNA strand and I\'ll find the complement: ')
    # Converts characters to uppercase.
    # Bug fix: .upper is a method and must be *called* -- without the
    # parentheses the bound method object itself was assigned to `dna`.
    dna = dna.upper()
    ans = build_complement(dna)
    print('The complement of ' + str(dna) + ' is ' + str(ans))
def build_complement(dna):
    """Return the complementary strand of the DNA sequence `dna`.

    Each base is mapped to its Watson-Crick partner (A<->T, C<->G) using a
    lookup table and a single join (linear time, unlike repeated string
    concatenation). `dna` is expected to be uppercase; any character that
    is not A, T, C or G is silently dropped, matching the original
    if/elif behaviour.

    :param dna: str, an uppercase DNA sequence
    :return: str, the complementary sequence
    """
    complements = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complements[base] for base in dna if base in complements)
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == '__main__':  # standard script entry point
    main()
| 4.46875 | 4 |
bibliopixel/layout/matrix.py | rec/leds | 253 | 12765730 | import math, threading, time
from .. import colors
from .. util import deprecated, log
from . import matrix_drawing as md
from . import font
from . layout import MultiLayout
from . geometry import make_matrix_coord_map_multi
from . geometry.matrix import (
make_matrix_coord_map, make_matrix_coord_map_positions)
# Warning template used by Matrix.__init__ when a rotation that is not a
# multiple of 90 degrees is rounded (args: requested rotation, rounded value).
ROTATION_WARNING = """
Matrix.rotation must be a multiple of 90 degrees but was in fact %s degrees.
It was rounded to %s degrees."""
class Matrix(MultiLayout):
    # Attributes copied when a Matrix layout is cloned, extending the base
    # MultiLayout set with the matrix-specific construction parameters.
    CLONE_ATTRS = MultiLayout.CLONE_ATTRS + (
        'width', 'height', 'rotation', 'vert_flip', 'y_flip', 'serpentine',
        'pixelSize')
def __init__(self, drivers, width=0, height=0,
rotation=0, vert_flip=False, y_flip=False,
serpentine=True,
threadedUpdate=False, brightness=255,
pixelSize=(1, 1), **kwargs):
"""Main class for matricies.
driver -- instance that inherits from DriverBase
width -- X axis size of matrix
height -- Y axis size of matrix
coord_map -- a 2D matrix defining the X,Y to strip index mapping.
Not needed in most cases
rotation -- how to rotate when generating the map.
Not used if coord_map specified
vert_flip - flips the generated map along the Y axis.
This along with rotation can achieve any orientation
"""
self.gen_multi = make_matrix_coord_map_multi
super().__init__(drivers, threadedUpdate, brightness, **kwargs)
rot_mod = rotation % 360
self.rotation = 90 * round(rot_mod / 90)
if self.rotation != rot_mod:
log.warning(ROTATION_WARNING, rotation, self.rotation)
self.width = width or getattr(self.drivers[0], 'width') or 0
self.height = height or getattr(self.drivers[0], 'height') or 0
self.vert_flip = vert_flip
self.y_flip = y_flip
self.serpentine = serpentine
self.pixelSize = pixelSize
pw, ph = self.pixelSize
# If both are 0, try to assume it's a square display.
if not (self.width or self.height):
square = int(math.sqrt(self.numLEDs))
if (square * square) == self.numLEDs:
self.width = self.height = square
else:
raise TypeError('No width or height passed but '
'the number of LEDs is not a perfect square')
if self.width * self.height > self.numLEDs:
raise ValueError(
'width * height cannot exceed total pixel count! %s * %s > %s'
% (self.width, self.height, self.numLEDs))
if not self.coord_map:
if len(self.drivers) == 1:
# TODO: this should really go into documentation
log.debug(
'Auto generating coordinate map. Use make_matrix_coord_map '
'directly if more control needed.')
# was switched to y_flip, but need to keep vert_flip available
y_flip = y_flip or vert_flip
self.coord_map = make_matrix_coord_map(
self.width, self.height,
serpentine=serpentine,
rotation=rotation,
y_flip=vert_flip)
elif self.drivers:
raise TypeError(
'Must provide coord_map if using multiple drivers!')
self.set_pixel_positions(
make_matrix_coord_map_positions(self.coord_map))
# If rotation is 90 or 270 degrees, dimensions need to be swapped so
# they match the matrix rotation.
if rotation in (90, 270):
w = self.width
h = self.height
self.width = h
self.height = w
self.texture = None
self.set = self._setColor
if pw < 0 or pw > self.width or ph < 0 or ph > self.height:
raise ValueError(
'pixelSize must be greater than 0 '
'and not larger than total matrix')
if self.width % pw != 0 or self.height % ph != 0:
raise ValueError(
'pixelSize must evenly divide into matrix dimensions!')
if pw == 1 and ph == 1:
self._set = self.__setNormal
else:
self._set = self.__setScaled
self.width = self.width / pw
self.height = self.height / ph
self.numLEDs = self.width * self.height
self.fonts = font.fonts
@property
def shape(self):
"""Returns ``width, height``"""
return self.width, self.height
def get(self, x, y):
"""
Return the pixel color at position (x, y), or Colors.black if that
position is out-of-bounds.
"""
try:
pixel = self.coord_map[y][x]
return self._get_base(pixel)
except IndexError:
return colors.COLORS.Black
    def set(self, x, y, color):
        """Set the pixel color at position x, y."""
        # The real implementation is monkey-patched in at construction /
        # setTexture() time: self.set is rebound to self._setColor or
        # self._setTexture (see __init__ and setTexture), which in turn
        # dispatch through self._set (__setNormal or __setScaled).
        # This stub only exists so the method shows up in the class API;
        # reaching it means the patching never happened.
        raise NotImplementedError
    def get_pixel_positions(self):
        """Build the pixel position list for this matrix from its coordinate map."""
        return make_matrix_coord_map_positions(self.coord_map)
def loadFont(self, name, height, width, data):
self.fonts[name] = {
'data': data,
'height': height,
'width': width
}
def setTexture(self, tex=None):
if tex is None:
self.texture = tex
self.set = self._setColor
return
if not isinstance(tex, list):
raise ValueError('Texture must be a list!')
if len(tex) != self.height:
raise ValueError(
'Given texture is must be {} high!'.format(self.height))
for r in tex:
if not isinstance(r, list):
raise ValueError('Texture rows must be lists!')
if len(r) != self.width:
raise ValueError(
'Texture rows must be {} wide!'.format(self.width))
self.texture = tex
self.set = self._setTexture
def __setNormal(self, x, y, color):
try:
pixel = self.coord_map[y][x]
self._set_base(pixel, color)
except IndexError:
pass
def __setScaled(self, x, y, color):
sx = x * self.pixelSize[0]
sy = y * self.pixelSize[1]
for xs in range(sx, sx + self.pixelSize[0]):
for ys in range(sy, sy + self.pixelSize[1]):
self.__setNormal(xs, ys, color)
# Set single pixel to Color value
def _setColor(self, x, y, color=None):
try:
self._set(x, y, color or (0, 0, 0))
except IndexError:
pass
def _setTexture(self, x, y, color=None):
if x >= 0 and y >= 0:
try:
self._set(x, y, color or self.texture[y][x])
except IndexError:
pass
def setHSV(self, x, y, hsv):
color = colors.hsv2rgb(hsv)
self._set(x, y, color)
def setRGB(self, x, y, r, g, b):
color = (r, g, b)
self._set(x, y, color)
##########################################################################
# Drawing Functions
# Lovingly borrowed from Adafruit
# https://github.com/adafruit/Adafruit-GFX-Library/blob/master/Adafruit_GFX.cpp
##########################################################################
    def drawCircle(self, x0, y0, r, color=None):
        """
        Draw the outline of a circle in an RGB color, with center (x0, y0)
        and radius r.
        """
        md.draw_circle(self.set, x0, y0, r, color)
    def fillCircle(self, x0, y0, r, color=None):
        """
        Draw a filled circle in an RGB color, with center (x0, y0) and
        radius r.
        """
        md.fill_circle(self.set, x0, y0, r, color)
    def drawLine(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):
        """
        Draw a line between (x0, y0) and (x1, y1) in an RGB color.

        :param colorFunc: a function that takes an integer from x0 to x1 and
            returns a color corresponding to that point
        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
            (the original docstring had these two swapped; "aa" stands for
            anti-aliasing, which is Wu's method)
        """
        md.draw_line(self.set, x0, y0, x1, y1, color, colorFunc, aa)
    # Bresenham's algorithm
    def bresenham_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
        """
        Draw a line from point (x0, y0) to (x1, y1) using Bresenham's
        algorithm.  Will draw beyond matrix bounds.

        :param colorFunc: a function that takes an integer from x0 to x1 and
            returns a color corresponding to that point
        """
        md.bresenham_line(self.set, x0, y0, x1, y1, color, colorFunc)
    # Xiaolin Wu's Line Algorithm
    def wu_line(self, x0, y0, x1, y1, color=None, colorFunc=None):
        """
        Draw an anti-aliased line between (x0, y0) and (x1, y1) in an RGB
        color using Xiaolin Wu's algorithm.

        :param colorFunc: a function that takes an integer from x0 to x1 and
            returns a color corresponding to that point
        """
        # (The original docstring documented a non-existent ``aa`` parameter;
        # this method always uses Wu's algorithm.)
        md.wu_line(self.set, x0, y0, x1, y1, color, colorFunc)
    def drawRect(self, x, y, w, h, color=None, aa=False):
        """
        Draw the outline of a rectangle with top-left corner at (x, y),
        width w and height h.

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm for the
            edges; otherwise use Bresenham's algorithm
        """
        md.draw_rect(self.set, x, y, w, h, color, aa)
    def fillRect(self, x, y, w, h, color=None, aa=False):
        """
        Draw a solid rectangle with top-left corner at (x, y), width w and
        height h.

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm for the
            edges; otherwise use Bresenham's algorithm
        """
        md.fill_rect(self.set, x, y, w, h, color, aa)
    def fillScreen(self, color=None):
        """Fill the entire matrix with the given RGB color."""
        md.fill_rect(self.set, 0, 0, self.width, self.height, color)
    def drawRoundRect(self, x, y, w, h, r, color=None, aa=False):
        """
        Draw the outline of a rounded rectangle with top-left corner at
        (x, y), width w, height h, and corner radius r.

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        md.draw_round_rect(self.set, x, y, w, h, r, color, aa)
    def fillRoundRect(self, x, y, w, h, r, color=None, aa=False):
        """
        Draw a *filled* rounded rectangle with top-left corner at (x, y),
        width w, height h, and corner radius r.
        (The original docstring was copied from drawRoundRect and omitted
        that this variant fills the shape.)

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        md.fill_round_rect(self.set, x, y, w, h, r, color, aa)
    def drawTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
        """
        Draw the outline of a triangle with vertices (x0, y0), (x1, y1)
        and (x2, y2).

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        md.draw_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
    def fillTriangle(self, x0, y0, x1, y1, x2, y2, color=None, aa=False):
        """
        Draw a filled triangle with vertices (x0, y0), (x1, y1) and (x2, y2).

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        md.fill_triangle(self.set, x0, y0, x1, y1, x2, y2, color, aa)
    if deprecated.allowed():  # pragma: no cover
        # Backwards-compatible alias; the historical typo ("Trangle") is kept
        # on purpose so old callers keep working.
        fillTrangle = fillTriangle
    def drawChar(self, x, y, c, color, bg,
                 aa=False, font=font.default_font, font_scale=1):
        """
        Draw a single character ``c`` at (x, y) in an RGB color over
        background color ``bg``.

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        md.draw_char(self.fonts, self.set, self.width, self.height,
                     x, y, c, color, bg, aa, font, font_scale)
    def drawText(self, text, x=0, y=0, color=None,
                 bg=colors.COLORS.Off, aa=False, font=font.default_font,
                 font_scale=1):
        """
        Draw a line of text starting at (x, y) in an RGB color over
        background color ``bg``.

        :param aa: if True, use Xiaolin Wu's anti-aliased algorithm;
            otherwise use Bresenham's algorithm
        """
        # (The original docstring documented a ``colorFunc`` parameter that
        # this method does not accept.)
        md.draw_text(self.fonts, self.set, text, self.width, self.height,
                     x, y, color, bg, aa, font, font_scale)
if deprecated.allowed():  # pragma: no cover
    # Deprecated module-level name for Matrix, kept so old imports still work.
    LEDMatrix = Matrix
| 2.578125 | 3 |
setup.py | Pierre-Thibault/neo-observer | 0 | 12765731 | <gh_stars>0
#!/usr/bin/env python
from setuptools import setup

# Packaging metadata for neo-observer: a single-module distribution
# (py_modules, no packages) tested via test_neo_observer.
setup(name='neo-observer',
      py_modules=['neo_observer'],
      packages=[],
      install_requires=[],
      version='1.0.1',
      description='Python module implementing the observer pattern using a centralized registry',
      keywords=['messaging'],
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/Pierre-Thibault/neo-observer',
      test_suite='test_neo_observer',
      license='MIT',
      classifiers=['License :: OSI Approved :: MIT License',
                   'Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python :: 2',
                   ],
      )
| 0.964844 | 1 |
Suggestions.py | harishvc/githubanalytics | 32 | 12765732 | <reponame>harishvc/githubanalytics<gh_stars>10-100
#https://github.com/seatgeek/fuzzywuzzy
#https://pypi.python.org/pypi/fuzzywuzzy/0.4.0
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
# Canonical query phrases; compare() fuzzy-matches user input against these.
choices = ["trending now",
           "top organizations",
           "top contributors",
           "top repositories",
           "top new repositories"]
def compare(input):
    """
    Fuzzy-match *input* against the known query phrases.

    Returns an HTML snippet: a "Did you mean" list with up to three links
    for strong matches (score >= 75), otherwise a single best-guess
    "Suggestion" link for the top match.
    """
    # process.extract returns (choice, score) pairs sorted best-first.
    r = process.extract(input, choices, limit=5)
    if r[0][1] >= 75:
        # Strong matches: link up to the top three scoring >= 75.
        suggestionList = "<p class=\"text-info\">Did you mean:</p><ul>"
        cnt = 1
        for row in r:
            if row[1] >= 75 and cnt <= 3:
                cnt = cnt + 1
                suggestionList += "<li><a href=\"/?q=" + str(row[0]) + "&action=Search\">" + str(row[0]) + "</a></li>"
            else:
                break
        suggestionList += "</ul>"
    else:
        # Fuzzy scores are always in [0, 100], so the original
        # `elif r[0][1] >= 0` was a dead condition; plain else is equivalent.
        suggestionList = "<p class=\"text-info\">Suggestion:</p><a href=\"/?q=" + str(r[0][0]) + "&action=Search\">" + str(r[0][0]) + "</a>"
    return suggestionList
| 2.90625 | 3 |
cogs/weather.py | SQCS-TW/SQCS_bot | 0 | 12765733 | import discord
from discord.ext import commands
from core.classes import Cog_Extension
import requests
import os
# Index -> label for each forecast element in the CWB F-C0032-001 feed
# (labels are Chinese: weather description, max/min temperature,
# comfort description, precipitation probability).
data_prefix = {
    "0": "天氣描述",
    "1": "最高溫度",
    "2": "最低溫度",
    "3": "體感描述",
    "4": "降水機率"
}
# Unit suffix appended after each value (degrees / percent / none).
data_suffix = {
    "0": "",
    "1": "度",
    "2": "度",
    "3": "",
    "4": "%"
}
# Headings for the three 12-hour forecast windows in the feed.
time_range_title = {
    "0": "時段一",
    "1": "時段二",
    "2": "時段三"
}
class WeatherQuery(Cog_Extension):
    """Discord cog exposing Taiwan CWB 36-hour forecast lookups (`wea query`)."""
    @commands.group()
    async def wea(self, ctx):
        # Command group container; the subcommand below does the real work.
        pass
    @wea.command()
    async def query(self, ctx, target_county: str = ''):
        """Send the 36-hour forecast for ``target_county`` to the channel."""
        # Fetch the official CWB open-data feed; API key comes from the
        # PhantomTWWeatherApiKey environment variable.
        response = requests.get(f'https://opendata.cwb.gov.tw/fileapi/v1/opendataapi/F-C0032-001?Authorization={str(os.environ.get("PhantomTWWeatherApiKey"))}&format=json')
        location_weather_data = response.json()["cwbopendata"]["dataset"]["location"]
        county_weather_info = str()
        if target_county == '':
            # No county given: fall back to the author's second role name
            # (assumed to be a county role -- TODO confirm server role layout).
            target_county = ctx.author.roles[1].name
        for item in location_weather_data:
            if item["locationName"].find(target_county) != -1:
                loc_json = item["weatherElement"]
                county_weather_info += item["locationName"] + '\n'
                # Three forecast windows, five elements each (see data_prefix).
                for time_range in range(3):
                    county_weather_info += f'{time_range_title[str(time_range)]}::\n'
                    for (index, info) in enumerate(loc_json):
                        county_weather_info += f'{data_prefix[str(index)]}: {info["time"][time_range]["parameter"]["parameterName"]} {data_suffix[str(index)]}\n'
                # One message per matching location; reset the buffer after.
                await ctx.send(county_weather_info)
                county_weather_info = ''
def setup(bot):
    """Register the cog on the bot (discord.py extension entry point)."""
    bot.add_cog(WeatherQuery(bot))
| 2.828125 | 3 |
electionleaflets/apps/parties/migrations/0002_auto__add_field_party_show_on_add_page.py | electionleaflets/electionleaflets | 0 | 12765734 | <gh_stars>0
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the boolean ``Party.show_on_add_page`` field."""
    def forwards(self, orm):
        # Adding field 'Party.show_on_add_page' (defaults to False).
        db.add_column(u'party', 'show_on_add_page', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Party.show_on_add_page'
        db.delete_column(u'party', 'show_on_add_page')
    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        'core.country': {
            'Meta': {'object_name': 'Country', 'db_table': "u'country'"},
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
            'iso': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
            'iso3': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '240'})
        },
        'parties.party': {
            'Meta': {'object_name': 'Party', 'db_table': "u'party'"},
            'colour': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
            'count': ('django.db.models.fields.IntegerField', [], {}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Country']"}),
            'force_top': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo_file': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'major': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '765'}),
            'popular': ('django.db.models.fields.IntegerField', [], {}),
            'show_on_add_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'twitter_account': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'url_id': ('django.db.models.fields.CharField', [], {'max_length': '765', 'blank': 'True'})
        }
    }
    complete_apps = ['parties']
| 2.15625 | 2 |
2020/13/13a.py | befeleme/aoc | 3 | 12765735 | import fileinput
import fileinput


def find_best_bus(departure, buses):
    """
    Return ``(bus_id, wait)`` for the bus with the shortest wait time.

    :param departure: earliest minute you can depart (int).
    :param buses: schedule entries as strings; ``"x"`` entries are ignored.
    :returns: (bus id, minutes to wait) or (None, None) if no bus runs.
    """
    best_bus = None
    best_wait = None
    for entry in buses:
        if entry == "x":
            continue
        bus = int(entry)
        # Minutes until the next departure of this bus.  Using modular
        # arithmetic also fixes the original off-by-one: when `departure`
        # is an exact multiple of `bus`, the wait is 0, not `bus`.
        wait = (-departure) % bus
        if best_wait is None or wait < best_wait:
            best_wait = wait
            best_bus = bus
    return best_bus, best_wait


if __name__ == "__main__":
    # Guarded so importing this module no longer reads stdin/files.
    contents = [x.strip() for x in fileinput.input()]
    departure = int(contents[0])
    buses = contents[1].split(",")
    bus_no, closest = find_best_bus(departure, buses)
    print(bus_no * closest)
| 3.40625 | 3 |
DBOperation/DBSQLite.py | jcg/d-tailor | 14 | 12765736 | <reponame>jcg/d-tailor<filename>DBOperation/DBSQLite.py
'''
Created on Nov 1, 2012
@author: jcg
'''
from DBOperation.DBAbstract import DBAbstract
from sqlite3 import connect,Row
from time import strftime
from math import sqrt
from random import choice
from uuid import uuid4
from subprocess import check_output
from socket import gethostbyname,gethostname
from Functions import pick_random
class DBSQLite(DBAbstract):
    '''
    SQLite-backed implementation of DBAbstract.

    Persists the "desired" feature-level combinations to search for
    (table ``desired_solution``) and the solutions actually generated
    (table ``generated_solution``), and tracks which worker produced what
    (table ``worker``).  Writes are buffered in memory and flushed in
    batches by flushSQL().

    NOTE: this module uses Python 2 idioms (``dict.has_key``).
    '''
    def __init__(self, dbfile, designMethod, initialize=True, seedSequence=None):
        '''
        dbfile        - DB path prefix; ".sqlite" is appended below.
        designMethod  - Design object with feature names/types and targets.
        initialize    - if True, (re)create the schema via DBInit().
        seedSequence  - optional seed sequence kept for reference.
        '''
        self.dbfile = dbfile
        self.designMethod = designMethod
        self.seedSequence = seedSequence
        self.seedId = str(uuid4().int)
        #SQL queries buffers
        self.des_solutions = {}       # in-memory mirror of desired_solution rows
        self.des_solutions_sql = []   # pending UPDATEs for desired_solution
        self.gen_solutions_sql = []   # pending INSERTs for generated_solution
        self.gen_solutions_id = {}    # solution ids already inserted (dedup set)
        #Connect database
        self.con = connect(self.dbfile + ".sqlite")
        self.con.row_factory = Row          # rows addressable by column name
        self.con.isolation_level = None     # autocommit mode
        self.cur = self.con.cursor()
        #Initiate DB
        if initialize:
            self.DBInit()
        #Register worker
        self.worker_id = str(uuid4().int)
        self.registerWorker()
    def DBInit(self):
        '''
        (Re)create the database schema and populate desired_solution.

        Drops every existing table, then builds the three tables with one
        column per design feature, and inserts one 'WAITING' row per
        desired feature-level combination.
        returns: Nothing
        '''
        pass
        #Design Dynamic tables: one "<feature>Level" column per feature.
        features_fields = ''.join([feature+"Level TEXT, " for feature in self.designMethod.featuresList])
        table_ds = "create table desired_solution(des_solution_id TEXT PRIMARY KEY,"+features_fields+\
                   "status TEXT,worker_id TEXT,start_time TEXT,FOREIGN KEY(worker_id) REFERENCES worker(worker_id));"
        # generated_solution gets raw score, level and relative-position
        # columns for every feature.
        features_values_fields = ''.join([feature+" "+self.designMethod.features[feature]['type']+", " for feature in self.designMethod.featuresList])
        features_level_position_fields = ''.join([feature+"Position "+self.designMethod.features[feature]['type']+", " for feature in self.designMethod.featuresList])
        table_gs = "create table generated_solution(generated_solution_id TEXT PRIMARY KEY, des_solution_id TEXT, sequence TEXT,"+features_values_fields+features_fields+features_level_position_fields+\
                   "worker_id TEXT, FOREIGN KEY(worker_id) REFERENCES worker(worker_id));"
        #Create Tables (the writable_schema trick wipes any previous schema)
        self.cur.executescript("""
            PRAGMA writable_schema = 1;
            delete from sqlite_master where type = 'table';
            PRAGMA writable_schema = 0;
            VACUUM;
            PRAGMA INTEGRITY_CHECK;
            create table worker(worker_id TEXT PRIMARY KEY, hostname TEXT, ip TEXT, time_start INTEGER, time_finish INTEGER);""" +\
            table_ds + "\n" + \
            table_gs + "\n");
        #Populate Tables
        #Desired Solutions DB: ids look like "1.2.3" -> one level per feature.
        n_features = self.designMethod.n_features
        all_comb = [tuple(d_sol.split('.')) + (d_sol,) for d_sol in self.designMethod.listDesigns]
        features_levels_fields = ''.join([feature+"Level, " for feature in self.designMethod.featuresList])
        sql = "insert into desired_solution("+features_levels_fields+"des_solution_id,status, worker_id, start_time) \
               values("+"?,"*(n_features+1)+"'WAITING',NULL,NULL);"
        self.cur.executemany(sql,all_comb)
    def registerWorker(self):
        '''
        Record this worker in the worker table and warm the in-memory
        caches (des_solutions mirror and known generated-solution ids).
        returns: 0
        '''
        start_time = strftime("%Y-%m-%d %H:%M:%S %Z")
        hostname = check_output("hostname").rstrip()
        ip = gethostbyname(gethostname()).rstrip()
        self.cur.execute("insert into worker(worker_id, hostname, ip, time_start, time_finish) values (?,?,?,?,NULL);",
                         (self.worker_id, hostname, ip, start_time) )
        # Mirror all desired solutions into memory for fast status lookups.
        self.cur.execute("select * from desired_solution")
        for row in self.cur.fetchall():
            key = str(row['des_solution_id'])
            self.des_solutions[key] = {'status': str(row['status']), 'des_solution_id': str(row['des_solution_id'])}
        # Remember which generated solutions already exist (dedup on insert).
        self.cur.execute("select generated_solution_id from generated_solution")
        for row in self.cur.fetchall():
            self.gen_solutions_id[str(row['generated_solution_id'])] = '1'
        return 0
    def DBGetSolution(self, solution_id):
        '''
        Get solution given solution_id
        returns: a dictionary with a solution with all attributes
        '''
        pass
        self.cur.execute("select * from generated_solution where generated_solution_id=?",(solution_id,))
        return dict(self.cur.fetchone())
    def DBGetDesiredSolution(self):
        '''
        Get a desired solution that wasn't found yet.

        Flushes pending writes first.  Picks a random 'WAITING' row and
        marks it 'RUNNING' under this worker; if none are waiting, returns
        a random currently-'RUNNING' row so workers can help each other.
        returns: a dictionary with a desired solution or None
        '''
        pass
        #Insert buffered solutions into DB
        self.flushSQL()
        if self.designMethod.listDesigns == []:
            return None
        # get a desired solution from db
        self.cur.execute("select * from desired_solution where status='WAITING' order by random() LIMIT 1")
        des_solution = self.cur.fetchone()
        if des_solution != None:
            des_solution = dict(des_solution)
            #des_sol = des_solutions[0]
            # set worker as working on desired solution
            start_time = strftime("%Y-%m-%d %H:%M:%S %Z")
            self.cur.execute("update desired_solution set worker_id=?, status=?, start_time=? where des_solution_id = ?;", (self.worker_id, 'RUNNING', start_time, des_solution['des_solution_id'] ) )
            return des_solution
        else: #no more solutions waiting, so help doing the ones that are currently running
            self.cur.execute("select * from desired_solution where status='RUNNING' LIMIT 20")
            des_solutions = (self.cur.fetchall())
            if des_solutions != []:
                return dict(choice(des_solutions))
            else:
                return None
    def DBChangeStatusDesiredSolution(self, desired_solution_id, status='WAITING'):
        # Reset/override a desired solution's status (clears worker + time).
        self.cur.execute("update desired_solution set worker_id=NULL, status=?, start_time=NULL where des_solution_id = ?;", (status, desired_solution_id ) )
    def DBGetClosestSolution(self,desiredSolution):
        '''
        Get a generated solution "close" to the desired one.

        Samples up to 5000 random rows and picks one with probability
        proportional to the inverse of its distance to the target
        (see distanceBetweenSolutions).
        returns: a dictionary with a solution with all attributes
        '''
        if self.designMethod.listDesigns == [] or desiredSolution == None:
            # if there is no desiredSolution return a random solution
            query = "SELECT generated_solution_id, sequence FROM generated_solution ORDER BY RANDOM() limit 1;"
            self.cur.execute(query)
            return self.cur.fetchone()
        #get closer solutions and distances from a random sample of rows
        query = "SELECT * FROM generated_solution AS r1 JOIN (SELECT (ABS(RANDOM() % (select count(*) from generated_solution))) as selid FROM generated_solution limit 5000) as r2 ON r1.rowid == r2.selid"
        self.cur.execute(query,desiredSolution)
        all_solutions = (self.cur.fetchall())
        #print all_solutions
        # Fitness-proportional choice: closer solutions get higher weight.
        distanceArray = [self.distanceBetweenSolutions(sol_i, desiredSolution) for sol_i in all_solutions]
        total_fit = sum([1/(dist+0.0001) for dist in distanceArray])
        p_array = [((1/dist)/total_fit) for dist in distanceArray]
        if all_solutions == []:
            return None
        else:
            rnd_sol_indx = pick_random(p_array)
            return dict(all_solutions[rnd_sol_indx])
        pass
    def DBCheckDesign(self, desired_solution_id):
        '''
        Get the status of a solution to design
        returns: a boolean with the result of status == 'DONE'
        '''
        pass
        self.cur.execute("select * from desired_solution where des_solution_id=?",(desired_solution_id,))
        return self.cur.fetchone()['status'] == "DONE"
    def DBInsertSolution(self,solution,desired_solution_id=""):
        '''
        Buffer a generated solution for insertion (written on flushSQL).

        Also marks the matching desired solution as 'DONE' when the
        generated solution hits one of the targeted level combinations.
        returns: Nothing (0 when skipped as duplicate/invalid)
        '''
        pass
        # Skip duplicates and invalid solutions.
        if self.gen_solutions_id.has_key(solution.solid) or solution.valid == False:
            return 0
        else:
            self.gen_solutions_id[solution.solid] = '1'
        # Key is the dotted level combination, e.g. "1.2.3".
        key = '.'.join([str(solution.levels[feature+'Level']) for feature in self.designMethod.featuresList])
        if not self.designMethod.listDesigns==[]: #RandomSampling mode does not have desired targets
            if desired_solution_id == "": #Worker found solution for something it WASN'T looking for
                if self.des_solutions.has_key(key):
                    desired_solution_id = str(self.des_solutions[key]['des_solution_id'])
                    if self.des_solutions[key]['status'] != 'DONE':
                        self.des_solutions[key]['status'] = 'DONE'
                        self.des_solutions_sql.append({'worker_id' : self.worker_id, 'status': 'DONE', 'des_solution_id' : desired_solution_id})
            else:
                self.des_solutions[key]['status'] = 'DONE'
                self.des_solutions_sql.append({'worker_id' : self.worker_id, 'status': 'DONE', 'des_solution_id' : desired_solution_id})
        else:
            desired_solution_id = key
        #update generated solution table (buffered; flushed later)
        dict_with_values = {'generated_solution_id' : solution.solid,
                            'des_solution_id': desired_solution_id,
                            'sequence' :solution.sequence ,
                            'worker_id': self.worker_id}
        dict_with_values.update(solution.scores)
        dict_with_values.update(solution.levels)
        dict_with_values.update({ (feature+'Position'): self.calculateRelativeLevel(feature,solution.levels[feature+'Level'],solution.scores[feature]) for feature in self.designMethod.features })
        self.gen_solutions_sql.append(dict_with_values)
    def DBCloseConnection(self):
        '''
        Closes connection to DB (flushing buffers and stamping the worker
        finish time first).
        returns: Nothing
        '''
        #Insert buffered solutions into DB
        self.flushSQL()
        finish_time = strftime("%Y-%m-%d %H:%M:%S %Z")
        self.cur.execute("update worker set time_finish = ? where worker_id = ?;", (finish_time, self.worker_id) )
        self.cur.close()
    #############
    # Auxiliary functions
    def flushSQL(self):
        # Write both in-memory buffers to the DB in batches and empty them.
        # desired solutions
        sql = "update desired_solution set worker_id=:worker_id, status=:status where des_solution_id=:des_solution_id"
        self.cur.executemany(sql , self.des_solutions_sql)
        self.des_solutions_sql[:] = [] #empty list
        # generated solutions
        features_fields = ','.join([feature+", "+feature+"Level, "+feature+"Position" for feature in self.designMethod.features])
        features_values_fields = ','.join([":"+feature+", :"+feature+"Level, :"+feature+"Position" for feature in self.designMethod.features])
        sql = "insert into generated_solution(generated_solution_id, des_solution_id, sequence, "+features_fields+",worker_id) \
               values(:generated_solution_id, :des_solution_id, :sequence, "+features_values_fields+", :worker_id);"
        self.cur.executemany(sql , self.gen_solutions_sql)
        self.gen_solutions_sql[:] = [] #empty list
    def distanceBetweenSolutions(self,sol1,levels_sol2):
        # Euclidean distance in "level space" between a generated solution
        # row (sol1) and a desired level combination (levels_sol2).
        # Unknown levels ('?') count as distance 1 on that axis; otherwise
        # the integer level gap is refined by the within-level relative
        # position of sol1's score.
        euc_dist = 0
        for feature in self.designMethod.features:
            if levels_sol2[feature+'Level']=='?' or sol1[feature+'Level']=='?':
                #d = int(max(self.designMethod.thresholds[feature].keys()))
                d=1
            elif int(levels_sol2[feature+'Level'])==int(sol1[feature+'Level']):
                d=0
            else:
                d=(int(levels_sol2[feature+'Level'])-int(sol1[feature+'Level']))
                rel_level = self.calculateRelativeLevel(feature,sol1[feature+'Level'],sol1[feature])
                # Flip the fractional part depending on which side of the
                # target level we are, so "almost next level" counts less.
                if d > 0:
                    rel_level = 1-rel_level
                d = abs(d)+rel_level
            euc_dist += d**2
        euc_dist = sqrt(euc_dist)
        return euc_dist
    def calculateRelativeLevel(self,feature="",level=1,featureScore=0):
        # Position of featureScore inside its level's threshold interval,
        # as a fraction in [0, 1]; 0 for unknown levels or open-ended
        # (infinite) threshold bounds.
        if level == '?':
            return 0
        thresholds = self.designMethod.thresholds[feature][level]
        if isinstance(thresholds,tuple):
            t_max = thresholds[1]
            t_min = thresholds[0]
            #TODO how to see how far a solution is when limits are infinity?
            if t_max==None:
                return 0
            elif t_min==None:
                return 0
            return float(abs(featureScore-t_min)/abs(t_max-t_min))
        return 0
python/geo_calculator/src/planets.py | JEsperancinhaOrg/geo-calculator | 0 | 12765737 | <reponame>JEsperancinhaOrg/geo-calculator
# Mean radii of celestial bodies.  The magnitudes match commonly published
# mean radii in metres (e.g. Earth ~6371 km) -- confirm units against callers.
EARTH = 6371000
MOON = 1737100
MARS = 3389500
JUPITER = 69911000
| 1.539063 | 2 |
texts/extraction/frame_based/process.py | nicolay-r/frame-based-attitude-extraction-workflow | 0 | 12765738 | <filename>texts/extraction/frame_based/process.py<gh_stars>0
# -*- coding: utf-8 -*-
from core.evaluation.labels import Label
from texts.readers.utils import NewsInfo
from texts.extraction.base import TextProcessor
from texts.extraction.frame_based.obj_auth import TextObjectAuthorizer
from texts.extraction.frame_based.utils import get_frames_polarities, mean
from texts.frames import TextFrameVariantsCollection
from texts.printing.utils import TitleDescriptor
from texts.utils import optional_invert_label
class FrameBasedTextProcessor(TextProcessor):
    """
    TextProcessor that extracts attitudes (opinions) from news titles using
    frame variants: the sentiment label of an object pair is decided from
    the polarities of the frames found between the two objects.

    The *_EP class constants are Russian descriptions for the statistics
    report (see _add_title_processing_statistics); they are runtime strings
    and must stay as-is.
    """
    pref = "[FrameProcessor]: "
    ATTITUDES_COUNT_CHECKED_EP = "Число отношений проанализировано"
    ATTITUDES_COUNT_WITH_NON_VALID_OBJ_TYPES_EP = \
        "Число отношений в которых один из объектов вне множества {group}"
    OPINIONS_COUNT_APPLIED_FOR_PROCESSING_BY_FRAMES_EP = \
        "Число отношений, принятых на обработку фреймами"
    OPINIONS_COUNT_WITHOUT_FRAMES_INSIDE_EP = \
        "Число отношений без фреймов внутри"
    OPINIONS_COUNT_WITHOUT_POLARITY_FRAMES_EP = \
        "Число отношений в которых есть фреймы без полярности"
    OPINIONS_COUNT_WITH_UNKNOWN_LABEL_EP = \
        "Число отношений в которых не удалось определить метку"
    OPINIONS_COUNT_VALID_EP = \
        "Число отношений прошедших проверку"
    def __init__(self,
                 settings,
                 contexts_printer,
                 opinion_statistic_printer,
                 parse_frames_in_news_sentences,
                 object_statistic_printer=None,
                 flag_process_only_titles=False):
        # flag_process_only_titles: when True, only title opinions are
        # printed/counted; news body content is skipped.
        assert(isinstance(flag_process_only_titles, bool))
        super(FrameBasedTextProcessor, self).__init__(
            settings=settings,
            contexts_printer=contexts_printer,
            opinion_statistic_printer=opinion_statistic_printer,
            parse_frames_in_news_sentences=parse_frames_in_news_sentences,
            object_statistic_printer=object_statistic_printer)
        # Debug counters reported by _add_title_processing_statistics.
        self.__debug_title_opinions_checked = 0
        self.__debug_title_opinions_with_empty_frames = 0
        self.__debug_title_opinions_with_unknown_label = 0
        self.__debug_title_opinions_with_polarities_missed = 0
        self.__debug_title_opinions_with_objs_non_valid_by_type = 0
        self.__debug_title_opinions_processed_by_frames = 0
        self.__debug_valid = 0
        self.__process_only_titles = flag_process_only_titles
        # Filters out object pairs whose NER types are not authorized.
        self.__ner_types_limitation = TextObjectAuthorizer(settings.NERClassType)
    # region public methods
    def process_news(self, text_index, news_info):
        """Process a single news item; see process_news_core for the result."""
        return self.process_news_core(text_index=text_index,
                                      news_info=news_info)
    def process_news_and_print(self, text_index, news_info):
        """Process a news item and feed the results to the printers/statistics."""
        processed_result = self.process_news_core(text_index=text_index,
                                                  news_info=news_info)
        if processed_result is None:
            return
        td, cds, title_opinions, text_opinions = processed_result
        if len(title_opinions) == 0:
            return None
        # In titles-only mode, content descriptors/opinions are ignored.
        self.print_contexts(title_descriptor=td,
                            context_descriptors=[] if self.__process_only_titles else cds)
        self.update_opinions_statistics(title_opinions if self.__process_only_titles else text_opinions)
    def process_news_core(self, text_index, news_info):
        """
        Extract opinions from a news item.

        Returns (title_descriptor, content_descriptors, title_opinions,
        text_opinions), or None when the news yields no frames, objects or
        title opinions.  In titles-only mode the 2nd and 4th elements are
        None.
        """
        assert(isinstance(text_index, int))
        assert(isinstance(news_info, NewsInfo))
        if len(news_info) == 0:
            return None
        title_terms, parsed_title, title_objects, title_frames = self._process_sentence_core(news_info)
        if len(title_frames) == 0:
            return None
        if len(title_objects) == 0:
            return None
        title_opinion_refs, title_opinions = self._extract_opinions_from_title(
            title_terms=title_terms,
            title_objects=title_objects,
            title_frames=title_frames,
            synonyms=self.Settings.Synonyms)
        if len(title_opinions) == 0:
            return None
        td = TitleDescriptor(news_info=news_info,
                             parsed_title=parsed_title,
                             text_index=text_index,
                             title_frames=title_frames,
                             opinion_refs=title_opinion_refs,
                             objects_collection=title_objects,
                             frames=self.Settings.Frames)
        if self.__process_only_titles:
            return td, None, title_opinions, None
        cds, text_opinions = self.process_news_content(
            news_info=news_info,
            title_opinions=title_opinions,
            synonyms=self.Settings.Synonyms)
        return td, cds, title_opinions, text_opinions
    def decide_label_of_pair_in_title_optional(self, i, j, title_objects, title_frames):
        """
        Decide the sentiment label for the object pair (i, j) in a title.

        Averages the (possibly inverted) polarities of the frame variants
        located between the two objects; any negative frame forces the
        overall label to negative.  Returns a Label, or None when the pair
        is rejected (unauthorized object types, no frames, missing
        polarities, or an inconclusive mean in (-1, 1)).
        """
        self.__debug_title_opinions_checked += 1
        # Checking left object.
        l_obj = title_objects.get_object(i)
        if not self.__ner_types_limitation.is_auth(l_obj):
            self.__debug_title_opinions_with_objs_non_valid_by_type += 1
            return None
        # Checking right object.
        r_obj = title_objects.get_object(j)
        if not self.__ner_types_limitation.is_auth(r_obj):
            self.__debug_title_opinions_with_objs_non_valid_by_type += 1
            return None
        # Getting object bounds
        l_bound = l_obj.get_bound()
        r_bound = r_obj.get_bound()
        # Frames strictly between the end of the left object and the start
        # of the right object.
        frame_variants_in = self.__get_frames_within(left_in=l_bound.TermIndex + l_bound.Length,
                                                     right_in=r_bound.TermIndex - 1,
                                                     text_frame_variants=title_frames)
        text_polarities, is_inverted = get_frames_polarities(text_frame_variants=frame_variants_in,
                                                             frames=self.Settings.Frames)
        self.__debug_title_opinions_processed_by_frames += 1
        if len(frame_variants_in) == 0:
            self.__debug_title_opinions_with_empty_frames += 1
            return None
        if len(frame_variants_in) != len(text_polarities):
            # Some frame in between has no polarity -> pair is ambiguous.
            self.__debug_title_opinions_with_polarities_missed += 1
            return None
        labels = [optional_invert_label(p.Label, is_inverted[p_index]).to_int()
                  for p_index, p in enumerate(text_polarities)]
        label = mean(labels)
        # Force to negative if there is a negative example
        if -1 in labels:
            label = -1
        if -1 < label < 1:
            # Mixed/weak evidence: refuse to assign a label.
            self.__debug_title_opinions_with_unknown_label += 1
            return None
        self.__debug_valid += 1
        return Label.from_int(int(label))
    # endregion
    # region protected methods
    def _add_title_processing_statistics(self):
        """Report the debug counters as extra parameters of the statistics printer."""
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="{pref}{val}".format(pref=self.pref,
                                             val=self.ATTITUDES_COUNT_CHECKED_EP),
            value=str(self.__debug_title_opinions_checked))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t{pref}{val}".format(pref=self.pref,
                                               val=self.ATTITUDES_COUNT_WITH_NON_VALID_OBJ_TYPES_EP.format(
                                                   group=self.__ner_types_limitation.SupportedNerTypesSet)),
            value=str(self.__debug_title_opinions_with_objs_non_valid_by_type))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t{pref}{val}".format(pref=self.pref,
                                               val=self.OPINIONS_COUNT_APPLIED_FOR_PROCESSING_BY_FRAMES_EP),
            value=str(self.__debug_title_opinions_processed_by_frames))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t\t{pref}{val}".format(pref=self.pref,
                                                 val=self.OPINIONS_COUNT_WITHOUT_FRAMES_INSIDE_EP),
            value=str(self.__debug_title_opinions_with_empty_frames))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t\t{pref}{val}".format(pref=self.pref,
                                                 val=self.OPINIONS_COUNT_WITHOUT_POLARITY_FRAMES_EP),
            value=str(self.__debug_title_opinions_with_polarities_missed))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t\t{pref}{val}".format(pref=self.pref,
                                                 val=self.OPINIONS_COUNT_WITH_UNKNOWN_LABEL_EP),
            value=str(self.__debug_title_opinions_with_unknown_label))
        self.OpinionStatisticPrinter.add_extra_parameter(
            description="\t\t{pref}{val}".format(pref=self.pref,
                                                 val=self.OPINIONS_COUNT_VALID_EP),
            value=str(self.__debug_valid))
    # endregion
    # region private methods
    @staticmethod
    def __get_frames_within(left_in, right_in, text_frame_variants):
        # Collect frame variants whose position lies in [left_in, right_in].
        assert(isinstance(text_frame_variants, TextFrameVariantsCollection))
        frames_in = []
        for frame in text_frame_variants:
            if left_in <= frame.Position <= right_in:
                frames_in.append(frame)
        return frames_in
    # endregion
| 2.078125 | 2 |
hello.py | sandipsahajoy/CMPUT404-Lab3 | 0 | 12765739 | <gh_stars>0
#!/usr/bin/env python3
"""Minimal CGI demo: echo request metadata from the environment as HTML."""
import os, json

### Print python env variables as plain text
# print("Content-Type: text/plain")
# print()
# print(os.environ)

### Print python env variables as json
# print("Content-Type: application/json")
# print()
# print(json.dumps(dict(os.environ), indent=2))

### Print query parameter data in html
# print("Content-Type:text/html")
# print()
# print("<p>QUERY_STRING: {}</p>".format(os.environ.get('QUERY_STRING', '')))

### Print user's browser parameter data in html
print("Content-Type:text/html")
print()
# .get() avoids a KeyError (and thus an HTTP 500) when the client sends
# no User-Agent header -- CGI only sets HTTP_* variables that were sent.
print("<p>HTTP_USER_AGENT: {}</p>".format(os.environ.get('HTTP_USER_AGENT', '')))
| 2.640625 | 3 |
ultra/learning_algorithm/pdgd.py | lfsblack/ULTRA | 1 | 12765740 | """Training and testing the Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.
See the following paper for more information on the Pairwise Differentiable Gradient Descent (PDGD) algorithm.
* Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
import copy
import itertools
from six.moves import zip
from tensorflow import dtypes
from ultra.learning_algorithm.base_algorithm import BaseAlgorithm
import ultra.utils as utils
import ultra
class PDGD(BaseAlgorithm):
    """The Pairwise Differentiable Gradient Descent (PDGD) algorithm for unbiased learning to rank.

    This class implements the Pairwise Differentiable Gradient Descent (PDGD) algorithm based on the input layer
    feed. See the following paper for more information on the algorithm.

    * Oosterhuis, Harrie, and <NAME>. "Differentiable unbiased online learning to rank." In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pp. 1293-1302. ACM, 2018.
    """

    def __init__(self, data_set, exp_settings, forward_only=False):
        """Create the model.

        Args:
            data_set: (Raw_data) The dataset used to build the input layer.
            exp_settings: (dictionary) The dictionary containing the model settings.
            forward_only: Set true to conduct prediction only, false to conduct training.
        """
        print('Build Pairwise Differentiable Gradient Descent (PDGD) algorithm.')

        # Default hyperparameters; overridden by exp_settings['learning_algorithm_hparams'].
        self.hparams = ultra.utils.hparams.HParams(
            learning_rate=0.05,  # Learning rate (\mu).
            # Scalar for the probability distribution.
            tau=1,
            max_gradient_norm=1.0,  # Clip gradients to this norm.
            # Set strength for L2 regularization.
            l2_loss=0.005,
            grad_strategy='ada',  # Select gradient strategy
        )
        print(exp_settings['learning_algorithm_hparams'])
        self.hparams.parse(exp_settings['learning_algorithm_hparams'])
        self.exp_settings = exp_settings
        self.model = None
        self.max_candidate_num = exp_settings['max_candidate_num']
        self.feature_size = data_set.feature_size
        self.learning_rate = tf.Variable(
            float(self.hparams.learning_rate), trainable=False)

        # Feeds for inputs.
        self.is_training = tf.placeholder(tf.bool, name="is_train")
        self.docid_inputs = []  # a list of top documents
        self.letor_features = tf.placeholder(tf.float32, shape=[None, self.feature_size],
                                             name="letor_features")  # the letor features for the documents
        self.labels = []  # the labels for the documents (e.g., clicks)
        # One docid/label placeholder per candidate rank position.
        for i in range(self.max_candidate_num):
            self.docid_inputs.append(tf.placeholder(tf.int64, shape=[None],
                                                    name="docid_input{0}".format(i)))
            self.labels.append(tf.placeholder(tf.float32, shape=[None],
                                              name="label{0}".format(i)))

        self.global_step = tf.Variable(0, trainable=False)
        # Ranking scores over all candidates (used for evaluation).
        self.output = tf.concat(
            self.get_ranking_scores(
                self.docid_inputs,
                is_training=self.is_training,
                scope='ranking_model'),
            1)
        # reshape from [rank_list_size, ?] to [?, rank_list_size]
        reshaped_labels = tf.transpose(tf.convert_to_tensor(self.labels))
        pad_removed_output = self.remove_padding_for_metric_eval(
            self.docid_inputs, self.output)
        # Register evaluation metric summaries (collection 'eval').
        for metric in self.exp_settings['metrics']:
            for topn in self.exp_settings['metrics_topn']:
                metric_value = ultra.utils.make_ranking_metric_fn(
                    metric, topn)(reshaped_labels, pad_removed_output, None)
                tf.summary.scalar(
                    '%s_%d' %
                    (metric, topn), metric_value, collections=['eval'])

        # Build model
        if not forward_only:
            self.rank_list_size = exp_settings['train_list_cutoff']
            self.train_output = self.ranking_model(
                self.rank_list_size, scope='ranking_model')
            train_labels = self.labels[:self.rank_list_size]
            # reshape from [rank_list_size, ?] to [?, rank_list_size]
            reshaped_train_labels = tf.transpose(
                tf.convert_to_tensor(train_labels))
            pad_removed_output = self.remove_padding_for_metric_eval(
                self.docid_inputs, self.train_output)
            # Register training-time metric summaries (collection 'train_eval').
            for metric in self.exp_settings['metrics']:
                for topn in self.exp_settings['metrics_topn']:
                    metric_value = ultra.utils.make_ranking_metric_fn(metric, topn)(
                        reshaped_train_labels, pad_removed_output, None)
                    tf.summary.scalar(
                        '%s_%d' %
                        (metric, topn), metric_value, collections=['train_eval'])

            # Build training pair inputs only when it is training
            # (positive/negative doc pairs are constructed in numpy in step()).
            self.positive_docid_inputs = tf.placeholder(
                tf.int64, shape=[None], name="positive_docid_input")
            self.negative_docid_inputs = tf.placeholder(
                tf.int64, shape=[None], name="negative_docid_input")
            self.pair_weights = tf.placeholder(
                tf.float32, shape=[None], name="pair_weight")

            # Build ranking loss: weighted pairwise softmax over (pos, neg) scores.
            pair_scores = self.get_ranking_scores(
                [self.positive_docid_inputs,
                 self.negative_docid_inputs], is_training=self.is_training, scope='ranking_model'
            )
            self.loss = tf.reduce_sum(
                tf.math.multiply(
                    #self.pairwise_cross_entropy_loss(pair_scores[0], pair_scores[1]),
                    tf.reduce_sum(-tf.exp(pair_scores[0]) / (
                        tf.exp(pair_scores[0]) + tf.exp(pair_scores[1])), 1),
                    self.pair_weights
                )
            )
            params = tf.trainable_variables()
            if self.hparams.l2_loss > 0:
                for p in params:
                    self.loss += self.hparams.l2_loss * tf.nn.l2_loss(p)

            # Select optimizer
            self.optimizer_func = tf.train.AdagradOptimizer
            if self.hparams.grad_strategy == 'sgd':
                self.optimizer_func = tf.train.GradientDescentOptimizer

            # Gradients and SGD update operation for training the model.
            opt = self.optimizer_func(self.hparams.learning_rate)
            self.gradients = tf.gradients(self.loss, params)
            if self.hparams.max_gradient_norm > 0:
                self.clipped_gradients, self.norm = tf.clip_by_global_norm(self.gradients,
                                                                           self.hparams.max_gradient_norm)
                self.updates = opt.apply_gradients(zip(self.clipped_gradients, params),
                                                   global_step=self.global_step)
                tf.summary.scalar(
                    'Gradient Norm',
                    self.norm,
                    collections=['train'])
            else:
                self.norm = None
                self.updates = opt.apply_gradients(zip(self.gradients, params),
                                                   global_step=self.global_step)
            tf.summary.scalar(
                'Learning Rate',
                self.learning_rate,
                collections=['train'])
            tf.summary.scalar('Loss', self.loss, collections=['train'])
            self.train_summary = tf.summary.merge_all(key='train')
            self.train_eval_summary = tf.summary.merge_all(key='train_eval')

        self.eval_summary = tf.summary.merge_all(key='eval')
        self.saver = tf.train.Saver(tf.global_variables())

    def step(self, session, input_feed, forward_only):
        """Run a step of the model feeding the given inputs.

        Args:
            session: (tf.Session) tensorflow session to use.
            input_feed: (dictionary) A dictionary containing all the input feed data.
            forward_only: whether to do the backward step (False) or only forward (True).

        Returns:
            A triple consisting of the loss, outputs (None if we do backward),
            and a tf.summary containing related information about the step.
        """
        if not forward_only:
            # Run the model to get ranking scores
            input_feed[self.is_training.name] = False
            rank_outputs = session.run(
                [self.train_output, self.train_eval_summary], input_feed)

            # reduce value to avoid numerical problems
            rank_outputs[0] = np.array(rank_outputs[0])
            rank_outputs[0] = rank_outputs[0] - \
                np.amax(rank_outputs[0], axis=1, keepdims=True)
            exp_ranking_scores = np.exp(self.hparams.tau * rank_outputs[0])

            # Remove scores for padding documents
            # (a docid equal to len(letor_features) marks padding).
            letor_features_length = len(input_feed[self.letor_features.name])
            for i in range(len(input_feed[self.labels[0].name])):
                for j in range(self.rank_list_size):
                    # not a valid doc
                    if input_feed[self.docid_inputs[j].name][i] == letor_features_length:
                        exp_ranking_scores[i][j] = 0.0

            # Compute denominator for each position
            # (suffix-sum of exp scores, as in a Plackett-Luce model).
            denominators = np.cumsum(
                exp_ranking_scores[:, ::-1], axis=1)[:, ::-1]
            sum_log_denominators = np.sum(
                np.log(
                    denominators,
                    out=np.zeros_like(denominators),
                    where=denominators > 0),
                axis=1)

            # Create training pairs based on the ranking scores and the labels
            positive_docids, negative_docids, pair_weights = [], [], []
            for i in range(len(input_feed[self.labels[0].name])):
                # Generate pairs and compute weights
                for j in range(self.rank_list_size):
                    l = self.rank_list_size - 1 - j
                    # not a valid doc
                    if input_feed[self.docid_inputs[l].name][i] == letor_features_length:
                        continue
                    if input_feed[self.labels[l].name][i] > 0:  # a clicked doc
                        for k in range(l + 2):
                            # find a negative/unclicked doc
                            if k < self.rank_list_size and input_feed[self.labels[k]
                                                                      .name][i] < input_feed[self.labels[l].name][i]:
                                # not a valid doc
                                if input_feed[self.docid_inputs[k]
                                              .name][i] == letor_features_length:
                                    continue
                                positive_docids.append(
                                    input_feed[self.docid_inputs[l].name][i])
                                negative_docids.append(
                                    input_feed[self.docid_inputs[k].name][i])
                                # Recompute the denominator with docs l and k swapped.
                                flipped_exp_scores = np.copy(
                                    exp_ranking_scores[i])
                                flipped_exp_scores[k] = exp_ranking_scores[i][l]
                                flipped_exp_scores[l] = exp_ranking_scores[i][k]
                                flipped_denominator = np.cumsum(
                                    flipped_exp_scores[::-1])[::-1]
                                sum_log_flipped_denominator = np.sum(
                                    np.log(
                                        flipped_denominator,
                                        out=np.zeros_like(flipped_denominator),
                                        where=flipped_denominator > 0))
                                #p_r = np.prod(rank_prob[i][min_i:max_i+1])
                                #p_rs = np.prod(flipped_rank_prob[min_i:max_i+1])
                                # weight = p_rs / (p_r + p_rs) = 1 / (1 +
                                # (d_rs/d_r)) = 1 / (1 + exp(log_drs - log_dr))
                                # (the min(..., 20) caps the exponent to avoid overflow)
                                weight = 1.0 / \
                                    (1.0 +
                                     np.exp(min(sum_log_flipped_denominator -
                                                sum_log_denominators[i], 20)))
                                if np.isnan(weight):
                                    print('SOMETHING WRONG!!!!!!!')
                                    print(
                                        'sum_log_denominators[i] is nan: ' + str(np.isnan(sum_log_denominators[i])))
                                    print('sum_log_flipped_denominator is nan ' +
                                          str(np.isnan(sum_log_flipped_denominator)))
                                pair_weights.append(weight)

            input_feed[self.positive_docid_inputs.name] = positive_docids
            input_feed[self.negative_docid_inputs.name] = negative_docids
            input_feed[self.pair_weights.name] = pair_weights

            # Train the model
            input_feed[self.is_training.name] = True
            train_outputs = session.run([
                self.updates,  # Update Op that does SGD.
                self.loss,  # Loss for this batch.
                self.train_summary  # Summarize statistics.
            ], input_feed)
            summary = utils.merge_TFSummary(
                [rank_outputs[-1], train_outputs[-1]], [0.5, 0.5])
            # loss, no outputs, summary.
            return train_outputs[1], rank_outputs, summary
        else:
            input_feed[self.is_training.name] = False
            output_feed = [
                self.eval_summary,  # Summarize statistics.
                self.output  # Model outputs
            ]
            outputs = session.run(output_feed, input_feed)
            return None, outputs[1], outputs[0]  # loss, outputs, summary.
| 3.09375 | 3 |
src/morphforgecontrib/simulation/channels/util.py | mikehulluk/morphforge | 1 | 12765741 | <reponame>mikehulluk/morphforge
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
#from morphforgecontrib.simulation.channels import inftauinterpolated
import numpy as np
from morphforgecontrib.simulation.channels.hh_style import StdChlAlphaBeta
from morphforgecontrib.simulation.channels.hh_style import StdChlAlphaBetaBeta
from morphforge.units import qty
from morphforge import units
class ChannelConverter(object):
    """Utilities for converting between channel model representations."""

    @classmethod
    def AlphaBetaToInterpolateInfTauFunctorConvertor(cls, chl_functor, new_id=None, new_name=None, clone_id_suffix="_AsInfTau", clone_name_suffix="_AsInfTau", voltage_interpolation_values=None, ):
        """Return a functor building an inf/tau-interpolated copy of an alpha/beta channel.

        Args:
            chl_functor: callable(env) returning a StdChlAlphaBeta or
                StdChlAlphaBetaBeta channel instance.
            new_id: currently unused.
            new_name: explicit name for the clone; when None, the old channel's
                name plus clone_name_suffix is used.
            clone_id_suffix: currently unused.
            clone_name_suffix: suffix appended to the old name when new_name is None.
            voltage_interpolation_values: voltages at which inf/tau are sampled;
                defaults to 10 points over [-80, 60] mV.

        NOTE(review): InfTauCalculator, InfTauInterpolation and
        MM_InfTauInterpolatedChannel are referenced below but not imported in
        this module, so invoking the returned functor would raise NameError —
        confirm the intended imports (the commented-out inftauinterpolated
        import at the top of the file looks related).
        """
        # Create a new functor:
        def newFunctor(env, _voltage_interpolation_values=voltage_interpolation_values):
            old_chl = chl_functor(env)
            assert isinstance(old_chl, (StdChlAlphaBeta,
                                        StdChlAlphaBetaBeta))  # or issubclass(StdChlAlphaBetaBeta, old_chl)

            # New Name
            if new_name is not None:
                chl_name = new_name
            else:
                chl_name = old_chl.name + clone_name_suffix

            # Interpolation voltages:
            # voltage_interpolation_values=voltage_interpolation_values
            if _voltage_interpolation_values is None:
                _voltage_interpolation_values = np.linspace(-80, 60, 10) * qty('mV')

            # Copy the state variables: sample alpha/beta at each voltage and
            # convert to inf/tau interpolation tables (mV / dimensionless / ms).
            new_state_vars = {}
            for state_var in old_chl.get_state_variables():
                alpha, beta = old_chl.get_alpha_beta_at_voltage(statevar=state_var, V=_voltage_interpolation_values)
                inf, tau = InfTauCalculator.alpha_beta_to_inf_tau(alpha, beta)
                V = _voltage_interpolation_values.rescale('mV').magnitude
                inf = inf.rescale(units.dimensionless).magnitude
                tau = tau.rescale('ms').magnitude
                new_state_vars[state_var] = InfTauInterpolation(V=V, inf=inf, tau=tau)

            # Build the clone with the same ion/equation/conductance/reversal
            # potential, but inf/tau-interpolated state variables.
            chl = env.Channel(
                MM_InfTauInterpolatedChannel,
                name=chl_name,
                ion=old_chl.ion,
                equation=old_chl.eqn,
                conductance=old_chl.conductance,
                reversalpotential=old_chl.reversalpotential,
                statevars_new=new_state_vars,
            )
            return chl

        return newFunctor
# V1 = self.state1.plotinf.lineplot.index.get_data().tolist()
# inf1 = self.state1.plotinf.lineplot.value.get_data().tolist()
# tau1 = self.state1.plottau.lineplot.value.get_data().tolist()
#
# V2 = self.state2.plotinf.lineplot.index.get_data().tolist()
# inf2 = self.state2.plotinf.lineplot.value.get_data().tolist()
# tau2 = self.state2.plottau.lineplot.value.get_data().tolist()
#
# #V1 = self.state1.plotinf.mx.tolist()
# #inf1 = self.state1.plotinf.my.tolist()
# #tau1 = self.state1.plottau.my.tolist()
# #V2 = self.state2.plotinf.mx.tolist()
# #inf2 = self.state2.plotinf.my.tolist()
# #tau2 = self.state2.plottau.my.tolist()
#
# ks_vars = {
# self.state_var_name1: InfTauInterpolation(V=V1, inf=inf1, tau=tau1),
# self.state_var_name2: InfTauInterpolation(V=V2, inf=inf2, tau=tau2),
# }
#
# #inf_data1 = zip(self.state1.plotinf.mx.tolist(), self.state1.plotinf.my.tolist())
# #tau_data1 = zip(self.state1.plottau.mx.tolist(), self.state1.plottau.my.tolist())
#
# #inf_data2 = zip(self.state2.plotinf.mx.tolist(), self.state2.plotinf.my.tolist())
# #tau_data2 = zip(self.state2.plottau.mx.tolist(), self.state2.plottau.my.tolist())
# #
# #ks_vars = {
# # self.state_var_name1: { 'inf': inf_data1, 'tau': tau_data1, },
# # self.state_var_name2: { 'inf': inf_data2, 'tau': tau_data2, },
# #
# # }
# ks = env.Channel(MM_InfTauInterpolatedChannel,
# name=self.chlname,
# ion='None',
# equation=self.eqn,
# conductance = '%2.2f:mS/cm2' % gbar,
# reversalpotential = '%2.2f:mV' % vrev,
# statevars_new = ks_vars)
#
#
#
#
# ca_state_vars = { "m": {"alpha": [4.05, 0.0, 1.0, -15.32, -13.57], "beta1": [0.093 * 10.63, 0.093, -1, 10.63, 1], "beta2":[1.28, 0, 1, 5.39, 12.11] } }
# caChannels = env.Channel(
# StdChlCalciumAlphaBetaBeta,
# name="CaChl", ion="ca",
# equation="m*m",
# permeability = qty("1.425:cm/s") * 0.1 * 0.15,
# intracellular_concentration = qty("100:nMol"),
# extracellular_concentration = qty("10:uMol"),
# temperature = qty("300:K"),
# beta2threshold = qty("-25:mV"),
# statevars=ca_state_vars,
# )
# return caChannels
#
#
#
#
#
#
# state_names = chl.statevars.keys()
# assert len(state_names) == 2
# state_name1 = state_names[0]
# state_name2 = state_names[1]
#
# [intV, tauV], [intV, infV] = convertAlphaBetaToInfTauInterpolated(chl, state_name1, 10)
# state1=HHGeneralStatePanel(initial_tau= [intV, tauV], initial_inf=[intV, infV])
#
# [intV, tauV], [intV, infV] = convertAlphaBetaToInfTauInterpolated(chl, state_name2, 10)
# state2=HHGeneralStatePanel(initial_tau= [intV, tauV], initial_inf=[intV, infV])
#
# return HHChannelPaneInfTau2(sim_config=sim_config,
# general_pane=general,
# state_pane1=state1,
# state_pane2=state2,
# eqn = chl.eqn,
# state_var_name1 = state_name1,
# state_var_name2 = state_name2,
# chlname = chlname
# )
#
| 1.289063 | 1 |
features/base_hist.py | zerebom/Elo-merchant-category-recomendation | 0 | 12765742 | <gh_stars>0
import argparse
import inspect
import re
from abc import ABCMeta, abstractmethod
from pathlib import Path

import numpy as np
import pandas as pd

from utils.__init__ import timer
def get_arguments_hist():
    """Parse command-line options for feature generation.

    Returns an argparse.Namespace with a boolean ``force`` attribute that,
    when set (via --force/-f), requests overwriting existing feature files.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--force', '-f', action='store_true', help='Overwrite existing files'
    )
    return arg_parser.parse_args()
def get_features(namespace):
    """Yield one instance of every concrete Feature_hist subclass in *namespace*."""
    for obj in namespace.values():
        is_concrete_feature = (
            inspect.isclass(obj)
            and issubclass(obj, Feature_hist)
            and not inspect.isabstract(obj)
        )
        if is_concrete_feature:
            yield obj()
def generate_features_hist(namespace, overwrite):
    """Run and save every feature found in *namespace*.

    A feature whose two output files already exist is skipped unless
    *overwrite* is truthy.
    """
    for feature in get_features(namespace):
        outputs_exist = (
            feature.historical_transactions_path.exists()
            and feature.new_merchant_transactions_path.exists()
        )
        if outputs_exist and not overwrite:
            print(feature.name, 'was skipped')
        else:
            feature.run().save()
class Feature_hist(metaclass=ABCMeta):
    """Abstract base for features computed over the historical and
    new-merchant transaction tables.

    Subclasses implement create_features() to fill the two DataFrames;
    run() then shrinks their memory footprint and applies the optional
    column prefix/suffix, and save()/load() persist them as feather files.
    """

    # Optional affixes applied to every generated column name.
    prefix = ''
    suffix = ''
    # Directory where the .feather outputs are written.
    dir = '.'

    def __init__(self):
        self.name = self._snake_case_name(self.__class__.__name__)
        self.historical_transactions = pd.DataFrame()
        self.new_merchant_transactions = pd.DataFrame()
        base = Path(self.dir)
        self.historical_transactions_path = base / f'{self.name}_historical_transactions.feather'
        self.new_merchant_transactions_path = base / f'{self.name}_new_merchant_transactions.feather'

    @staticmethod
    def _snake_case_name(class_name):
        """All-caps class names are simply lowered; CamelCase becomes snake_case."""
        if class_name.isupper():
            return class_name.lower()
        snake = re.sub(
            "([A-Z])",
            lambda match: "_" + match.group(1).lower(),
            class_name,
        )
        return snake.lstrip('_')

    def run(self):
        """Build the features, downcast their dtypes, and affix column names."""
        with timer(self.name):
            self.create_features()
            self.historical_transactions = reduce_mem_usage(self.historical_transactions)
            self.new_merchant_transactions = reduce_mem_usage(self.new_merchant_transactions)
            col_prefix = self.prefix + '_' if self.prefix else ''
            col_suffix = '_' + self.suffix if self.suffix else ''
            self.historical_transactions.columns = col_prefix + self.historical_transactions.columns + col_suffix
            self.new_merchant_transactions.columns = col_prefix + self.new_merchant_transactions.columns + col_suffix
        return self

    @abstractmethod
    def create_features(self):
        raise NotImplementedError

    def save(self):
        """Persist both frames as feather files next to each other."""
        self.historical_transactions.to_feather(str(self.historical_transactions_path))
        self.new_merchant_transactions.to_feather(str(self.new_merchant_transactions_path))

    def load(self):
        """Reload both frames from their feather files."""
        self.historical_transactions = pd.read_feather(str(self.historical_transactions_path))
        self.new_merchant_transactions = pd.read_feather(str(self.new_merchant_transactions_path))
def reduce_mem_usage(df, verbose=True):
    """Downcast numeric columns of *df* to the smallest dtype that fits their range.

    Args:
        df: pandas DataFrame whose numeric columns are downcast in place.
        verbose: when True, print memory usage after optimization and the
            percentage saved. (Bug fix: this flag was previously ignored and
            the summary was printed unconditionally.)

    Returns:
        The same DataFrame object, with numeric columns downcast.

    Note: floats may be reduced to float16, which loses precision beyond
    ~3 significant digits — acceptable for these feature tables.
    """
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Pick the narrowest signed integer type that holds [min, max].
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # Pick the narrowest float type whose range covers [min, max].
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
        print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
| 2.34375 | 2 |
src/gcardvault.py | rtomac/gcardvault | 0 | 12765743 | <gh_stars>0
import os
import glob
import requests
import pathlib
from getopt import gnu_getopt, GetoptError
from xml.etree import ElementTree
from googleapiclient.discovery import build
from .google_oauth2 import GoogleOAuth2
from .git_vault_repo import GitVaultRepo
from .etag_manager import ETagManager
# Note: OAuth2 auth code flow for "installed applications" assumes the client secret
# cannot actually be kept secret (must be embedded in application/source code).
# Access to user data must be consented by the user and [more importantly] the
# access & refresh tokens are stored locally with the user running the program.
# Default OAuth2 client for this installed application (see note above on
# why the "secret" is embedded).
DEFAULT_CLIENT_ID = "160026605549-ktl7ghvc9gttpa8u902nm65j3tro3119.apps.googleusercontent.com"
DEFAULT_CLIENT_SECRET = "<KEY>"

# Scopes requested during authorization: identity plus read access to
# contacts via both the People API and CardDAV.
OAUTH_SCOPES = [
    "openid",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/contacts.readonly",
    "https://www.googleapis.com/auth/carddav",
]

# Note: Technically, CardDAV URLs should be discovered dynamically in very REST-like
#       fashion, so these could be subject to change. Risk of that down the road
#       is worth the trade-off of using the People API to discover contact list,
#       so much simpler to work with than implementing the full DAV/CardDAV flow.
GOOGLE_CARDDAV_ADDRESSBOOK_URI_FORMAT = "https://www.googleapis.com/carddav/v1/principals/{principal}/lists/default/"
GOOGLE_CARDDAV_CONTACT_HREF_FORMAT = "/carddav/v1/principals/{principal}/lists/default/{contact_id}"

# Page sizes for People API listing and CardDAV multiget batching.
CONTACT_RESOURCE_PAGE_SIZE = 500
CARDDAV_REPORT_PAGE_SIZE = 250

COMMANDS = ['sync', 'noop']

# Bundled help/version text files live next to this module.
dirname = os.path.dirname(__file__)
usage_file_path = os.path.join(dirname, "USAGE.txt")
version_file_path = os.path.join(dirname, "VERSION.txt")
class Gcardvault:
    """Command-line driver that syncs a user's Google contacts to vCard files.

    Contacts are discovered through the People API, their vCards downloaded
    via Google's CardDAV endpoint, and (unless --export-only) the output
    directory is versioned in a git "vault" repository.
    """

    def __init__(self, google_oauth2=None, google_apis=None):
        # CLI state; populated by _parse_options().
        self.command = None
        self.user = None
        self.export_only = False
        self.clean = False
        self.conf_dir = os.path.expanduser("~/.gcardvault")
        self.output_dir = os.getcwd()
        self.client_id = DEFAULT_CLIENT_ID
        self.client_secret = DEFAULT_CLIENT_SECRET

        # Collaborators are injectable for testing.
        self._repo = None
        self._google_oauth2 = google_oauth2 if google_oauth2 is not None else GoogleOAuth2()
        self._google_apis = google_apis if google_apis is not None else GoogleApis()

    def run(self, cli_args):
        """Parse *cli_args* and dispatch to the requested command method."""
        if not self._parse_options(cli_args):
            return
        getattr(self, self.command)()

    def noop(self):
        """Do nothing beyond ensuring the conf/output directories exist."""
        self._ensure_dirs()
        pass

    def sync(self):
        """Download/refresh all of the user's contacts into the output dir."""
        self._ensure_dirs()
        credentials = self._get_oauth2_credentials()

        if not self.export_only:
            self._repo = GitVaultRepo("gcardvault", self.output_dir, [".vcf"])

        contacts = self._get_contacts(credentials)
        if self.clean:
            # Remove local files for contacts that no longer exist upstream.
            self._clean_output_dir(contacts)

        contacts_to_update = self._filter_contacts_to_update(contacts)
        if contacts_to_update:
            vcards = self._get_vcards_for_contacts(credentials, contacts_to_update)
            self._save_vcards(contacts_to_update, vcards)
            if self._repo:
                self._repo.add_all_files()

        if self._repo:
            self._repo.commit("gcardvault sync")

    def usage(self):
        """Return the CLI usage text bundled with the package."""
        return pathlib.Path(usage_file_path).read_text().strip()

    def version(self):
        """Return the version string bundled with the package."""
        return pathlib.Path(version_file_path).read_text().strip()

    def _parse_options(self, cli_args):
        """Parse options/arguments into attributes.

        Returns False when only help/version output was requested, True when a
        command should run. Raises GcardvaultError on invalid input.
        """
        show_help = show_version = False

        try:
            (opts, pos_args) = gnu_getopt(
                cli_args,
                'efc:o:h',
                ['export-only', 'clean',
                 'conf-dir=', 'output-dir=', 'vault-dir=',
                 'client-id=', 'client-secret=',
                 'help', 'version', ]
            )
        except GetoptError as e:
            raise GcardvaultError(e) from e

        for opt, val in opts:
            if opt in ['-e', '--export-only']:
                self.export_only = True
            elif opt in ['-f', '--clean']:
                self.clean = True
            elif opt in ['-c', '--conf-dir']:
                self.conf_dir = val
            elif opt in ['-o', '--output-dir', '--vault-dir']:
                self.output_dir = val
            elif opt in ['--client-id']:
                self.client_id = val
            elif opt in ['--client-secret']:
                self.client_secret = val
            elif opt in ['-h', '--help']:
                show_help = True
            elif opt in ['--version']:
                show_version = True

        # No arguments at all: treat as a request for help.
        if len(opts) == 0 and len(pos_args) == 0:
            show_help = True

        if show_help:
            print(self.usage())
            return False
        if show_version:
            print(self.version())
            return False

        # Positional arguments: <command> <user>.
        if len(pos_args) >= 1:
            self.command = pos_args[0]
        if len(pos_args) >= 2:
            self.user = pos_args[1].lower().strip()

        if self.command is None:
            raise GcardvaultError("<command> argument is required", "command")
        if self.command not in COMMANDS:
            raise GcardvaultError("Invalid <command> argument", "command")
        if self.user is None:
            raise GcardvaultError("<user> argument is required", "user")
        if len(pos_args) > 2:
            raise GcardvaultError("Unrecognized arguments")

        return True

    def _ensure_dirs(self):
        """Create the configuration and output directories if missing."""
        for dir in [self.conf_dir, self.output_dir]:
            pathlib.Path(dir).mkdir(parents=True, exist_ok=True)

    def _get_oauth2_credentials(self):
        """Load/refresh OAuth2 credentials for self.user, authorizing if needed.

        When a fresh authorization was granted by a different Google account
        than <user>, the cached token is discarded and GcardvaultError raised.
        """
        token_file_path = os.path.join(self.conf_dir, f"{self.user}.token.json")

        (credentials, new_authorization) = self._google_oauth2 \
            .get_credentials(token_file_path, self.client_id, self.client_secret, OAUTH_SCOPES, self.user)

        if new_authorization:
            user_info = self._google_oauth2.request_user_info(credentials)
            profile_email = user_info['email'].lower().strip()
            if self.user != profile_email:
                if os.path.exists(token_file_path):
                    os.remove(token_file_path)
                raise GcardvaultError(f"Authenticated user - {profile_email} - was different than <user> argument specified")

        #print(credentials.token)
        return credentials

    def _get_contacts(self, credentials):
        """Page through the People API and return all Contact records."""
        contacts = []
        next_page_token = None
        while True:
            resource = self._google_apis.request_contact_list(credentials, page_token=next_page_token)
            self._add_contacts_from_resource(contacts, resource)
            next_page_token = resource.get('nextPageToken')
            if next_page_token is None:
                break
        return contacts

    def _add_contacts_from_resource(self, contacts, resource):
        """Append a Contact for every connection in one API response page."""
        for connection in resource.get('connections', []):
            contact = self._get_contact_from_connection(connection)
            if contact:
                contacts.append(contact)

    def _get_contact_from_connection(self, connection):
        """Build a Contact from a People API connection.

        Returns None when the connection has no CONTACT-typed source (e.g.
        profile-only entries).
        """
        # Find the CONTACT source (carries the id and etag).
        contact_source = None
        sources = connection.get('metadata', {}).get('sources', [])
        for source in sources:
            if source.get('type') == "CONTACT":
                contact_source = source
                break

        # Prefer the display name that originates from the contact itself.
        display_name = None
        names = connection.get('names', [])
        for name in names:
            name_source_type = name.get('metadata', {}).get('source', {}).get('type')
            if name_source_type == "CONTACT":
                display_name = name['displayName']
                break

        if contact_source:
            id = contact_source['id']
            etag = contact_source['etag']
            return Contact(id, display_name, self.user, etag)

        return None

    def _clean_output_dir(self, contacts):
        """Delete local .vcf files whose contact id is no longer in *contacts*."""
        contact_ids = [contact.id for contact in contacts]
        files_on_disk = self._get_vcard_files_on_disk()
        for contact_id in files_on_disk:
            if contact_id not in contact_ids:
                file_name = files_on_disk[contact_id]
                os.remove(os.path.join(self.output_dir, file_name))
                if self._repo:
                    self._repo.remove_file(file_name)
                print(f"Removed file '{file_name}'")

    def _filter_contacts_to_update(self, contacts):
        """Return the contacts whose file is missing or whose ETag changed."""
        contacts_to_update = []
        contacts_up_to_date = 0
        etags = ETagManager(self.conf_dir)
        for contact in contacts:
            vcard_file_path = os.path.join(self.output_dir, contact.file_name)
            etag_changed = etags.test_for_change_and_save(contact.id, contact.etag)
            if os.path.exists(vcard_file_path) and not etag_changed:
                contacts_up_to_date += 1
                continue
            contacts_to_update.append(contact)

        print(f"{contacts_up_to_date} contact(s) are up to date")
        print(f"{len(contacts_to_update)} contact(s) need to be updated")
        return contacts_to_update

    def _get_vcards_for_contacts(self, credentials, contacts):
        """Download vCards in CardDAV batches; return {carddav_href: vcard}."""
        vcards = {}
        print(f"Downloading vCards for {len(contacts)} contact(s)")
        count = CARDDAV_REPORT_PAGE_SIZE
        start = 0
        while start < len(contacts):
            end = start + count
            contacts_in_batch = contacts[start:end]
            self._get_vcards_for_contacts_batch(credentials, contacts_in_batch, vcards)
            start += count
        return vcards

    def _get_vcards_for_contacts_batch(self, credentials, contacts, vcards):
        """Issue one addressbook-multiget REPORT and collect returned vCards.

        Raises RuntimeError when the server did not return a vCard for one of
        the requested contacts.
        """
        ns = {"d": "DAV:", "card": "urn:ietf:params:xml:ns:carddav", }
        carddav_hrefs = [contact.carddav_href for contact in contacts]
        carddav_href_xml_nodes = "<d:href>" + "</d:href><d:href>".join(carddav_hrefs) + "</d:href>"
        request_body = f"""
            <card:addressbook-multiget xmlns:d="{ns['d']}" xmlns:card="{ns['card']}" >
                <d:prop>
                    <card:address-data />
                </d:prop>
                {carddav_href_xml_nodes}
            </card:addressbook-multiget>
        """
        xml = self._google_apis.request_carddav_report(credentials, self.user, request_body)

        # Parse the multistatus response; keep only 200 OK address-data props.
        multistatus = ElementTree.fromstring(xml)
        for response in multistatus:
            href = response.findtext("d:href", namespaces=ns)
            for propstat in response.findall("d:propstat", namespaces=ns):
                if propstat.findtext("d:status", namespaces=ns) == "HTTP/1.1 200 OK":
                    vcard = propstat.findtext("d:prop/card:address-data", namespaces=ns)
                    if vcard:
                        vcards[href] = vcard

        for contact in contacts:
            if contact.carddav_href not in vcards:
                raise RuntimeError(f"vCard could not be downloaded for contact '{contact.name}'")

    def _save_vcards(self, contacts, vcards):
        """Write vCards to disk, renaming files when a contact's name changed."""
        files_on_disk = self._get_vcard_files_on_disk()
        for contact in contacts:
            vcard = vcards[contact.carddav_href]
            target_file_path = os.path.join(self.output_dir, contact.file_name)
            # A contact rename changes the file name; move the old file so
            # git history follows the contact rather than the name.
            existing_file_name = files_on_disk.get(contact.id)
            if existing_file_name and existing_file_name != contact.file_name:
                existing_file_path = os.path.join(self.output_dir, existing_file_name)
                os.rename(existing_file_path, target_file_path)
            with open(target_file_path, 'w') as file:
                file.write(vcard)
            print(f"Saved contact '{contact.name}' to {contact.file_name}")

    def _get_vcard_files_on_disk(self):
        """Map contact id -> file name for every .vcf already in output_dir.

        File names end in "_<id>.vcf", so the id is the last underscore-
        separated token of the stem.
        """
        files_on_disk = {}
        files_names = [os.path.basename(file).lower() for file in glob.glob(os.path.join(self.output_dir, "*.vcf"))]
        for file_name in files_names:
            file_name_wo_ext = os.path.splitext(file_name)[0]
            id = file_name_wo_ext.split("_")[-1]
            files_on_disk[id] = file_name
        return files_on_disk
class GcardvaultError(RuntimeError):
    """Application-level error raised for invalid CLI usage or sync failures."""
    pass
class Contact():
    """One Google contact plus its derived vCard file name and CardDAV href."""

    def __init__(self, id, name, principal, etag):
        self.id = id
        self.name = name if name else id
        self.principal = principal
        self.etag = etag

        # File name is the snake-cased display name (or 'contact' when
        # nameless) followed by the lowercased contact id.
        name_part = "_".join(name.strip().lower().split()) if name else "contact"
        self.file_name = f"{name_part}_{id.lower()}.vcf"

        self.carddav_href = GOOGLE_CARDDAV_CONTACT_HREF_FORMAT.format(
            principal=self.principal,
            contact_id=id,
        )
class GoogleApis():
    """Thin wrappers around the Google People API and CardDAV endpoint."""

    def request_contact_list(self, credentials, page_token=None):
        """Fetch one page of the user's contact list via the People API."""
        with build('people', 'v1', credentials=credentials) as service:
            request = service.people().connections().list(
                resourceName="people/me",
                sources="READ_SOURCE_TYPE_CONTACT",
                personFields="metadata,names",
                sortOrder="FIRST_NAME_ASCENDING",
                pageSize=CONTACT_RESOURCE_PAGE_SIZE,
                pageToken=page_token,
            )
            return request.execute()

    def request_carddav_report(self, credentials, principal, request_body):
        """Issue a CardDAV REPORT for *principal* and return the raw XML body."""
        endpoint = GOOGLE_CARDDAV_ADDRESSBOOK_URI_FORMAT.format(principal=principal)
        auth_headers = {
            "Authorization": f"Bearer {credentials.token}",
            "Content-Type": "application/xml; charset=utf-8",
        }
        response = requests.request("REPORT", endpoint, headers=auth_headers, data=request_body)
        response.raise_for_status()
        return response.text
| 2.15625 | 2 |
yacht/__init__.py | IusztinPaul/yacht | 5 | 12765744 | from enum import Enum
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, Yacht"
__credits__ = ["<NAME>"]
__license__ = "Apache-2.0"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class Mode(Enum):
    """Run modes of the application, grouped by training/backtesting role."""

    Download = 'download'
    ExportActions = 'export_actions'
    Train = 'train'
    FineTuneTrain = 'fine_tune_train'
    BacktestTrain = 'backtest_on_train'
    BacktestValidation = 'backtest_on_validation'
    BacktestTest = 'backtest_on_test'
    BestMetricBacktestTrain = 'best_backtest_on_train'
    BestMetricBacktestValidation = 'best_backtest_on_validation'
    BestMetricBacktestTest = 'best_backtest_on_test'

    @classmethod
    def from_string(cls, value: str) -> 'Mode':
        # 'backtest' is accepted as shorthand for 'backtest_on_test'.
        normalized = 'backtest_on_test' if value == 'backtest' else value
        return cls(normalized.lower())

    def is_trainable(self) -> bool:
        return self in (Mode.Train, Mode.FineTuneTrain)

    def is_fine_tuning(self) -> bool:
        return self is Mode.FineTuneTrain

    def is_backtest_on_train(self) -> bool:
        return self in (Mode.BacktestTrain, Mode.BestMetricBacktestTrain)

    def is_validation(self) -> bool:
        return self in (Mode.BacktestValidation, Mode.BestMetricBacktestValidation)

    def is_test(self) -> bool:
        return self in (Mode.BacktestTest, Mode.BestMetricBacktestTest)

    def is_backtestable(self) -> bool:
        # Any plain or best-metric backtest mode on any split.
        return self.is_backtest_on_train() or self.is_validation() or self.is_test()

    def is_best_metric(self) -> bool:
        return self in (
            Mode.BestMetricBacktestTrain,
            Mode.BestMetricBacktestValidation,
            Mode.BestMetricBacktestTest,
        )

    def is_trainval(self) -> bool:
        return self.is_trainable() or self.is_validation()

    def is_download(self) -> bool:
        return self is Mode.Download

    def to_step_key(self) -> str:
        return f'{self.value}_step'
| 2.46875 | 2 |
osa-ironic-ui/osaui/__main__.py | neillc/junkcode | 0 | 12765745 | <reponame>neillc/junkcode<filename>osa-ironic-ui/osaui/__main__.py
import subprocess
import click
import pyrax
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Self-signed/insecure endpoints are expected here; silence urllib3's
# InsecureRequestWarning noise for the whole module.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Module-level handle; currently unused by any function below.
server = None
@click.command()
@click.option(
    '--name', prompt='Server name',
    help='The name for the new server'
)
@click.option(
    '--keypair',
    default=None,
    help='Keypair to inject'
)
def create_server(name, keypair):
    """Create a boot-from-volume server by shelling out to the ``rack`` CLI.

    :param name: display name for the new server; it is quoted in the
        command line, so names containing spaces are supported.
    :param keypair: optional SSH keypair name to inject into the server.
    """
    import shlex

    cmd = (
        'rack servers instance create --name="{name}" --flavor-id=io1-15'
        ' --block-device source-type=image,'
        'source-id=1d3ea64f-1ead-4042-8cb6-8ceb523b6149,'
        'destination-type=volume,volume-size=150'
    ).format(name=name)
    if keypair:
        # BUG FIX: the template uses the *named* field {keypair}, so
        # .format() must receive a keyword argument; the previous
        # positional call raised KeyError at runtime.
        cmd += ' --keypair={keypair}'.format(keypair=keypair)
    print('Creating server {name}'.format(name=name))
    print(cmd)
    # BUG FIX: shlex.split honors the quoting around --name, unlike
    # str.split(' '), which mangled names containing spaces.
    args = shlex.split(cmd)
    print(args)
    subprocess.run(args)
def get_server_status(name):
    """Print the list of cloud servers visible to the pyrax credentials.

    NOTE(review): *name* is currently unused — the function lists all
    servers rather than looking one up; confirm intended behavior.
    """
    pyrax.set_credential_file("/Users/neill/pyraxcreds")
    compute = pyrax.cloudservers
    print(compute.servers.list())
def create_server2(name, keypair):
    """Boot a 150 GB boot-from-volume server via pyrax and print its status.

    :param name: display name for the new server.
    :param keypair: SSH keypair name to inject.
    """
    pyrax.set_credential_file("/Users/neill/pyraxcreds")
    compute = pyrax.cloudservers
    # Boot-from-volume mapping: build a 150 GB volume from the given image
    # and use it as the boot device.
    boot_volume = {
        'source_type': 'image',
        'uuid': '1d3ea64f-1ead-4042-8cb6-8ceb523b6149',
        'destination_type': 'volume',
        'volume_size': '150',
        'boot_index': '0',
    }
    instance = compute.servers.create(
        name=name,
        image=None,  # image comes from the block-device mapping, not here
        flavor='io1-15',
        key_name=keypair,
        block_device_mapping_v2=[boot_volume],
    )
    print(instance.status)
def main():
    """Ad-hoc entry point; the click/rack-based paths are kept for reference."""
    # create_server()
    # get_server_status('test-xx')
    create_server2('test-xx', 'neill')


if __name__ == '__main__':
    # BUG FIX: guard the invocation so importing this module no longer
    # creates a server as a side effect; `python -m osaui` still runs it.
    main()
| 2.203125 | 2 |
RecoHI/HiTracking/python/HIPixelVertices_cff.py | ckamtsikis/cmssw | 852 | 12765746 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# pixel cluster vertex finder
from RecoHI.HiTracking.HIPixelClusterVertex_cfi import *
# pixel track producer
from RecoHI.HiTracking.HIPixel3ProtoTracks_cfi import *
# fast vertex finding
from RecoHI.HiTracking.HIPixelMedianVertex_cfi import *
# selected pixel tracks
from RecoHI.HiTracking.HISelectedProtoTracks_cfi import *
# accurate vertex finding
from RecoHI.HiTracking.HIPixelAdaptiveVertex_cfi import *
# selection of best primary vertex
from RecoHI.HiTracking.HIBestVertexSequences_cff import *
# Bundle all heavy-ion pixel-vertexing modules into one Task, then expose it
# as the hiPixelVertices Sequence consumed by downstream configs.
hiPixelVerticesTask = cms.Task(
    hiPixelClusterVertex,
    PixelLayerTriplets,
    hiPixel3ProtoTracksTask,
    hiPixelMedianVertex,
    hiSelectedProtoTracks,
    hiPixelAdaptiveVertex,
    bestHiVertexTask,
)
hiPixelVertices = cms.Sequence(hiPixelVerticesTask)
| 0.964844 | 1 |
atom/nucleus/python/nucleus_api/models/account_overview_vo.py | sumit4-ttn/SDK | 0 | 12765747 | <reponame>sumit4-ttn/SDK<filename>atom/nucleus/python/nucleus_api/models/account_overview_vo.py
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AccountOverviewVO(object):
    """Overview of a single account (swagger-codegen model).

    NOTE: originally auto generated by the swagger code generator program;
    this hand-written equivalent keeps the exact same public interface.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the API definition.
    """

    swagger_types = {
        'account_asset_size': 'float',
        'account_asset_size_date': 'datetime',
        'account_holdings': 'list[AccountHoldingVO]',
        'account_id': 'str',
        'account_name': 'str',
        'account_type_id': 'str',
        'account_type_name': 'str',
        'allocations': 'list[AllocationVO]',
        'clients': 'list[AccountClientsOverviewVO]',
        'deposits': 'list[DepositVO]',
        'withdrawals': 'list[WithdrawalVO]'
    }

    # Every attribute serializes under its own name, so the JSON mapping is
    # the identity over swagger_types (same keys, same order).
    attribute_map = {name: name for name in swagger_types}

    def __init__(self, account_asset_size=None, account_asset_size_date=None, account_holdings=None, account_id=None, account_name=None, account_type_id=None, account_type_name=None, allocations=None, clients=None, deposits=None, withdrawals=None):  # noqa: E501
        """AccountOverviewVO - a model defined in Swagger"""  # noqa: E501
        supplied = (
            ('account_asset_size', account_asset_size),
            ('account_asset_size_date', account_asset_size_date),
            ('account_holdings', account_holdings),
            ('account_id', account_id),
            ('account_name', account_name),
            ('account_type_id', account_type_id),
            ('account_type_name', account_type_name),
            ('allocations', allocations),
            ('clients', clients),
            ('deposits', deposits),
            ('withdrawals', withdrawals),
        )
        # Initialize every private backing slot to None first, then route
        # each supplied value through its property setter, mirroring the
        # generated code's behavior.
        for attr_name, _ in supplied:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None
        for attr_name, value in supplied:
            if value is not None:
                setattr(self, attr_name, value)

    def to_dict(self):
        """Return the model's properties as a plain dict.

        Nested models are converted via their own ``to_dict``; lists and
        dicts are converted one level deep (same as the generated code).
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Kept from the generated template; only relevant for dict subclasses.
        if issubclass(AccountOverviewVO, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two overviews are equal when every field matches."""
        return isinstance(other, AccountOverviewVO) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _account_overview_property(attr_name):
    """Build the standard getter/setter property pair for *attr_name*.

    Replaces the eleven hand-unrolled, structurally identical property
    definitions emitted by swagger-codegen.
    """
    private = '_' + attr_name

    def _getter(self):
        return getattr(self, private)

    def _setter(self, value):
        setattr(self, private, value)

    return property(
        _getter,
        _setter,
        doc="Gets or sets the %s of this AccountOverviewVO." % attr_name,
    )


for _attr in AccountOverviewVO.swagger_types:
    setattr(AccountOverviewVO, _attr, _account_overview_property(_attr))
del _attr
| 1.585938 | 2 |
backend/src/namespaces/word_guessing/__init__.py | didi/MeetDot | 6 | 12765748 | <reponame>didi/MeetDot
from .namespace import WordGuessingNamespace
| 0.996094 | 1 |
zentral/contrib/inventory/migrations/0023_puppetdb.py | arubdesu/zentral | 634 | 12765749 | <reponame>arubdesu/zentral<filename>zentral/contrib/inventory/migrations/0023_puppetdb.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-15 13:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add PuppetDB-sourced inventory models and link them to machine snapshots."""

    dependencies = [
        ('inventory', '0022_auto_20170530_0724'),
    ]

    operations = [
        # Key/value extension pairs read from a node's Puppet certificate.
        migrations.CreateModel(
            name='PuppetCertificateExtension',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mt_hash', models.CharField(max_length=40, unique=True)),
                ('mt_created_at', models.DateTimeField(auto_now_add=True)),
                ('extension_key', models.TextField()),
                ('extension_value', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        # One inventory record per PuppetDB node report; links to extensions.
        migrations.CreateModel(
            name='PuppetDBInventory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mt_hash', models.CharField(max_length=40, unique=True)),
                ('mt_created_at', models.DateTimeField(auto_now_add=True)),
                ('certname_trusted', models.TextField()),
                ('authenticated', models.TextField()),
                ('aio_agent_version', models.TextField(blank=True, null=True)),
                ('environment', models.TextField(blank=True, null=True)),
                ('timestamp', models.DateTimeField()),
                ('agent_specified_environment', models.TextField(blank=True, null=True)),
                ('clientversion', models.TextField(blank=True, null=True)),
                ('extensions', models.ManyToManyField(to='inventory.PuppetCertificateExtension')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Arbitrary facts reported by Puppet for a node.
        migrations.CreateModel(
            name='PuppetFact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mt_hash', models.CharField(max_length=40, unique=True)),
                ('mt_created_at', models.DateTimeField(auto_now_add=True)),
                ('fact_key', models.TextField()),
                ('fact_key_display_name', models.TextField()),
                ('fact_value', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='puppetdbinventory',
            name='facts',
            field=models.ManyToManyField(to='inventory.PuppetFact'),
        ),
        # Attach the optional PuppetDB inventory to each machine snapshot.
        migrations.AddField(
            model_name='machinesnapshot',
            name='puppetdb_inventory',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.PuppetDBInventory'),
        ),
    ]
| 1.679688 | 2 |
Algorithm/LeetCode/188_best_time_to_buy_and_sell_stock_iv.py | hqzhang83/Everything101 | 0 | 12765750 | """
* https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iv/
You are given an integer array prices where prices[i] is the price of a given stock on the ith day.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Notice that you may not engage in multiple transactions simultaneously (i.e., you must sell the stock before you buy again).
"""
def max_profit_unlimited(prices, i, j):
    """Max profit between day i and day j (inclusive) with unlimited transactions.

    Equals the sum of every upward day-to-day price move in the window.
    """
    total = 0
    for day in range(i + 1, j + 1):
        gain = prices[day] - prices[day - 1]
        if gain > 0:
            total += gain
    return total
def max_profit_ij(prices, i, j):
    """Max profit with at most one transaction between day i and day j (inclusive).

    Returns 0 for out-of-range indices or an empty window (j <= i).
    """
    n = len(prices)
    if not (0 <= i < n and 0 <= j < n and i < j):
        return 0
    best = 0
    cheapest = prices[i]
    for day in range(i, j + 1):
        today = prices[day]
        if today - cheapest > best:
            best = today - cheapest
        if today < cheapest:
            cheapest = today
    return best
def solution1(k, prices):
    """Brute-force divide & conquer; exponential time, O(1) extra space (TLE)."""
    n = len(prices)
    if n <= 1:
        return 0
    if 2 * k >= n:
        # Enough transactions to take every upward move independently.
        return max_profit_unlimited(prices, 0, n - 1)

    def best_from(k_left, start):
        # Max profit using at most k_left transactions from day `start` on.
        if k_left < 1:
            return 0
        if k_left == 1:
            return max_profit_ij(prices, start, n - 1)
        best = 0
        for split in range(start, n):
            candidate = max_profit_ij(prices, start, split) + best_from(k_left - 1, split)
            best = max(best, candidate)
        return best

    return best_from(k, 0)
def solution2(k, prices):
    """Dynamic programming over (transactions, day). Time O(K*N); Space O(K*N)."""
    n = len(prices)
    if n <= 1:
        return 0
    if 2 * k >= n:
        # Enough transactions to take every upward move independently.
        return max_profit_unlimited(prices, 0, n - 1)
    # rows[t][d]: best cash after day d using at most t transactions.
    rows = [[0] * n for _ in range(k + 1)]
    for t in range(1, k + 1):
        prev_row, curr_row = rows[t - 1], rows[t]
        # Best value of an "open" position (bought, not yet sold) so far.
        open_best = prev_row[0]
        for day in range(1, n):
            open_best = max(prev_row[day], open_best + prices[day] - prices[day - 1])
            curr_row[day] = max(curr_row[day - 1], open_best)
    return rows[-1][-1]
def solution3(k, prices):
    """Greedy slope merging.

    Collect every maximal ascending run, then repeatedly merge/drop the
    adjacent pair that costs the least profit until at most k runs remain.
    """
    n = len(prices)
    if n <= 1 or k == 0:
        return 0
    if 2 * k >= n:
        # Enough transactions to take every upward move independently.
        return max_profit_unlimited(prices, 0, n - 1)

    # Every maximal non-decreasing run as a (start, end) index pair.
    runs = []
    day = 0
    while day < n:
        end = day
        while end + 1 < n and prices[end + 1] >= prices[end]:
            end += 1
        if end > day:
            runs.append((day, end))
        day = end + 1

    while len(runs) > k:
        # Pick the adjacent pair whose merge/drop loses the least profit.
        cheapest_lost = float('inf')
        left_idx = 0
        for idx in range(1, len(runs)):
            a, b = runs[idx - 1], runs[idx]
            lost = min(
                prices[a[1]] - prices[a[0]],   # drop run a entirely
                prices[b[1]] - prices[b[0]],   # drop run b entirely
                prices[a[1]] - prices[b[0]],   # profit lost by merging a and b
            )
            if lost < cheapest_lost:
                cheapest_lost = lost
                left_idx = idx - 1
        a, b = runs[left_idx], runs[left_idx + 1]
        profit_a = prices[a[1]] - prices[a[0]]
        profit_b = prices[b[1]] - prices[b[0]]
        profit_merged = prices[b[1]] - prices[a[0]]
        if profit_merged > profit_a and profit_merged > profit_b:
            replacement = (a[0], b[1])
        else:
            replacement = a if profit_a > profit_b else b
        runs[left_idx] = replacement
        runs.pop(left_idx + 1)

    return sum(prices[end] - prices[start] for (start, end) in runs)
import unittest
from unittest_data_provider import data_provider
def data():
    """Shared fixtures: (expected_profit, k, prices) per case."""
    cases = [
        (2, 2, [2, 4, 1]),
        (7, 2, [3, 2, 6, 5, 0, 3]),
        (0, 0, [1, 3]),
        (6, 2, [3, 3, 5, 0, 0, 3, 1, 4]),
        (5, 1, [6, 1, 6, 4, 3, 0, 2]),
        (5, 1, [8, 9, 6, 1, 6, 4, 3, 0, 2]),
        (11, 2, [8, 6, 4, 3, 3, 2, 3, 5, 8, 3, 8, 2, 6]),
    ]
    return cases
def big_data():
    """Large fixture reserved for the scalable solutions only."""
    prices = [48, 12, 60, 93, 97, 42, 25, 64, 17, 56, 85, 93, 9, 48, 52,
              42, 58, 85, 81, 84, 69, 36, 1, 54, 23, 15, 72, 15, 11, 94]
    return [(482, 11, prices)]
class Tests(unittest.TestCase):
    """Runs every solutionN defined in this module against the shared fixtures."""

    @data_provider(data)
    def test_all_solutions(self, expected, *argv):
        # Discover solution1, solution2, ... dynamically so new solutions
        # are picked up without editing this test.
        for n in range(1, 10):
            fn_name = 'solution' + str(n)
            if fn_name in globals():
                fn = globals()[fn_name]
                #print('Expect %s. Testing %s with input %s' % (str(expected), fn_name, str(argv)))
                self.assertEqual(expected, fn(*argv))

    @data_provider(big_data)
    def test_big_input(self, expected, *argv):
        # Only the efficient solutions run on the large fixture
        # (solution1 is exponential and would time out).
        self.assertEqual(expected, solution2(*argv))
        self.assertEqual(expected, solution3(*argv))
self.assertEqual(expected, solution3(*argv))
if __name__ == '__main__':
    # Verbose runner so each data-provider case is reported individually.
    suite = unittest.TestLoader().loadTestsFromTestCase(Tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 3.65625 | 4 |