hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
51d108784cff76de7b29f2597632b0919f2ce39a | 4,072 | py | Python | insert.py | DigiLog-N/Cassandra_Loaders | ad97e8b5ee4b9dfd4887476f1818fa101075aa8d | [
"Apache-2.0"
] | null | null | null | insert.py | DigiLog-N/Cassandra_Loaders | ad97e8b5ee4b9dfd4887476f1818fa101075aa8d | [
"Apache-2.0"
] | null | null | null | insert.py | DigiLog-N/Cassandra_Loaders | ad97e8b5ee4b9dfd4887476f1818fa101075aa8d | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# insert.py
# https://github.com/DigiLog-N/SynopticDataClient
# Copyright 2020 Canvass Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('digilog_n')
'''
rows = session.execute('SELECT * FROM digilog_n.obd')
for row in rows:
print(row)
user_lookup_stmt = session.prepare("SELECT * FROM users WHERE user_id=?")
INSERT INTO
users = []
for user_id in user_ids_to_query:
user = session.execute(user_lookup_stmt, [user_id])
users.append(user)
session.execute(
"""
INSERT INTO users (name, credits, user_id)
VALUES (%s, %s, %s)
""",
("John O'Reilly", 42, uuid.uuid1())
)
'''
| 32.83871 | 79 | 0.561886 | ##############################################################################
# insert.py
# https://github.com/DigiLog-N/SynopticDataClient
# Copyright 2020 Canvass Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('digilog_n')
'''
rows = session.execute('SELECT * FROM digilog_n.obd')
for row in rows:
print(row)
user_lookup_stmt = session.prepare("SELECT * FROM users WHERE user_id=?")
INSERT INTO
users = []
for user_id in user_ids_to_query:
user = session.execute(user_lookup_stmt, [user_id])
users.append(user)
session.execute(
"""
INSERT INTO users (name, credits, user_id)
VALUES (%s, %s, %s)
""",
("John O'Reilly", 42, uuid.uuid1())
)
'''
class obd:
    """Loader for OBD-II vehicle telemetry rows stored in a CSV file.

    Each CSV row is expected to hold 34 comma-separated columns; see the
    column map inside ``load`` for the meaning of each index.

    Fixes over the original block: ``__init__`` was missing ``self``, the
    attribute assignment used the undefined name ``csv.path``, C-style ``//``
    comments made the module unparseable, and a Cassandra CREATE TABLE
    fragment was fused into the method body (preserved as a comment below).
    """

    def __init__(self, csv_path):
        """Remember the path of the CSV file to load later."""
        self.csv_path = csv_path

    def load(self):
        """Parse the CSV file and return a list with one dict per row.

        Numeric columns are converted with int()/float() exactly as the
        original field assignments did; everything else stays a string.
        Raises ValueError if a numeric column cannot be converted and
        IndexError if a row has fewer than 34 columns.
        """
        with open(self.csv_path, 'r') as f:
            lines = [x.strip().split(',') for x in f.readlines()]
        records = []
        for line in lines:
            records.append({
                'vehicle_id': line[6],
                # ts_* placeholders were hard-coded in the original code.
                'ts_year': 1900,
                'ts_month': 1,
                'ts_day': 1,
                'air_intake_temp': int(line[17]),
                'ambient_air_temp': int(line[11]),
                # automatic: three options 's', 'n', or null
                'automatic': line[5],
                'barometric_pressure_kpa': int(line[7]),
                'car_year': int(line[3]),
                'days_of_week': int(line[30]),
                'dtc_number': line[24],
                'engine_coolant_temp': int(line[8]),
                'engine_load': float(line[10]),
                'engine_power': float(line[4]),
                'engine_rpm': int(line[12]),
                # could be converted to elapsed time in seconds or h/m/s
                'engine_runtime': line[22],
                'epoch_time': line[0],
                'equiv_ratio': line[27],
                'fuel_level': line[9],
                'fuel_pressure': line[18],
                'fuel_type': line[16],
                'hours': line[29],
                'intake_manifold_pressure': line[13],
                'long_term_fuel_trim_bank_2': line[15],
                'maf': line[14],
                'mark': line[1],
                'min': line[28],
                'model': line[2],
                'months': line[31],
                'p_class': line[33],
                'short_term_fuel_trim_bank_1': line[21],
                'short_term_fuel_trim_bank_2': line[20],
                'speed': line[19],
                'throttle_pos': line[23],
                'timing_advance': line[26],
                'trouble_codes': line[25],
                'year': line[32],
            })
        return records

    # Cassandra schema fragment recovered from the corrupted source, kept for
    # reference (column types for the digilog_n.obd table):
    #   engine_runtime text, equiv_ratio float, fuel_level float,
    #   fuel_pressure int, fuel_type text, hours int,
    #   intake_manifold_pressure int, long_term_fuel_trim_bank_2 float,
    #   maf float, mark text, min int, model text, months int,
    #   short_term_fuel_trim_bank_1 float, short_term_fuel_trim_bank_2 float,
    #   speed int, throttle_pos float, timestamp bigint, timing_advance float,
    #   trouble_codes text, year int,
    #   -- standard practice for time-series data: vehicle_id as partition key,
    #   -- year/month/day components of the timestamp as clustering columns:
    #   primary key ((vehicle_id), ts_year, ts_month, ts_day)
| 1,744 | 947 | 23 |
d30800ec4925e47b07687e5abc8b953f353accd1 | 184 | py | Python | nwu-eecs-339-db/py-btree-lab/tests/config.py | mzhang-code/courses | 35672c5355cc10ddebb54d17fb7ccbf0f462be00 | [
"MIT"
] | null | null | null | nwu-eecs-339-db/py-btree-lab/tests/config.py | mzhang-code/courses | 35672c5355cc10ddebb54d17fb7ccbf0f462be00 | [
"MIT"
] | null | null | null | nwu-eecs-339-db/py-btree-lab/tests/config.py | mzhang-code/courses | 35672c5355cc10ddebb54d17fb7ccbf0f462be00 | [
"MIT"
] | 1 | 2019-11-29T17:49:37.000Z | 2019-11-29T17:49:37.000Z |
import pytest
from btreelab.disk import Disk, DiskController
@pytest.fixture()
def dc():
'''disk controller
'''
return DiskController(block_size=124, block_num=8)
| 16.727273 | 55 | 0.690217 |
import pytest
from btreelab.disk import Disk, DiskController
@pytest.fixture()
def dc():
    """Provide a DiskController backed by 8 blocks of 124 bytes each."""
    controller = DiskController(block_size=124, block_num=8)
    return controller
| 0 | 0 | 0 |
f208f86a147806a8b0c97fa5e9dd1609d1f04c1e | 3,092 | py | Python | gapid_tests/resource_creation_tests/vkCreateFramebuffer_test/vkCreateFramebuffer_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 55 | 2017-06-20T13:54:31.000Z | 2022-02-08T23:58:11.000Z | gapid_tests/resource_creation_tests/vkCreateFramebuffer_test/vkCreateFramebuffer_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 53 | 2017-06-15T19:23:07.000Z | 2022-03-30T19:56:30.000Z | gapid_tests/resource_creation_tests/vkCreateFramebuffer_test/vkCreateFramebuffer_test.py | RenfengLiu/vulkan_test_applications | 04359b7184ad94659810213ff63ae71296426182 | [
"Apache-2.0"
] | 42 | 2017-06-15T19:05:40.000Z | 2022-03-30T14:15:25.000Z | # Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from struct_offsets import VulkanStruct, UINT32_T, SIZE_T, POINTER
from struct_offsets import HANDLE, FLOAT, CHAR, ARRAY
from vulkan_constants import *
FRAMEBUFFER_CREATE_INFO = [
("sType", UINT32_T), ("pNext", POINTER), ("flags", UINT32_T),
("renderPass", HANDLE), ("attachmentCount", UINT32_T),
("pAttachments", POINTER), ("width", UINT32_T), ("height", UINT32_T),
("layers", UINT32_T)
]
@gapit_test("vkCreateFramebuffer_test")
| 45.470588 | 78 | 0.731565 | # Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from struct_offsets import VulkanStruct, UINT32_T, SIZE_T, POINTER
from struct_offsets import HANDLE, FLOAT, CHAR, ARRAY
from vulkan_constants import *
# Field layout of VkFramebufferCreateInfo as (name, type) pairs, consumed by
# struct_offsets.VulkanStruct to compute per-architecture member offsets.
FRAMEBUFFER_CREATE_INFO = [
    ("sType", UINT32_T), ("pNext", POINTER), ("flags", UINT32_T),
    ("renderPass", HANDLE), ("attachmentCount", UINT32_T),
    ("pAttachments", POINTER), ("width", UINT32_T), ("height", UINT32_T),
    ("layers", UINT32_T)
]
@gapit_test("vkCreateFramebuffer_test")
class SingleAttachment(GapitTest):
    def expect(self):
        """Verify the vkCreateFramebuffer call trace for a single attachment.

        Walks the captured call sequence in order: the create call, the
        pCreateInfo struct it was given, and the matching destroy call.
        """
        architecture = self.architecture
        # NOTE(review): device_properties is captured but never inspected;
        # presumably the call is consumed only to advance the trace cursor.
        device_properties = require(
            self.next_call_of("vkGetPhysicalDeviceProperties"))
        create_framebuffer = require(self.next_call_of("vkCreateFramebuffer"))
        require_not_equal(0, create_framebuffer.int_device)
        require_equal(0, create_framebuffer.hex_pAllocator)
        # Decode the VkFramebufferCreateInfo struct read by the driver.
        framebuffer_create_info = VulkanStruct(
            architecture, FRAMEBUFFER_CREATE_INFO,
            get_read_offset_function(create_framebuffer,
                                     create_framebuffer.hex_pCreateInfo))
        require_equal(VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                      framebuffer_create_info.sType)
        require_equal(0, framebuffer_create_info.pNext)
        require_equal(0, framebuffer_create_info.flags)
        require_not_equal(0, framebuffer_create_info.renderPass)
        require_equal(1, framebuffer_create_info.attachmentCount)
        require_not_equal(0, framebuffer_create_info.pAttachments)
        require_not_equal(0, framebuffer_create_info.width)
        require_not_equal(0, framebuffer_create_info.height)
        require_equal(1, framebuffer_create_info.layers)
        # The single attachment handle must have been read from pAttachments.
        _ = require(
            create_framebuffer.get_read_data(
                framebuffer_create_info.pAttachments,
                NON_DISPATCHABLE_HANDLE_SIZE))
        # The new framebuffer handle must have been written to pFramebuffer.
        _ = require(
            create_framebuffer.get_write_data(
                create_framebuffer.hex_pFramebuffer,
                NON_DISPATCHABLE_HANDLE_SIZE))
        destroy_framebuffer = require(
            self.next_call_of("vkDestroyFramebuffer"))
        require_not_equal(0, destroy_framebuffer.int_framebuffer)
        require_not_equal(0, destroy_framebuffer.int_device)
        require_equal(0, destroy_framebuffer.hex_pAllocator)
| 1,795 | 13 | 49 |
2af14be3d99155fb27452df82a3ee04acbf74715 | 16,524 | py | Python | benchmarks/scripts/benchmarks.py | riddopic/delta | 532da849290f9fa63ccacfc176f39f6d4775035d | [
"Apache-2.0"
] | null | null | null | benchmarks/scripts/benchmarks.py | riddopic/delta | 532da849290f9fa63ccacfc176f39f6d4775035d | [
"Apache-2.0"
] | null | null | null | benchmarks/scripts/benchmarks.py | riddopic/delta | 532da849290f9fa63ccacfc176f39f6d4775035d | [
"Apache-2.0"
] | null | null | null | #
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from scripts.utils import *
from datetime import datetime
import time
class BenchmarkSpec:
"""
Specifications of a benchmark.
:param format_name: Spark format name
:param maven_artifacts: Maven artifact name in x:y:z format
:param spark_confs: list of spark conf strings in key=value format
:param benchmark_main_class: Name of main Scala class from the JAR to run
:param main_class_args command line args for the main class
"""
class TPCDSDataLoadSpec(BenchmarkSpec):
"""
Specifications of TPC-DS data load process.
Always mixin in this first before the base benchmark class.
"""
class TPCDSBenchmarkSpec(BenchmarkSpec):
"""
Specifications of TPC-DS benchmark
"""
# ============== Delta benchmark specifications ==============
class DeltaBenchmarkSpec(BenchmarkSpec):
"""
Specification of a benchmark using the Delta format
"""
@staticmethod
# ============== General benchmark execution ==============
class Benchmark:
"""
Represents a benchmark that can be run on a remote Spark cluster
:param benchmark_name: A name to be used for uniquely identifying this benchmark.
Added to file names generated by this benchmark.
:param benchmark_spec: Specification of the benchmark. See BenchmarkSpec.
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| 44.659459 | 131 | 0.653292 | #
# Copyright (2021) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from scripts.utils import *
from datetime import datetime
import time
class BenchmarkSpec:
    """Describes how to launch one benchmark run on a Spark cluster.

    :param format_name: Spark data source format name (e.g. "delta")
    :param maven_artifacts: comma-separated maven coordinates in x:y:z form
    :param spark_confs: list of spark conf strings, each "key=value"
    :param benchmark_main_class: main Scala class from the benchmark JAR
    :param main_class_args: command line args for the main class
    :param extra_spark_shell_args: extra flags for spark-shell / spark-submit
    """
    def __init__(
            self, format_name, maven_artifacts, spark_confs,
            benchmark_main_class, main_class_args, extra_spark_shell_args=None, **kwargs):
        self.format_name = format_name
        self.maven_artifacts = maven_artifacts
        self.spark_confs = spark_confs
        self.benchmark_main_class = benchmark_main_class
        # Normalize the optional list arguments so later .extend() calls are safe.
        self.benchmark_main_class_args = [] if main_class_args is None else main_class_args
        self.extra_spark_shell_args = [] if extra_spark_shell_args is None else extra_spark_shell_args

    def append_spark_confs(self, new_confs):
        """Extend the spark confs; anything that is not a list is ignored."""
        if isinstance(new_confs, list):
            self.spark_confs.extend(new_confs)

    def append_main_class_args(self, new_args):
        """Extend the main class args; anything that is not a list is ignored."""
        if isinstance(new_args, list):
            self.benchmark_main_class_args.extend(new_args)

    def get_sparksubmit_cmd(self, benchmark_jar_path):
        """Build (echoing each conf and the result) the spark-submit command."""
        conf_flags = []
        for conf in self.spark_confs:
            print(f"conf={conf}")
            conf_flags.append(f'--conf "{conf}" ')
        spark_conf_str = "".join(conf_flags)
        main_class_args = ' '.join(self.benchmark_main_class_args)
        spark_shell_args_str = ' '.join(self.extra_spark_shell_args)
        spark_submit_cmd = (
            f"spark-submit {spark_shell_args_str} --packages {self.maven_artifacts} " +
            f"{spark_conf_str} --class {self.benchmark_main_class} " +
            f"{benchmark_jar_path} {main_class_args}"
        )
        print(spark_submit_cmd)
        return spark_submit_cmd

    def get_sparkshell_cmd(self, benchmark_jar_path, benchmark_init_file_path):
        """Build (echoing each conf and the result) the spark-shell command."""
        conf_flags = []
        for conf in self.spark_confs:
            print(f"conf={conf}")
            conf_flags.append(f'--conf "{conf}" ')
        spark_conf_str = "".join(conf_flags)
        spark_shell_args_str = ' '.join(self.extra_spark_shell_args)
        spark_shell_cmd = (
            f"spark-shell {spark_shell_args_str} --packages {self.maven_artifacts} " +
            f"{spark_conf_str} --jars {benchmark_jar_path} -I {benchmark_init_file_path}"
        )
        print(spark_shell_cmd)
        return spark_shell_cmd
class TPCDSDataLoadSpec(BenchmarkSpec):
    """
    Specifications of TPC-DS data load process.
    Always mixin in this first before the base benchmark class.
    """
    def __init__(self, scale_in_gb, exclude_nulls=True, **kwargs):
        # forward all keyword args to next constructor
        super().__init__(benchmark_main_class="benchmark.TPCDSDataLoad", **kwargs)
        # NOTE(review): self.format_name is assumed to have been set by a later
        # class in the MRO (e.g. DeltaBenchmarkSpec) before this extend runs —
        # hence the "mixin this first" requirement above.
        self.benchmark_main_class_args.extend([
            "--format", self.format_name,
            "--scale-in-gb", str(scale_in_gb),
            "--exclude-nulls", str(exclude_nulls),
        ])
        # To access the public TPCDS parquet files on S3
        self.spark_confs.extend(["spark.hadoop.fs.s3.useRequesterPaysHeader=true"])
class TPCDSBenchmarkSpec(BenchmarkSpec):
    """
    Specifications of TPC-DS benchmark
    """
    def __init__(self, scale_in_gb, **kwargs):
        # forward all keyword args to next constructor
        super().__init__(benchmark_main_class="benchmark.TPCDSBenchmark", **kwargs)
        # after init of super class, use the format to add main class args
        self.benchmark_main_class_args.extend([
            "--format", self.format_name,
            "--scale-in-gb", str(scale_in_gb)
        ])
# ============== Delta benchmark specifications ==============
class DeltaBenchmarkSpec(BenchmarkSpec):
    """
    Benchmark specification preconfigured for the Delta Lake format.

    Wires in the Delta SQL extension, catalog and S3 log store confs, and
    resolves the delta-core / delta-hive maven coordinates for the requested
    Delta and Scala versions.
    """
    def __init__(self, delta_version, benchmark_main_class, main_class_args=None, scala_version="2.12", **kwargs):
        # Confs every Delta run needs: SQL extension, catalog, S3-safe log store.
        required_confs = [
            "spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension",
            "spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog",
            "spark.delta.logStore.class=org.apache.spark.sql.delta.storage.S3SingleDriverLogStore"
        ]
        self.scala_version = scala_version
        super().__init__(
            format_name="delta",
            maven_artifacts=self.delta_maven_artifacts(delta_version, self.scala_version),
            spark_confs=required_confs,
            benchmark_main_class=benchmark_main_class,
            main_class_args=main_class_args,
            **kwargs
        )

    def update_delta_version(self, new_delta_version):
        """Repoint the spec at a different Delta release (e.g. a local snapshot)."""
        self.maven_artifacts = \
            DeltaBenchmarkSpec.delta_maven_artifacts(new_delta_version, self.scala_version)

    @staticmethod
    def delta_maven_artifacts(delta_version, scala_version):
        """Return maven coordinates for delta-core plus the delta-hive connector."""
        core = f"io.delta:delta-core_{scala_version}:{delta_version}"
        hive = f"io.delta:delta-hive_{scala_version}:0.2.0"
        return ",".join([core, hive])
class DeltaTPCDSDataLoadSpec(TPCDSDataLoadSpec, DeltaBenchmarkSpec):
    """TPC-DS data load spec for the Delta format (data-load mixin first)."""
    def __init__(self, delta_version, scale_in_gb=1):
        super().__init__(delta_version=delta_version, scale_in_gb=scale_in_gb)
class DeltaTPCDSBenchmarkSpec(TPCDSBenchmarkSpec, DeltaBenchmarkSpec):
    """TPC-DS query benchmark spec for the Delta format (benchmark mixin first)."""
    def __init__(self, delta_version, scale_in_gb=1):
        super().__init__(delta_version=delta_version, scale_in_gb=scale_in_gb)
# ============== General benchmark execution ==============
class Benchmark:
    """
    Represents a benchmark that can be run on a remote Spark cluster
    :param benchmark_name: A name to be used for uniquely identifying this benchmark.
                           Added to file names generated by this benchmark.
    :param benchmark_spec: Specification of the benchmark. See BenchmarkSpec.
    """
    def __init__(self, benchmark_name, benchmark_spec, use_spark_shell, local_delta_dir=None):
        # Unique id = timestamp + name; used to prefix every generated file name.
        now = datetime.now()
        self.benchmark_id = now.strftime("%Y%m%d-%H%M%S") + "-" + benchmark_name
        self.benchmark_spec = benchmark_spec
        # Add benchmark id as a spark conf so that it get transferred automatically to scala code
        self.benchmark_spec.append_spark_confs([f"spark.benchmarkId={self.benchmark_id}"])
        self.output_file = Benchmark.output_file(self.benchmark_id)
        self.json_report_file = Benchmark.json_report_file(self.benchmark_id)
        self.completed_file = Benchmark.completed_file(self.benchmark_id)
        self.use_spark_shell = use_spark_shell
        self.local_delta_dir = local_delta_dir

    def run(self, cluster_hostname, ssh_id_file):
        """Build/upload the benchmark JAR, start it over SSH, and block until done."""
        if self.local_delta_dir and isinstance(self.benchmark_spec, DeltaBenchmarkSpec):
            # Upload new Delta jar to cluster and update spec to use the jar's version
            delta_version_to_use = \
                self.upload_delta_jars_to_cluster_and_get_version(cluster_hostname, ssh_id_file)
            self.benchmark_spec.update_delta_version(delta_version_to_use)
        jar_path_in_cluster = self.upload_jar_to_cluster(cluster_hostname, ssh_id_file)
        self.start_benchmark_via_ssh(cluster_hostname, ssh_id_file, jar_path_in_cluster)
        Benchmark.wait_for_completion(cluster_hostname, ssh_id_file, self.benchmark_id)

    def spark_submit_script_content(self, jar_path):
        """Return the bash script text that runs the benchmark via spark-submit.

        NOTE(review): unlike the spark-shell variant below, this script never
        touches the completed-file that wait_for_completion polls — confirm
        how completion is detected in spark-submit mode.
        """
        return f"""
#!/bin/bash
jps | grep "Spark" | cut -f 1 -d ' ' | xargs kill -9
set -e
{self.benchmark_spec.get_sparksubmit_cmd(jar_path)} 2>&1 | tee {self.output_file}
""".strip()

    def spark_shell_script_content(self, jar_path):
        """Return the bash script text that runs the benchmark via spark-shell.

        Generates a scala init file that calls the benchmark main class and
        exits 1 on failure, then marks completion by touching the completed file.
        """
        shell_init_file_name = f"{self.benchmark_id}_shell_init.scala"
        benchmark_cmd_line_params_str = \
            ', '.join(f'"{w}"' for w in self.benchmark_spec.benchmark_main_class_args)
        call_main_with_args = \
            f"{self.benchmark_spec.benchmark_main_class}.main(Array[String]({benchmark_cmd_line_params_str}))"
        shell_init_file_content = \
            "try { %s } catch { case t => println(t); println(\"FAILED\"); System.exit(1) } ; System.exit(0)" % call_main_with_args
        shell_cmd = self.benchmark_spec.get_sparkshell_cmd(jar_path, shell_init_file_name)
        return f"""
#!/bin/bash
jps | grep "Spark" | cut -f 1 -d ' ' | xargs kill -9
echo '{shell_init_file_content}' > {shell_init_file_name}
{shell_cmd} 2>&1 | tee {self.output_file}
touch {self.completed_file}
""".strip()

    def upload_jar_to_cluster(self, cluster_hostname, ssh_id_file, delta_version_to_use=None):
        """Compile the benchmark assembly JAR locally and scp it to the cluster.

        Returns the remote path of the uploaded JAR (relative to the hadoop
        user's home directory).
        """
        # Compile JAR
        # Note: Deleting existing JARs instead of sbt clean is faster
        if os.path.exists("target"):
            run_cmd("""find target -name "*.jar" -type f -delete""", stream_output=True)
        run_cmd("build/sbt assembly", stream_output=True)
        (_, out, _) = run_cmd("find target -name *.jar")
        print(">>> Benchmark JAR compiled\n")
        # Upload JAR
        jar_local_path = out.decode("utf-8").strip()
        jar_remote_path = f"{self.benchmark_id}-benchmarks.jar"
        scp_cmd = \
            f"scp -C -i {ssh_id_file} {jar_local_path} hadoop@{cluster_hostname}:{jar_remote_path}"
        print(scp_cmd)
        run_cmd(scp_cmd, stream_output=True)
        print(">>> Benchmark JAR uploaded to cluster\n")
        return f"~/{jar_remote_path}"

    def start_benchmark_via_ssh(self, cluster_hostname, ssh_id_file, jar_path):
        """Upload the generated run script and launch it in a detached screen."""
        # Generate and upload the script to run the benchmark
        script_file_name = f"{self.benchmark_id}-cmd.sh"
        if self.use_spark_shell:
            script_file_text = self.spark_shell_script_content(jar_path)
        else:
            script_file_text = self.spark_submit_script_content(jar_path)
        # print("Benchmark script:\n----\n" + script_file_text + "\n----")
        try:
            script_file = open(script_file_name, "w")
            script_file.write(script_file_text)
            script_file.close()
            scp_cmd = (
                f"scp -i {ssh_id_file} {script_file_name}" +
                f" hadoop@{cluster_hostname}:{script_file_name}"
            )
            print(scp_cmd)
            run_cmd(scp_cmd, stream_output=True)
            run_cmd(f"ssh -i {ssh_id_file} hadoop@{cluster_hostname} chmod +x {script_file_name}")
        finally:
            # The local copy of the script is only needed for the upload.
            if os.path.exists(script_file_name):
                os.remove(script_file_name)
        print(">>> Benchmark script generated and uploaded\n")
        # Start the script
        job_cmd = (
            f"ssh -i {ssh_id_file} hadoop@{cluster_hostname} " +
            f"screen -d -m bash {script_file_name}"
        )
        print(job_cmd)
        run_cmd(job_cmd, stream_output=True)
        # Print the screen where it is running
        run_cmd(f"ssh -i {ssh_id_file} hadoop@{cluster_hostname}" +
                f""" "screen -ls ; sleep 2; echo Files for this benchmark: ; ls {self.benchmark_id}*" """,
                stream_output=True, throw_on_error=False)
        print(f">>> Benchmark id {self.benchmark_id} started in a screen. Stdout piped into {self.output_file}. "
              f"Final report will be generated on completion in {self.json_report_file}.\n")

    @staticmethod
    def output_file(benchmark_id):
        """Name of the file capturing the benchmark's stdout/stderr."""
        return f"{benchmark_id}-out.txt"

    @staticmethod
    def json_report_file(benchmark_id):
        """Name of the JSON report produced by the benchmark."""
        return f"{benchmark_id}-report.json"

    @staticmethod
    def csv_report_file(benchmark_id):
        """Name of the CSV report produced by the benchmark."""
        return f"{benchmark_id}-report.csv"

    @staticmethod
    def completed_file(benchmark_id):
        """Name of the marker file whose existence signals completion."""
        return f"{benchmark_id}-completed.txt"

    @staticmethod
    def wait_for_completion(cluster_hostname, ssh_id_file, benchmark_id, copy_report=True):
        """Poll the cluster until the completed-file appears, then fetch reports.

        Polls once a minute; returns early if the output file never appeared
        (benchmark failed to start). Success is inferred from "SUCCESS" in the
        tail of the output file.
        """
        completed = False
        succeeded = False
        output_file = Benchmark.output_file(benchmark_id)
        completed_file = Benchmark.completed_file(benchmark_id)
        json_report_file = Benchmark.json_report_file(benchmark_id)
        csv_report_file = Benchmark.csv_report_file(benchmark_id)
        print(f"\nWaiting for completion of benchmark id {benchmark_id}")
        while not completed:
            # Print the size of the output file to show progress
            (_, out, _) = run_cmd_over_ssh(f"stat -c '%n: [%y] [%s bytes]' {output_file}",
                                           cluster_hostname, ssh_id_file,
                                           throw_on_error=False)
            out = out.decode("utf-8").strip()
            print(out)
            if "No such file" in out:
                print(">>> Benchmark failed to start")
                return
            # Check for the existence of the completed file
            (_, out, _) = run_cmd_over_ssh(f"ls {completed_file}", cluster_hostname, ssh_id_file,
                                           throw_on_error=False)
            if completed_file in out.decode("utf-8"):
                completed = True
            else:
                time.sleep(60)
        # Check the last few lines of output files to identify success
        (_, out, _) = run_cmd_over_ssh(f"tail {output_file}", cluster_hostname, ssh_id_file,
                                       throw_on_error=False)
        if "SUCCESS" in out.decode("utf-8"):
            succeeded = True
            print(">>> Benchmark completed with success\n")
        else:
            print(">>> Benchmark completed with failure\n")
        # Copy reports
        if succeeded and copy_report:
            report_files = [ json_report_file, csv_report_file]
            for report_file in report_files:
                run_cmd(f"scp -C -i {ssh_id_file} " +
                        f"hadoop@{cluster_hostname}:{report_file} {report_file}",
                        stream_output=True)
            print(">>> Copied reports to local directory")

    def upload_delta_jars_to_cluster_and_get_version(self, cluster_hostname, ssh_id_file):
        """Build Delta from a local checkout, upload its JARs, return the version.

        Publishes to the local ivy cache, parses the version out of the sbt
        output, then mirrors ~/.ivy2/local/io.delta into the cluster's ivy
        cache so --packages resolution picks up the locally built artifacts.
        Raises Exception if no local delta dir was configured or the version
        cannot be determined.
        """
        if not self.local_delta_dir:
            raise Exception("Path to delta repo not specified")
        delta_repo_dir = os.path.abspath(self.local_delta_dir)
        with WorkingDirectory(delta_repo_dir):
            # Compile Delta JARs by publishing to local maven cache
            print(f"Compiling Delta to local dir {delta_repo_dir}")
            local_maven_delta_dir = os.path.expanduser("~/.ivy2/local/io.delta/")
            if os.path.exists(local_maven_delta_dir):
                run_cmd(f"rm -rf {local_maven_delta_dir}", stream_output=True)
                print(f"Cleared local maven cache at {local_maven_delta_dir}")
            run_cmd("build/sbt publishLocal", stream_output=False, throw_on_error=True)
            # Get the new version
            (_, out, _) = run_cmd("""build/sbt "show version" """)
            version = out.decode("utf-8").strip().rsplit("\n", 1)[-1].rsplit(" ", 1)[-1].strip()
            if not version:
                raise Exception(f"Could not find the version from the sbt output:\n--\n{out}\n-")
            # Upload JARs to cluster's local maven cache
            remote_maven_dir = ".ivy2/local/"  # must have "/" at the end
            run_cmd_over_ssh(f"rm -rf {remote_maven_dir}/*", cluster_hostname,
                             ssh_id_file, stream_output=True, throw_on_error=False)
            run_cmd_over_ssh(f"mkdir -p {remote_maven_dir}", cluster_hostname,
                             ssh_id_file, stream_output=True)
            scp_cmd = f"""scp -r -C -i {ssh_id_file} {local_maven_delta_dir.rstrip("/")} """ +\
                      f"hadoop@{cluster_hostname}:{remote_maven_dir}"
            print(scp_cmd)
            run_cmd(scp_cmd, stream_output=True)
            print(f">>> Delta {version} JAR uploaded to cluster\n")
            return version
| 13,715 | 96 | 681 |
7fd483db80d173ed61885938f51715a5a658b565 | 3,003 | py | Python | sewing/display.py | except-pass/sewing | d6b1098b1d7826da3b2cfb4fe3f591ac5dea5529 | [
"Apache-2.0"
] | 1 | 2021-08-16T23:57:35.000Z | 2021-08-16T23:57:35.000Z | sewing/display.py | except-pass/sewing | d6b1098b1d7826da3b2cfb4fe3f591ac5dea5529 | [
"Apache-2.0"
] | null | null | null | sewing/display.py | except-pass/sewing | d6b1098b1d7826da3b2cfb4fe3f591ac5dea5529 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/display.ipynb (unless otherwise specified).
__all__ = ['encode', 'DiscordEncoder', 'Formatter', 'serialize_content', 'html_content']
# Cell
import discord
# Cell
import json
# Cell
#TODO change the data model for this to something more standard.
# use only strings for the keywords rather than discord objects | 33.741573 | 92 | 0.55045 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/display.ipynb (unless otherwise specified).
__all__ = ['encode', 'DiscordEncoder', 'Formatter', 'serialize_content', 'html_content']
# Cell
import discord
# Cell
import json
def encode(u):
    """Render a discord object as a one-line display string.

    Messages become "(timestamp)author: content"; threads, channels and
    guilds become labelled names. Raises TypeError for anything else.
    """
    if isinstance(u, discord.Message):
        stamp = u.created_at.strftime("%b %d %Y %H:%M:%S")
        return f"({stamp}){u.author.name}: {u.content}"
    if isinstance(u, discord.Thread):
        return 'Thread: {}'.format(u.name)
    if isinstance(u, discord.TextChannel):
        return 'Channel: {}'.format(u.name)
    if isinstance(u, discord.Guild):
        return 'Guild: {}'.format(u.name)
    raise TypeError("Unexpected type {0}".format(type(u).__name__))
class DiscordEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize discord.py objects.

    Messages render as "(ISO timestamp)author: content"; threads, channels
    and guilds render as labelled names. Anything else is delegated to the
    base class, which raises TypeError for unserializable objects.
    """

    def default(self, u):
        """Return a JSON-serializable representation of *u*.

        Bug fix: the fallback previously called
        ``json.JSONEncoder.default(self, obj)`` with the undefined name
        ``obj`` (NameError); it now correctly passes ``u``.
        """
        if isinstance(u, discord.Message):
            return "({ts}){author}: {content}".format(ts=u.created_at.isoformat(),
                                                      author=u.author.name,
                                                      content=u.content)
        elif isinstance(u, discord.Thread):
            return 'Thread: {}'.format(u.name)
        elif isinstance(u, discord.TextChannel):
            return 'Channel: {}'.format(u.name)
        elif isinstance(u, discord.Guild):
            return 'Guild: {}'.format(u.name)
        else:
            # Let the base class raise the standard TypeError.
            return json.JSONEncoder.default(self, u)
class Formatter:
    """Accumulates encoded discord objects as display lines."""

    def __init__(self):
        # Lines are appended in the order objects are added.
        self.lines = []

    def add(self, thing):
        """Encode *thing* (via ``encode``) and append the result to ``lines``."""
        self.lines.append(encode(thing))
# Cell
#TODO change the data model for this to something more standard.
# use only strings for the keywords rather than discord objects
def serialize_content(guild_content):
    """Flatten a guild -> channel -> thread -> message mapping into lines.

    Threads with no messages are omitted entirely; returns the list of
    encoded display lines.
    """
    out = Formatter()
    print('--------- content summary -------------')
    for guild, channels in guild_content.items():
        out.add(guild)
        for channel, threads in channels.items():
            out.add(channel)
            for thread, messages in threads.items():
                if not messages:
                    continue  # skip empty threads
                out.add(thread)
                for message in messages:
                    out.add(message)
    return out.lines
def html_content(guild_content):
    """Serialize the guild content and join the lines with HTML line breaks."""
    lines = serialize_content(guild_content)
    print(lines)
    joined = '\n<br>'.join(lines)
    return joined
b96f81a47e50f86d3f3575e40a69cd10fa944edd | 13,154 | py | Python | skyportal/handlers/api/comment.py | LSSTDESC/skyportal | 1a433aae67b26ffd3516e65e0fdbf866b4751486 | [
"BSD-3-Clause"
] | null | null | null | skyportal/handlers/api/comment.py | LSSTDESC/skyportal | 1a433aae67b26ffd3516e65e0fdbf866b4751486 | [
"BSD-3-Clause"
] | null | null | null | skyportal/handlers/api/comment.py | LSSTDESC/skyportal | 1a433aae67b26ffd3516e65e0fdbf866b4751486 | [
"BSD-3-Clause"
] | null | null | null | import string
import base64
from distutils.util import strtobool
from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import (
DBSession,
Source,
Comment,
Group,
Candidate,
Filter,
Obj,
User,
UserNotification,
)
| 33.133501 | 108 | 0.504713 | import string
import base64
from distutils.util import strtobool
from marshmallow.exceptions import ValidationError
from baselayer.app.access import permissions, auth_or_token
from ..base import BaseHandler
from ...models import (
DBSession,
Source,
Comment,
Group,
Candidate,
Filter,
Obj,
User,
UserNotification,
)
def users_mentioned(text):
    """Return the User rows whose usernames are @-mentioned in *text*."""
    # Keep "-" and "@" when stripping punctuation so usernames stay intact.
    strip_chars = string.punctuation.replace("-", "").replace("@", "")
    usernames = []
    for token in text.replace(",", " ").split():
        token = token.strip(strip_chars)
        if token.startswith("@"):
            usernames.append(token.replace("@", ""))
    return User.query.filter(User.username.in_(usernames)).all()
class CommentHandler(BaseHandler):
    """REST handler for comments on objects: GET/POST/PUT/DELETE.

    Access control is enforced through the ``get_if_readable_by`` model
    helpers plus the ``auth_or_token`` / ``permissions`` decorators; the
    method docstrings below are OpenAPI specs consumed by the API docs.
    """
    @auth_or_token
    def get(self, comment_id):
        """
        ---
        description: Retrieve a comment
        tags:
          - comments
        parameters:
          - in: path
            name: comment_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: SingleComment
          400:
            content:
              application/json:
                schema: Error
        """
        comment = Comment.get_if_readable_by(comment_id, self.current_user)
        if comment is None:
            return self.error('Invalid comment ID.')
        self.verify_permissions()
        return self.success(data=comment)
    @permissions(['Comment'])
    def post(self):
        """
        ---
        description: Post a comment
        tags:
          - comments
        requestBody:
          content:
            application/json:
              schema:
                type: object
                properties:
                  obj_id:
                    type: string
                  text:
                    type: string
                  group_ids:
                    type: array
                    items:
                      type: integer
                    description: |
                      List of group IDs corresponding to which groups should be
                      able to view comment. Defaults to all of requesting user's
                      groups.
                  attachment:
                    type: object
                    properties:
                      body:
                        type: string
                        format: byte
                        description: base64-encoded file contents
                      name:
                        type: string
                required:
                  - obj_id
                  - text
        responses:
          200:
            content:
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            comment_id:
                              type: integer
                              description: New comment ID
        """
        data = self.get_json()
        obj_id = data.get("obj_id")
        if obj_id is None:
            return self.error("Missing required field `obj_id`")
        comment_text = data.get("text")
        # Ensure user/token has access to associated Obj
        _ = Obj.get_if_readable_by(obj_id, self.current_user)
        user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
        user_accessible_filter_ids = [
            filtr.id
            for g in self.current_user.accessible_groups
            for filtr in g.filters
            if g.filters is not None
        ]
        # Requested groups are first restricted to groups the user can access.
        group_ids = [int(id) for id in data.pop("group_ids", user_accessible_group_ids)]
        group_ids = set(group_ids).intersection(user_accessible_group_ids)
        if not group_ids:
            return self.error(
                f"Invalid group IDs field ({group_ids}): "
                "You must provide one or more valid group IDs."
            )
        # Only post to groups source/candidate is actually associated with
        candidate_group_ids = [
            f.group_id
            for f in (
                DBSession()
                .query(Filter)
                .join(Candidate)
                .filter(Filter.id.in_(user_accessible_filter_ids))
                .filter(Candidate.obj_id == obj_id)
                .all()
            )
        ]
        source_group_ids = [
            source.group_id
            for source in DBSession()
            .query(Source)
            .filter(Source.obj_id == obj_id)
            .all()
        ]
        group_ids = set(group_ids).intersection(candidate_group_ids + source_group_ids)
        if not group_ids:
            return self.error("Obj is not associated with any of the specified groups.")
        groups = Group.query.filter(Group.id.in_(group_ids)).all()
        if 'attachment' in data:
            if (
                isinstance(data['attachment'], dict)
                and 'body' in data['attachment']
                and 'name' in data['attachment']
            ):
                # Strip any data-URI prefix; everything after "base64," is payload.
                attachment_bytes = str.encode(
                    data['attachment']['body'].split('base64,')[-1]
                )
                attachment_name = data['attachment']['name']
            else:
                return self.error("Malformed comment attachment")
        else:
            attachment_bytes, attachment_name = None, None
        author = self.associated_user_object
        comment = Comment(
            text=comment_text,
            obj_id=obj_id,
            attachment_bytes=attachment_bytes,
            attachment_name=attachment_name,
            author=author,
            groups=groups,
        )
        # Queue an in-app notification for every @-mentioned user; the rows
        # are committed together with the comment in finalize_transaction().
        users_mentioned_in_comment = users_mentioned(comment_text)
        if users_mentioned_in_comment:
            for user_mentioned in users_mentioned_in_comment:
                DBSession().add(
                    UserNotification(
                        user=user_mentioned,
                        text=f"*@{self.current_user.username}* mentioned you in a comment on *{obj_id}*",
                        url=f"/source/{obj_id}",
                    )
                )
        DBSession().add(comment)
        self.finalize_transaction()
        # Websocket pushes happen only after the transaction is committed.
        if users_mentioned_in_comment:
            for user_mentioned in users_mentioned_in_comment:
                self.flow.push(user_mentioned.id, "skyportal/FETCH_NOTIFICATIONS", {})
        self.push_all(
            action='skyportal/REFRESH_SOURCE',
            payload={'obj_key': comment.obj.internal_key},
        )
        return self.success(data={'comment_id': comment.id})
    @permissions(['Comment'])
    def put(self, comment_id):
        """
        ---
        description: Update a comment
        tags:
          - comments
        parameters:
          - in: path
            name: comment_id
            required: true
            schema:
              type: integer
        requestBody:
          content:
            application/json:
              schema:
                allOf:
                  - $ref: '#/components/schemas/CommentNoID'
                  - type: object
                    properties:
                      group_ids:
                        type: array
                        items:
                          type: integer
                        description: |
                          List of group IDs corresponding to which groups should be
                          able to view comment.
        responses:
          200:
            content:
              application/json:
                schema: Success
          400:
            content:
              application/json:
                schema: Error
        """
        c = Comment.get_if_readable_by(comment_id, self.current_user)
        if c is None:
            return self.error('Invalid comment ID.')
        data = self.get_json()
        group_ids = data.pop("group_ids", None)
        data['id'] = comment_id
        attachment_bytes = data.pop('attachment_bytes', None)
        schema = Comment.__schema__()
        try:
            schema.load(data, partial=True)
        except ValidationError as e:
            return self.error(f'Invalid/missing parameters: {e.normalized_messages()}')
        if attachment_bytes is not None:
            attachment_bytes = str.encode(attachment_bytes.split('base64,')[-1])
            c.attachment_bytes = attachment_bytes
        # Attachment name and bytes must be set (or unset) together.
        bytes_is_none = c.attachment_bytes is None
        name_is_none = c.attachment_name is None
        if bytes_is_none ^ name_is_none:
            return self.error(
                'This update leaves one of attachment name or '
                'attachment bytes null. Both fields must be '
                'filled, or both must be null.'
            )
        DBSession().flush()
        if group_ids is not None:
            # Re-fetch after the flush so group changes apply to fresh state.
            c = Comment.get_if_readable_by(comment_id, self.current_user)
            groups = Group.query.filter(Group.id.in_(group_ids)).all()
            if not groups:
                return self.error(
                    "Invalid group_ids field. Specify at least one valid group ID."
                )
            if not all(
                [group in self.current_user.accessible_groups for group in groups]
            ):
                return self.error(
                    "Cannot associate comment with groups you are not a member of."
                )
            c.groups = groups
        self.finalize_transaction()
        self.push_all(
            action='skyportal/REFRESH_SOURCE', payload={'obj_key': c.obj.internal_key}
        )
        return self.success()
    @permissions(['Comment'])
    def delete(self, comment_id):
        """
        ---
        description: Delete a comment
        tags:
          - comments
        parameters:
          - in: path
            name: comment_id
            required: true
            schema:
              type: integer
        responses:
          200:
            content:
              application/json:
                schema: Success
        """
        user = self.associated_user_object
        c = Comment.query.get(comment_id)
        if c is None:
            return self.error("Invalid comment ID")
        # Capture the key before the row is deleted so we can still notify.
        obj_key = c.obj.internal_key
        # Only the comment's author or a system admin may delete it.
        if user.is_system_admin or c.author == user:
            Comment.query.filter_by(id=comment_id).delete()
            self.finalize_transaction()
        else:
            return self.error('Insufficient user permissions.')
        self.push_all(action='skyportal/REFRESH_SOURCE', payload={'obj_key': obj_key})
        return self.success()
class CommentAttachmentHandler(BaseHandler):
    """Serves a comment's attachment, either as a file download or as
    decoded text inside a JSON success payload."""
    @auth_or_token
    def get(self, comment_id):
        """
        ---
        description: Download comment attachment
        tags:
          - comments
        parameters:
          - in: path
            name: comment_id
            required: true
            schema:
              type: integer
          - in: query
            name: download
            nullable: True
            schema:
              type: boolean
            description: If true, download the attachment; else return file data as text. True by default.
        responses:
          200:
            content:
              application:
                schema:
                  type: string
                  format: base64
                  description: base64-encoded contents of attachment
              application/json:
                schema:
                  allOf:
                    - $ref: '#/components/schemas/Success'
                    - type: object
                      properties:
                        data:
                          type: object
                          properties:
                            comment_id:
                              type: integer
                              description: Comment ID attachment came from
                            attachment:
                              type: string
                              description: The attachment file contents decoded as a string
        """
        # strtobool accepts "true"/"false"/"1"/"0" etc.; defaults to True.
        download = strtobool(self.get_query_argument('download', "True").lower())
        comment = Comment.get_if_readable_by(comment_id, self.current_user)
        if comment is None:
            return self.error('Invalid comment ID.')
        self.verify_permissions()
        if download:
            # Stream the raw decoded bytes as a file download.
            self.set_header(
                "Content-Disposition",
                "attachment; " f"filename={comment.attachment_name}",
            )
            self.set_header("Content-type", "application/octet-stream")
            self.write(base64.b64decode(comment.attachment_bytes))
        else:
            # NOTE(review): .decode() assumes the attachment is UTF-8 text --
            # binary attachments would raise here; confirm intended behavior.
            return self.success(
                data={
                    "commentId": int(comment_id),
                    "attachment": base64.b64decode(comment.attachment_bytes).decode(),
                }
            )
| 350 | 12,380 | 69 |
4fc50fa8fdc22ae0a42408d29bb1eaa2f4f6629f | 255 | py | Python | producer.py | Fran006/tarea_kafka | 4956c1469ee28a22cfde3dd0c6de8b5b4ae57062 | [
"Apache-2.0"
] | null | null | null | producer.py | Fran006/tarea_kafka | 4956c1469ee28a22cfde3dd0c6de8b5b4ae57062 | [
"Apache-2.0"
] | null | null | null | producer.py | Fran006/tarea_kafka | 4956c1469ee28a22cfde3dd0c6de8b5b4ae57062 | [
"Apache-2.0"
] | 1 | 2021-11-10T22:42:25.000Z | 2021-11-10T22:42:25.000Z | from kafka import KafkaProducer
import json
producer = KafkaProducer(value_serializer=lambda m: json.dumps(m).encode('ascii'), bootstrap_servers=['localhost:9092'])
producer.send('event', {'id': 123, 'email_vendedor': 'asdas@mail.com'})
producer.flush() | 42.5 | 120 | 0.760784 | from kafka import KafkaProducer
import json
# Producer that serializes each payload to ASCII-encoded JSON before
# sending it to the local broker.
producer = KafkaProducer(value_serializer=lambda m: json.dumps(m).encode('ascii'), bootstrap_servers=['localhost:9092'])
# Publish one sample record to the "event" topic.
producer.send('event', {'id': 123, 'email_vendedor': 'asdas@mail.com'})
producer.flush() | 0 | 0 | 0 |
c1b599e6e1ca1877962e355a6ed461f34dc75d4a | 153 | py | Python | app/__init__.py | themagpimag/magpi-api | f99b9bc534dc7f44fc7d84d7f266370e5794b467 | [
"MIT"
] | 1 | 2015-08-11T11:44:45.000Z | 2015-08-11T11:44:45.000Z | app/__init__.py | themagpimag/magpi-api | f99b9bc534dc7f44fc7d84d7f266370e5794b467 | [
"MIT"
] | null | null | null | app/__init__.py | themagpimag/magpi-api | f99b9bc534dc7f44fc7d84d7f266370e5794b467 | [
"MIT"
] | null | null | null | import os, sys
sys.path.insert(1, os.path.join(os.path.abspath('.'), 'flaskstuff'))
from flask import Flask
app = Flask(__name__)
from app import views
| 21.857143 | 68 | 0.732026 | import os, sys
# Make the bundled 'flaskstuff' directory importable ahead of site packages.
sys.path.insert(1, os.path.join(os.path.abspath('.'), 'flaskstuff'))
from flask import Flask
app = Flask(__name__)
# Imported after `app` exists because views registers routes on it
# (standard circular-import workaround in small Flask apps).
from app import views
| 0 | 0 | 0 |
1d16e66cae87b4d4e0d7cb5177e60619c21733ff | 1,471 | py | Python | ssh_telnet/pexpect/ex11_ssh_pexpect_threads.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-05T09:30:23.000Z | 2022-03-09T13:27:56.000Z | ssh_telnet/pexpect/ex11_ssh_pexpect_threads.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | null | null | null | ssh_telnet/pexpect/ex11_ssh_pexpect_threads.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-06T03:44:35.000Z | 2022-03-04T21:20:40.000Z | from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from pprint import pprint
import pexpect
import yaml
import logging
logging.basicConfig(
format="%(threadName)s %(name)s %(levelname)s: %(message)s", level=logging.INFO
)
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r = send_show_to_devices(devices, "sh int desc")
pprint(r, width=120)
| 28.288462 | 86 | 0.645139 | from concurrent.futures import ThreadPoolExecutor
from itertools import repeat
from pprint import pprint
import pexpect
import yaml
import logging
logging.basicConfig(
format="%(threadName)s %(name)s %(levelname)s: %(message)s", level=logging.INFO
)
def send_show_command(host, username, password, enable_pass, command):
    """SSH to *host* with pexpect, enter enable mode, run *command*, and
    return its output with CRLF line endings normalized to LF.

    The expect/sendline sequence mirrors a Cisco IOS-style login:
    password prompt -> user exec (">") -> enable -> privileged exec ("#").
    """
    logging.info(f"Connecting to {host}")
    with pexpect.spawn(f"ssh {username}@{host}", encoding="utf-8") as ssh:
        ssh.expect("[Pp]assword")
        ssh.sendline(password)
        ssh.expect(">")
        ssh.sendline("enable")
        ssh.expect("Password")
        ssh.sendline(enable_pass)
        ssh.expect("#")
        # Disable paging so the whole output arrives before the next prompt.
        ssh.sendline("terminal length 0")
        ssh.expect("#")
        ssh.sendline(command)
        ssh.expect("#")
        # Everything received before the matched prompt is the command output.
        output = ssh.before
    logging.info(f"<<< Received output from {host}")
    return output.replace("\r\n", "\n")
def send_show_to_devices(devices, show, max_threads=10):
    """Run the *show* command on every device concurrently.

    Returns a dict mapping each device's host to its command output,
    using at most *max_threads* worker threads.
    """
    with ThreadPoolExecutor(max_workers=max_threads) as pool:
        futures = [pool.submit(send_show_command, **device, command=show)
                   for device in devices]
        return {device["host"]: future.result()
                for device, future in zip(devices, futures)}
if __name__ == "__main__":
    # Device inventory: a YAML list of dicts with connection parameters.
    with open("devices.yaml") as f:
        devices = yaml.safe_load(f)
    r = send_show_to_devices(devices, "sh int desc")
    pprint(r, width=120)
| 987 | 0 | 46 |
39021714010b6620176e6528eda72866dde39f19 | 1,060 | py | Python | HealthKit/healthapp/forms.py | Koushik-Sarker-Seemanto/DatabaseProject | d936d83643242942b903381ef263dc10f8fbc177 | [
"MIT"
] | null | null | null | HealthKit/healthapp/forms.py | Koushik-Sarker-Seemanto/DatabaseProject | d936d83643242942b903381ef263dc10f8fbc177 | [
"MIT"
] | null | null | null | HealthKit/healthapp/forms.py | Koushik-Sarker-Seemanto/DatabaseProject | d936d83643242942b903381ef263dc10f8fbc177 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django import forms
from healthapp.models import UserDoctor, UserPatient, Schedule
| 30.285714 | 163 | 0.710377 | from django.contrib.auth.models import User
from django import forms
from healthapp.models import UserDoctor, UserPatient, Schedule
class DoctorRegForm(forms.ModelForm):
    """Registration form for doctors, backed by the UserDoctor model.
    The password is a declared form field rendered with a masked input."""
    password = forms.CharField(widget=forms.PasswordInput)
    class Meta:
        model = UserDoctor
        fields = ['doctor_name', 'doctor_email', 'doctor_workplace', 'doctor_category', 'doctor_degree', 'doctor_gender', 'doctor_age', 'doctor_phone', 'username']
class PatientRegForm(forms.ModelForm):
    """Registration form for patients, backed by the UserPatient model.
    The password is a declared form field rendered with a masked input."""
    password = forms.CharField(widget=forms.PasswordInput)
    class Meta:
        model = UserPatient
        fields = ['patient_name', 'patient_email', 'patient_area', 'patient_blood', 'patient_gender', 'patient_age', 'patient_phone', 'username']
class DoctorLoginForm(forms.ModelForm):
    """Login form for doctors: username from the model plus a password
    field (explicitly declared fields are rendered even though Meta.fields
    lists only 'username')."""
    password = forms.CharField(widget=forms.PasswordInput)
    class Meta:
        model = UserDoctor
        fields = ['username']
class ScheduleForm(forms.ModelForm):
    """Form for creating/editing a doctor's consultation schedule entry."""
    class Meta:
        model = Schedule
        fields = ['start_time', 'end_time', 'hospital_name', 'doctor_id']
| 0 | 832 | 92 |
26834ebb2aeaa8b8b055be1d5f31f9e4d6541584 | 127 | py | Python | qbatch/__init__.py | Gab-D-G/qbatch | 3ecff388d7d4d19468403f9b5595e7f8a865ff73 | [
"Unlicense"
] | 29 | 2015-12-11T19:30:41.000Z | 2021-08-30T12:39:48.000Z | qbatch/__init__.py | Gab-D-G/qbatch | 3ecff388d7d4d19468403f9b5595e7f8a865ff73 | [
"Unlicense"
] | 157 | 2015-12-15T20:42:14.000Z | 2021-08-19T16:26:15.000Z | qbatch/__init__.py | gdevenyi/qbatch | 40e62a8232f199c26de2bbe4a619ae474d8398d6 | [
"Unlicense"
] | 15 | 2015-12-15T19:48:38.000Z | 2021-07-09T14:47:09.000Z | from __future__ import absolute_import
from . import qbatch
from .qbatch import qbatchParser
from .qbatch import qbatchDriver
| 21.166667 | 38 | 0.84252 | from __future__ import absolute_import
from . import qbatch
from .qbatch import qbatchParser
from .qbatch import qbatchDriver
| 0 | 0 | 0 |
ef3f4491efa17c348ebc2c021ea1f9365a52f5f0 | 1,950 | py | Python | 6.00.1x/PS7/AdoptionCenter.py | batnam/MITx6.00.1x | f2c342c8b69ecf22b2e40f3ce554255efdc6f7c0 | [
"MIT"
] | null | null | null | 6.00.1x/PS7/AdoptionCenter.py | batnam/MITx6.00.1x | f2c342c8b69ecf22b2e40f3ce554255efdc6f7c0 | [
"MIT"
] | null | null | null | 6.00.1x/PS7/AdoptionCenter.py | batnam/MITx6.00.1x | f2c342c8b69ecf22b2e40f3ce554255efdc6f7c0 | [
"MIT"
] | null | null | null | __author__ = 'ThanhNam'
# Enter your code for the AdoptionCenter class here
# Be sure to include the __init__, get_name, get_species_count, get_number_of_species, and adopt_pet methods.
class AdoptionCenter:
"""
The AdoptionCenter class stores the important information that a
client would need to know about, such as the different numbers of
species stored, the location, and the name. It also has a method
to adopt a pet.
""" | 45.348837 | 109 | 0.657949 | __author__ = 'ThanhNam'
# Enter your code for the AdoptionCenter class here
# Be sure to include the __init__, get_name, get_species_count, get_number_of_species, and adopt_pet methods.
class AdoptionCenter:
    """
    The AdoptionCenter class stores the important information that a
    client would need to know about, such as the different numbers of
    species stored, the location, and the name. It also has a method
    to adopt a pet.
    """
    def __init__(self, name, species_types, location):
        # name - string name of the adoption center.
        # location - (x, y) pair; stored as float coordinates.
        # species_types - mapping of species name -> number of pets held.
        self.name = name
        self.location = (float(location[0]), float(location[1]))
        # Copy so later mutation of the caller's dict cannot affect us.
        self.species_types = species_types.copy()
    def get_number_of_species(self, species):
        # Returns how many pets of *species* are available (0 if none held).
        # Fix: the original used a bare try/except to map a missing key to 0,
        # which also silently hid any unrelated error; dict.get says it directly.
        return self.species_types.get(species, 0)
    def get_location(self):
        # Returns the (x, y) location of the adoption centre.
        return self.location
    def get_species_count(self):
        # Returns a copy of the full species -> count mapping, so callers
        # cannot mutate internal state through the result.
        return self.species_types.copy()
    def get_name(self):
        # Returns name of center.
        return self.name
    def adopt_pet(self, species):
        # Decrements the count for *species*; the species is removed entirely
        # once its count would reach zero. Raises KeyError if not held.
        if self.species_types[species] > 1:
            self.species_types[species] -= 1
        else:
            self.species_types.pop(species)
        return None
07ccf046f59337ff05c6654450113b1bb0cbf3bc | 830 | py | Python | gammapy/datasets/tests/test_load.py | contrera/gammapy | aa0a74baa977ee2477b5c63e036075f4219792a3 | [
"BSD-3-Clause"
] | null | null | null | gammapy/datasets/tests/test_load.py | contrera/gammapy | aa0a74baa977ee2477b5c63e036075f4219792a3 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:55:46.000Z | 2020-10-29T19:55:46.000Z | gammapy/datasets/tests/test_load.py | qpiel/gammapy | cfb976909e63f4d5d578e1495245c0baad69482b | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from ...utils.testing import requires_data
from ..core import gammapy_extra
from ...datasets import load_poisson_stats_image
@requires_data("gammapy-extra")
def test_gammapy_extra():
"""Try loading a file from gammapy-extra.
"""
assert gammapy_extra.dir.is_dir()
@requires_data("gammapy-extra")
| 33.2 | 82 | 0.760241 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from ...utils.testing import requires_data
from ..core import gammapy_extra
from ...datasets import load_poisson_stats_image
@requires_data("gammapy-extra")
def test_gammapy_extra():
    """Try loading a file from gammapy-extra.
    """
    # The decorator skips this test when the gammapy-extra dataset is absent.
    assert gammapy_extra.dir.is_dir()
@requires_data("gammapy-extra")
def test_load_poisson_stats_image():
    """Check the example Poisson-stats image loads with expected totals."""
    # Summed counts act as a regression checksum for the default image.
    data = load_poisson_stats_image()
    assert data.sum() == 40896
    # With extra_info=True a dict of named images is returned instead.
    images = load_poisson_stats_image(extra_info=True)
    refs = dict(counts=40896, model=41000, source=1000, background=40000)
    for name, expected in refs.items():
        assert_allclose(images[name].sum(), expected)
| 308 | 0 | 22 |
f7f700d638e18614ca377341088ad873b1037c34 | 2,301 | py | Python | GPflow/testing/test_triang.py | mlilab/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 24 | 2018-11-29T07:00:59.000Z | 2021-04-22T19:12:31.000Z | GPflow/testing/test_triang.py | mlilab/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 1 | 2018-12-04T11:51:21.000Z | 2018-12-04T11:51:21.000Z | GPflow/testing/test_triang.py | OpenXAIProject/Mixed-Effect-Composite-RNN-Gaussian-Process | dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d | [
"Apache-2.0"
] | 12 | 2018-11-30T00:40:13.000Z | 2019-10-30T16:09:52.000Z | import unittest
from gpflow.tf_wraps import vec_to_tri
import tensorflow as tf
import numpy as np
from testing.gpflow_testcase import GPflowTestCase
from gpflow.tf_wraps import vec_to_tri
if __name__ == "__main__":
unittest.main()
| 37.721311 | 88 | 0.65189 | import unittest
from gpflow.tf_wraps import vec_to_tri
import tensorflow as tf
import numpy as np
from testing.gpflow_testcase import GPflowTestCase
from gpflow.tf_wraps import vec_to_tri
class TestVecToTri(GPflowTestCase):
    """Tests the vec_to_tri op: packed lower-triangular vectors are
    unpacked into D lower-triangular NxN matrices, and the op is
    differentiable with an all-ones gradient for a sum reduction."""
    def referenceInverse(self, matrices):
        #this is the inverse operation of the vec_to_tri
        #op being tested.
        D, N, _ = matrices.shape
        M = (N * (N + 1)) // 2
        tril_indices = np.tril_indices(N)
        output = np.zeros((D, M))
        for vector_index in range(D):
            matrix = matrices[vector_index, :]
            output[vector_index, :] = matrix[tril_indices]
        return output
    def getExampleMatrices(self, D, N ):
        # Builds D random NxN matrices with the upper triangle zeroed,
        # using a fixed seed so tests are deterministic.
        rng = np.random.RandomState(1)
        random_matrices = rng.randn(D, N, N)
        for matrix_index in range(D):
            for row_index in range(N):
                for col_index in range(N):
                    if col_index > row_index:
                        random_matrices[matrix_index, row_index, col_index] = 0.
        return random_matrices
    def testBasicFunctionality(self):
        # Pack reference matrices, run vec_to_tri, and expect a round trip.
        with self.test_session() as sess:
            N = 3
            D = 3
            reference_matrices = self.getExampleMatrices(D, N)
            input_vector_tensor = tf.constant(self.referenceInverse(reference_matrices))
            test_matrices_tensor = vec_to_tri(input_vector_tensor, N)
            test_matrices = sess.run(test_matrices_tensor)
            np.testing.assert_array_almost_equal(reference_matrices, test_matrices)
    def testDifferentiable(self):
        # d(sum of unpacked matrices)/d(input vector) should be all ones,
        # since every packed element appears exactly once in the output.
        with self.test_session() as sess:
            N = 3
            D = 3
            reference_matrices = self.getExampleMatrices(D, N)
            input_vector_array = self.referenceInverse(reference_matrices)
            input_vector_tensor = tf.constant(input_vector_array)
            test_matrices_tensor = vec_to_tri(input_vector_tensor, N)
            reduced_sum = tf.reduce_sum(test_matrices_tensor)
            gradient = tf.gradients(reduced_sum, input_vector_tensor)[0]
            reference_gradient = np.ones_like(input_vector_array)
            test_gradient = sess.run(gradient)
            np.testing.assert_array_almost_equal(reference_gradient, test_gradient)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 1,919 | 14 | 130 |
f0434d2467c088882bbe829db3af0534d65a607c | 1,173 | py | Python | kiwi-entry/kiwi/UserManager.py | bubblegumsoldier/kiwi | 91701c1806dcfbc1b038fecf7c2cab8bb07a01d4 | [
"MIT"
] | null | null | null | kiwi-entry/kiwi/UserManager.py | bubblegumsoldier/kiwi | 91701c1806dcfbc1b038fecf7c2cab8bb07a01d4 | [
"MIT"
] | null | null | null | kiwi-entry/kiwi/UserManager.py | bubblegumsoldier/kiwi | 91701c1806dcfbc1b038fecf7c2cab8bb07a01d4 | [
"MIT"
] | null | null | null | from aiohttp.client import ClientSession
from http import HTTPStatus
from sanic.exceptions import abort
| 37.83871 | 96 | 0.581415 | from aiohttp.client import ClientSession
from http import HTTPStatus
from sanic.exceptions import abort
class UserManager:
    """Thin async HTTP client for a user service exposing
    /register/<username> and /authenticate/<username> endpoints."""
    def __init__(self, base_url):
        # base_url: root URL of the user service (no trailing slash expected).
        self.url = base_url
        self.register = 'register'
        self.auth = 'authenticate'
    async def authenticate_user(self, username, session):
        # Returns the 'valid' flag from GET /authenticate/<username>.
        return await self._wrap_request(session, username,
                                        {'endpoint': self.auth,
                                         'key': 'valid'})
    async def register_user(self, username, session):
        # Returns the 'success' flag from GET /register/<username>.
        return await self._wrap_request(session, username,
                                        {'endpoint': self.register,
                                         'key': 'success'})
    async def _wrap_request(self, session, username, meta):
        # Issues GET <base>/<endpoint>/<username> and extracts meta['key'].
        async with session.get('{}/{}/{}'.format(self.url, meta['endpoint'], username)) as resp:
            return await self._is_response_valid(resp, meta['key'])
    async def _is_response_valid(self, response, key):
        # On 200 OK return the requested JSON field; otherwise propagate
        # the upstream status to the client via sanic's abort (raises).
        if response.status == HTTPStatus.OK:
            content = await response.json()
            return content[key]
        abort(response.status)
| 914 | -3 | 157 |
38026a46d227486abffd913d93d9c5a5bf30660e | 1,607 | py | Python | GUTG_Vote/views.py | KnightHawk3/GUTG-Vote | 73187427ecf4848f5942b627ccb01a6bb46e20c7 | [
"MIT"
] | null | null | null | GUTG_Vote/views.py | KnightHawk3/GUTG-Vote | 73187427ecf4848f5942b627ccb01a6bb46e20c7 | [
"MIT"
] | null | null | null | GUTG_Vote/views.py | KnightHawk3/GUTG-Vote | 73187427ecf4848f5942b627ccb01a6bb46e20c7 | [
"MIT"
] | null | null | null | from flask import Blueprint, redirect, url_for, request, render_template, flash, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from GUTG_Vote import utilities
from GUTG_Vote.models import User, Game
from GUTG_Vote.forms import LoginForm
from GUTG_Vote.extensions import db
main = Blueprint('main', __name__)
@main.before_request
@main.route('/')
@main.route('/login', methods=['GET', 'POST'])
@main.route('/logout')
@main.route('/<game_id>/vote', methods=['POST'])
@login_required | 31.509804 | 82 | 0.698818 | from flask import Blueprint, redirect, url_for, request, render_template, flash, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from GUTG_Vote import utilities
from GUTG_Vote.models import User, Game
from GUTG_Vote.forms import LoginForm
from GUTG_Vote.extensions import db
main = Blueprint('main', __name__)
@main.before_request
def before_request():
    # Expose the logged-in user to templates and views via flask.g.
    g.user = current_user
@main.route('/')
def index():
    # Front page: games ordered by their vote count.
    return render_template('index.html', games=Game.objects.order_by('votes'))
@main.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form; on valid submission, log the user in and
    redirect to the requested page (or the index)."""
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): the password is matched directly in the query, which
        # implies plaintext storage -- verify and consider hashing.
        registered_user = User.objects(
            username=form.username.data,
            password=form.password.data).first()
        # NOTE(review): registered_user is None on bad credentials, and
        # login_user(None) would then misbehave -- confirm intended handling.
        login_user(registered_user, remember=form.remember_me.data)
        flash("Logged in Sucessfully", 'info')
        return redirect(request.args.get("next") or url_for("main.index"))
    return render_template('login.html', form=form)
@main.route('/logout')
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    flash("Logged out Sucessfully", 'info')
    return redirect(url_for('main.index'))
@main.route('/<game_id>/vote', methods=['POST'])
@login_required
def vote_comment(game_id):
    """Register at most one vote per user for the given game and sync the
    spreadsheet; returns the new vote count (or 'Already Voted')."""
    game = Game.objects(game_id=game_id).first()
    print(current_user)
    print(game)
    if current_user.username not in game.voters:
        # NOTE(review): this read-modify-write on votes/voters is not atomic;
        # concurrent requests could double-count -- confirm acceptable.
        game.votes += 1
        game.voters.append(current_user.username)
        game.save()
        utilities.sync_spreadsheet_with_mongo()
        return str(game.votes)
    else:
        return "Already Voted"
6fbaf79ece07e32cc3a732534e0de0019416c4c2 | 1,349 | py | Python | src/cogs/Apps.py | SlumberDemon/SlEepy | feee98662463d6a17f31a00aad5374d8bee143dd | [
"MIT"
] | null | null | null | src/cogs/Apps.py | SlumberDemon/SlEepy | feee98662463d6a17f31a00aad5374d8bee143dd | [
"MIT"
] | null | null | null | src/cogs/Apps.py | SlumberDemon/SlEepy | feee98662463d6a17f31a00aad5374d8bee143dd | [
"MIT"
] | null | null | null | import discord, dislash, datetime
from dislash import slash_command, SlashInteraction, ContextMenuInteraction
from discord.ext import commands
from src.extras.views import url_button_generator
| 38.542857 | 119 | 0.702001 | import discord, dislash, datetime
from dislash import slash_command, SlashInteraction, ContextMenuInteraction
from discord.ext import commands
from src.extras.views import url_button_generator
class Apps(commands.Cog):
def __init__(self, bot):
self.bot = bot
@dislash.user_command(name="Created at")
async def created_at(self, inter: ContextMenuInteraction):
await inter.respond(
f"{inter.user} was created at {inter.user.created_at}",
ephemeral=True
)
@dislash.message_command(name="Reverse")
async def reverse(self, inter: ContextMenuInteraction):
if inter.message.content:
await inter.respond(inter.message.content[::-1])
else:
await inter.respond("There's no content", ephemeral=True)
@dislash.message_command(name="Quote")
async def quote(self, inter: ContextMenuInteraction):
view = url_button_generator(label='View message', url=inter.message.jump_url)
embed = discord.Embed(description=inter.message.content, timestamp=datetime.datetime.utcnow(), colour=0xc3d9df)
embed.set_author(name=f'{inter.message.author}', icon_url=inter.message.author.avatar.url)
embed.set_footer(text='Quoted at ')
await inter.respond(embed=embed, view=view)
def setup(bot):
bot.add_cog(Apps(bot))
| 865 | 245 | 46 |
5ba26c08e92e3d324c6c278311b3671c8361e603 | 3,460 | py | Python | scripts/getannotations.py | spyysalo/consensus-pipeline | 2301fcd6fa5bacc7daa18abcc93e5abf28a9a8c1 | [
"MIT"
] | null | null | null | scripts/getannotations.py | spyysalo/consensus-pipeline | 2301fcd6fa5bacc7daa18abcc93e5abf28a9a8c1 | [
"MIT"
] | null | null | null | scripts/getannotations.py | spyysalo/consensus-pipeline | 2301fcd6fa5bacc7daa18abcc93e5abf28a9a8c1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Get annotations with context from database.
import sys
import os
import re
from logging import warning, error
from standoff import Textbound
try:
from sqlitedict import SqliteDict
except ImportError:
error('failed to import sqlitedict, try `pip3 install sqlitedict`')
raise
def get_annotation(standoff, id_):
"""Get annotation with given ID from standoff"""
for ln, line in enumerate(standoff.splitlines(), start=1):
fields = line.split('\t')
if fields[0] == id_:
if id_[0] == 'T':
return Textbound.from_standoff(line)
else:
raise NotImplementedError()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 28.833333 | 76 | 0.591329 | #!/usr/bin/env python3
# Get annotations with context from database.
import sys
import os
import re
from logging import warning, error
from standoff import Textbound
try:
from sqlitedict import SqliteDict
except ImportError:
error('failed to import sqlitedict, try `pip3 install sqlitedict`')
raise
def argparser():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-w', '--words', metavar='NUM', default=5, type=int,
help='number of context words to include')
ap.add_argument('-as', '--ann-suffix', default='.ann',
help='suffix for annotations')
ap.add_argument('-ts', '--text-suffix', default='.txt',
help='suffix for texts')
ap.add_argument('ids', metavar='IDS',
help='list of DOC-ID<TAB>ANN-ID to output')
ap.add_argument('data', metavar='DB', help='database')
return ap
def get_annotation(standoff, id_):
"""Get annotation with given ID from standoff"""
for ln, line in enumerate(standoff.splitlines(), start=1):
fields = line.split('\t')
if fields[0] == id_:
if id_[0] == 'T':
return Textbound.from_standoff(line)
else:
raise NotImplementedError()
def is_word(token):
return any(c for c in token if c.isalnum()) # loose definition
def get_words(text, maximum, reverse=False):
split = re.split(r'(\s+)', text)
if reverse:
split = reversed(split)
words, count = [], 0
for w in split:
if count >= maximum:
break
words.append(w)
if is_word(w):
count += 1
if reverse:
words = reversed(words)
return ''.join(words)
def normalize_space(s):
return s.replace('\n', ' ').replace('\t', ' ')
def get_annotations(dbpath, ids, options):
# No context manager: close() can block and this is read-only
db = SqliteDict(dbpath, flag='r', autocommit=False)
for docid, annid in ids:
so_key = docid + options.ann_suffix
so = db.get(so_key)
if so is None:
warning('{} not found in {}, skipping'.format(so_key, dbpath))
continue
text_key = docid + options.text_suffix
text = db.get(text_key)
if text is None:
warning('{} not found in {}, skipping'.format(text_key, dbpath))
continue
ann = get_annotation(so, annid)
before = 'DOCSTART ' + text[:ann.start]
after = text[ann.end:] + 'DOCEND'
before = get_words(before, options.words, reverse=True)
after = get_words(after, options.words, reverse=False)
before = normalize_space(before)
after = normalize_space(after)
print('\t'.join([docid, annid, ann.type, before, ann.text, after]))
def read_ids(fn, options):
    """Read (document ID, annotation ID) pairs from TAB-separated file *fn*.

    Each non-empty line must contain at least two TAB-separated fields,
    DOC-ID and ANN-ID; extra fields are ignored.  Blank lines (e.g. a
    trailing newline at end of file) are skipped — previously these
    crashed with an unpacking ValueError.  *options* is unused but kept
    for interface consistency with the other readers.

    Raises:
        ValueError: if a non-empty line has fewer than two fields, with
            the file name and line number in the message.
    """
    ids = []
    with open(fn) as f:
        for ln, line in enumerate(f, start=1):
            line = line.rstrip()
            if not line:
                continue    # skip blank lines
            fields = line.split('\t')
            if len(fields) < 2:
                raise ValueError(
                    '{} line {}: expected DOC-ID<TAB>ANN-ID, got {!r}'.format(
                        fn, ln, line))
            docid, annid = fields[0:2]
            ids.append((docid, annid))
    return ids
def main(argv):
    """CLI entry point: print context lines for the listed annotation IDs.

    Returns 0 on success, 1 on invalid --words argument.
    """
    args = argparser().parse_args(argv[1:])
    if args.words < 1:
        error('invalid --words NUM {}'.format(args.words))
        return 1
    ids = read_ids(args.ids, args)
    try:
        get_annotations(args.data, ids, args)
    except BrokenPipeError:
        # e.g. output piped into `head`; exiting quietly is intended
        pass
    return 0
# Script entry point: process exit status mirrors main()'s return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 2,557 | 0 | 161 |
63cf91e19f748137ebd544fda7899261450dbabc | 605 | py | Python | botforms/urls.py | johan--/botforms | d10731b658459b1cf15b05820f227c3ac7bf9aa3 | [
"MIT"
] | null | null | null | botforms/urls.py | johan--/botforms | d10731b658459b1cf15b05820f227c3ac7bf9aa3 | [
"MIT"
] | null | null | null | botforms/urls.py | johan--/botforms | d10731b658459b1cf15b05820f227c3ac7bf9aa3 | [
"MIT"
] | null | null | null | """botform URL Configuration
"""
from django.conf.urls import url, include
from rest_framework import routers
from botform import api as form_api
router = routers.DefaultRouter()
router.register(r'forms', form_api.FormsViewSet)
router.register(r'submissions', form_api.SubmissionsViewSet)
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/forms/(?P<pk>\d+)/details/?$', form_api.grid_details),
url(r'^api/v1/forms/(?P<pk>\d+)/details/submission/?$', form_api.grid_submissions),
url(r'^', include('botform.urls')),
url(r'^accounts/', include('allauth.urls')),
]
| 30.25 | 87 | 0.709091 | """botform URL Configuration
"""
from django.conf.urls import url, include
from rest_framework import routers
from botform import api as form_api
router = routers.DefaultRouter()
router.register(r'forms', form_api.FormsViewSet)
router.register(r'submissions', form_api.SubmissionsViewSet)
urlpatterns = [
url(r'^api/v1/', include(router.urls)),
url(r'^api/v1/forms/(?P<pk>\d+)/details/?$', form_api.grid_details),
url(r'^api/v1/forms/(?P<pk>\d+)/details/submission/?$', form_api.grid_submissions),
url(r'^', include('botform.urls')),
url(r'^accounts/', include('allauth.urls')),
]
| 0 | 0 | 0 |
9424a83d00589bd1ee61bcc276172b2e7b5d853b | 364 | py | Python | spider/goData/client_mongodb.py | hellohuizz/The-Employment-System | 4b6ea74797a39ce3d469acd03945945a626e5100 | [
"Apache-2.0"
] | null | null | null | spider/goData/client_mongodb.py | hellohuizz/The-Employment-System | 4b6ea74797a39ce3d469acd03945945a626e5100 | [
"Apache-2.0"
] | null | null | null | spider/goData/client_mongodb.py | hellohuizz/The-Employment-System | 4b6ea74797a39ce3d469acd03945945a626e5100 | [
"Apache-2.0"
] | null | null | null | from pymongo import MongoClient
| 20.222222 | 50 | 0.620879 | from pymongo import MongoClient
class ConMongodb:
    """Thin wrapper around a MongoDB connection to 127.0.0.1:27017."""
    def __init__(self):
        # Open a client to the local MongoDB server on the default port.
        self.conn = MongoClient('127.0.0.1',27017)
    def con_collection(self,database,collection):
        """Select *database*.*collection* for subsequent reads."""
        # Connect to the database
        db = self.conn[database]
        # Connect to the collection
        self.collection = db[collection]
    def read_data(self):
        """Return a cursor over all documents in the selected collection.

        NOTE(review): con_collection() must be called first, otherwise
        self.collection is unset and this raises AttributeError.
        """
        # Return all documents
        return self.collection.find()
| 261 | -4 | 103 |
09d397d02fc2b73e519ee160bc9f72a5d0483391 | 351 | py | Python | Aula_3/aaa.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | Aula_3/aaa.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | Aula_3/aaa.py | Mateus-Silva11/AulasPython | d34dc4f62ade438e68b0a80e0baac4d6ec0d378e | [
"MIT"
] | null | null | null | from operacaoes3 import mais , menos , vezes , divicao , resto , raiz , divicao_f
n1 = int(input('a'))
n2 = int(input('b'))
a = mais(n1,n2)
b = menos(n1,n2)
c = vezes(n1,n2)
d = divicao(n1,n2)
e = resto(n1,n2)
f = raiz(n1,n2)
g = divicao_f(n1,n2)
print(f'{a}')
print(f'{b}')
print(f'{c}')
print(f'{d}')
print(f'{e}')
print(f'{f}')
print(f'{g}')
| 15.26087 | 81 | 0.584046 | from operacaoes3 import mais , menos , vezes , divicao , resto , raiz , divicao_f
n1 = int(input('a'))
n2 = int(input('b'))
a = mais(n1,n2)
b = menos(n1,n2)
c = vezes(n1,n2)
d = divicao(n1,n2)
e = resto(n1,n2)
f = raiz(n1,n2)
g = divicao_f(n1,n2)
print(f'{a}')
print(f'{b}')
print(f'{c}')
print(f'{d}')
print(f'{e}')
print(f'{f}')
print(f'{g}')
| 0 | 0 | 0 |
218ffd003ad6258797c16b4042895fa4389c24c3 | 5,088 | py | Python | test/update-tests.py | edanaher/scry | 6958acea46ae6d43540092bb7397ebb4fcc97463 | [
"MIT"
] | 1 | 2020-06-09T20:46:39.000Z | 2020-06-09T20:46:39.000Z | test/update-tests.py | edanaher/scry | 6958acea46ae6d43540092bb7397ebb4fcc97463 | [
"MIT"
] | null | null | null | test/update-tests.py | edanaher/scry | 6958acea46ae6d43540092bb7397ebb4fcc97463 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import os
import psycopg2
import sys
# Why is it so hard to get python imports working?
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from scry import scry
if __name__ == "__main__":
main()
| 31.602484 | 149 | 0.64033 | #!/usr/bin/env python
import argparse
import os
import psycopg2
import sys
# Why is it so hard to get python imports working?
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from scry import scry
def parseadd(args):
    """Handle the `add` subcommand: run *args.command* through scry against
    the live database and append the captured pipeline stages as a new
    Instance in test_scry.py (inserted before the '# End of instances'
    marker).  Connection parameters come from the libpq environment.
    """
    db = psycopg2.connect("")
    cur = db.cursor()
    table_info = scry.get_table_info(cur)
    foreign_keys = scry.get_foreign_keys(cur)
    unique_keys = scry.get_unique_keys(cur)
    query = args.command
    tree, aliases, command, alias = scry.parse(scry.default_settings(), table_info, foreign_keys, query)
    keys = { "unique": unique_keys, "foreign": foreign_keys }
    sql_clauses = scry.generate_sql(keys, tree)
    uniques = sql_clauses["uniques"]
    sql = scry.serialize_sql(sql_clauses, 100)
    print(sql)
    cur.execute(sql)
    results = scry.reshape_results(cur, sql_clauses)
    output = scry.format_results(results)
    print("\n".join(output))
    # Splice the new Instance(...) literal into the test file in place.
    test_file_name = os.path.dirname(__file__) + "/test_scry.py"
    test_file = open(test_file_name, "r")
    tests = test_file.readlines()
    test_file.close()
    add_index = tests.index("    # End of instances\n")
    new_lines = f"""    Instance(
        {repr(args.name)},
        {repr(args.command)},
        {repr(tree)},
        {repr(sql_clauses)},
        {repr(sql)},
        {repr(results)},
        {repr(output)}
    ),
"""
    tests[add_index:add_index] = [new_lines.rstrip() + "\n"]
    test_file = open(test_file_name, "w")
    tests = test_file.writelines(tests)
    test_file.close()
def parseupdate(args):
    """Handle the `update` subcommand: re-run every recorded test instance
    through the scry pipeline and rewrite test_scry.py with fresh values.

    *args.fields* is a comma-separated list of stage names (tree,
    sql_clauses, sql, results, output) that are allowed to change; any
    stage NOT listed must match the recorded value or an Exception is
    raised.  Connection parameters come from the libpq environment.
    """
    update_fields = args.fields.split(",")
    def should_be_same(f):
        # A stage must be unchanged unless the user asked to update it.
        return f not in update_fields
    db = psycopg2.connect("")
    cur = db.cursor()
    table_info = scry.get_table_info(cur)
    foreign_keys = scry.get_foreign_keys(cur)
    unique_keys = scry.get_unique_keys(cur)
    from test_scry import test_instances, Instance
    new_instances = []
    for instance in test_instances:
        name = instance.name
        query = instance.query
        # Unpack the parse result like parseadd() does; previously the whole
        # 4-tuple was assigned to `tree` (and the misspelled
        # scry.defaultSettings() was called), breaking this subcommand.
        tree, aliases, command, alias = scry.parse(scry.default_settings(), table_info, foreign_keys, query)
        if should_be_same("tree") and tree != instance.tree:
            raise Exception(f"Tree doesn't match for {name}\n\n{tree}\n\n{instance.tree}")
        keys = { "unique": unique_keys, "foreign": foreign_keys }
        sql_clauses = scry.generate_sql(keys, tree)
        if should_be_same("sql_clauses") and sql_clauses != instance.sql_clauses:
            raise Exception(f"Sql_clauses don't match for {name}\n\n{sql_clauses}\n\n{instance.sql_clauses}")
        uniques = sql_clauses["uniques"]
        sql = scry.serialize_sql(sql_clauses, 100)
        if should_be_same("sql") and sql != instance.sql:
            raise Exception(f"Sql doesn't match for {name}\n\n{sql}\n\n{instance.sql}")
        print(sql)
        cur.execute(sql)
        results = scry.reshape_results(cur, sql_clauses)
        if should_be_same("results") and results != instance.results:
            raise Exception(f"Results don't match for {name}\n\n{results}\n\n{instance.results}")
        output = scry.format_results(results)
        if should_be_same("output") and output != instance.output:
            # was printing sql/instance.sql here, which hid the real mismatch
            raise Exception(f"Output doesn't match for {name}\n\n{output}\n\n{instance.output}")
        print("\n".join(output))
        new_instances.append(Instance(
            name,
            query,
            tree,
            sql_clauses,
            sql,
            results,
            output
        ))
    # Rewrite the instance list in the test file in place.
    test_file_name = os.path.dirname(__file__) + "/test_scry.py"
    test_file = open(test_file_name, "r")
    tests = test_file.readlines()
    test_file.close()
    start_index = tests.index("test_instances = [\n") + 1
    finish_index = tests.index("    # End of instances\n")
    new_lines = [f"""    Instance(
        {repr(i.name)},
        {repr(i.query)},
        {repr(i.tree)},
        {repr(i.sql_clauses)},
        {repr(i.sql)},
        {repr(i.results)},
        {repr(i.output)}
    ),
""" for i in new_instances]
    tests[start_index:finish_index] = [l.rstrip() + "\n" for l in new_lines]
    test_file = open(test_file_name, "w")
    tests = test_file.writelines(tests)
    test_file.close()
def parseargs():
    """Build and parse the command line.

    Subcommands: `add` (record a new test instance from a query) and
    `update` (regenerate selected fields of existing instances).  The
    selected handler function is attached to the result as `args.func`.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help="sub-command help")
    parser_add = subparsers.add_parser("add", help="add test instance")
    parser_add.set_defaults(func=parseadd)
    parser_add.add_argument("-n", "--name", help="name of test instance", required=True)
    parser_add.add_argument("-c", "--command", help="command to run for test", required=True)
    parser_update = subparsers.add_parser("update", help="update existing test(s)")
    parser_update.add_argument("-f", "--fields", help="comma-separated list of fields to update: tree,sql_clauses,sql,results,output", required=True)
    parser_update.set_defaults(func=parseupdate)
    return parser.parse_args()
def main():
    """Parse the command line and dispatch to the chosen subcommand."""
    args = parseargs()
    print(args)
    args.func(args)
if __name__ == "__main__":
main()
| 4,721 | 0 | 92 |
ab148763a282ac4ed2f0fd91c8a2b01c2150e582 | 305 | py | Python | pyrolysis/common/__init__.py | fmerlin/pyrolysis | 2edf1094466b09b4c5d1ab93f5cbc2bb01cc73c3 | [
"MIT"
] | 1 | 2020-05-01T14:14:23.000Z | 2020-05-01T14:14:23.000Z | pyrolysis/common/__init__.py | fmerlin/pyrolysis | 2edf1094466b09b4c5d1ab93f5cbc2bb01cc73c3 | [
"MIT"
] | null | null | null | pyrolysis/common/__init__.py | fmerlin/pyrolysis | 2edf1094466b09b4c5d1ab93f5cbc2bb01cc73c3 | [
"MIT"
] | null | null | null | __version__ = '0.1.5'
try:
import pandas
pandas_df_type = pandas.DataFrame
except ImportError:
pandas_df_type = type(None)
try:
import msgpack
has_msgpack = True
except ImportError:
has_msgpack = False
try:
import os
login = os.getlogin()
except OSError:
login = ''
| 15.25 | 37 | 0.672131 | __version__ = '0.1.5'
try:
import pandas
pandas_df_type = pandas.DataFrame
except ImportError:
pandas_df_type = type(None)
try:
import msgpack
has_msgpack = True
except ImportError:
has_msgpack = False
try:
import os
login = os.getlogin()
except OSError:
login = ''
| 0 | 0 | 0 |
e08c7b206b10f6e7ff785d1da11f76d04ad807cf | 1,555 | py | Python | metax/db/extract_gbff_rrna.py | yesimon/metax_bakeoff_2019 | e90da33a9d9407c6a804fee8d58333212148806c | [
"MIT"
] | 16 | 2019-08-08T16:00:15.000Z | 2021-01-22T00:13:05.000Z | metax/db/extract_gbff_rrna.py | yesimon/metax_bakeoff_2019 | e90da33a9d9407c6a804fee8d58333212148806c | [
"MIT"
] | 4 | 2019-09-23T11:23:01.000Z | 2021-01-20T23:20:50.000Z | metax/db/extract_gbff_rrna.py | yesimon/metax_bakeoff_2019 | e90da33a9d9407c6a804fee8d58333212148806c | [
"MIT"
] | 2 | 2019-10-24T14:50:59.000Z | 2019-12-31T02:57:56.000Z | #!/usr/bin/env python3
import argparse
import sys
import os
from pathlib import Path
from Bio import SeqIO
import gzip
| 37.02381 | 118 | 0.550482 | #!/usr/bin/env python3
import argparse
import sys
import os
from pathlib import Path
from Bio import SeqIO
import gzip
def add_command(subparsers):
parser = subparsers.add_parser('extract-gbff-rrna', description='Extract rRNA entries from GBFF files.')
parser.add_argument('-i', '--input-dir', help='Output directory')
parser.add_argument('-o', '--output-dir', help='Output directory')
parser.set_defaults(func=extract_gbff_rrna)
def extract_gbff_rrna(args):
output_dir = Path(args.output_dir)
for path in Path(args.input_dir).iterdir():
if not path.name.endswith('.gbff.gz'):
continue
output_path = output_dir / (path.name[:-8] + '.fa')
rrna_found = 0
with gzip.open(str(path), 'rt') as f:
with output_path.open('wt') as of:
for record in SeqIO.parse(f, 'genbank'):
for feature in record.features:
if feature.type != 'rRNA':
continue
prods = feature.qualifiers.get('product')
if not prods:
continue
for prod in prods:
sub_record = record[feature.location.start:feature.location.end]
sub_record.id = '{}:{}-{}'.format(record.id, feature.location.start, feature.location.end)
SeqIO.write(sub_record, of, 'fasta')
print(prod)
rrna_found += 1
| 1,388 | 0 | 46 |
961e5203d3d494ae7891731578eafae7a39ef2dc | 1,818 | py | Python | tools/online_assembler.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 161 | 2020-05-31T03:29:42.000Z | 2022-03-07T08:36:19.000Z | tools/online_assembler.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 74 | 2020-05-26T18:05:48.000Z | 2021-02-13T21:55:39.000Z | tools/online_assembler.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | [
"BSD-2-Clause"
] | 19 | 2020-05-27T19:22:11.000Z | 2022-02-17T18:53:52.000Z | """ Helper to assemble code from a web page. """
import flask
import subprocess
import tempfile
main_html = r"""
<!DOCTYPE html>
<html><head>
<title>Online compiler</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script>
function do_compile() {
source = $("#source").val()
$.post("compile", { source: source },
function(data, status) {
$("#result").text(data.replace("\\n", "<br>", "g"));
});
}
</script>
</head>
<body>
<div class="w3-container w3-teal"><h1>Online assembler</h1></div>
<div class="w3-container"><textarea id="source">mov rax,rbx</textarea></div>
<div class="w3-container">
<button class="w3-btn" onclick="do_compile()">Compile</button>
</div>
<div class="w3-container"><p id="result"></p></div>
<div class="w3-container w3-teal"><p>By Windel Bouwman 2016</p></div>
</body></html>
"""
app = flask.Flask(__name__)
@app.route('/')
@app.route('/compile', methods=['POST'])
if __name__ == '__main__':
app.run()
| 25.25 | 88 | 0.640264 | """ Helper to assemble code from a web page. """
import flask
import subprocess
import tempfile
main_html = r"""
<!DOCTYPE html>
<html><head>
<title>Online compiler</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script>
function do_compile() {
source = $("#source").val()
$.post("compile", { source: source },
function(data, status) {
$("#result").text(data.replace("\\n", "<br>", "g"));
});
}
</script>
</head>
<body>
<div class="w3-container w3-teal"><h1>Online assembler</h1></div>
<div class="w3-container"><textarea id="source">mov rax,rbx</textarea></div>
<div class="w3-container">
<button class="w3-btn" onclick="do_compile()">Compile</button>
</div>
<div class="w3-container"><p id="result"></p></div>
<div class="w3-container w3-teal"><p>By Windel Bouwman 2016</p></div>
</body></html>
"""
app = flask.Flask(__name__)
@app.route('/')
def main():
return main_html
@app.route('/compile', methods=['POST'])
def compile():
source = flask.request.form['source']
_, tmp = tempfile.mkstemp()
print(tmp)
with open(tmp, 'w') as f:
f.write(source)
# res2 = asm_x86(tmp)
res2 = asm_arm(tmp)
return str(source) + str(res2.stdout)
def asm_x86(tmp):
res = subprocess.run(['nasm', '-f', 'elf64', tmp])
print(res)
res2 = subprocess.run(['objdump', '-d', tmp + '.o'], stdout=subprocess.PIPE)
print(res2)
return res2
def asm_arm(tmp):
res = subprocess.run(['arm-none-eabi-as', tmp])
print(res)
res2 = subprocess.run(['arm-none-eabi-objdump', '-d'], stdout=subprocess.PIPE)
print(res2)
return res2
if __name__ == '__main__':
app.run()
| 596 | 0 | 90 |
7cc91d079a51e8d8c6dac891686716599ed68ead | 188 | py | Python | week0/swapy.py | peacekeeper6/Python-Project | d27101dda4b977fcd1143c02670fd1b78759543f | [
"MIT"
] | null | null | null | week0/swapy.py | peacekeeper6/Python-Project | d27101dda4b977fcd1143c02670fd1b78759543f | [
"MIT"
] | 4 | 2022-03-14T21:40:34.000Z | 2022-03-28T21:46:17.000Z | week0/swapy.py | peacekeeper6/Python-Project | d27101dda4b977fcd1143c02670fd1b78759543f | [
"MIT"
] | 1 | 2022-03-14T21:31:59.000Z | 2022-03-14T21:31:59.000Z |
if __name__ == "__main__":
a = input("first number:")
b = input("second number:")
print(', '.join(swap(a, b)))
| 15.666667 | 30 | 0.494681 | def swap(a, b):
if b < a:
a, b = b, a
return a, b
if __name__ == "__main__":
a = input("first number:")
b = input("second number:")
print(', '.join(swap(a, b)))
| 46 | 0 | 22 |
aa4789eaa3baabff0de064cf8ec592610618a86d | 10,124 | py | Python | labelling_tool/tracking/DaSiamRPN/DaSiamRPN.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 6 | 2020-06-18T16:41:40.000Z | 2022-03-10T07:15:13.000Z | labelling_tool/tracking/DaSiamRPN/DaSiamRPN.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 1 | 2021-08-11T08:42:28.000Z | 2021-08-11T08:42:28.000Z | labelling_tool/tracking/DaSiamRPN/DaSiamRPN.py | abhineet123/animal_detection_ | be0dd60d2b56b267f329b7be71d7f037499f98bc | [
"CC-BY-4.0"
] | 1 | 2022-02-25T11:06:17.000Z | 2022-02-25T11:06:17.000Z | import os
import sys
import time
import math
import inspect
import copy
import logging
import numpy as np
import cv2
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from .DaSiamRPN_net import SiamRPNvot, SiamRPNBIG, SiamRPNotb
from .run_SiamRPN import generate_anchor, tracker_eval
from .DaSiamRPN_utils import get_subwindow_tracking
class DaSiamRPNParams:
"""
:param int model: 0: SiamRPNvot 1: SiamRPNBIG 2: SiamRPNotb,
:param str windowing: to penalize large displacements [cosine/uniform]
:param int exemplar_size: input z size
:param int instance_size: input x size (search region)
:param float context_amount: context amount for the exemplar
:param bool adaptive: adaptive change search region
:param int score_size: size of score map
:param int anchor_num: number of anchors
"""
class DaSiamRPN:
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger
:type states: list[dict]
"""
def __init__(self, params, logger, target_id=0,
label='generic', confidence=1.0):
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger | None
:type target_id: int
:rtype: None
"""
# self.tf_graph = tf.Graph()
# avoid printing TF debugging information
self._params = params
self._logger = logger
self.target_id = target_id
self.label = label
self.confidence = confidence
self.cumulative_confidence = confidence
if self._logger is None:
self._logger = logging.getLogger()
self._logger.setLevel(logging.INFO)
# self.logger.handlers[0].setFormatter(logging.Formatter(
# '%(levelname)s::%(module)s::%(funcName)s::%(lineno)s : %(message)s'))
self.anchor = []
# self.params.update(cfg={})
self.associated_frames = 1
self.unassociated_frames = 0
self.associated = 0
# self.is_initialized = 0
self.bbox = None
self.gpu_id = self._params.gpu_id
self.pretrained_wts_dir = self._params.pretrained_wts_dir
if self._params.rel_path:
self.pretrained_wts_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.pretrained_wts_dir)
self.net = None
self.score_sz = self._params.score_size
self.final_score_sz = self._params.score_size
if self._params.update_location == 0:
self._logger.info('Location updating is disabled')
self.state = None
def initialize(self, init_frame, init_bbox):
"""
:param np.ndarray init_frame:
:param np.ndarray | list | tuple init_bbox:
:return:
"""
if self.net is None:
if self._params.model == 0:
net = SiamRPNvot()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNVOT.model')))
# self._logger.info('Using SiamRPNVOT model')
elif self._params.model == 1:
net = SiamRPNBIG()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNBIG.model')))
# self._logger.info('Using SiamRPNBIG model')
elif self._params.model == 2:
net = SiamRPNotb()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNOTB.model')))
# self._logger.info('Using SiamRPNOTB model')
else:
raise IOError('Invalid model_type: {}'.format(self._params.model))
net.eval().cuda(self.gpu_id)
self.net = net
cx, cy, target_w, target_h = init_bbox
target_pos = np.array([cx, cy])
target_sz = np.array([target_w, target_h])
self._params.update(self.net.cfg)
state = dict()
state['im_h'] = init_frame.shape[0]
state['im_w'] = init_frame.shape[1]
if self._params.adaptive:
if ((target_sz[0] * target_sz[1]) / float(state['im_h'] * state['im_w'])) < 0.004:
self._params.instance_size = 287 # small object big search region
else:
self._params.instance_size = 271
self._params.score_size = (
self._params.instance_size - self._params.exemplar_size) / self._params.total_stride + 1
self.anchor = generate_anchor(self._params.total_stride, self._params.scales, self._params.ratios,
int(self._params.score_size))
avg_chans = np.mean(init_frame, axis=(0, 1))
wc_z = target_sz[0] + self._params.context_amount * sum(target_sz)
hc_z = target_sz[1] + self._params.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(init_frame, target_pos, self._params.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
self.net.temple(z.cuda(self.gpu_id))
if self._params.windowing == 'cosine':
window = np.outer(np.hanning(self.score_sz), np.hanning(self.score_sz))
elif self._params.windowing == 'uniform':
window = np.ones((self.score_sz, self.score_sz))
else:
raise IOError('Invalid windowing type: {}'.format(self._params.windowing))
window = np.tile(window.flatten(), self._params.anchor_num)
# state['p'] = self.params
pos_x, pos_y = target_pos
target_w, target_h = target_sz
xmin, ymin = pos_x - target_w / 2, pos_y - target_h / 2
xmax, ymax = xmin + target_w, ymin + target_h
bbox = [xmin, ymin, target_w, target_h]
state['net'] = self.net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
self.bbox = [xmin, ymin, xmax, ymax]
self.state = state
| 35.152778 | 134 | 0.602529 | import os
import sys
import time
import math
import inspect
import copy
import logging
import numpy as np
import cv2
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from .DaSiamRPN_net import SiamRPNvot, SiamRPNBIG, SiamRPNotb
from .run_SiamRPN import generate_anchor, tracker_eval
from .DaSiamRPN_utils import get_subwindow_tracking
class DaSiamRPNParams:
    """Hyper-parameter container for the DaSiamRPN tracker.

    Holds the SiamRPN crop sizes, anchor ratio/scale grid, score-map
    penalties and runtime options (GPU id, model choice, weight paths).
    `score_size` and `anchor_num` are derived from the other fields and
    kept consistent by `update()`.
    """

    def __init__(self):
        self.windowing = 'cosine'
        self.exemplar_size = 127
        self.instance_size = 271
        self.total_stride = 8
        self.context_amount = 0.5
        self.ratios = (0.33, 0.5, 1, 2, 3)
        self.scales = (8,)
        self.penalty_k = 0.055
        self.window_influence = 0.42
        self.lr = 0.295
        self.adaptive = 0
        self.visualize = 0
        self._refresh_derived()
        self.gpu_id = 0
        self.model = 0
        self.update_location = 1
        self.rel_path = 1
        self.pretrained_wts_dir = 'pretrained_weights'
        self.help = {
        }

    def update(self, cfg):
        """Apply key/value overrides from *cfg*, then recompute derived fields."""
        for key, value in cfg.items():
            setattr(self, key, value)
        self._refresh_derived()

    def _refresh_derived(self):
        # Score-map side length follows from the crop sizes and stride;
        # anchor count is the full ratio x scale grid.
        self.score_size = int((self.instance_size - self.exemplar_size) / self.total_stride + 1)
        self.anchor_num = len(self.ratios) * len(self.scales)
class DaSiamRPN:
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger
:type states: list[dict]
"""
def __init__(self, params, logger, target_id=0,
label='generic', confidence=1.0):
"""
:type params: DaSiamRPNParams
:type logger: logging.RootLogger | None
:type target_id: int
:rtype: None
"""
# self.tf_graph = tf.Graph()
# avoid printing TF debugging information
self._params = params
self._logger = logger
self.target_id = target_id
self.label = label
self.confidence = confidence
self.cumulative_confidence = confidence
if self._logger is None:
self._logger = logging.getLogger()
self._logger.setLevel(logging.INFO)
# self.logger.handlers[0].setFormatter(logging.Formatter(
# '%(levelname)s::%(module)s::%(funcName)s::%(lineno)s : %(message)s'))
self.anchor = []
# self.params.update(cfg={})
self.associated_frames = 1
self.unassociated_frames = 0
self.associated = 0
# self.is_initialized = 0
self.bbox = None
self.gpu_id = self._params.gpu_id
self.pretrained_wts_dir = self._params.pretrained_wts_dir
if self._params.rel_path:
self.pretrained_wts_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.pretrained_wts_dir)
self.net = None
self.score_sz = self._params.score_size
self.final_score_sz = self._params.score_size
if self._params.update_location == 0:
self._logger.info('Location updating is disabled')
self.state = None
def initialize(self, init_frame, init_bbox):
"""
:param np.ndarray init_frame:
:param np.ndarray | list | tuple init_bbox:
:return:
"""
if self.net is None:
if self._params.model == 0:
net = SiamRPNvot()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNVOT.model')))
# self._logger.info('Using SiamRPNVOT model')
elif self._params.model == 1:
net = SiamRPNBIG()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNBIG.model')))
# self._logger.info('Using SiamRPNBIG model')
elif self._params.model == 2:
net = SiamRPNotb()
net.load_state_dict(torch.load(os.path.join(self.pretrained_wts_dir, 'SiamRPNOTB.model')))
# self._logger.info('Using SiamRPNOTB model')
else:
raise IOError('Invalid model_type: {}'.format(self._params.model))
net.eval().cuda(self.gpu_id)
self.net = net
cx, cy, target_w, target_h = init_bbox
target_pos = np.array([cx, cy])
target_sz = np.array([target_w, target_h])
self._params.update(self.net.cfg)
state = dict()
state['im_h'] = init_frame.shape[0]
state['im_w'] = init_frame.shape[1]
if self._params.adaptive:
if ((target_sz[0] * target_sz[1]) / float(state['im_h'] * state['im_w'])) < 0.004:
self._params.instance_size = 287 # small object big search region
else:
self._params.instance_size = 271
self._params.score_size = (
self._params.instance_size - self._params.exemplar_size) / self._params.total_stride + 1
self.anchor = generate_anchor(self._params.total_stride, self._params.scales, self._params.ratios,
int(self._params.score_size))
avg_chans = np.mean(init_frame, axis=(0, 1))
wc_z = target_sz[0] + self._params.context_amount * sum(target_sz)
hc_z = target_sz[1] + self._params.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(init_frame, target_pos, self._params.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
self.net.temple(z.cuda(self.gpu_id))
if self._params.windowing == 'cosine':
window = np.outer(np.hanning(self.score_sz), np.hanning(self.score_sz))
elif self._params.windowing == 'uniform':
window = np.ones((self.score_sz, self.score_sz))
else:
raise IOError('Invalid windowing type: {}'.format(self._params.windowing))
window = np.tile(window.flatten(), self._params.anchor_num)
# state['p'] = self.params
pos_x, pos_y = target_pos
target_w, target_h = target_sz
xmin, ymin = pos_x - target_w / 2, pos_y - target_h / 2
xmax, ymax = xmin + target_w, ymin + target_h
bbox = [xmin, ymin, target_w, target_h]
state['net'] = self.net
state['avg_chans'] = avg_chans
state['window'] = window
state['target_pos'] = target_pos
state['target_sz'] = target_sz
self.bbox = [xmin, ymin, xmax, ymax]
self.state = state
    def update(self, frame):
        """Track the target into *frame* and return the new box.

        Runs one SiamRPN step: crop a search region around the previous
        target position, score it with the network, clamp the winning
        position/size to the image, and update self.bbox / confidence.
        Returns [xmin, ymin, width, height].  `initialize()` must have
        been called first (self.state must be populated).
        """
        state = self.state
        # p = state['p']
        net = state['net']
        avg_chans = state['avg_chans']
        window = state['window']
        target_pos = state['target_pos']
        target_sz = state['target_sz']
        # context-padded exemplar scale, as in initialize()
        wc_z = target_sz[1] + self._params.context_amount * sum(target_sz)
        hc_z = target_sz[0] + self._params.context_amount * sum(target_sz)
        s_z = np.sqrt(wc_z * hc_z)
        scale_z = self._params.exemplar_size / s_z
        d_search = (self._params.instance_size - self._params.exemplar_size) / 2
        pad = d_search / scale_z
        s_x = s_z + 2 * pad
        # extract scaled crops for search region x at previous target position
        x_crop = Variable(get_subwindow_tracking(frame, target_pos, self._params.instance_size,
                                                 round(s_x), avg_chans).unsqueeze(0))
        target_pos, target_sz, score, pscore, delta, score_id = tracker_eval(net, x_crop.cuda(self.gpu_id), target_pos,
                                                                             target_sz * scale_z, window,
                                                                             scale_z, self._params, self.anchor)
        # reshape flat per-anchor scores into (anchor, H, W) score maps
        score_map = np.reshape(score, (-1, self.score_sz, self.score_sz))
        pscore_map = np.reshape(pscore, (-1, self.score_sz, self.score_sz))
        delta_map = np.reshape(delta, (-1, self.score_sz, self.score_sz))
        unravel_id = np.unravel_index(score_id, score_map.shape)
        best_pscore_map = pscore_map[unravel_id[0], :, :].squeeze()
        best_pscore_map_max_idx = np.argmax(best_pscore_map)
        # NOTE(review): computed but unused below — kept for debugging?
        best_pscore_map_max_idx_ur = np.unravel_index(best_pscore_map_max_idx, best_pscore_map.shape)
        # clamp position to the image and size to [10, image extent]
        target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
        target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
        target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
        target_sz[1] = max(10, min(state['im_h'], target_sz[1]))
        if self._params.update_location:
            state['target_pos'] = target_pos
            state['target_sz'] = target_sz
        state['score'] = score
        state['pscore'] = pscore
        best_score = pscore[score_id]
        # convert center/size to corner coordinates
        pos_x, pos_y = target_pos
        target_w, target_h = target_sz
        xmin, ymin = pos_x - target_w / 2, pos_y - target_h / 2
        xmax, ymax = xmin + target_w, ymin + target_h
        state['net'] = self.net
        state['avg_chans'] = avg_chans
        state['window'] = window
        state['target_pos'] = target_pos
        state['target_sz'] = target_sz
        self.bbox = [xmin, ymin, xmax, ymax]
        self.confidence = best_score
        self.cumulative_confidence *= best_score
        # self._logger.info('confidence: {}'.format(self.confidence))
        # self._logger.info('cumulative_confidence: {}'.format(self.cumulative_confidence))
        bbox = [xmin, ymin, target_w, target_h]
        return bbox
    def close(self):
        """Release tracker resources; currently a no-op (GC handles net/state)."""
        pass
| 3,944 | 0 | 108 |
dd7a28c483c819da0124ed863aebb47475aa02bc | 3,776 | py | Python | devel/forms.py | VanirLab/VOS | e6cb3e4e391e583e98d548292b5f272320d38cc4 | [
"MIT"
] | null | null | null | devel/forms.py | VanirLab/VOS | e6cb3e4e391e583e98d548292b5f272320d38cc4 | [
"MIT"
] | null | null | null | devel/forms.py | VanirLab/VOS | e6cb3e4e391e583e98d548292b5f272320d38cc4 | [
"MIT"
] | null | null | null | import random
from collections import OrderedDict
from string import ascii_letters, digits
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template import loader
from .models import UserProfile
# vim: set ts=4 sw=4 et:
| 36.307692 | 80 | 0.613083 | import random
from collections import OrderedDict
from string import ascii_letters, digits
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.template import loader
from .models import UserProfile
class ProfileForm(forms.Form):
email = forms.EmailField(label='Private email (not shown publicly):',
help_text="Used for out-of-date notifications, etc.")
passwd1 = forms.CharField(label='New Password', required=False,
widget=forms.PasswordInput)
passwd2 = forms.CharField(label='Confirm Password', required=False,
widget=forms.PasswordInput)
def clean(self):
if self.cleaned_data['passwd1'] != self.cleaned_data['passwd2']:
raise forms.ValidationError('Passwords do not match.')
return self.cleaned_data
class UserProfileForm(forms.ModelForm):
def clean_pgp_key(self):
data = self.cleaned_data['pgp_key']
# strip 0x prefix if provided; store uppercase
if data.startswith('0x'):
data = data[2:]
return data.upper()
class Meta:
model = UserProfile
exclude = ('allowed_repos', 'user', 'latin_name')
class NewUserForm(forms.ModelForm):
username = forms.CharField(max_length=30)
private_email = forms.EmailField()
first_name = forms.CharField(required=False)
last_name = forms.CharField(required=False)
groups = forms.ModelMultipleChoiceField(required=False,
queryset=Group.objects.all())
class Meta:
model = UserProfile
exclude = ('picture', 'user')
def __init__(self, *args, **kwargs):
super(NewUserForm, self).__init__(*args, **kwargs)
# Hack ourself so certain fields appear first
old = self.fields
self.fields = OrderedDict()
keys = ('username', 'private_email', 'first_name', 'last_name',
'alias', 'public_email')
for key in keys:
self.fields[key] = old[key]
for key, _ in list(old.items()):
if key not in keys:
self.fields[key] = old[key]
def clean_username(self):
username = self.cleaned_data['username']
if User.objects.filter(username=username).exists():
raise forms.ValidationError(
"A user with that username already exists.")
return username
def save(self, commit=True):
profile = super(NewUserForm, self).save(False)
pwletters = ascii_letters + digits
password = ''.join([random.choice(pwletters) for _ in range(8)])
user = User.objects.create_user(username=self.cleaned_data['username'],
email=self.cleaned_data['private_email'], password=password)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
# sucks that the MRM.add() method can't take a list directly... we have
# to resort to dirty * magic.
user.groups.add(*self.cleaned_data['groups'])
profile.user = user
if commit:
profile.save()
self.save_m2m()
template = loader.get_template('devel/new_account.txt')
ctx = {
'site': Site.objects.get_current(),
'user': user,
'password': password,
}
send_mail("Your new archweb account",
template.render(ctx),
'Arch Website Notification <nobody@archlinux.org>',
[user.email],
fail_silently=False)
# vim: set ts=4 sw=4 et:
| 2,302 | 1,036 | 73 |
8b16473ee83dc682af821beb59c1c5dc297a5544 | 3,006 | py | Python | numpy_ann.py | anushaa51/YTrendNet | 3c5179d493aea6c45ca4258e1f41857947ec953e | [
"Apache-2.0"
] | 1 | 2019-07-08T15:57:36.000Z | 2019-07-08T15:57:36.000Z | numpy_ann.py | anushab05/YTrendNet | 3c5179d493aea6c45ca4258e1f41857947ec953e | [
"Apache-2.0"
] | null | null | null | numpy_ann.py | anushab05/YTrendNet | 3c5179d493aea6c45ca4258e1f41857947ec953e | [
"Apache-2.0"
] | null | null | null | import numpy as np
# hidden layer activation function
# derivate of hidden layer activation function for gradient descent
# output layer activation function
# cost function
# derivative of cost function for gradient descent
| 34.551724 | 80 | 0.619428 | import numpy as np
# hidden layer activation function
def sigmoid(s):
return 1/(1 + np.exp(-s))
# derivate of hidden layer activation function for gradient descent
def delta_sigmoid(s):
return s * (1 - s)
# output layer activation function
def softmax(s):
exps = np.exp(s - np.max(s, axis=1, keepdims=True))
return exps/np.sum(exps, axis=1, keepdims=True)
# cost function
def cross_entropy(pred,real):
no_of_instances = real.shape[0]
p = softmax(pred)
log_likelihood = -np.log(p[np.arange(no_of_instances),real.argmax(axis=1)])
loss = np.sum(log_likelihood) / no_of_instances
return loss
# derivative of cost function for gradient descent
def delta_cross_entropy(pred,real):
no_of_instances = real.shape[0]
grad = softmax(pred)
grad[np.arange(no_of_instances),real.argmax(axis=1)] -= 1
grad = grad/no_of_instances
return grad
class ANN:
def __init__(self, data, labels):
self.data = data
self.labels = labels
self.learning_rate = 1
neurons = 1000 # no. of nodes in each hidden layer
no_of_ip_nodes = data.shape[1]
no_of_op_nodes = labels.shape[1]
np.random.seed(10)
#initialising weights and bias
self.w1 = np.random.randn(no_of_ip_nodes, neurons)
self.b1 = np.zeros((1, neurons))
self.w2 = np.random.randn(neurons, neurons)
self.b2 = np.zeros((1, neurons))
self.w3 = np.random.randn(neurons, no_of_op_nodes)
self.b3 = np.zeros((1, no_of_op_nodes))
def feedforward(self):
# activation of nodes of hidden layers
z1 = np.dot(self.data, self.w1) + self.b1
self.a1 = sigmoid(z1)
z2 = np.dot(self.a1, self.w2) + self.b2
self.a2 = sigmoid(z2)
#activation of nodes of output layer
z3 = np.dot(self.a2, self.w3) + self.b3
self.a3 = softmax(z3)
def backpropogate(self):
loss = cross_entropy(self.a3, self.labels)
print('Cost:', loss)
# calculation of cost and derivative of cost function
a3_delta = delta_cross_entropy(self.a3, self.labels)
z2_delta = np.dot(a3_delta, self.w3.T)
a2_delta = z2_delta * delta_sigmoid(self.a2)
z1_delta = np.dot(a2_delta, self.w2.T)
a1_delta = z1_delta * delta_sigmoid(self.a1)
# gradient descent for weights of each layer
self.w3 -= self.learning_rate * np.dot(self.a2.T, a3_delta)
self.b3 -= self.learning_rate * np.sum(a3_delta, axis=0, keepdims=True)
self.w2 -= self.learning_rate * np.dot(self.a1.T, a2_delta)
self.b2 -= self.learning_rate * np.sum(a2_delta, axis=0)
self.w1 -= self.learning_rate * np.dot(self.data.T, a1_delta)
self.b1 -= self.learning_rate * np.sum(a1_delta, axis=0)
def predict(self, data):
self.data = data
self.feedforward()
return self.a3.argmax() | 2,515 | -11 | 263 |
5a2cb2c9491ab33ef0c17574907ea19101bad7c3 | 1,797 | py | Python | notebooks/models/spoilers/optimizer.py | AnveshAeturi/deep-learning-workshop | 20be7d92f310ca27a176e96e3a0b557f3fee2ec2 | [
"MIT"
] | 486 | 2016-06-23T09:12:57.000Z | 2022-03-03T11:23:38.000Z | notebooks/models/spoilers/optimizer.py | AnveshAeturi/deep-learning-workshop | 20be7d92f310ca27a176e96e3a0b557f3fee2ec2 | [
"MIT"
] | 4 | 2016-06-24T03:36:02.000Z | 2020-05-15T06:59:54.000Z | notebooks/models/spoilers/optimizer.py | AnveshAeturi/deep-learning-workshop | 20be7d92f310ca27a176e96e3a0b557f3fee2ec2 | [
"MIT"
] | 133 | 2016-07-23T02:50:53.000Z | 2022-03-19T03:41:46.000Z | l_in = lasagne.layers.InputLayer((None, 784))
l_out = lasagne.layers.DenseLayer(l_in,
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
X_sym = T.matrix()
y_sym = T.ivector()
output = lasagne.layers.get_output(l_out, X_sym)
pred = output.argmax(-1)
loss = T.mean(lasagne.objectives.categorical_crossentropy(output, y_sym))
acc = T.mean(T.eq(pred, y_sym))
params = lasagne.layers.get_all_params(l_out)
grad = T.grad(loss, params)
updates = lasagne.updates.adam(grad, params, learning_rate=0.001)
f_train = theano.function([X_sym, y_sym], [loss, acc], updates=updates)
f_val = theano.function([X_sym, y_sym], [loss, acc])
f_predict = theano.function([X_sym], pred)
BATCH_SIZE = 64
N_BATCHES = len(X_train) // BATCH_SIZE
N_VAL_BATCHES = len(X_val) // BATCH_SIZE
for epoch in range(10):
train_loss = 0
train_acc = 0
for _ in range(N_BATCHES):
X, y = next(train_batches)
loss, acc = f_train(X, y)
train_loss += loss
train_acc += acc
train_loss /= N_BATCHES
train_acc /= N_BATCHES
val_loss = 0
val_acc = 0
for _ in range(N_VAL_BATCHES):
X, y = next(val_batches)
loss, acc = f_val(X, y)
val_loss += loss
val_acc += acc
val_loss /= N_VAL_BATCHES
val_acc /= N_VAL_BATCHES
print('Epoch {}, Train (val) loss {:.03f} ({:.03f}) ratio {:.03f}'.format(
epoch, train_loss, val_loss, val_loss/train_loss))
print('Train (val) accuracy {:.03f} ({:.03f})'.format(train_acc, val_acc))
weights = l_out.W.get_value()
plt.figure(figsize=(12,3))
for i in range(10):
plt.subplot(1, 10, i+1)
plt.imshow(weights[:,i].reshape((28, 28)), cmap='gray', interpolation='nearest')
plt.axis('off') | 30.457627 | 84 | 0.633278 | l_in = lasagne.layers.InputLayer((None, 784))
l_out = lasagne.layers.DenseLayer(l_in,
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
X_sym = T.matrix()
y_sym = T.ivector()
output = lasagne.layers.get_output(l_out, X_sym)
pred = output.argmax(-1)
loss = T.mean(lasagne.objectives.categorical_crossentropy(output, y_sym))
acc = T.mean(T.eq(pred, y_sym))
params = lasagne.layers.get_all_params(l_out)
grad = T.grad(loss, params)
updates = lasagne.updates.adam(grad, params, learning_rate=0.001)
f_train = theano.function([X_sym, y_sym], [loss, acc], updates=updates)
f_val = theano.function([X_sym, y_sym], [loss, acc])
f_predict = theano.function([X_sym], pred)
BATCH_SIZE = 64
N_BATCHES = len(X_train) // BATCH_SIZE
N_VAL_BATCHES = len(X_val) // BATCH_SIZE
for epoch in range(10):
train_loss = 0
train_acc = 0
for _ in range(N_BATCHES):
X, y = next(train_batches)
loss, acc = f_train(X, y)
train_loss += loss
train_acc += acc
train_loss /= N_BATCHES
train_acc /= N_BATCHES
val_loss = 0
val_acc = 0
for _ in range(N_VAL_BATCHES):
X, y = next(val_batches)
loss, acc = f_val(X, y)
val_loss += loss
val_acc += acc
val_loss /= N_VAL_BATCHES
val_acc /= N_VAL_BATCHES
print('Epoch {}, Train (val) loss {:.03f} ({:.03f}) ratio {:.03f}'.format(
epoch, train_loss, val_loss, val_loss/train_loss))
print('Train (val) accuracy {:.03f} ({:.03f})'.format(train_acc, val_acc))
weights = l_out.W.get_value()
plt.figure(figsize=(12,3))
for i in range(10):
plt.subplot(1, 10, i+1)
plt.imshow(weights[:,i].reshape((28, 28)), cmap='gray', interpolation='nearest')
plt.axis('off') | 0 | 0 | 0 |
4f3678ead1dd3f3d96e7f7ed489968e2d3d73686 | 14,367 | py | Python | .venv/Lib/site-packages/lemoncheesecake/filter.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 34 | 2017-06-12T18:50:36.000Z | 2021-11-29T01:59:07.000Z | .venv/Lib/site-packages/lemoncheesecake/filter.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 25 | 2017-12-07T13:35:29.000Z | 2022-03-10T01:27:58.000Z | .venv/Lib/site-packages/lemoncheesecake/filter.py | yadavdeepa365/HUDL_PYTHON | e1d5d264e3748f0add18258496f5a850e16b7ee6 | [
"MIT"
] | 4 | 2019-05-05T03:19:00.000Z | 2021-10-06T13:12:05.000Z | '''
Created on Sep 8, 2016
@author: nicolas
'''
import re
import fnmatch
from functools import reduce
from lemoncheesecake.reporting import load_report
from lemoncheesecake.reporting.reportdir import DEFAULT_REPORT_DIR_NAME
from lemoncheesecake.reporting.report import Result, TestResult, Step, Log, Check, Attachment, Url
from lemoncheesecake.testtree import BaseTest, BaseSuite
from lemoncheesecake.suite import Test
from lemoncheesecake.exceptions import UserError
_NEGATION_FLAGS = "-^~"
| 34.536058 | 123 | 0.661934 | '''
Created on Sep 8, 2016
@author: nicolas
'''
import re
import fnmatch
from functools import reduce
from lemoncheesecake.reporting import load_report
from lemoncheesecake.reporting.reportdir import DEFAULT_REPORT_DIR_NAME
from lemoncheesecake.reporting.report import Result, TestResult, Step, Log, Check, Attachment, Url
from lemoncheesecake.testtree import BaseTest, BaseSuite
from lemoncheesecake.suite import Test
from lemoncheesecake.exceptions import UserError
_NEGATION_FLAGS = "-^~"
def _iter_grepable(steps):
for step in steps:
yield step.description
for log in step.get_logs():
if isinstance(log, Log):
yield log.message
elif isinstance(log, Check):
yield log.description
if log.details:
yield log.details
elif isinstance(log, Attachment):
yield log.filename
yield log.description
elif isinstance(log, Url):
yield log.url
yield log.description
def _grep(pattern, steps):
return any(map(pattern.search, _iter_grepable(steps)))
class Filter(object):
def __bool__(self):
raise NotImplementedError()
def __nonzero__(self): # for Python 2 compatibility
return self.__bool__()
def __call__(self, test):
raise NotImplementedError()
class BaseTreeNodeFilter(Filter):
def __init__(self, paths=(), descriptions=(), tags=(), properties=(), links=()):
self.paths = list(paths)
self.descriptions = list(descriptions)
self.tags = list(tags)
self.properties = list(properties)
self.links = list(links)
def __bool__(self):
return any((
self.paths, self.descriptions, self.tags, self.properties, self.links
))
@staticmethod
def _match_values(values, patterns):
if not patterns:
return True
values = [value or "" for value in values] # convert None to ""
for pattern in patterns:
if pattern[0] in _NEGATION_FLAGS:
if not fnmatch.filter(values, pattern[1:]):
return True
else:
if fnmatch.filter(values, pattern):
return True
return False
@staticmethod
def _match_key_values(key_values, patterns):
if not patterns:
return True
for key, value in patterns:
if key in key_values:
if value[0] in _NEGATION_FLAGS:
if not fnmatch.fnmatch(key_values[key], value[1:]):
return True
else:
if fnmatch.fnmatch(key_values[key], value):
return True
return False
@staticmethod
def _match_values_lists(lsts, patterns):
return BaseTreeNodeFilter._match_values(
reduce(lambda x, y: list(x) + list(y), lsts, []), # make a flat list
patterns
)
def _do_paths(self, node):
return self._match_values(node.hierarchy_paths, self.paths)
def _do_descriptions(self, node):
return all(self._match_values(node.hierarchy_descriptions, descs) for descs in self.descriptions)
def _do_tags(self, node):
return all(self._match_values(node.hierarchy_tags, tags) for tags in self.tags)
def _do_properties(self, node):
return all(self._match_key_values(node.hierarchy_properties, props) for props in self.properties)
def _do_links(self, node):
return all(self._match_values_lists(node.hierarchy_links, links) for links in self.links)
@staticmethod
def _apply_criteria(obj, *criteria):
return all(criterion(obj) for criterion in criteria)
def __call__(self, node):
assert isinstance(node, (BaseTest, BaseSuite))
return self._apply_criteria(
node, self._do_paths, self._do_descriptions, self._do_tags, self._do_properties, self._do_links
)
class TestFilter(BaseTreeNodeFilter):
def __init__(self, enabled=False, disabled=False, **kwargs):
BaseTreeNodeFilter.__init__(self, **kwargs)
self.enabled = enabled
self.disabled = disabled
def __bool__(self):
return BaseTreeNodeFilter.__bool__(self) or any((self.enabled, self.disabled))
def _do_enabled(self, test):
return not test.is_disabled() if self.enabled else True
def _do_disabled(self, test):
return test.is_disabled() if self.disabled else True
def _apply_test_criteria(self, test):
return self._apply_criteria(test, self._do_enabled, self._do_disabled)
def __call__(self, test):
assert isinstance(test, Test)
return BaseTreeNodeFilter.__call__(self, test) and self._apply_test_criteria(test)
class ResultFilter(BaseTreeNodeFilter):
def __init__(self, statuses=None, enabled=False, disabled=False, grep=None, **kwargs):
BaseTreeNodeFilter.__init__(self, **kwargs)
self.statuses = set(statuses) if statuses is not None else set()
self.enabled = enabled
self.disabled = disabled
self.grep = grep
def __bool__(self):
return BaseTreeNodeFilter.__bool__(self) or any((self.statuses, self.enabled, self.disabled, self.grep))
def _do_statuses(self, result):
return result.status in self.statuses if self.statuses else True
def _do_enabled(self, result):
return result.status != "disabled" if self.enabled else True
def _do_disabled(self, result):
return result.status == "disabled" if self.disabled else True
def _do_grep(self, result):
if not self.grep:
return True
return _grep(self.grep, result.get_steps())
def _apply_result_criteria(self, result):
return self._apply_criteria(
result, self._do_statuses, self._do_enabled, self._do_disabled, self._do_grep
)
def __call__(self, result):
# type: (Result) -> bool
assert isinstance(result, Result)
# test result:
if isinstance(result, TestResult):
return BaseTreeNodeFilter.__call__(self, result) and self._apply_result_criteria(result)
# suite setup or teardown result, apply the base filter on the suite node:
elif result.parent_suite:
return BaseTreeNodeFilter.__call__(self, result.parent_suite) and self._apply_result_criteria(result)
# session setup or teardown:
else:
if BaseTreeNodeFilter.__bool__(self):
# no criteria of BaseFilter is applicable to a session setup/teardown result,
# meaning it's a no match
return False
else:
return self._apply_result_criteria(result)
class StepFilter(BaseTreeNodeFilter):
def __init__(self, passed=False, failed=False, grep=None, **kwargs):
BaseTreeNodeFilter.__init__(self, **kwargs)
self.passed = passed
self.failed = failed
self.grep = grep
def __bool__(self):
return BaseTreeNodeFilter.__bool__(self) or any((self.passed, self.failed, self.grep))
def _do_passed(self, step):
return step.is_successful() if self.passed else True
def _do_failed(self, step):
return not step.is_successful() if self.failed else True
def _do_grep(self, step):
if not self.grep:
return True
return _grep(self.grep, (step,))
def _apply_step_criteria(self, step):
return self._apply_criteria(
step, self._do_passed, self._do_failed, self._do_grep
)
def __call__(self, step):
# type: (Step) -> bool
assert isinstance(step, Step)
# test result:
if isinstance(step.parent_result, TestResult):
return BaseTreeNodeFilter.__call__(self, step.parent_result) and self._apply_step_criteria(step)
# suite setup or teardown result, apply the base filter on the suite node:
elif step.parent_result.parent_suite:
return BaseTreeNodeFilter.__call__(self, step.parent_result.parent_suite) and self._apply_step_criteria(step)
# session setup or teardown:
else:
if BaseTreeNodeFilter.__bool__(self):
# no criteria of BaseFilter is applicable to a session setup/teardown result,
# meaning it's a no match
return False
else:
return self._apply_step_criteria(step)
class FromTestsFilter(Filter):
def __init__(self, tests):
self._tests = [test.path for test in tests]
def __bool__(self):
return True
def __call__(self, test):
return test.path in self._tests
def _add_filter_cli_args(cli_parser, no_positional_argument=False, only_executed_tests=False):
def property_value(value):
splitted = value.split(":")
if len(splitted) != 2:
raise ValueError()
return splitted
group = cli_parser.add_argument_group("Filtering")
if no_positional_argument:
group.add_argument(
"--path", "-p", nargs="+", help="Filter on test/suite path (wildcard character '*' can be used)"
)
else:
group.add_argument(
"path", nargs="*", default=[], help="Filter on test/suite path (wildcard character '*' can be used)"
)
group.add_argument(
"--desc", nargs="+", action="append", default=[], help="Filter on descriptions"
)
group.add_argument(
"--tag", "-a", nargs="+", action="append", default=[], help="Filter on tags"
)
group.add_argument(
"--property", "-m", nargs="+", type=property_value, action="append", default=[], help="Filter on properties"
)
group.add_argument(
"--link", "-l", nargs="+", action="append", default=[], help="Filter on links (names and URLs)"
)
group.add_argument(
"--passed", action="store_true", help="Filter on passed tests"
)
group.add_argument(
"--failed", action="store_true", help="Filter on failed tests"
)
group.add_argument(
"--grep", "-g", help="Filter result content using pattern"
)
if not only_executed_tests:
group.add_argument(
"--skipped", action="store_true", help="Filter on skipped tests"
)
group.add_argument(
"--non-passed", action="store_true", help="Alias for --failed --skipped"
)
group.add_argument(
"--disabled", action="store_true", help="Filter on disabled tests"
)
group.add_argument(
"--enabled", action="store_true", help="Filter on enabled (non-disabled) tests"
)
return group
def add_test_filter_cli_args(cli_parser):
group = _add_filter_cli_args(cli_parser)
group.add_argument(
"--from-report", required=False, help="When enabled, the filtering is based on the given report"
)
return group
def add_result_filter_cli_args(cli_parser, only_executed_tests=False):
return _add_filter_cli_args(cli_parser, no_positional_argument=True, only_executed_tests=only_executed_tests)
def add_step_filter_cli_args(cli_parser):
return _add_filter_cli_args(cli_parser, no_positional_argument=True, only_executed_tests=True)
def _set_common_filter_criteria(fltr, cli_args, only_executed_tests=False):
if not only_executed_tests and (cli_args.disabled and cli_args.enabled):
raise UserError("--disabled and --enabled arguments are mutually exclusive")
fltr.paths = cli_args.path
fltr.descriptions = cli_args.desc
fltr.tags = cli_args.tag
fltr.properties = cli_args.property
fltr.links = cli_args.link
if not only_executed_tests:
fltr.disabled = cli_args.disabled
fltr.enabled = cli_args.enabled
def _make_test_filter(cli_args):
if cli_args.passed or cli_args.failed or cli_args.skipped:
raise UserError("--passed, --failed and --skipped arguments can only be used on the report-based filter")
test_filter = TestFilter()
_set_common_filter_criteria(test_filter, cli_args)
return test_filter
def _make_grep_criterion(grep):
return re.compile(grep, re.IGNORECASE | re.MULTILINE)
def make_result_filter(cli_args, only_executed_tests=False):
result_filter = ResultFilter()
_set_common_filter_criteria(result_filter, cli_args, only_executed_tests=only_executed_tests)
if only_executed_tests:
if cli_args.passed:
result_filter.statuses.add("passed")
if cli_args.failed:
result_filter.statuses.add("failed")
# when neither --passed not --failed was passed, enforce statuses passed and failed
# to select tests that have been executed
if not result_filter.statuses:
result_filter.statuses.update(("passed", "failed"))
else:
if cli_args.passed:
result_filter.statuses.add("passed")
if cli_args.failed:
result_filter.statuses.add("failed")
if cli_args.skipped:
result_filter.statuses.add("skipped")
if cli_args.non_passed:
result_filter.statuses.update(("failed", "skipped"))
if cli_args.grep:
result_filter.grep = _make_grep_criterion(cli_args.grep)
return result_filter
def _make_from_report_filter(cli_args, only_executed_tests=False):
report = load_report(cli_args.from_report or DEFAULT_REPORT_DIR_NAME)
test_filter = make_result_filter(cli_args, only_executed_tests=only_executed_tests)
return FromTestsFilter(filter(test_filter, report.all_tests()))
def make_test_filter(cli_args):
if any((cli_args.from_report, cli_args.passed, cli_args.failed, cli_args.skipped, cli_args.non_passed, cli_args.grep)):
return _make_from_report_filter(cli_args)
else:
return _make_test_filter(cli_args)
def make_step_filter(cli_args):
if cli_args.passed and cli_args.failed:
raise UserError("--passed and --failed arguments are mutually exclusive")
step_filter = StepFilter()
_set_common_filter_criteria(step_filter, cli_args, only_executed_tests=True)
step_filter.passed = cli_args.passed
step_filter.failed = cli_args.failed
if cli_args.grep:
step_filter.grep = _make_grep_criterion(cli_args.grep)
return step_filter
| 12,225 | 466 | 1,161 |
06a50c62e1fde1ccf15cad6a38ed701acf4dc5f3 | 20,995 | py | Python | scenegraph/exp-official/taskographyv2medium10_FF/taskographyv2medium10_FF_test.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | 1 | 2022-01-30T22:06:57.000Z | 2022-01-30T22:06:57.000Z | scenegraph/exp-official/taskographyv2medium10_FF/taskographyv2medium10_FF_test.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | null | null | null | scenegraph/exp-official/taskographyv2medium10_FF/taskographyv2medium10_FF_test.py | taskography/3dscenegraph-dev | 2c261241230fbea1f1c687ff793478248f25c02c | [
"MIT"
] | null | null | null | STATS = [
{
"num_node_expansions": 653,
"plan_length": 167,
"search_time": 0.52,
"total_time": 0.52
},
{
"num_node_expansions": 978,
"plan_length": 167,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 1087,
"plan_length": 194,
"search_time": 15.85,
"total_time": 15.85
},
{
"num_node_expansions": 923,
"plan_length": 198,
"search_time": 15.21,
"total_time": 15.21
},
{
"num_node_expansions": 667,
"plan_length": 142,
"search_time": 13.94,
"total_time": 13.94
},
{
"num_node_expansions": 581,
"plan_length": 156,
"search_time": 11.54,
"total_time": 11.54
},
{
"num_node_expansions": 505,
"plan_length": 134,
"search_time": 2.79,
"total_time": 2.79
},
{
"num_node_expansions": 953,
"plan_length": 165,
"search_time": 6.22,
"total_time": 6.22
},
{
"num_node_expansions": 792,
"plan_length": 163,
"search_time": 0.33,
"total_time": 0.33
},
{
"num_node_expansions": 554,
"plan_length": 160,
"search_time": 0.27,
"total_time": 0.27
},
{
"num_node_expansions": 706,
"plan_length": 156,
"search_time": 2.44,
"total_time": 2.44
},
{
"num_node_expansions": 620,
"plan_length": 138,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 661,
"plan_length": 169,
"search_time": 0.28,
"total_time": 0.28
},
{
"num_node_expansions": 774,
"plan_length": 178,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 615,
"plan_length": 171,
"search_time": 0.53,
"total_time": 0.53
},
{
"num_node_expansions": 516,
"plan_length": 134,
"search_time": 0.71,
"total_time": 0.71
},
{
"num_node_expansions": 1077,
"plan_length": 221,
"search_time": 0.58,
"total_time": 0.58
},
{
"num_node_expansions": 1029,
"plan_length": 213,
"search_time": 0.62,
"total_time": 0.62
},
{
"num_node_expansions": 753,
"plan_length": 173,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 814,
"plan_length": 210,
"search_time": 0.5,
"total_time": 0.5
},
{
"num_node_expansions": 569,
"plan_length": 134,
"search_time": 3.06,
"total_time": 3.06
},
{
"num_node_expansions": 899,
"plan_length": 176,
"search_time": 5.84,
"total_time": 5.84
},
{
"num_node_expansions": 531,
"plan_length": 144,
"search_time": 3.15,
"total_time": 3.15
},
{
"num_node_expansions": 631,
"plan_length": 164,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 479,
"plan_length": 138,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 941,
"plan_length": 148,
"search_time": 0.22,
"total_time": 0.22
},
{
"num_node_expansions": 1023,
"plan_length": 197,
"search_time": 9.46,
"total_time": 9.46
},
{
"num_node_expansions": 1152,
"plan_length": 196,
"search_time": 12.7,
"total_time": 12.7
},
{
"num_node_expansions": 629,
"plan_length": 147,
"search_time": 4.14,
"total_time": 4.14
},
{
"num_node_expansions": 697,
"plan_length": 160,
"search_time": 2.82,
"total_time": 2.82
},
{
"num_node_expansions": 646,
"plan_length": 158,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 741,
"plan_length": 152,
"search_time": 4.56,
"total_time": 4.56
},
{
"num_node_expansions": 486,
"plan_length": 136,
"search_time": 1.77,
"total_time": 1.77
},
{
"num_node_expansions": 602,
"plan_length": 146,
"search_time": 3.22,
"total_time": 3.22
},
{
"num_node_expansions": 774,
"plan_length": 186,
"search_time": 1.56,
"total_time": 1.56
},
{
"num_node_expansions": 1512,
"plan_length": 209,
"search_time": 4.48,
"total_time": 4.48
},
{
"num_node_expansions": 791,
"plan_length": 180,
"search_time": 14.5,
"total_time": 14.5
},
{
"num_node_expansions": 1019,
"plan_length": 211,
"search_time": 18.59,
"total_time": 18.59
},
{
"num_node_expansions": 450,
"plan_length": 133,
"search_time": 2.75,
"total_time": 2.75
},
{
"num_node_expansions": 526,
"plan_length": 135,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 1329,
"plan_length": 182,
"search_time": 8.07,
"total_time": 8.07
},
{
"num_node_expansions": 655,
"plan_length": 134,
"search_time": 3.8,
"total_time": 3.8
},
{
"num_node_expansions": 636,
"plan_length": 159,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 1403,
"plan_length": 196,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 664,
"plan_length": 175,
"search_time": 4.18,
"total_time": 4.18
},
{
"num_node_expansions": 760,
"plan_length": 150,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 593,
"plan_length": 163,
"search_time": 9.42,
"total_time": 9.42
},
{
"num_node_expansions": 1043,
"plan_length": 179,
"search_time": 16.75,
"total_time": 16.75
},
{
"num_node_expansions": 390,
"plan_length": 103,
"search_time": 0.46,
"total_time": 0.46
},
{
"num_node_expansions": 419,
"plan_length": 120,
"search_time": 0.55,
"total_time": 0.55
},
{
"num_node_expansions": 606,
"plan_length": 160,
"search_time": 13.41,
"total_time": 13.41
},
{
"num_node_expansions": 905,
"plan_length": 213,
"search_time": 29.84,
"total_time": 29.84
},
{
"num_node_expansions": 525,
"plan_length": 146,
"search_time": 0.31,
"total_time": 0.31
},
{
"num_node_expansions": 522,
"plan_length": 147,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 652,
"plan_length": 165,
"search_time": 10.19,
"total_time": 10.19
},
{
"num_node_expansions": 1188,
"plan_length": 178,
"search_time": 13.24,
"total_time": 13.24
},
{
"num_node_expansions": 450,
"plan_length": 136,
"search_time": 1.48,
"total_time": 1.48
},
{
"num_node_expansions": 1179,
"plan_length": 209,
"search_time": 3.44,
"total_time": 3.44
},
{
"num_node_expansions": 834,
"plan_length": 204,
"search_time": 20.08,
"total_time": 20.08
},
{
"num_node_expansions": 1133,
"plan_length": 187,
"search_time": 15.61,
"total_time": 15.61
},
{
"num_node_expansions": 777,
"plan_length": 181,
"search_time": 13.35,
"total_time": 13.35
},
{
"num_node_expansions": 591,
"plan_length": 136,
"search_time": 2.59,
"total_time": 2.59
},
{
"num_node_expansions": 580,
"plan_length": 143,
"search_time": 2.89,
"total_time": 2.89
},
{
"num_node_expansions": 977,
"plan_length": 173,
"search_time": 8.97,
"total_time": 8.97
},
{
"num_node_expansions": 694,
"plan_length": 167,
"search_time": 8.22,
"total_time": 8.22
},
{
"num_node_expansions": 861,
"plan_length": 188,
"search_time": 1.14,
"total_time": 1.14
},
{
"num_node_expansions": 790,
"plan_length": 160,
"search_time": 0.93,
"total_time": 0.93
},
{
"num_node_expansions": 841,
"plan_length": 188,
"search_time": 5.61,
"total_time": 5.61
},
{
"num_node_expansions": 436,
"plan_length": 128,
"search_time": 2.46,
"total_time": 2.46
},
{
"num_node_expansions": 550,
"plan_length": 127,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 434,
"plan_length": 134,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 958,
"plan_length": 195,
"search_time": 9.09,
"total_time": 9.09
},
{
"num_node_expansions": 658,
"plan_length": 174,
"search_time": 6.01,
"total_time": 6.01
},
{
"num_node_expansions": 370,
"plan_length": 126,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 440,
"plan_length": 119,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 648,
"plan_length": 168,
"search_time": 8.1,
"total_time": 8.1
},
{
"num_node_expansions": 832,
"plan_length": 178,
"search_time": 10.9,
"total_time": 10.9
},
{
"num_node_expansions": 355,
"plan_length": 116,
"search_time": 0.7,
"total_time": 0.7
},
{
"num_node_expansions": 495,
"plan_length": 123,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 612,
"plan_length": 148,
"search_time": 4.23,
"total_time": 4.23
},
{
"num_node_expansions": 1067,
"plan_length": 174,
"search_time": 6.3,
"total_time": 6.3
},
{
"num_node_expansions": 821,
"plan_length": 185,
"search_time": 3.0,
"total_time": 3.0
},
{
"num_node_expansions": 625,
"plan_length": 153,
"search_time": 2.98,
"total_time": 2.98
},
{
"num_node_expansions": 304,
"plan_length": 99,
"search_time": 0.16,
"total_time": 0.16
},
{
"num_node_expansions": 477,
"plan_length": 133,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 651,
"plan_length": 160,
"search_time": 0.18,
"total_time": 0.18
},
{
"num_node_expansions": 594,
"plan_length": 147,
"search_time": 0.17,
"total_time": 0.17
},
{
"num_node_expansions": 524,
"plan_length": 134,
"search_time": 5.3,
"total_time": 5.3
},
{
"num_node_expansions": 400,
"plan_length": 127,
"search_time": 4.95,
"total_time": 4.95
},
{
"num_node_expansions": 825,
"plan_length": 185,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 613,
"plan_length": 156,
"search_time": 4.57,
"total_time": 4.57
},
{
"num_node_expansions": 427,
"plan_length": 121,
"search_time": 0.09,
"total_time": 0.09
},
{
"num_node_expansions": 362,
"plan_length": 116,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 459,
"plan_length": 119,
"search_time": 0.75,
"total_time": 0.75
},
{
"num_node_expansions": 501,
"plan_length": 132,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 697,
"plan_length": 156,
"search_time": 4.24,
"total_time": 4.24
},
{
"num_node_expansions": 1024,
"plan_length": 162,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 501,
"plan_length": 122,
"search_time": 4.67,
"total_time": 4.67
},
{
"num_node_expansions": 577,
"plan_length": 126,
"search_time": 5.56,
"total_time": 5.56
},
{
"num_node_expansions": 633,
"plan_length": 152,
"search_time": 17.98,
"total_time": 17.98
},
{
"num_node_expansions": 833,
"plan_length": 186,
"search_time": 24.85,
"total_time": 24.85
},
{
"num_node_expansions": 996,
"plan_length": 183,
"search_time": 4.05,
"total_time": 4.05
},
{
"num_node_expansions": 1246,
"plan_length": 206,
"search_time": 5.39,
"total_time": 5.39
},
{
"num_node_expansions": 466,
"plan_length": 137,
"search_time": 2.03,
"total_time": 2.03
},
{
"num_node_expansions": 530,
"plan_length": 142,
"search_time": 2.28,
"total_time": 2.28
},
{
"num_node_expansions": 923,
"plan_length": 189,
"search_time": 19.77,
"total_time": 19.77
},
{
"num_node_expansions": 799,
"plan_length": 167,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 651,
"plan_length": 173,
"search_time": 1.38,
"total_time": 1.38
},
{
"num_node_expansions": 590,
"plan_length": 159,
"search_time": 0.94,
"total_time": 0.94
},
{
"num_node_expansions": 542,
"plan_length": 155,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 418,
"plan_length": 130,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 881,
"plan_length": 182,
"search_time": 11.01,
"total_time": 11.01
},
{
"num_node_expansions": 1256,
"plan_length": 205,
"search_time": 15.58,
"total_time": 15.58
},
{
"num_node_expansions": 612,
"plan_length": 146,
"search_time": 2.92,
"total_time": 2.92
},
{
"num_node_expansions": 567,
"plan_length": 145,
"search_time": 2.43,
"total_time": 2.43
},
{
"num_node_expansions": 655,
"plan_length": 152,
"search_time": 9.25,
"total_time": 9.25
},
{
"num_node_expansions": 499,
"plan_length": 133,
"search_time": 7.5,
"total_time": 7.5
},
{
"num_node_expansions": 500,
"plan_length": 137,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 869,
"plan_length": 156,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 522,
"plan_length": 161,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 712,
"plan_length": 181,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 708,
"plan_length": 142,
"search_time": 4.46,
"total_time": 4.46
},
{
"num_node_expansions": 642,
"plan_length": 163,
"search_time": 5.26,
"total_time": 5.26
},
{
"num_node_expansions": 426,
"plan_length": 134,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 471,
"plan_length": 129,
"search_time": 0.14,
"total_time": 0.14
},
{
"num_node_expansions": 520,
"plan_length": 135,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 666,
"plan_length": 144,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 563,
"plan_length": 159,
"search_time": 2.27,
"total_time": 2.27
},
{
"num_node_expansions": 566,
"plan_length": 162,
"search_time": 2.06,
"total_time": 2.06
},
{
"num_node_expansions": 836,
"plan_length": 203,
"search_time": 16.69,
"total_time": 16.69
},
{
"num_node_expansions": 604,
"plan_length": 145,
"search_time": 1.25,
"total_time": 1.25
},
{
"num_node_expansions": 506,
"plan_length": 124,
"search_time": 0.99,
"total_time": 0.99
},
{
"num_node_expansions": 851,
"plan_length": 203,
"search_time": 1.15,
"total_time": 1.15
},
{
"num_node_expansions": 603,
"plan_length": 166,
"search_time": 0.76,
"total_time": 0.76
},
{
"num_node_expansions": 497,
"plan_length": 118,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 590,
"plan_length": 117,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 409,
"plan_length": 129,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 669,
"plan_length": 165,
"search_time": 0.12,
"total_time": 0.12
},
{
"num_node_expansions": 786,
"plan_length": 161,
"search_time": 18.85,
"total_time": 18.85
},
{
"num_node_expansions": 474,
"plan_length": 144,
"search_time": 10.09,
"total_time": 10.09
},
{
"num_node_expansions": 579,
"plan_length": 165,
"search_time": 1.18,
"total_time": 1.18
},
{
"num_node_expansions": 620,
"plan_length": 160,
"search_time": 1.01,
"total_time": 1.01
},
{
"num_node_expansions": 1523,
"plan_length": 221,
"search_time": 25.37,
"total_time": 25.37
},
{
"num_node_expansions": 961,
"plan_length": 207,
"search_time": 18.62,
"total_time": 18.62
},
{
"num_node_expansions": 444,
"plan_length": 127,
"search_time": 3.93,
"total_time": 3.93
},
{
"num_node_expansions": 464,
"plan_length": 127,
"search_time": 4.01,
"total_time": 4.01
},
{
"num_node_expansions": 773,
"plan_length": 194,
"search_time": 0.78,
"total_time": 0.78
},
{
"num_node_expansions": 676,
"plan_length": 161,
"search_time": 0.83,
"total_time": 0.83
},
{
"num_node_expansions": 414,
"plan_length": 127,
"search_time": 0.39,
"total_time": 0.39
},
{
"num_node_expansions": 623,
"plan_length": 165,
"search_time": 0.66,
"total_time": 0.66
},
{
"num_node_expansions": 703,
"plan_length": 163,
"search_time": 1.06,
"total_time": 1.06
},
{
"num_node_expansions": 785,
"plan_length": 176,
"search_time": 1.02,
"total_time": 1.02
},
{
"num_node_expansions": 986,
"plan_length": 167,
"search_time": 15.72,
"total_time": 15.72
},
{
"num_node_expansions": 955,
"plan_length": 205,
"search_time": 12.55,
"total_time": 12.55
},
{
"num_node_expansions": 417,
"plan_length": 118,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 521,
"plan_length": 141,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 815,
"plan_length": 182,
"search_time": 26.55,
"total_time": 26.55
}
]
num_timeouts = 15
num_timeouts = 0
num_problems = 172
| 22.146624 | 36 | 0.475685 | STATS = [
{
"num_node_expansions": 653,
"plan_length": 167,
"search_time": 0.52,
"total_time": 0.52
},
{
"num_node_expansions": 978,
"plan_length": 167,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 1087,
"plan_length": 194,
"search_time": 15.85,
"total_time": 15.85
},
{
"num_node_expansions": 923,
"plan_length": 198,
"search_time": 15.21,
"total_time": 15.21
},
{
"num_node_expansions": 667,
"plan_length": 142,
"search_time": 13.94,
"total_time": 13.94
},
{
"num_node_expansions": 581,
"plan_length": 156,
"search_time": 11.54,
"total_time": 11.54
},
{
"num_node_expansions": 505,
"plan_length": 134,
"search_time": 2.79,
"total_time": 2.79
},
{
"num_node_expansions": 953,
"plan_length": 165,
"search_time": 6.22,
"total_time": 6.22
},
{
"num_node_expansions": 792,
"plan_length": 163,
"search_time": 0.33,
"total_time": 0.33
},
{
"num_node_expansions": 554,
"plan_length": 160,
"search_time": 0.27,
"total_time": 0.27
},
{
"num_node_expansions": 706,
"plan_length": 156,
"search_time": 2.44,
"total_time": 2.44
},
{
"num_node_expansions": 620,
"plan_length": 138,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 661,
"plan_length": 169,
"search_time": 0.28,
"total_time": 0.28
},
{
"num_node_expansions": 774,
"plan_length": 178,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 615,
"plan_length": 171,
"search_time": 0.53,
"total_time": 0.53
},
{
"num_node_expansions": 516,
"plan_length": 134,
"search_time": 0.71,
"total_time": 0.71
},
{
"num_node_expansions": 1077,
"plan_length": 221,
"search_time": 0.58,
"total_time": 0.58
},
{
"num_node_expansions": 1029,
"plan_length": 213,
"search_time": 0.62,
"total_time": 0.62
},
{
"num_node_expansions": 753,
"plan_length": 173,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 814,
"plan_length": 210,
"search_time": 0.5,
"total_time": 0.5
},
{
"num_node_expansions": 569,
"plan_length": 134,
"search_time": 3.06,
"total_time": 3.06
},
{
"num_node_expansions": 899,
"plan_length": 176,
"search_time": 5.84,
"total_time": 5.84
},
{
"num_node_expansions": 531,
"plan_length": 144,
"search_time": 3.15,
"total_time": 3.15
},
{
"num_node_expansions": 631,
"plan_length": 164,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 479,
"plan_length": 138,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 941,
"plan_length": 148,
"search_time": 0.22,
"total_time": 0.22
},
{
"num_node_expansions": 1023,
"plan_length": 197,
"search_time": 9.46,
"total_time": 9.46
},
{
"num_node_expansions": 1152,
"plan_length": 196,
"search_time": 12.7,
"total_time": 12.7
},
{
"num_node_expansions": 629,
"plan_length": 147,
"search_time": 4.14,
"total_time": 4.14
},
{
"num_node_expansions": 697,
"plan_length": 160,
"search_time": 2.82,
"total_time": 2.82
},
{
"num_node_expansions": 646,
"plan_length": 158,
"search_time": 3.74,
"total_time": 3.74
},
{
"num_node_expansions": 741,
"plan_length": 152,
"search_time": 4.56,
"total_time": 4.56
},
{
"num_node_expansions": 486,
"plan_length": 136,
"search_time": 1.77,
"total_time": 1.77
},
{
"num_node_expansions": 602,
"plan_length": 146,
"search_time": 3.22,
"total_time": 3.22
},
{
"num_node_expansions": 774,
"plan_length": 186,
"search_time": 1.56,
"total_time": 1.56
},
{
"num_node_expansions": 1512,
"plan_length": 209,
"search_time": 4.48,
"total_time": 4.48
},
{
"num_node_expansions": 791,
"plan_length": 180,
"search_time": 14.5,
"total_time": 14.5
},
{
"num_node_expansions": 1019,
"plan_length": 211,
"search_time": 18.59,
"total_time": 18.59
},
{
"num_node_expansions": 450,
"plan_length": 133,
"search_time": 2.75,
"total_time": 2.75
},
{
"num_node_expansions": 526,
"plan_length": 135,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 1329,
"plan_length": 182,
"search_time": 8.07,
"total_time": 8.07
},
{
"num_node_expansions": 655,
"plan_length": 134,
"search_time": 3.8,
"total_time": 3.8
},
{
"num_node_expansions": 636,
"plan_length": 159,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 1403,
"plan_length": 196,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 664,
"plan_length": 175,
"search_time": 4.18,
"total_time": 4.18
},
{
"num_node_expansions": 760,
"plan_length": 150,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 593,
"plan_length": 163,
"search_time": 9.42,
"total_time": 9.42
},
{
"num_node_expansions": 1043,
"plan_length": 179,
"search_time": 16.75,
"total_time": 16.75
},
{
"num_node_expansions": 390,
"plan_length": 103,
"search_time": 0.46,
"total_time": 0.46
},
{
"num_node_expansions": 419,
"plan_length": 120,
"search_time": 0.55,
"total_time": 0.55
},
{
"num_node_expansions": 606,
"plan_length": 160,
"search_time": 13.41,
"total_time": 13.41
},
{
"num_node_expansions": 905,
"plan_length": 213,
"search_time": 29.84,
"total_time": 29.84
},
{
"num_node_expansions": 525,
"plan_length": 146,
"search_time": 0.31,
"total_time": 0.31
},
{
"num_node_expansions": 522,
"plan_length": 147,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 652,
"plan_length": 165,
"search_time": 10.19,
"total_time": 10.19
},
{
"num_node_expansions": 1188,
"plan_length": 178,
"search_time": 13.24,
"total_time": 13.24
},
{
"num_node_expansions": 450,
"plan_length": 136,
"search_time": 1.48,
"total_time": 1.48
},
{
"num_node_expansions": 1179,
"plan_length": 209,
"search_time": 3.44,
"total_time": 3.44
},
{
"num_node_expansions": 834,
"plan_length": 204,
"search_time": 20.08,
"total_time": 20.08
},
{
"num_node_expansions": 1133,
"plan_length": 187,
"search_time": 15.61,
"total_time": 15.61
},
{
"num_node_expansions": 777,
"plan_length": 181,
"search_time": 13.35,
"total_time": 13.35
},
{
"num_node_expansions": 591,
"plan_length": 136,
"search_time": 2.59,
"total_time": 2.59
},
{
"num_node_expansions": 580,
"plan_length": 143,
"search_time": 2.89,
"total_time": 2.89
},
{
"num_node_expansions": 977,
"plan_length": 173,
"search_time": 8.97,
"total_time": 8.97
},
{
"num_node_expansions": 694,
"plan_length": 167,
"search_time": 8.22,
"total_time": 8.22
},
{
"num_node_expansions": 861,
"plan_length": 188,
"search_time": 1.14,
"total_time": 1.14
},
{
"num_node_expansions": 790,
"plan_length": 160,
"search_time": 0.93,
"total_time": 0.93
},
{
"num_node_expansions": 841,
"plan_length": 188,
"search_time": 5.61,
"total_time": 5.61
},
{
"num_node_expansions": 436,
"plan_length": 128,
"search_time": 2.46,
"total_time": 2.46
},
{
"num_node_expansions": 550,
"plan_length": 127,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 434,
"plan_length": 134,
"search_time": 0.03,
"total_time": 0.03
},
{
"num_node_expansions": 958,
"plan_length": 195,
"search_time": 9.09,
"total_time": 9.09
},
{
"num_node_expansions": 658,
"plan_length": 174,
"search_time": 6.01,
"total_time": 6.01
},
{
"num_node_expansions": 370,
"plan_length": 126,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 440,
"plan_length": 119,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 648,
"plan_length": 168,
"search_time": 8.1,
"total_time": 8.1
},
{
"num_node_expansions": 832,
"plan_length": 178,
"search_time": 10.9,
"total_time": 10.9
},
{
"num_node_expansions": 355,
"plan_length": 116,
"search_time": 0.7,
"total_time": 0.7
},
{
"num_node_expansions": 495,
"plan_length": 123,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 612,
"plan_length": 148,
"search_time": 4.23,
"total_time": 4.23
},
{
"num_node_expansions": 1067,
"plan_length": 174,
"search_time": 6.3,
"total_time": 6.3
},
{
"num_node_expansions": 821,
"plan_length": 185,
"search_time": 3.0,
"total_time": 3.0
},
{
"num_node_expansions": 625,
"plan_length": 153,
"search_time": 2.98,
"total_time": 2.98
},
{
"num_node_expansions": 304,
"plan_length": 99,
"search_time": 0.16,
"total_time": 0.16
},
{
"num_node_expansions": 477,
"plan_length": 133,
"search_time": 0.4,
"total_time": 0.4
},
{
"num_node_expansions": 651,
"plan_length": 160,
"search_time": 0.18,
"total_time": 0.18
},
{
"num_node_expansions": 594,
"plan_length": 147,
"search_time": 0.17,
"total_time": 0.17
},
{
"num_node_expansions": 524,
"plan_length": 134,
"search_time": 5.3,
"total_time": 5.3
},
{
"num_node_expansions": 400,
"plan_length": 127,
"search_time": 4.95,
"total_time": 4.95
},
{
"num_node_expansions": 825,
"plan_length": 185,
"search_time": 6.37,
"total_time": 6.37
},
{
"num_node_expansions": 613,
"plan_length": 156,
"search_time": 4.57,
"total_time": 4.57
},
{
"num_node_expansions": 427,
"plan_length": 121,
"search_time": 0.09,
"total_time": 0.09
},
{
"num_node_expansions": 362,
"plan_length": 116,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 459,
"plan_length": 119,
"search_time": 0.75,
"total_time": 0.75
},
{
"num_node_expansions": 501,
"plan_length": 132,
"search_time": 0.86,
"total_time": 0.86
},
{
"num_node_expansions": 697,
"plan_length": 156,
"search_time": 4.24,
"total_time": 4.24
},
{
"num_node_expansions": 1024,
"plan_length": 162,
"search_time": 7.13,
"total_time": 7.13
},
{
"num_node_expansions": 501,
"plan_length": 122,
"search_time": 4.67,
"total_time": 4.67
},
{
"num_node_expansions": 577,
"plan_length": 126,
"search_time": 5.56,
"total_time": 5.56
},
{
"num_node_expansions": 633,
"plan_length": 152,
"search_time": 17.98,
"total_time": 17.98
},
{
"num_node_expansions": 833,
"plan_length": 186,
"search_time": 24.85,
"total_time": 24.85
},
{
"num_node_expansions": 996,
"plan_length": 183,
"search_time": 4.05,
"total_time": 4.05
},
{
"num_node_expansions": 1246,
"plan_length": 206,
"search_time": 5.39,
"total_time": 5.39
},
{
"num_node_expansions": 466,
"plan_length": 137,
"search_time": 2.03,
"total_time": 2.03
},
{
"num_node_expansions": 530,
"plan_length": 142,
"search_time": 2.28,
"total_time": 2.28
},
{
"num_node_expansions": 923,
"plan_length": 189,
"search_time": 19.77,
"total_time": 19.77
},
{
"num_node_expansions": 799,
"plan_length": 167,
"search_time": 16.16,
"total_time": 16.16
},
{
"num_node_expansions": 651,
"plan_length": 173,
"search_time": 1.38,
"total_time": 1.38
},
{
"num_node_expansions": 590,
"plan_length": 159,
"search_time": 0.94,
"total_time": 0.94
},
{
"num_node_expansions": 542,
"plan_length": 155,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 418,
"plan_length": 130,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 881,
"plan_length": 182,
"search_time": 11.01,
"total_time": 11.01
},
{
"num_node_expansions": 1256,
"plan_length": 205,
"search_time": 15.58,
"total_time": 15.58
},
{
"num_node_expansions": 612,
"plan_length": 146,
"search_time": 2.92,
"total_time": 2.92
},
{
"num_node_expansions": 567,
"plan_length": 145,
"search_time": 2.43,
"total_time": 2.43
},
{
"num_node_expansions": 655,
"plan_length": 152,
"search_time": 9.25,
"total_time": 9.25
},
{
"num_node_expansions": 499,
"plan_length": 133,
"search_time": 7.5,
"total_time": 7.5
},
{
"num_node_expansions": 500,
"plan_length": 137,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 869,
"plan_length": 156,
"search_time": 0.47,
"total_time": 0.47
},
{
"num_node_expansions": 522,
"plan_length": 161,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 712,
"plan_length": 181,
"search_time": 0.07,
"total_time": 0.07
},
{
"num_node_expansions": 708,
"plan_length": 142,
"search_time": 4.46,
"total_time": 4.46
},
{
"num_node_expansions": 642,
"plan_length": 163,
"search_time": 5.26,
"total_time": 5.26
},
{
"num_node_expansions": 426,
"plan_length": 134,
"search_time": 0.11,
"total_time": 0.11
},
{
"num_node_expansions": 471,
"plan_length": 129,
"search_time": 0.14,
"total_time": 0.14
},
{
"num_node_expansions": 520,
"plan_length": 135,
"search_time": 1.65,
"total_time": 1.65
},
{
"num_node_expansions": 666,
"plan_length": 144,
"search_time": 3.02,
"total_time": 3.02
},
{
"num_node_expansions": 563,
"plan_length": 159,
"search_time": 2.27,
"total_time": 2.27
},
{
"num_node_expansions": 566,
"plan_length": 162,
"search_time": 2.06,
"total_time": 2.06
},
{
"num_node_expansions": 836,
"plan_length": 203,
"search_time": 16.69,
"total_time": 16.69
},
{
"num_node_expansions": 604,
"plan_length": 145,
"search_time": 1.25,
"total_time": 1.25
},
{
"num_node_expansions": 506,
"plan_length": 124,
"search_time": 0.99,
"total_time": 0.99
},
{
"num_node_expansions": 851,
"plan_length": 203,
"search_time": 1.15,
"total_time": 1.15
},
{
"num_node_expansions": 603,
"plan_length": 166,
"search_time": 0.76,
"total_time": 0.76
},
{
"num_node_expansions": 497,
"plan_length": 118,
"search_time": 0.3,
"total_time": 0.3
},
{
"num_node_expansions": 590,
"plan_length": 117,
"search_time": 0.32,
"total_time": 0.32
},
{
"num_node_expansions": 409,
"plan_length": 129,
"search_time": 0.08,
"total_time": 0.08
},
{
"num_node_expansions": 669,
"plan_length": 165,
"search_time": 0.12,
"total_time": 0.12
},
{
"num_node_expansions": 786,
"plan_length": 161,
"search_time": 18.85,
"total_time": 18.85
},
{
"num_node_expansions": 474,
"plan_length": 144,
"search_time": 10.09,
"total_time": 10.09
},
{
"num_node_expansions": 579,
"plan_length": 165,
"search_time": 1.18,
"total_time": 1.18
},
{
"num_node_expansions": 620,
"plan_length": 160,
"search_time": 1.01,
"total_time": 1.01
},
{
"num_node_expansions": 1523,
"plan_length": 221,
"search_time": 25.37,
"total_time": 25.37
},
{
"num_node_expansions": 961,
"plan_length": 207,
"search_time": 18.62,
"total_time": 18.62
},
{
"num_node_expansions": 444,
"plan_length": 127,
"search_time": 3.93,
"total_time": 3.93
},
{
"num_node_expansions": 464,
"plan_length": 127,
"search_time": 4.01,
"total_time": 4.01
},
{
"num_node_expansions": 773,
"plan_length": 194,
"search_time": 0.78,
"total_time": 0.78
},
{
"num_node_expansions": 676,
"plan_length": 161,
"search_time": 0.83,
"total_time": 0.83
},
{
"num_node_expansions": 414,
"plan_length": 127,
"search_time": 0.39,
"total_time": 0.39
},
{
"num_node_expansions": 623,
"plan_length": 165,
"search_time": 0.66,
"total_time": 0.66
},
{
"num_node_expansions": 703,
"plan_length": 163,
"search_time": 1.06,
"total_time": 1.06
},
{
"num_node_expansions": 785,
"plan_length": 176,
"search_time": 1.02,
"total_time": 1.02
},
{
"num_node_expansions": 986,
"plan_length": 167,
"search_time": 15.72,
"total_time": 15.72
},
{
"num_node_expansions": 955,
"plan_length": 205,
"search_time": 12.55,
"total_time": 12.55
},
{
"num_node_expansions": 417,
"plan_length": 118,
"search_time": 0.05,
"total_time": 0.05
},
{
"num_node_expansions": 521,
"plan_length": 141,
"search_time": 0.06,
"total_time": 0.06
},
{
"num_node_expansions": 815,
"plan_length": 182,
"search_time": 26.55,
"total_time": 26.55
}
]
num_timeouts = 15
num_timeouts = 0
num_problems = 172
| 0 | 0 | 0 |
9ae1fd07ba143f523cf6ec0b09ff7db797b76542 | 4,448 | py | Python | scratch/parse_flexible_args.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 21 | 2015-01-27T13:32:46.000Z | 2022-03-12T21:45:12.000Z | scratch/parse_flexible_args.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 2 | 2017-05-23T11:30:01.000Z | 2019-07-29T01:21:27.000Z | scratch/parse_flexible_args.py | strukovsv/PyHAML | 75d7774f30809f755dad2867e9ab55cea3019046 | [
"BSD-3-Clause"
] | 8 | 2015-07-13T17:46:24.000Z | 2021-12-08T18:13:22.000Z | import re
import ast
import operator
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {
'None': None,
'True': True,
'False': False,
'dict': dict,
'list': list,
'sorted': sorted
}
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.body
return _convert(node_or_string)
if __name__ == '__main__':
signatures = '''
(1, 2, 3) more
(key='value') more
(**dict(key='value')) more
(*[1, 2, 3]) more
{:class => "code", :id => "message"} Hello
(class_='before %s after' % 'middle') hello
(data-crud=dict(id=34, url='/api')) crud goes here
(u'unicode!', b'bytes!')
(' '.join(['hello', 'there'])) after
([i for i in 'hello'])
'''.strip().splitlines()
for sig in signatures:
print sig
args, remaining = parse_args(sig[1:], {'(':')', '{':'}'}[sig[0]])
for key, source, root in args:
try:
value = literal_eval(root)
print '%s: %r' % (key, value)
except ValueError as e:
print '%s -> %s' % (key, e)
print repr(remaining), 'remains'
print
| 29.263158 | 80 | 0.510342 | import re
import ast
import operator
def parse_args(input, end=')'):
chunks = re.split(r'(,|%s)' % re.escape(end), input)
output = []
# Continue processing chunks as long as we keep getting something.
last_output = -1
while len(output) != last_output:
last_output = len(output)
# Extract kwarg name.
m = re.match(r'\s*(?::?([\w-]+)\s*=>?|(\*{1,2}))', chunks[0])
if m:
name = m.group(1) or m.group(2)
chunks[0] = chunks[0][m.end():]
else:
name = None
# Keep finding chunks until it compiles:
for i in range(1, len(chunks), 2):
source = ''.join(chunks[:i]).lstrip()
try:
parsed = ast.parse(source, mode='eval')
except SyntaxError as e:
continue
output.append((name, source, parsed))
next_delim = chunks[i]
chunks = chunks[i + 1:]
break
else:
break
if next_delim == end:
break
return output, ''.join(chunks)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {
'None': None,
'True': True,
'False': False,
'dict': dict,
'list': list,
'sorted': sorted
}
if isinstance(node_or_string, str):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, ast.Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, ast.List):
return list(map(_convert, node.elts))
elif isinstance(node, ast.Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, ast.Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, ast.BinOp):
left = _convert(node.left)
right = _convert(node.right)
op = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.div,
ast.Mod: operator.mod
}.get(type(node.op), None)
if op:
return op(left, right)
elif isinstance(node, ast.Call):
func = _convert(node.func)
args = map(_convert, node.args)
kwargs = dict((kw.arg, _convert(kw.value)) for kw in node.keywords)
if node.starargs:
args.extend(_convert(node.starargs))
if node.kwargs:
kwargs.update(_convert(node.kwargs))
return func(*args, **kwargs)
elif isinstance(node, ast.Attribute):
if not node.attr.startswith('_'):
return getattr(_convert(node.value), node.attr)
raise ValueError('malformed string: %r' % node)
return _convert(node_or_string)
if __name__ == '__main__':
signatures = '''
(1, 2, 3) more
(key='value') more
(**dict(key='value')) more
(*[1, 2, 3]) more
{:class => "code", :id => "message"} Hello
(class_='before %s after' % 'middle') hello
(data-crud=dict(id=34, url='/api')) crud goes here
(u'unicode!', b'bytes!')
(' '.join(['hello', 'there'])) after
([i for i in 'hello'])
'''.strip().splitlines()
for sig in signatures:
print sig
args, remaining = parse_args(sig[1:], {'(':')', '{':'}'}[sig[0]])
for key, source, root in args:
try:
value = literal_eval(root)
print '%s: %r' % (key, value)
except ValueError as e:
print '%s -> %s' % (key, e)
print repr(remaining), 'remains'
print
| 2,830 | 0 | 58 |
5e6887bbdad82231a81256d4d50e3159e34d107f | 8,506 | py | Python | old/dig1/sageutil.py | letonchanh/dig-m3 | 98b6f2bd6efeddbd8c047e77f4d3b506abf022b9 | [
"MIT"
] | null | null | null | old/dig1/sageutil.py | letonchanh/dig-m3 | 98b6f2bd6efeddbd8c047e77f4d3b506abf022b9 | [
"MIT"
] | null | null | null | old/dig1/sageutil.py | letonchanh/dig-m3 | 98b6f2bd6efeddbd8c047e77f4d3b506abf022b9 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from sage.all import (operator, flatten, PolynomialRing, SR, QQ, ZZ, RR, sage, oo)
from vu_common import (pause, get_logger,is_iterable, is_str, is_empty)
is_sage_expr = lambda x: isinstance(x, sage.symbolic.expression.Expression)
is_sage_real = lambda x: isinstance(x, sage.rings.real_mpfr.RealLiteral)
is_sage_int = lambda x: isinstance(x, sage.rings.integer.Integer)
is_sage_num = lambda x: is_sage_real(x) or is_sage_int(x)
def is_sage_inf(x):
"""
Example:
sage: is_sage_inf(oo)
True
sage: is_sage_inf(-oo)
True
sage: is_sage_inf(oo+3)
True
sage: is_sage_inf(oo-3)
True
sage: is_sage_inf(SR(-oo))
True
sage: is_sage_inf(x)
False
sage: is_sage_inf(x+3)
False
sage: is_sage_inf(8)
False
"""
try:
return x.is_infinity()
except AttributeError:
return x == oo or x == -oo
is_sage_int_inf = lambda x: is_sage_int(x) or is_sage_inf(x)
to_sage_int = lambda x: x if is_sage_int(x) else ZZ(x)
def is_sage_symbol(s):
"""
sage: assert is_sage_symbol(x)
sage: assert not is_sage_symbol(x+1)
sage: assert not is_sage_symbol(1)
"""
try:
return s.is_symbol()
except AttributeError:
return False
def is_sage_rel(f, rel=None):
"""
sage: assert not is_sage_rel(7.2)
sage: assert not is_sage_rel(x)
sage: assert not is_sage_rel(x+7)
sage: assert is_sage_rel(x==3,operator.eq)
sage: assert is_sage_rel(x<=3,operator.le)
sage: assert not is_sage_rel(x<=3,operator.lt)
sage: assert not is_sage_rel(x+3,operator.lt)
sage: y = var('y')
sage: assert is_sage_rel(x+y<=3)
"""
try:
if not f.is_relational():
return False
if rel is None:
return True
else:
return f.operator() == rel
except AttributeError:
return False
is_sage_eq = lambda f: is_sage_rel(f, operator.eq)
def get_vars(ps):
"""
Returns a list of uniq variables from a list of properties
Examples:
sage: var('a b c x')
(a, b, c, x)
sage: assert [a, b, c, x] == get_vars([x^(a*b) + a**2+b+2==0, c**2-b==100, b**2 + c**2 + a**3>= 1])
sage: assert get_vars(a**2+b+5*c+2==0) == [a, b, c]
sage: assert get_vars(x+x^2) == [x]
sage: assert get_vars([3]) == []
sage: assert get_vars((3,'x + c',x+b)) == [b, x]
"""
ps = ps if is_iterable(ps) else [ps]
vs = flatten([p.variables() for p in ps if is_sage_expr(p)])
return sorted(set(vs), key=str)
def get_coefs_terms(p, base_ring = QQ, as_dict=False):
"""
Returns the Coefs and Terms of a given expression
Examples:
sage: assert get_coefs_terms(x) == ([1], [x])
sage: assert get_coefs_terms(x,as_dict=True) == {x: 1}
sage: var('a b c')
(a, b, c)
sage: assert get_coefs_terms(a**2+b+5*c+2==0) == ([1, 1, 5, 2], [a^2, b, c, 1])
sage: assert get_coefs_terms(a**2+b+5*c+2==0, as_dict=True) == {b: 1, 1: 2, a^2: 1, c: 5}
sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2) == ([10/3, 3, 5, 2], [a^2, b, c, 1])
sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2, as_dict=True) == {b: 3, 1: 2, a^2: 10/3, c: 5}
sage: assert get_coefs_terms(a+b<=3, as_dict=True) == {1: -3, b: 1, a: 1}
sage: assert all(is_sage_int(v) for v in get_coefs_terms(a+b<=3, as_dict=True, base_ring=ZZ).values())
#sage 6.2 breaks this
#sage: assert get_coefs_terms(a - b <= oo) == ([1, -1, -infinity], [a, b, 1])
sage: assert get_coefs_terms(SR(7), as_dict=True) == {1: 7}
sage: assert get_coefs_terms(SR(3))==([3], [1])
sage: assert get_coefs_terms(SR(oo))==([+Infinity], [1])
sage: assert get_coefs_terms(SR(-oo)) == ([-Infinity], [1])
sage: assert get_coefs_terms(a + b <= .9,base_ring=ZZ) == ([1, 1, -0.900000000000000], [a, b, 1])
sage: assert is_sage_int(get_coefs_terms(SR(7),base_ring=ZZ,as_dict=True).values()[0])
"""
use_wrong_base_ring = False
if is_sage_rel(p):
p = mk_rhs_0(p).lhs()
if p.is_integer() or p.is_real():
ts = [SR(1)]
cs = [p if p.is_infinity() else base_ring(p)]
else:
ss = get_vars(p)
assert not is_empty(ss), (p,ss)
mk_pr = lambda b, p: PolynomialRing(b, ss, None if len(ss) >= 2 else 1)(p)
try:
pr_p = mk_pr(base_ring, p)
except TypeError:
if base_ring == RR:
#if cannot do over RR then return None
return None
else:
#otherwise, try with RR
try:
pr_p = mk_pr(RR,p)
use_wrong_base_ring = True
except Exception as msg:
return None
cs = pr_p.coefficients()
ts = map(SR, pr_p.monomials())
if use_wrong_base_ring:
ts = [SR(1) if bool(t.is_one()) else t for t in ts]
cs_ = []
for c in cs:
if c == oo:
cs_.append(oo)
elif c == -oo:
cs_.append(-oo)
else:
try:
cs_.append(base_ring(c))
except ValueError:
cs_.append(c)
except TypeError:
cs_.append(c)
cs = cs_
assert all(is_sage_expr(t) for t in ts), ts
if as_dict:
d = OrderedDict()
for t,c in zip(ts,cs):
d[t] = c
return d
else:
return cs,ts
def mk_rhs_0(p):
"""
sage: var('x,y')
(x, y)
sage: mk_rhs_0(x - y >= 3)
x - y - 3 >= 0
sage: mk_rhs_0(x - y - 3 >= 0)
x - y - 3 >= 0
sage: mk_rhs_0(0 <= x - y - 3)
-x + y + 3 <= 0
sage: mk_rhs_0(0 == x)
-x == 0
sage: mk_rhs_0(10 == -x)
x + 10 == 0
#Sage 5.11 broke all these (i.e., broke lhs.add(..,hold=))
# sage: mk_rhs_0(x <= oo)
# x - Infinity <= 0
# sage: mk_rhs_0(x <= -oo)
# x + +Infinity <= 0
# sage: mk_rhs_0(x >= oo)
# x - Infinity >= 0
# sage: mk_rhs_0(oo >= x)
# +Infinity - x >= 0
sage: mk_rhs_0(x - y - 3)
Traceback (most recent call last):
...
AssertionError: x - y - 3
"""
assert is_sage_rel(p), p
rhs = p.rhs()
lhs = p.lhs()
if not rhs.is_zero():
lhs = lhs.add(-rhs, hold=(rhs.is_infinity() or lhs.is_infinity()))
rhs = 0
p = p.operator()(lhs, rhs)
return p
# def myreduce(op, ls):
# """
# Apply operator op to list of arguments
# Note, it seems the above arguments are *enough*, no need to implement for (-,div) etc because the function that calls this will break x - y to myreduce(op,[x,-y]) or x / y to myreduce(op,[x,1/y]) and 1/y => mul(1,y^{-1})
# sage: assert myreduce(operator.add, [x,x]) == 2*x
# sage: assert myreduce(operator.add, [3,x]) == x + 3
# sage: myreduce(operator.le, [3,x])
# 3 <= x
# sage: assert myreduce(operator.pow,[3,x]) == 3^x
# """
# if __debug__:
# assert len(ls) >= 2, ls
# assert op in [operator.add,operator.mul,
# operator.pow,operator.eq,operator.ne,
# operator.le,operator.lt,operator.ge,operator.gt], op
# return reduce(lambda a, b: op(a,b), ls[1:], ls[0])
# def mk_expr(expr, d, ring_typ=ZZ):
# """
# Make a new expression like expr but with all vars in expr replaced
# with those in dictionary d. Used when subs() is not applicable
# sage: y = var('y')
# sage: lp = MixedIntegerLinearProgram()
# sage: s0 = lp['s0']
# sage: s1 = lp['s1']
# sage: d = {x:s0,y:s1}
# sage: mk_expr(x+y+3, d)
# 3 + x_0 + x_1
# sage: mk_expr(x+y+3<=8,d)
# 3 + x_0 + x_1 <= 8
# sage: mk_expr(x==y+5,d)
# x_0 == 5 + x_1
# """
# def retval(expr):
# if is_sage_symbol(expr): #symbol, e.g. x
# return d[expr]
# else: #const , e.g. 3
# return ring_typ(expr)
# try:
# oprs = expr.operands()
# except AttributeError:
# #e.g. const 3, .5
# return retval(expr)
# if is_empty(oprs): #symbol
# return retval(expr)
# else:
# oprs = [mk_expr(o,d) for o in oprs]
# print oprs
# rs = myreduce(expr.operator(), oprs)
# return rs
if __name__ == "__main__":
    # Run the embedded sage-style doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| 27.175719 | 229 | 0.53715 | from collections import OrderedDict
from sage.all import (operator, flatten, PolynomialRing, SR, QQ, ZZ, RR, sage, oo)
from vu_common import (pause, get_logger,is_iterable, is_str, is_empty)
# Type predicates for Sage objects: symbolic expressions, real literals,
# integers, and "any Sage number" (real or integer).
is_sage_expr = lambda x: isinstance(x, sage.symbolic.expression.Expression)
is_sage_real = lambda x: isinstance(x, sage.rings.real_mpfr.RealLiteral)
is_sage_int = lambda x: isinstance(x, sage.rings.integer.Integer)
is_sage_num = lambda x: is_sage_real(x) or is_sage_int(x)
def is_sage_inf(x):
    """Return True iff *x* represents positive or negative infinity.

    Example:
    sage: is_sage_inf(oo)
    True
    sage: is_sage_inf(-oo)
    True
    sage: is_sage_inf(oo+3)
    True
    sage: is_sage_inf(SR(-oo))
    True
    sage: is_sage_inf(x)
    False
    sage: is_sage_inf(x+3)
    False
    sage: is_sage_inf(8)
    False
    """
    checker = getattr(x, 'is_infinity', None)
    if checker is not None:
        return checker()
    # plain Python numbers carry no is_infinity(); compare against +/-oo instead
    return x == oo or x == -oo
# Predicate: Sage integer OR infinity; and a coercion helper into Sage's ZZ ring.
is_sage_int_inf = lambda x: is_sage_int(x) or is_sage_inf(x)
to_sage_int = lambda x: x if is_sage_int(x) else ZZ(x)
def is_sage_symbol(s):
    """Return True iff *s* is a single symbolic variable (not a compound
    expression and not a plain number).

    sage: assert is_sage_symbol(x)
    sage: assert not is_sage_symbol(x+1)
    sage: assert not is_sage_symbol(1)
    """
    checker = getattr(s, 'is_symbol', None)
    # objects without an is_symbol() method (ints, strings, ...) are not symbols
    return False if checker is None else checker()
def is_sage_rel(f, rel=None):
    """Return True iff *f* is a relational expression (==, !=, <=, <, >=, >).
    When *rel* is given, additionally require that exact relational operator.

    sage: assert not is_sage_rel(7.2)
    sage: assert not is_sage_rel(x)
    sage: assert not is_sage_rel(x+7)
    sage: assert is_sage_rel(x==3,operator.eq)
    sage: assert is_sage_rel(x<=3,operator.le)
    sage: assert not is_sage_rel(x<=3,operator.lt)
    sage: assert not is_sage_rel(x+3,operator.lt)
    sage: y = var('y')
    sage: assert is_sage_rel(x+y<=3)
    """
    try:
        # non-Sage objects have no is_relational() and fall through to False
        if f.is_relational():
            return True if rel is None else f.operator() == rel
        return False
    except AttributeError:
        return False
# Specialization of is_sage_rel: True only for equality relations (lhs == rhs).
is_sage_eq = lambda f: is_sage_rel(f, operator.eq)
def get_vars(ps):
    """Return the sorted (by string form) unique variables appearing in the
    property *ps*, or in a collection of properties.

    Examples:
    sage: var('a b c x')
    (a, b, c, x)
    sage: assert [a, b, c, x] == get_vars([x^(a*b) + a**2+b+2==0, c**2-b==100, b**2 + c**2 + a**3>= 1])
    sage: assert get_vars(a**2+b+5*c+2==0) == [a, b, c]
    sage: assert get_vars(x+x^2) == [x]
    sage: assert get_vars([3]) == []
    sage: assert get_vars((3,'x + c',x+b)) == [b, x]
    """
    if not is_iterable(ps):
        ps = [ps]
    # non-symbolic entries (plain ints, strings, ...) contribute no variables
    sym_exprs = [p for p in ps if is_sage_expr(p)]
    all_vars = flatten([e.variables() for e in sym_exprs])
    return sorted(set(all_vars), key=str)
def get_coefs_terms(p, base_ring = QQ, as_dict=False):
    """
    Returns the Coefs and Terms of a given expression
    Examples:
    sage: assert get_coefs_terms(x) == ([1], [x])
    sage: assert get_coefs_terms(x,as_dict=True) == {x: 1}
    sage: var('a b c')
    (a, b, c)
    sage: assert get_coefs_terms(a**2+b+5*c+2==0) == ([1, 1, 5, 2], [a^2, b, c, 1])
    sage: assert get_coefs_terms(a**2+b+5*c+2==0, as_dict=True) == {b: 1, 1: 2, a^2: 1, c: 5}
    sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2) == ([10/3, 3, 5, 2], [a^2, b, c, 1])
    sage: assert get_coefs_terms(10/3*a**2+3*b+5*c+2, as_dict=True) == {b: 3, 1: 2, a^2: 10/3, c: 5}
    sage: assert get_coefs_terms(a+b<=3, as_dict=True) == {1: -3, b: 1, a: 1}
    sage: assert all(is_sage_int(v) for v in get_coefs_terms(a+b<=3, as_dict=True, base_ring=ZZ).values())
    #sage 6.2 breaks this
    #sage: assert get_coefs_terms(a - b <= oo) == ([1, -1, -infinity], [a, b, 1])
    sage: assert get_coefs_terms(SR(7), as_dict=True) == {1: 7}
    sage: assert get_coefs_terms(SR(3))==([3], [1])
    sage: assert get_coefs_terms(SR(oo))==([+Infinity], [1])
    sage: assert get_coefs_terms(SR(-oo)) == ([-Infinity], [1])
    sage: assert get_coefs_terms(a + b <= .9,base_ring=ZZ) == ([1, 1, -0.900000000000000], [a, b, 1])
    sage: assert is_sage_int(get_coefs_terms(SR(7),base_ring=ZZ,as_dict=True).values()[0])
    """
    # Tracks whether we had to fall back to RR because `base_ring` rejected p
    # (e.g. a float coefficient offered to ZZ/QQ).
    use_wrong_base_ring = False
    # Relations are first normalized to `lhs' <rel> 0` and only the lhs is kept.
    if is_sage_rel(p):
        p = mk_rhs_0(p).lhs()
    if p.is_integer() or p.is_real():
        # Constant expression: single term 1 with the constant as coefficient.
        # Infinities are kept as-is since they cannot be coerced into base_ring.
        ts = [SR(1)]
        cs = [p if p.is_infinity() else base_ring(p)]
    else:
        ss = get_vars(p)
        assert not is_empty(ss), (p,ss)
        # Coerce p into a polynomial ring over `b` in its variables; the
        # explicit "1" is needed because PolynomialRing treats a single
        # variable name differently from a list of names.
        mk_pr = lambda b, p: PolynomialRing(b, ss, None if len(ss) >= 2 else 1)(p)
        try:
            pr_p = mk_pr(base_ring, p)
        except TypeError:
            if base_ring == RR:
                #if cannot do over RR then return None
                return None
            else:
                #otherwise, try with RR
                try:
                    pr_p = mk_pr(RR,p)
                    use_wrong_base_ring = True
                except Exception as msg:
                    return None
        cs = pr_p.coefficients()
        # NOTE(review): `ts` is consumed twice below; this relies on Python 2
        # semantics where map() returns a list (the file predates Python 3).
        ts = map(SR, pr_p.monomials())
        if use_wrong_base_ring:
            ts = [SR(1) if bool(t.is_one()) else t for t in ts]
            # Best effort: coerce each RR coefficient back into the requested
            # base_ring, keeping +/-oo and non-coercible values unchanged.
            cs_ = []
            for c in cs:
                if c == oo:
                    cs_.append(oo)
                elif c == -oo:
                    cs_.append(-oo)
                else:
                    try:
                        cs_.append(base_ring(c))
                    except ValueError:
                        cs_.append(c)
                    except TypeError:
                        cs_.append(c)
            cs = cs_
    assert all(is_sage_expr(t) for t in ts), ts
    if as_dict:
        # Map each term to its coefficient, preserving term order.
        d = OrderedDict()
        for t,c in zip(ts,cs):
            d[t] = c
        return d
    else:
        return cs,ts
def mk_rhs_0(p):
    """Normalize a relational expression so its right-hand side is 0,
    i.e. rewrite `lhs <rel> rhs` as `(lhs - rhs) <rel> 0`.

    sage: var('x,y')
    (x, y)
    sage: mk_rhs_0(x - y >= 3)
    x - y - 3 >= 0
    sage: mk_rhs_0(x - y - 3 >= 0)
    x - y - 3 >= 0
    sage: mk_rhs_0(0 <= x - y - 3)
    -x + y + 3 <= 0
    sage: mk_rhs_0(0 == x)
    -x == 0
    sage: mk_rhs_0(10 == -x)
    x + 10 == 0
    sage: mk_rhs_0(x - y - 3)
    Traceback (most recent call last):
    ...
    AssertionError: x - y - 3
    """
    assert is_sage_rel(p), p
    lhs, rhs = p.lhs(), p.rhs()
    if rhs.is_zero():
        # already normalized; rebuild with the same operator and return
        return p.operator()(lhs, rhs)
    # move rhs across; hold=True keeps +/-oo arithmetic unevaluated so the
    # infinities survive symbolically
    hold = rhs.is_infinity() or lhs.is_infinity()
    shifted = lhs.add(-rhs, hold=hold)
    return p.operator()(shifted, 0)
# def myreduce(op, ls):
# """
# Apply operator op to list of arguments
# Note, it seems the above arguments are *enough*, no need to implement for (-,div) etc because the function that calls this will break x - y to myreduce(op,[x,-y]) or x / y to myreduce(op,[x,1/y]) and 1/y => mul(1,y^{-1})
# sage: assert myreduce(operator.add, [x,x]) == 2*x
# sage: assert myreduce(operator.add, [3,x]) == x + 3
# sage: myreduce(operator.le, [3,x])
# 3 <= x
# sage: assert myreduce(operator.pow,[3,x]) == 3^x
# """
# if __debug__:
# assert len(ls) >= 2, ls
# assert op in [operator.add,operator.mul,
# operator.pow,operator.eq,operator.ne,
# operator.le,operator.lt,operator.ge,operator.gt], op
# return reduce(lambda a, b: op(a,b), ls[1:], ls[0])
# def mk_expr(expr, d, ring_typ=ZZ):
# """
# Make a new expression like expr but with all vars in expr replaced
# with those in dictionary d. Used when subs() is not applicable
# sage: y = var('y')
# sage: lp = MixedIntegerLinearProgram()
# sage: s0 = lp['s0']
# sage: s1 = lp['s1']
# sage: d = {x:s0,y:s1}
# sage: mk_expr(x+y+3, d)
# 3 + x_0 + x_1
# sage: mk_expr(x+y+3<=8,d)
# 3 + x_0 + x_1 <= 8
# sage: mk_expr(x==y+5,d)
# x_0 == 5 + x_1
# """
# def retval(expr):
# if is_sage_symbol(expr): #symbol, e.g. x
# return d[expr]
# else: #const , e.g. 3
# return ring_typ(expr)
# try:
# oprs = expr.operands()
# except AttributeError:
# #e.g. const 3, .5
# return retval(expr)
# if is_empty(oprs): #symbol
# return retval(expr)
# else:
# oprs = [mk_expr(o,d) for o in oprs]
# print oprs
# rs = myreduce(expr.operator(), oprs)
# return rs
if __name__ == "__main__":
    # Run the embedded sage-style doctests when this module is executed directly.
    import doctest
    doctest.testmod()
| 0 | 0 | 0 |
2d9d4e84698d3d2c0c263817fbcf9fb9d2d85765 | 142 | py | Python | src/skdh/io/_extensions/__init__.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | 1 | 2022-03-31T20:56:49.000Z | 2022-03-31T20:56:49.000Z | src/skdh/io/_extensions/__init__.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | null | null | null | src/skdh/io/_extensions/__init__.py | PfizerRD/scikit-digital-health | f834a82d750d9e3cdd35f4f5692a0a388210b821 | [
"MIT"
] | null | null | null | from .read import read_axivity, read_geneactiv
from .gt3x_convert import read_gt3x
__all__ = ("read_axivity", "read_geneactiv", "read_gt3x")
| 28.4 | 57 | 0.795775 | from .read import read_axivity, read_geneactiv
from .gt3x_convert import read_gt3x
__all__ = ("read_axivity", "read_geneactiv", "read_gt3x")
| 0 | 0 | 0 |
52f5eda8651e076c601fe495f7f73ef9cf7c5ced | 3,552 | py | Python | reports/migrations/0001_initial.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 3 | 2020-04-26T06:28:50.000Z | 2021-04-05T08:02:26.000Z | reports/migrations/0001_initial.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 10 | 2020-06-05T17:36:10.000Z | 2022-03-11T23:16:42.000Z | reports/migrations/0001_initial.py | CMU-TRP/podd-api | 6eb5c4598f848f75d131287163cd9babf2a0a0fc | [
"MIT"
] | 5 | 2021-04-08T08:43:49.000Z | 2021-11-27T06:36:46.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.gis.db.models.fields
| 39.466667 | 114 | 0.546734 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Initial schema for the reports app: administration areas, reports,
    report images, and report types.

    Auto-generated by Django's makemigrations; edit with care.
    """
    dependencies = [
        # Depends on whatever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Administrative regions. lft/rgt/tree_id/depth look like nested-set
        # tree bookkeeping fields (treebeard-style) -- TODO confirm which
        # tree library manages them.
        migrations.CreateModel(
            name='AdministrationArea',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('lft', models.PositiveIntegerField(db_index=True)),
                ('rgt', models.PositiveIntegerField(db_index=True)),
                ('tree_id', models.PositiveIntegerField(db_index=True)),
                ('depth', models.PositiveIntegerField(db_index=True)),
                ('name', models.CharField(max_length=200)),
                # GeoDjango point, WGS84 (srid=4326)
                ('location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # A single submitted report with two geolocations and free-form data.
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('report_id', models.IntegerField()),
                ('guid', models.TextField()),
                ('report_location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('administration_location', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('date', models.DateField()),
                ('incident_date', models.DateField()),
                ('form_data', models.TextField(blank=True)),
                ('negative_flag', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('administration_area', models.ForeignKey(to='reports.AdministrationArea')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Image attachments belonging to a report.
        migrations.CreateModel(
            name='ReportImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('note', models.TextField()),
                ('image_url', models.URLField()),
                ('thumbnail_url', models.URLField()),
                ('report', models.ForeignKey(to='reports.Report')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Versioned report-type definitions (form schema stored as text).
        migrations.CreateModel(
            name='ReportType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
                ('form_definition', models.TextField()),
                ('version', models.IntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # FKs added after both sides exist to break the creation-order cycle.
        migrations.AddField(
            model_name='report',
            name='type',
            field=models.ForeignKey(to='reports.ReportType'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='report',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
| 0 | 3,346 | 23 |
e1dfe9ea6751bada26fc5b052be6b8b9471c1399 | 1,203 | py | Python | project_template/{{cookiecutter.app_name}}/setup.py | pvandyken/snakebids | 10186d116dc016769e6b43c67f10e0d50264d053 | [
"MIT"
] | null | null | null | project_template/{{cookiecutter.app_name}}/setup.py | pvandyken/snakebids | 10186d116dc016769e6b43c67f10e0d50264d053 | [
"MIT"
] | null | null | null | project_template/{{cookiecutter.app_name}}/setup.py | pvandyken/snakebids | 10186d116dc016769e6b43c67f10e0d50264d053 | [
"MIT"
] | null | null | null | import setuptools
import json

# Long description shown on PyPI; pin UTF-8 so the build does not depend on
# the machine's locale default encoding.
with open("README.rst", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Package metadata lives in the BIDS-style pipeline description file.
with open('pipeline_description.json', 'r', encoding="utf-8") as fh:
    pipeline = json.load(fh)

# Hoist the repeated pipeline['GeneratedBy'][0] lookups.
generated_by = pipeline['GeneratedBy'][0]
name = generated_by['Name']
description = pipeline['Name']
version = generated_by['Version']
url = generated_by['CodeURL']
author = generated_by['Author']
author_email = generated_by['AuthorEmail']

setuptools.setup(
    name=name,
    version=version,
    author=author,
    author_email=author_email,
    description=description,
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url=url,
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={'console_scripts': [
        '{{cookiecutter.app_name}}={{cookiecutter.app_name}}.run:main'
    ]},
    install_requires=[
        "snakebids>={{cookiecutter.snakebids_version}}",
        "snakemake"
    ],
    python_requires='>=3.7'
)
| 29.341463 | 70 | 0.654198 | import setuptools
import json
with open("README.rst", "r") as fh:
long_description = fh.read()
with open('pipeline_description.json', 'r') as fh:
pipeline = json.load(fh)
name = pipeline['GeneratedBy'][0]['Name']
description = pipeline['Name']
version = pipeline['GeneratedBy'][0]['Version']
url = pipeline['GeneratedBy'][0]['CodeURL']
author = pipeline['GeneratedBy'][0]['Author']
author_email = pipeline['GeneratedBy'][0]['AuthorEmail']
setuptools.setup(
name=name,
version=version,
author=author,
author_email=author_email,
description=description,
long_description=long_description,
long_description_content_type="text/x-rst",
url=url,
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={'console_scripts': [
'{{cookiecutter.app_name}}={{cookiecutter.app_name}}.run:main'
]},
install_requires=[
"snakebids>={{cookiecutter.snakebids_version}}",
"snakemake"
],
python_requires='>=3.7'
)
| 0 | 0 | 0 |
8e6325079667283a268285f4f7694919da11f536 | 7,332 | py | Python | yales2/droplet-AMR-opt/pymooCFD/util/handleData.py | gmclove/pymoo-CFD | fd4a7b1335db33b467fe975ec804f4b055de407c | [
"Apache-2.0"
] | null | null | null | yales2/droplet-AMR-opt/pymooCFD/util/handleData.py | gmclove/pymoo-CFD | fd4a7b1335db33b467fe975ec804f4b055de407c | [
"Apache-2.0"
] | null | null | null | yales2/droplet-AMR-opt/pymooCFD/util/handleData.py | gmclove/pymoo-CFD | fd4a7b1335db33b467fe975ec804f4b055de407c | [
"Apache-2.0"
] | null | null | null | from pymooCFD.setupOpt import checkpointFile, dataDir, nCP, archDir, \
preProcDir, cluster
from pymooCFD.util.sysTools import removeDir #, makeDir, emptyDir
from pymooCFD.setupCFD import runCase
import numpy as np
import time
import os
import tarfile
from dask.distributed import Client
from sys import exit
# def getGen(checkpointFile=checkpointFile):
# try:
# loadCP(checkpointFile=checkpointFile)
# except FileNotFoundError as err:
# print(err)
# return 0
# def popGen(gen, checkpointFile=checkpointFile):
# '''
# Parameters
# ----------
# gen : int
# generation you wish to get population from
# checkpointFile : str, optional
# checkpoint file path where Algorithm object was saved using numpy.save().
# The default is checkpointFile (defined in beginning of setupOpt.py).
# Returns
# -------
# pop :
# Contains StaticProblem object with population of individuals from
# generation <gen>.
# Notes
# -----
# - development needed to handle constraints
# '''
# alg = loadCP(checkpointFile=checkpointFile)
# X = alg.callback.data['var'][gen]
# F = alg.callback.data['obj'][gen]
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F), pop) # , G=G), pop)
# return pop, alg
# def loadTxt(fileX, fileF, fileG=None):
# print(f'Loading population from files {fileX} and {fileF}...')
# X = np.loadtxt(fileX)
# F = np.loadtxt(fileF)
# # F = np.loadtxt(f'{dataDir}/{fileF}')
# if fileG is not None:
# # G = np.loadtxt(f'{dataDir}/{fileG}')
# G = np.loadtxt(fileG)
# else:
# G = None
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F, G=G), pop)
# from pymooCFD.setupOpt import pop_size
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=pop_size, sampling=pop)
# from pymoo.algorithms.nsga2 import NSGA2
# algorithm = NSGA2(pop_size=pop_size, sampling=pop)
# return algorithm
# def restartGen(gen, checkpointFile=checkpointFile):
# pop, alg = popGen(gen, checkpointFile=checkpointFile)
# alg.sampling()
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=100, sampling=pop)
# from pymoo.optimize import minimize
# from pymooCFD.setupOpt import problem
# res = minimize(problem,
# alg,
# ('n_gen', 10),
# seed=1,
# verbose=True)
# return res
# def loadTxt():
# try:
# print('Loading from text files')
# X = np.loadtxt('var.txt')
# F = np.loadtxt('obj.txt')
# except OSError as err:
# print(err)
# print('Failed to load text files')
# print('Data loading failed returning "None, None"...')
# return None, None
# def archive(dirName, archName = 'archive.tar.gz'):
# with tarfile.open(archName, 'a') as tar:
# tar.add(dirName)
# compressDir('../../dump')
# print('creating archive')
# out = tarfile.open('example.tar.gz', mode='a')
# try:
# print('adding README.txt')
# out.add('../dump')
# finally:
# print('closing tar archive')
# out.close()
#
# print('Contents of archived file:')
# t = tarfile.open('example.tar.gz', 'r')
# for member in t.getmembers():
# print(member.name)
| 30.677824 | 88 | 0.635161 | from pymooCFD.setupOpt import checkpointFile, dataDir, nCP, archDir, \
preProcDir, cluster
from pymooCFD.util.sysTools import removeDir #, makeDir, emptyDir
from pymooCFD.setupCFD import runCase
import numpy as np
import time
import os
import tarfile
from dask.distributed import Client
from sys import exit
# def getGen(checkpointFile=checkpointFile):
# try:
# loadCP(checkpointFile=checkpointFile)
# except FileNotFoundError as err:
# print(err)
# return 0
def archive(dirToComp, archDir=archDir, background=True):
    """Compress *dirToComp* into *archDir*; optionally in a background process.

    With background=True the compression runs in a separate process so the
    caller is not blocked; otherwise it runs inline.
    """
    if not background:
        compressDir(dirToComp, archDir)
        return
    from multiprocessing import Process
    worker = Process(target=compressDir, args=(dirToComp, archDir))
    worker.start()
def compressDir(dirToComp, archDir):
    """Tar-gzip *dirToComp* into *archDir* as '<basename>_<yymmdd-HHMM>.tar.gz',
    then delete the source directory.
    """
    print(f'{dirToComp} compression started')
    # destination file naming
    timestr = time.strftime("%y%m%d-%H%M")
    # BUG FIX: the old `dirToComp[dirToComp.rindex("/"):]` kept the leading
    # '/', which made os.path.join(archDir, '/name...') discard archDir and
    # write the archive at the filesystem root. Use the path basename instead
    # (rstrip handles a trailing slash; fall back to the raw name if empty).
    base = os.path.basename(dirToComp.rstrip('/')) or dirToComp
    fname = f'{base}_{timestr}'
    # concatenate compression file path and name
    compFile = os.path.join(archDir, f'{fname}.tar.gz')
    with tarfile.open(compFile, 'w:gz') as tar:
        tar.add(dirToComp)
    print(f'{dirToComp} compression finished')
    removeDir(dirToComp)
def saveData(algorithm):
    """Persist the optimizer state for the current generation.

    Writes a rolling `checkpoint.npy` every generation, a static
    `checkpoint-gen<N>.npy` for generation 1 and then every nCP-th
    generation, and plain-text dumps of the design variables (X) and
    objectives (F) for post-processing.
    """
    gen = algorithm.n_gen
    genX = algorithm.pop.get('X')
    genF = algorithm.pop.get('F')
    # rolling checkpoint, overwritten each generation
    np.save(os.path.join(dataDir, 'checkpoint'), algorithm)
    # periodic static snapshot (gen % nCP == 1 covers gen 1, 1+nCP, ...)
    if gen % nCP == 1:
        np.save(f"{dataDir}/checkpoint-gen{gen}", algorithm)
    # text dumps give more options for post-processing the data
    for tag, values in (('X', genX), ('F', genF)):
        with open(f'{dataDir}/gen{gen}{tag}.txt', "w+") as fh:
            np.savetxt(fh, values)
def loadCP(checkpointFile=checkpointFile, hasTerminated=False):
    """Restore a saved pymoo algorithm object from a numpy pickle checkpoint.

    Raises a generic Exception if the checkpoint file does not exist.
    """
    try:
        # np.save stores the object inside a 0-d array; flatten + tuple-unpack
        # recovers the single algorithm instance
        checkpoint, = np.load(checkpointFile, allow_pickle=True).flatten()
        # only necessary if for the checkpoint the termination criterion has been met
        checkpoint.has_terminated = hasTerminated
        alg = checkpoint
        # Update any changes made to the algorithms between runs
        from pymooCFD.setupOpt import pop_size, n_offsprings, xl, xu
        alg.pop_size = pop_size
        alg.n_offsprings = n_offsprings
        alg.problem.xl = xl
        alg.problem.xu = xu
        return alg
    except FileNotFoundError as err:
        print(err)
        raise Exception(f'{checkpointFile} load failed.')
        # return None
def printArray(array, labels, title):
    """Print one line of the form 'title  - label0: v0 / label1: v1 / '."""
    pieces = [f'{label}: {array[i]} / ' for i, label in enumerate(labels)]
    print(title, ' - ', end='')
    # single trailing print() supplies the newline, matching the original
    print(''.join(pieces))
def runPop(X):
    """Evaluate every design vector in *X* as a CFD case on the dask cluster.

    Returns the objective values stacked row-wise (one row per individual).
    """
    client = Client(cluster())
    try:
        def fun(x_i, x):
            # each individual gets its own pre-processing case directory
            caseDir = os.path.join(preProcDir, f'lim_perm_sim-{x_i}')
            return runCase(caseDir, x)
        jobs = [client.submit(fun, x_i, x) for x_i, x in enumerate(X)]
        # np.row_stack is a deprecated alias of np.vstack (removed in NumPy 2.0)
        obj = np.vstack([job.result() for job in jobs])
    finally:
        # FIX: close the dask client even when a case raises, so cluster
        # resources are not leaked on failure
        client.close()
    return obj
def loadTxt(folder, fname):
    """Load a whitespace-delimited numeric text file from *folder* via numpy."""
    return np.loadtxt(os.path.join(folder, fname))
def findKeywordLine(kw, file_lines):
    """Return (line, index) of the LAST line in *file_lines* containing *kw*.

    Returns (-1, -1) when no line matches. Note: later matches overwrite
    earlier ones, preserving the original last-match behavior.
    """
    kw_line, kw_line_i = -1, -1
    for idx, text in enumerate(file_lines):
        if kw in text:
            kw_line, kw_line_i = text, idx
    return kw_line, kw_line_i
# def popGen(gen, checkpointFile=checkpointFile):
# '''
# Parameters
# ----------
# gen : int
# generation you wish to get population from
# checkpointFile : str, optional
# checkpoint file path where Algorithm object was saved using numpy.save().
# The default is checkpointFile (defined in beginning of setupOpt.py).
# Returns
# -------
# pop :
# Contains StaticProblem object with population of individuals from
# generation <gen>.
# Notes
# -----
# - development needed to handle constraints
# '''
# alg = loadCP(checkpointFile=checkpointFile)
# X = alg.callback.data['var'][gen]
# F = alg.callback.data['obj'][gen]
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F), pop) # , G=G), pop)
# return pop, alg
# def loadTxt(fileX, fileF, fileG=None):
# print(f'Loading population from files {fileX} and {fileF}...')
# X = np.loadtxt(fileX)
# F = np.loadtxt(fileF)
# # F = np.loadtxt(f'{dataDir}/{fileF}')
# if fileG is not None:
# # G = np.loadtxt(f'{dataDir}/{fileG}')
# G = np.loadtxt(fileG)
# else:
# G = None
# from pymoo.model.evaluator import Evaluator
# from pymoo.model.population import Population
# from pymoo.model.problem import StaticProblem
# # now the population object with all its attributes is created (CV, feasible, ...)
# pop = Population.new("X", X)
# pop = Evaluator().eval(StaticProblem(problem, F=F, G=G), pop)
# from pymooCFD.setupOpt import pop_size
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=pop_size, sampling=pop)
# from pymoo.algorithms.nsga2 import NSGA2
# algorithm = NSGA2(pop_size=pop_size, sampling=pop)
# return algorithm
# def restartGen(gen, checkpointFile=checkpointFile):
# pop, alg = popGen(gen, checkpointFile=checkpointFile)
# alg.sampling()
# # from pymoo.algorithms.so_genetic_algorithm import GA
# # the algorithm is now called with the population - biased initialization
# # algorithm = GA(pop_size=100, sampling=pop)
# from pymoo.optimize import minimize
# from pymooCFD.setupOpt import problem
# res = minimize(problem,
# alg,
# ('n_gen', 10),
# seed=1,
# verbose=True)
# return res
# def loadTxt():
# try:
# print('Loading from text files')
# X = np.loadtxt('var.txt')
# F = np.loadtxt('obj.txt')
# except OSError as err:
# print(err)
# print('Failed to load text files')
# print('Data loading failed returning "None, None"...')
# return None, None
# def archive(dirName, archName = 'archive.tar.gz'):
# with tarfile.open(archName, 'a') as tar:
# tar.add(dirName)
# compressDir('../../dump')
# print('creating archive')
# out = tarfile.open('example.tar.gz', mode='a')
# try:
# print('adding README.txt')
# out.add('../dump')
# finally:
# print('closing tar archive')
# out.close()
#
# print('Contents of archived file:')
# t = tarfile.open('example.tar.gz', 'r')
# for member in t.getmembers():
# print(member.name)
| 3,029 | 0 | 184 |
86b03430e41a966ea5e3eaf883b62becfb48eb41 | 409 | py | Python | _bak/more/test1.py | hello-sea/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 95 | 2018-04-13T03:34:51.000Z | 2022-03-30T10:10:28.000Z | _bak/more/test1.py | Dlaiven/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 3 | 2019-07-18T11:19:53.000Z | 2020-12-28T05:45:19.000Z | _bak/more/test1.py | Dlaiven/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 35 | 2018-07-27T09:21:18.000Z | 2021-11-30T02:13:01.000Z | from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
t = np.linspace(1, 201, 200, endpoint=False)
sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)
cwtmatr = signal.cwt(sig, signal.ricker, widths)
plt.imshow(cwtmatr, extent=[1, 201, 31, 1], cmap='PRGn', aspect='auto',
vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
plt.show()
| 34.083333 | 71 | 0.667482 | from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
t = np.linspace(1, 201, 200, endpoint=False)  # 200 samples over [1, 201)
# test signal: 7-cycle cosine plus a Gaussian-modulated pulse
sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)  # wavelet scales 1..30
# continuous wavelet transform with the Ricker ("Mexican hat") wavelet
# NOTE(review): scipy.signal.cwt/ricker were removed in SciPy 1.12 -- pin an
# older scipy or port to pywt if this script is still needed
cwtmatr = signal.cwt(sig, signal.ricker, widths)
# scalogram with color limits symmetric around zero
plt.imshow(cwtmatr, extent=[1, 201, 31, 1], cmap='PRGn', aspect='auto',
           vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
plt.show()
| 0 | 0 | 0 |
46a9510d72ae38ad38081220dacac06a010d2c9c | 124 | py | Python | nicos_mlz/toftof/setups/startup.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/toftof/setups/startup.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/toftof/setups/startup.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'minimal NICOS startup setup'
group = 'lowlevel'
sysconfig = dict(
cache = 'tofhw.toftof.frm2:14869',
)
| 15.5 | 43 | 0.693548 | description = 'minimal NICOS startup setup'
group = 'lowlevel'  # loaded implicitly by NICOS, not user-selectable
sysconfig = dict(
    cache = 'tofhw.toftof.frm2:14869',  # cache server host:port for this instrument
)
| 0 | 0 | 0 |
fc24227f0e215a87ee747c0150d6cd24927c0923 | 118 | py | Python | valhalla/__init__.py | Best10-study/Kakao-Valhalla | 9d60783f67f38882b9935c913af7018bd3087aa1 | [
"Apache-2.0"
] | 10 | 2018-11-11T13:09:06.000Z | 2019-08-13T11:01:33.000Z | valhalla/__init__.py | Best10-study/Kakao-Valhalla | 9d60783f67f38882b9935c913af7018bd3087aa1 | [
"Apache-2.0"
] | 4 | 2018-11-14T03:07:55.000Z | 2018-11-18T06:14:29.000Z | valhalla/__init__.py | Best10-study/Kakao-Valhalla | 9d60783f67f38882b9935c913af7018bd3087aa1 | [
"Apache-2.0"
] | 4 | 2018-11-16T06:51:37.000Z | 2020-02-22T20:14:56.000Z | from valhalla.extract import DataExtractor
from sklearn.pipeline import Pipeline
from ._transform import FeatureConcat | 39.333333 | 42 | 0.881356 | from valhalla.extract import DataExtractor
from sklearn.pipeline import Pipeline
from ._transform import FeatureConcat | 0 | 0 | 0 |
86f60d4ac6c228fc43e7b01916beec33a223dbf4 | 2,449 | py | Python | lab01/guessing_game_graph.py | ryandroll/CS61A_Practice | 01c955e670929be21fa0a085b935d1a43451f083 | [
"Apache-2.0"
] | null | null | null | lab01/guessing_game_graph.py | ryandroll/CS61A_Practice | 01c955e670929be21fa0a085b935d1a43451f083 | [
"Apache-2.0"
] | null | null | null | lab01/guessing_game_graph.py | ryandroll/CS61A_Practice | 01c955e670929be21fa0a085b935d1a43451f083 | [
"Apache-2.0"
] | null | null | null | """Guessing Game Visualization
You do not need to understand any of the code in this file.
"""
# This section avoids asking for user input.
import lab01
lab01.LOWER = 1
lab01.UPPER = 100
lab01.prompt_for_number = prompt_for_number
lab01.is_correct = is_correct
lab01.is_too_high = is_too_high
# This section runs an algorithm many times.
from collections import defaultdict
import sys
import webbrowser
def get_frequency(algorithm_name, runs=1000):
"""Collect frequencies and plot them."""
if not hasattr(lab01, algorithm_name):
raise ValueError('invalid guessing algorithm ({0})'.format(algorithm_name))
algorithm = getattr(lab01, algorithm_name)
counts = defaultdict(int)
for i in range(runs):
num_guesses = algorithm()
counts[num_guesses] += 1
most_guesses = max(counts)
if most_guesses == 1:
raise ValueError('num_guesses was always 1. Make sure your functions '
'are returning the correct number of guesses!')
xs = range(1, most_guesses+1)
ys = [sum(counts[i] for i in range(1, x+1)) for x in xs]
if algorithm_name == 'guess_binary':
x_axis_string = '|'.join(map(str, xs))
y_axis_string = ','.join(map(str, ys))
chxp = ','.join(map(str, range(int(100 / 2 / most_guesses)+1, 100, int(100 / most_guesses))))
data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,{3}|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2, chxp)
else:
step = max(most_guesses // 10, 1)
x_axis_string = '|'.join(map(str, range(0, most_guesses+1, step)))
y_axis_string = ','.join(map(str, ys))
data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,0|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2)
url = 'http://chart.googleapis.com/chart?cht=bvg&chtt={0}&chxt=x,y,x,y&chs=500x500&{1}&chds=a&chco=3072F3&chbh=a&chm=s,000000,0,-1,5|s,000000,1,-1,5&chdlp=l'.format(algorithm_name, data_string)
webbrowser.open_new(url)
if __name__ == "__main__":
file_name, algorithm_name = sys.argv
get_frequency(algorithm_name) | 37.106061 | 197 | 0.683953 | """Guessing Game Visualization
You do not need to understand any of the code in this file.
"""
# This section avoids asking for user input.
def prompt_for_number(lower, upper):
    """Pick a random integer in [lower, upper], remember it, and return it.

    Stands in for real user input so the guessing game can run unattended;
    the chosen value is stashed on the function object itself so that
    is_correct/is_too_high can check guesses against it.
    """
    from random import randint
    number = randint(lower, upper)
    prompt_for_number.number = number
    return number
def is_correct(guess):
    """Return True when *guess* equals the number last picked by prompt_for_number."""
    return guess == prompt_for_number.number
def is_too_high(guess):
    """Return True when *guess* exceeds the number last picked by prompt_for_number."""
    return guess > prompt_for_number.number
import lab01
lab01.LOWER = 1
lab01.UPPER = 100
lab01.prompt_for_number = prompt_for_number
lab01.is_correct = is_correct
lab01.is_too_high = is_too_high
# This section runs an algorithm many times.
from collections import defaultdict
import sys
import webbrowser
def get_frequency(algorithm_name, runs=1000):
    """Collect frequencies and plot them."""
    # Resolve the guessing strategy by name on the lab01 module.
    if not hasattr(lab01, algorithm_name):
        raise ValueError('invalid guessing algorithm ({0})'.format(algorithm_name))
    algorithm = getattr(lab01, algorithm_name)
    # counts[k] = number of runs that needed exactly k guesses
    counts = defaultdict(int)
    for i in range(runs):
        num_guesses = algorithm()
        counts[num_guesses] += 1
    most_guesses = max(counts)
    if most_guesses == 1:
        raise ValueError('num_guesses was always 1. Make sure your functions '
                         'are returning the correct number of guesses!')
    # Cumulative distribution: ys[x-1] = runs that finished in <= x guesses.
    xs = range(1, most_guesses+1)
    ys = [sum(counts[i] for i in range(1, x+1)) for x in xs]
    if algorithm_name == 'guess_binary':
        # Few buckets: label every x value, with evenly spaced label
        # positions (Google Image Charts chxl/chxp parameters).
        x_axis_string = '|'.join(map(str, xs))
        y_axis_string = ','.join(map(str, ys))
        chxp = ','.join(map(str, range(int(100 / 2 / most_guesses)+1, 100, int(100 / most_guesses))))
        data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,{3}|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2, chxp)
    else:
        # Many buckets: label only every `step`-th tick to stay readable.
        step = max(most_guesses // 10, 1)
        x_axis_string = '|'.join(map(str, range(0, most_guesses+1, step)))
        y_axis_string = ','.join(map(str, ys))
        data_string = 'chd=t:{0}&chxl=0:|{1}|2:|Max number of guesses|3:|Frequency|&chxp=0,0|2,50|3,{2}'.format(y_axis_string, x_axis_string, runs/2)
    # Render as a Google Image Charts bar chart in the default web browser.
    url = 'http://chart.googleapis.com/chart?cht=bvg&chtt={0}&chxt=x,y,x,y&chs=500x500&{1}&chds=a&chco=3072F3&chbh=a&chm=s,000000,0,-1,5|s,000000,1,-1,5&chdlp=l'.format(algorithm_name, data_string)
    webbrowser.open_new(url)
webbrowser.open_new(url)
if __name__ == "__main__":
file_name, algorithm_name = sys.argv
get_frequency(algorithm_name) | 227 | 0 | 69 |
2df6888285e22ed0f6b1ee4a5e5e52074a12b373 | 680 | py | Python | src/digolds/manipulate/drop_column.py | digolds/dp | 305089cbd2e933a0fff4d044485d591b6a2f3349 | [
"MIT"
] | null | null | null | src/digolds/manipulate/drop_column.py | digolds/dp | 305089cbd2e933a0fff4d044485d591b6a2f3349 | [
"MIT"
] | null | null | null | src/digolds/manipulate/drop_column.py | digolds/dp | 305089cbd2e933a0fff4d044485d591b6a2f3349 | [
"MIT"
] | null | null | null | import pandas as pd
name = 'drop-column'
if __name__ == "__main__":
data = [['tom', 10], ['nick', 15], ['juli', 15]]
df = pd.DataFrame(data, columns = ['Name', 'Age'])
args = {
'--columns':[
'Age',
'Name'
]
}
operator(df, args) | 22.666667 | 64 | 0.567647 | import pandas as pd
def _parse(args):
columns = args.get('--columns')
remain = args.get('--remain', False)
return [columns, remain]
def _drop_column(df, columns, remain):
if remain:
return df.filter(columns, axis=1).reset_index(drop=True)
else:
return df.drop(columns, axis=1).reset_index(drop=True)
name = 'drop-column'
def operator(df, args):
    """Apply the drop-column operation described by *args* to *df*.

    *args* uses '--columns' (list of names) and optional '--remain'
    (keep listed columns instead of dropping them).
    """
    return _drop_column(df, *_parse(args))
if __name__ == "__main__":
data = [['tom', 10], ['nick', 15], ['juli', 15]]
df = pd.DataFrame(data, columns = ['Name', 'Age'])
args = {
'--columns':[
'Age',
'Name'
]
}
operator(df, args) | 317 | 0 | 69 |
14d91785c4e47c8abaef5e5e52cdeb6c231349a9 | 422 | py | Python | sample_webapp/migrations/0003_foo_geojson.py | calocan/rescape_graphene | 20b35e7f7020470da7958e9beb1d10b82e1e20cd | [
"MIT"
] | 1 | 2021-05-08T12:04:54.000Z | 2021-05-08T12:04:54.000Z | sample_webapp/migrations/0003_foo_geojson.py | calocan/rescape_graphene | 20b35e7f7020470da7958e9beb1d10b82e1e20cd | [
"MIT"
] | 6 | 2021-04-08T18:27:30.000Z | 2022-02-10T10:30:50.000Z | sample_webapp/migrations/0003_foo_geojson.py | rescapes/rescape-graphene | 90ac2002636aa9700d5ea459d6aa0699487379f7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-11-02 22:15
from django.db import migrations, models
| 22.210526 | 53 | 0.599526 | # Generated by Django 2.0.7 on 2018-11-02 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-null ``geojson`` JSON field to the ``foo`` model."""
    dependencies = [
        ('sample_webapp', '0002_foo_geo_collection'),
    ]
    operations = [
        migrations.AddField(
            model_name='foo',
            name='geojson',
            # default=None was the one-off value supplied for existing rows;
            # preserve_default=False drops it from the final field definition.
            field=models.JSONField(default=None),
            preserve_default=False,
        ),
    ]
| 0 | 309 | 23 |
cc976d628e959da54d922c002f02bd4799ee0d6e | 6,751 | py | Python | model/resnet.py | collector-m/UniTrack | e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb | [
"MIT"
] | 240 | 2021-06-20T13:50:42.000Z | 2022-03-31T05:08:29.000Z | model/resnet.py | collector-m/UniTrack | e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb | [
"MIT"
] | 27 | 2021-07-12T01:19:39.000Z | 2021-12-27T08:05:08.000Z | model/resnet.py | collector-m/UniTrack | e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb | [
"MIT"
] | 24 | 2021-07-01T09:48:24.000Z | 2022-03-14T06:39:46.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torchvision.models.resnet as torch_resnet
from torchvision.models.resnet import BasicBlock, Bottleneck
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
| 42.727848 | 107 | 0.654273 | import torch
import torch.nn as nn
import torch.nn.functional as F
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
import torchvision.models.resnet as torch_resnet
from torchvision.models.resnet import BasicBlock, Bottleneck
model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
class ResNet(torch_resnet.ResNet):
    """torchvision ResNet subclass used as a dense feature extractor.

    :meth:`modify` strips the classifier head and can reduce stride /
    padding; :meth:`forward` then returns feature maps, tolerating any
    removed stages.
    """

    def __init__(self, *args, **kwargs):
        super(ResNet, self).__init__(*args, **kwargs)

    def modify(self, remove_layers=None, padding=''):
        """Adapt the network in place.

        Args:
            remove_layers: extra sub-module names to delete (set to None);
                'fc' and 'avgpool' are always removed.
            padding: '' leaves conv padding untouched, 'no' zeroes it,
                any other value is assigned as the conv ``padding_mode``.
        """
        # Copy the caller's list before extending it. This also fixes the
        # previous mutable-default bug: `remove_layers=[]` was extended with
        # `+=` in place, so repeated default calls accumulated entries.
        remove_layers = list(remove_layers) if remove_layers else []

        def filter_layers(names):
            # Keep only layers that still exist on this instance.
            return [l for l in names if getattr(self, l) is not None]

        # Set stride of layer3 and layer4 to 1 (from 2) to preserve the
        # spatial resolution of the output feature map.
        for layer in filter_layers(['layer3', 'layer4']):
            for m in getattr(self, layer).modules():
                if isinstance(m, torch.nn.Conv2d):
                    m.stride = tuple(1 for _ in m.stride)
        # Set padding (zeros or reflect, doesn't change much;
        # zeros requires lower temperature)
        if padding != '' and padding != 'no':
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d) and sum(m.padding) > 0:
                    m.padding_mode = padding
        elif padding == 'no':
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d) and sum(m.padding) > 0:
                    m.padding = (0, 0)
        # Remove extraneous layers (classifier head plus caller's extras).
        remove_layers += ['fc', 'avgpool']
        for layer in filter_layers(remove_layers):
            setattr(self, layer, None)

    def forward(self, x):
        """Return the backbone feature map, skipping removed stages."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = x if self.maxpool is None else self.maxpool(x)
        x = self.layer1(x)
        # When layer2 was removed, mimic its downsampling with avg-pool.
        x = F.avg_pool2d(x, (2, 2)) if self.layer2 is None else self.layer2(x)
        x = x if self.layer3 is None else self.layer3(x)
        x = x if self.layer4 is None else self.layer4(x)
        return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet variant, optionally loading its ImageNet weights."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    state = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    """ResNet-18 (BasicBlock, stage depths 2-2-2-2)."""
    layers = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, layers, pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs) -> ResNet:
    """ResNet-50 (Bottleneck, stage depths 3-4-6-3)."""
    layers = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, layers, pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """ResNet-101 (Bottleneck, stage depths 3-4-23-3)."""
    layers = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, layers, pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152 from `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): load ImageNet weights when True
        progress (bool): show a download progress bar on stderr
    """
    layers = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, layers, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-50 32x4d from `"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): load ImageNet weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """ResNeXt-101 32x8d from `"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.

    Args:
        pretrained (bool): load ImageNet weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2 from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Identical to ResNet-50 except the bottleneck inner channel count is
    doubled (outer 1x1 channels unchanged: 2048-1024-2048 in the last
    block vs 2048-512-2048).

    Args:
        pretrained (bool): load ImageNet weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 from `"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_.

    Identical to ResNet-101 except the bottleneck inner channel count is
    doubled (outer 1x1 channels unchanged).

    Args:
        pretrained (bool): load ImageNet weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| 2,226 | 13 | 203 |
f71fb300004e91ff987107bb558165bb8d7b340e | 14,538 | py | Python | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
########################################################################
for col_type in [dt, delta, num, bool_]:
col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()
@cat.register_check('sqlalchemy')
@cat.register_transform('sqlalchemy')
@id_.register_check('sqlalchemy')
@id_.register_transform('sqlalchemy')
########################################################################
@cat.register_metadata('sqlalchemy')
@id_.register_metadata('sqlalchemy')
@dt.register_metadata('sqlalchemy')
@delta.register_metadata('sqlalchemy')
@big_dt.register_metadata('sqlalchemy')
@num.register_metadata('sqlalchemy')
@bool_.register_metadata('sqlalchemy')
########################################################################
@lru_cache()
sa_type_2_col_type = {
sql.sqltypes.Integer: num,
sql.sqltypes.String: cat,
sql.sqltypes.Date: dt,
sql.sqltypes.DateTime: dt,
sql.sqltypes.Interval: delta,
sql.sqltypes.Numeric: num,
sql.sqltypes.Boolean: bool_
}
########################################################################
| 35.896296 | 155 | 0.623882 | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
def get_engine_metadata(engine):
    """Return the cached, engine-bound MetaData for *engine*, creating it
    on first use.

    Uses EAFP instead of the previous `in`-then-index double lookup.
    """
    try:
        return metadatas[engine]
    except KeyError:
        metadata = MetaData()
        metadata.bind = engine
        metadatas[engine] = metadata
        return metadata
def get_reflected_metadata(engine, schema_name=None):
    """Reflect every table visible on *engine* (optionally restricted to one
    database schema) into a fresh, engine-bound MetaData."""
    reflected = MetaData()
    reflected.reflect(bind=engine, schema=schema_name)
    reflected.bind = engine
    return reflected
########################################################################
# For these column types the pandas check/transform functions work unchanged
# for the 'sqlalchemy' storage target, so reuse a copy of that registry.
for col_type in [dt, delta, num, bool_]:
    col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()
# cat/id_ columns travel through sqlalchemy as plain object-dtype series.
@cat.register_check('sqlalchemy')
def _(col):
    return col.dtype == 'object'
@cat.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')
@id_.register_check('sqlalchemy')
def _(col):
    return col.dtype == 'object'
@id_.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')
########################################################################
# Metadata hooks: map each schema column type to the SQLAlchemy Column
# used when creating a table for it. All columns are nullable.
@cat.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Text, nullable=True)
@id_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Integer, nullable=True)
@dt.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@delta.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Interval, nullable=True)
@big_dt.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@num.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Float, nullable=True)
@bool_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Boolean, nullable=True)
########################################################################
@lru_cache()
def schema_as_table(schema, engine):
    """Translate a Schema into a SQLAlchemy Table bound to *engine*.

    Results are cached per (schema, engine) pair. A schema with the
    'temporary' option becomes a TEMPORARY table; 'db_schema' selects
    the database schema.
    """
    opts = schema.options
    prefixes = ['TEMPORARY'] if opts.get('temporary', False) else []
    sa_columns = [col.metadata('sqlalchemy') for col in schema.cols]
    return Table(schema.name, get_engine_metadata(engine), *sa_columns,
                 schema=opts.get('db_schema', None), prefixes=prefixes)
sa_type_2_col_type = {
sql.sqltypes.Integer: num,
sql.sqltypes.String: cat,
sql.sqltypes.Date: dt,
sql.sqltypes.DateTime: dt,
sql.sqltypes.Interval: delta,
sql.sqltypes.Numeric: num,
sql.sqltypes.Boolean: bool_
}
def table_as_schema(table):
    """Reverse of schema_as_table: derive a Schema from a SQLAlchemy Table.

    Columns whose SQL type matches no entry in sa_type_2_col_type are
    silently skipped.
    """
    schema_cols = []
    for sa_col in table.c:
        # NOTE(review): when a SQL type matches several entries via
        # isinstance, the first match in sa_type_2_col_type's insertion
        # order wins -- confirm the mapping order is intentional.
        for sa_type, col_type in sa_type_2_col_type.items():
            if isinstance(sa_col.type, sa_type):
                # Integer primary/foreign keys become id_ columns, not num.
                if isinstance(sa_col.type, sql.sqltypes.Integer) and (sa_col.primary_key or sa_col.foreign_keys):
                    schema_cols.append(id_(sa_col.name))
                else:
                    schema_cols.append(col_type(sa_col.name))
                break
    options = {}
    if table.schema is not None:
        options['db_schema'] = table.schema
    s = Schema(table.name, schema_cols, options=options)
    return s
########################################################################
def fast_sql_to_df(table, schema):
    """Load *table* into a DataFrame, preferring a dialect-specific fast path
    (MySQL OUTFILE / Postgres COPY) and falling back to odo otherwise."""
    dialect = table.bind.dialect.name
    if dialect == 'mysql':
        return fast_mysql_to_df(table, schema)
    if dialect == 'postgresql':
        return fast_postgresql_to_df(table, schema)
    # Generic fallback: load through odo, then restore column order.
    df = OdoDataStore(schema, table).load()
    return df[schema.col_names()]
def fast_mysql_to_df(table, schema):
    """Fast-path load of a MySQL table/selectable into a DataFrame.

    Dumps the rows server-side with SELECT ... INTO OUTFILE, then parses
    the resulting CSV with pandas. The temp file is always removed.
    """
    f = tempfile.NamedTemporaryFile('w', suffix='.csv', dir=config.data_dir+'tmp')
    try:
        # Only the reserved file *name* is needed; MySQL itself writes to
        # the path, which must not exist yet -- hence the immediate close.
        f.close()
        table_name = str(table)
        if not isinstance(table, Table):
            # Non-Table selectables must be wrapped as a derived table.
            table_name = '({})'.format(table_name)
        # BUG FIX: the OUTFILE path placeholder was a corrupted literal
        # ('(unknown)') even though filename=f.name was passed to .format.
        sql = """SELECT {cols} FROM {table} INTO OUTFILE '{filename}'
        FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
        ESCAPED BY '\\\\'
        LINES TERMINATED BY '\n'""".format(
            cols=', '.join('`'+colname+'`' for colname in schema.col_names()),
            filename=f.name,
            table=table_name)
        table.bind.execute(sql)
        # OUTFILE emits \N for NULLs and no header row.
        df = pandas.read_csv(f.name, header=None, names=schema.col_names(), na_values=['\\N'])
    finally:
        os.remove(f.name)
    for col in schema.cols:
        if isinstance(col, dt):
            # errors='coerce' replaces the long-removed coerce=True kwarg;
            # unparseable values become NaT instead of raising.
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
        if isinstance(col, big_dt):
            # big_dt values may fall outside pandas' Timestamp range, so
            # parse them into plain datetime objects instead.
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_df(table, schema):
    """Fast-path load of a Postgres table/selectable into a DataFrame
    using COPY ... TO STDOUT streamed into an in-memory CSV buffer."""
    engine = table.bind
    conn = engine.raw_connection()
    with conn.cursor() as cur:
        with io.StringIO() as f:
            table_name = str(table)
            if not isinstance(table, Table):
                # Non-Table selectables must be parenthesized for COPY.
                table_name = '({})'.format(table_name)
            sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
                table_name=table_name)
            cur.copy_expert(sql, f)
            f.seek(0)
            df = pandas.read_csv(f)
    for col in schema.cols:
        if isinstance(col, dt):
            # errors='coerce' replaces the long-removed coerce=True kwarg;
            # unparseable values become NaT instead of raising.
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
        if isinstance(col, big_dt):
            # big_dt values may fall outside pandas' Timestamp range, so
            # parse them into plain datetime objects instead.
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_csv(table, file_path):
    """Stream *table* (or a selectable) to a CSV file at *file_path* using
    Postgres COPY, including a header row."""
    conn = table.bind.raw_connection()
    with conn.cursor() as cur, open(file_path, 'w') as out:
        table_name = str(table)
        if not isinstance(table, Table):
            table_name = '({})'.format(table_name)
        copy_sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
            table_name=table_name)
        cur.copy_expert(copy_sql, out)
def fast_df_to_sql(df, table, schema):
    """Bulk-store *df* into *table* by delegating to odo's sqlalchemy path."""
    ods = OdoDataStore(schema, table, storage_target_type='sqlalchemy')
    ods.store(df)
class SATableDataStore(DataStore):
    """DataStore backed by a single SQL table, optionally filtered.

    When `where_clauses` is given the store is effectively read-only:
    load/to_csv apply the filters, while store/update/delete refuse to run.
    """

    def __init__(self, schema, engine, where_clauses=None):
        super().__init__(schema)
        self.engine = engine
        self.table = schema_as_table(self.schema, self.engine)
        self.where_clauses = where_clauses

    def storage_target(self):
        return 'sqlalchemy'

    def _filtered_query(self):
        # Shared by _load/to_csv: the raw table, or a SELECT with every
        # where_clause applied.
        query = self.table
        if self.where_clauses is not None:
            query = query.select()
            for where_clause in self.where_clauses:
                query = query.where(where_clause)
        return query

    def _load(self):
        """Load the (filtered) table into a DataFrame."""
        return fast_sql_to_df(self._filtered_query(), self.schema)

    def to_csv(self, file_path):
        """Dump the (filtered) table directly to CSV; Postgres only."""
        if self.engine.dialect.name != 'postgresql':
            raise NotImplementedError('converting directly to csv not supported for non-postgres databases')
        fast_postgresql_to_csv(self._filtered_query(), file_path)

    def _store(self, df):
        """Write *df* into the table."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot store to a query (where_clauses must be left blank)')
        df = df.copy()
        # BUG FIX: df was previously omitted from this call
        # (fast_df_to_sql(self.table, self.schema)), which raised TypeError.
        fast_df_to_sql(df, self.table, self.schema)

    def _update(self, df):
        """Upsert *df* into the table, matching rows on the schema's index."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot update to a query (where_clauses must be left blank)')
        df = df.copy()
        with self.engine.connect() as conn:
            # Stage the incoming rows in a TEMPORARY shadow table.
            temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
            temp_schema.options['temporary'] = True
            temp_table = schema_as_table(temp_schema, self.engine)
            print('storing new df in temp table')
            fast_df_to_sql(df, temp_table, temp_schema)
            print('updating table from matching rows')
            index = self.schema.options['index']
            # 1) overwrite rows whose index already exists
            update = self.table.update(
                values={
                    col_name: temp_table.c[col_name] for col_name in self.schema.col_names()
                },
                whereclause=self.table.c[index] == temp_table.c[index]
            )
            update_res = conn.execute(update)
            print('inserting new rows into table')
            # 2) insert the rows whose index is new
            exists_query = self.table.select().where(self.table.c[index] == temp_table.c[index]).exists()
            insert = self.table.insert().from_select(
                temp_schema.col_names(),
                temp_table.select().where(~exists_query))
            ins_res = conn.execute(insert)

    def delete(self):
        """Drop the underlying table."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot delete a query (where_clauses must be left blank)')
        self.table.drop(self.engine)
class SAJoinDataStore(DataStore):
    """DataStore that loads a root table outer-joined with its has-many
    ('has') and belongs-to related tables, via a staging temp table.

    Each entry of has_schemas/belongs_to_schemas may be a Schema or a
    (Schema, extra_join_conditions) tuple. The resulting schema is the
    prefixed union of all participating schemas.
    """
    def __init__(self, root_schema, engine, has_schemas=None, belongs_to_schemas=None, root_conditions=None, where_clauses=None):
        self.engine = engine
        self.root_schema = root_schema
        self.root_table = schema_as_table(self.root_schema, self.engine)
        self.has_schemas, self.has_join_conditions = self._parse_schema_list(has_schemas)
        self.has_tables = [schema_as_table(h_schema, self.engine) for h_schema in self.has_schemas]
        self.belongs_to_schemas, self.belongs_to_join_conditions = self._parse_schema_list(belongs_to_schemas)
        self.belongs_to_tables = [schema_as_table(b_schema, self.engine) for b_schema in self.belongs_to_schemas]
        self.root_conditions = root_conditions
        self.where_clauses = where_clauses
        schema = Schema.union([self.root_schema] + self.has_schemas + self.belongs_to_schemas, with_prefix=True, schema_name=self.root_schema.name+'_join')
        super().__init__(schema)
    def _parse_schema_list(self, schema_list=None):
        """Split a list of Schema-or-(Schema, conditions) into
        (schemas, {schema: conditions})."""
        if schema_list is None:
            schema_list = []
        schemas = []
        join_conditions = {}
        for schema in schema_list:
            if isinstance(schema, tuple):
                schema, j_c = schema
                join_conditions[schema] = j_c
            schemas.append(schema)
        return schemas, join_conditions
    def storage_target(self):
        return 'sqlalchemy'
    def _load(self):
        """Run the join server-side into a temp table, then load it."""
        root = self.root_table
        if self.root_conditions is not None:
            # Pre-filter the root rows before joining.
            root = root.select().where(and_(*self.root_conditions)).alias()
        join_clause = root
        select_clause = []
        root_col_prefix = self.root_schema.options['prefix']
        for col in root.c:
            select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
        # has (one-to-many): child carries <root_prefix>_id -> root.id
        for h_table, h_schema in zip(self.has_tables, self.has_schemas):
            col_prefix = h_schema.options['prefix']
            h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
            for join_condition in self.has_join_conditions.get(h_schema, []):
                h_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
            for col in h_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        # belongs_to (many-to-one): root carries <prefix>_id -> related.id
        for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
            col_prefix = b_schema.options['prefix']
            b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
            for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
                b_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
            for col in b_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        # Materialize the join into a staging table so the result can be
        # bulk-loaded with the dialect fast path.
        temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
        temp_table = schema_as_table(temp_schema, self.engine)
        try:
            temp_table.create(self.engine)
            query = select(select_clause).select_from(join_clause)
            if self.where_clauses is not None:
                query = query.where(and_(*self.where_clauses))
            insert = temp_table.insert().from_select(temp_schema.col_names(), query)
            start = time.time()
            print('executing join into temp table')
            self.engine.execute(insert)
            joined = time.time()
            print('loading rows from temp table')
            df = fast_sql_to_df(temp_table, temp_schema)
            loaded = time.time()
        finally:
            temp_table.drop(self.engine)
        print('type checking and sorting')
        print('took', joined - start, 'seconds to perform the join')
        print('took', loaded - joined, 'seconds to load the results')
        return df
class SAQueryDataStore(DataStore):
    """Read-only DataStore that loads the result of an arbitrary SQL query."""

    def __init__(self, schema, engine, query):
        # Route through DataStore.__init__ like the sibling datastores;
        # previously self.schema was assigned directly, skipping any
        # base-class initialization.
        super().__init__(schema)
        self.engine = engine
        self.query = query

    def _load(self):
        """Execute the stored query and return its rows as a DataFrame."""
        return pandas.read_sql(self.query, self.engine)
| 11,912 | 38 | 866 |
f772e5b4a86ee9267ab9234ff8197c72c91ef62a | 16,056 | py | Python | models/RelationNetworks/relation_rcnn/core/rcnn.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | models/RelationNetworks/relation_rcnn/core/rcnn.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | models/RelationNetworks/relation_rcnn/core/rcnn.py | RamsteinWR/PneumoniaRSNA1 | 08bdba51292307a78ef711c6be4a63faea240ddf | [
"MIT"
] | null | null | null | """
Fast R-CNN:
data =
{'data': [num_images, c, h, w],
'rois': [num_rois, 5]}
label =
{'label': [num_rois],
'bbox_target': [num_rois, 4 * num_classes],
'bbox_weight': [num_rois, 4 * num_classes]}
roidb extended format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
import numpy as np
import numpy.random as npr
from bbox.bbox_regression import expand_bbox_regression_targets
from bbox.bbox_transform import bbox_overlaps, bbox_transform
from utils.image import get_image, tensor_vstack
def get_rcnn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[i]['im_info']], dtype=np.float32) for i in range(len(roidb))]
    im_rois = [roidb[i]['boxes'] for i in range(len(roidb))]
    if cfg.network.ROIDispatch:
        # Dispatch each roi to one of 4 feature levels by its scale:
        # level = clip(floor(2 + log2(sqrt(w*h) / 224)), 0, 3).
        # Previously the four levels were handled by copy-pasted blocks
        # (rois_0..rois_3); this loop produces identical output.
        data = []
        for i in range(len(im_rois)):
            w = im_rois[i][:, 2] - im_rois[i][:, 0] + 1
            h = im_rois[i][:, 3] - im_rois[i][:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
            entry = {'data': im_array[i]}
            for lvl in range(4):
                lvl_rois = im_rois[i][np.where(feat_id == lvl)]
                if len(lvl_rois) == 0:
                    # placeholder so every level has at least one roi
                    lvl_rois = np.zeros((1, 4))
                # stack batch index (always 0 at test time)
                entry['rois_{}'.format(lvl)] = np.hstack((0 * np.ones((lvl_rois.shape[0], 1)), lvl_rois))
            if cfg.TEST.LEARN_NMS:
                entry['im_info'] = im_info[i]
            data.append(entry)
    else:
        data = []
        for i in range(len(roidb)):
            rois = im_rois[i]
            # stack batch index (always 0 at test time)
            data.append({'data': im_array[i],
                         'rois': np.hstack((0 * np.ones((rois.shape[0], 1)), rois))})
            if cfg.TEST.LEARN_NMS:
                data[-1]['im_info'] = im_info[i]
    label = {}
    return data, label, im_info
def get_rcnn_batch(roidb, cfg):
    """
    return a dict of multiple images
    :param roidb: a list of dict, whose length controls batch size
    ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
    :return: data, label
    """
    num_images = len(roidb)
    imgs, roidb = get_image(roidb, cfg)
    im_array = tensor_vstack(imgs)
    assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \
        'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)
    if cfg.TRAIN.BATCH_ROIS == -1:
        # keep every proposal; no subsampling in this mode
        rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
        fg_rois_per_image = rois_per_image
    else:
        # fix: floor division keeps the sample size an int under Python 3
        # (exact division is guaranteed by the assert above); downstream
        # npr.choice(size=...) rejects float sizes
        rois_per_image = cfg.TRAIN.BATCH_ROIS // cfg.TRAIN.BATCH_IMAGES
        fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
    if cfg.network.ROIDispatch:
        rois_array_0 = list()
        rois_array_1 = list()
        rois_array_2 = list()
        rois_array_3 = list()
    else:
        rois_array = list()
    gt_labels_array = list()
    labels_array = list()
    bbox_targets_array = list()
    bbox_weights_array = list()
    for im_i in range(num_images):
        roi_rec = roidb[im_i]
        # infer num_classes from gt_overlaps
        num_classes = roi_rec['gt_overlaps'].shape[1]
        # label = class RoI has max overlap with
        rois = roi_rec['boxes']
        labels = roi_rec['max_classes']
        overlaps = roi_rec['max_overlaps']
        bbox_targets = roi_rec['bbox_targets']
        gt_lables = roi_rec['is_gt']
        if cfg.TRAIN.BATCH_ROIS == -1:
            im_rois, labels_t, bbox_targets, bbox_weights = \
                sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets,
                               gt_boxes=None)
            # v2 keeps every roi, so rois/labels must come back unchanged
            assert np.abs(im_rois - rois).max() < 1e-3
            assert np.abs(labels_t - labels).max() < 1e-3
        else:
            im_rois, labels, bbox_targets, bbox_weights, gt_lables = \
                sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                            labels, overlaps, bbox_targets, gt_lables=gt_lables)
        # project im_rois
        # do not round roi
        if cfg.network.ROIDispatch:
            # dispatch each sampled roi to one of 4 feature levels by its scale
            w = im_rois[:, 2] - im_rois[:, 0] + 1
            h = im_rois[:, 3] - im_rois[:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
            rois_0_idx = np.where(feat_id == 0)[0]
            rois_0 = im_rois[rois_0_idx]
            if len(rois_0) == 0:
                # empty level: pad with one dummy roi labeled -1 so it is ignored
                rois_0 = np.zeros((1, 4))
                label_0 = -np.ones((1,))
                gt_label_0 = -np.ones((1,))
                bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_0 = labels[rois_0_idx]
                gt_label_0 = gt_lables[rois_0_idx]
                bbox_targets_0 = bbox_targets[rois_0_idx]
                bbox_weights_0 = bbox_weights[rois_0_idx]
            rois_1_idx = np.where(feat_id == 1)[0]
            rois_1 = im_rois[rois_1_idx]
            if len(rois_1) == 0:
                rois_1 = np.zeros((1, 4))
                label_1 = -np.ones((1,))
                gt_label_1 = -np.ones((1,))
                bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_1 = labels[rois_1_idx]
                gt_label_1 = gt_lables[rois_1_idx]
                bbox_targets_1 = bbox_targets[rois_1_idx]
                bbox_weights_1 = bbox_weights[rois_1_idx]
            # fix: take [0] like levels 0/1 (np.where returns a 1-tuple; indexing
            # with the tuple happened to behave identically — changed for consistency)
            rois_2_idx = np.where(feat_id == 2)[0]
            rois_2 = im_rois[rois_2_idx]
            if len(rois_2) == 0:
                rois_2 = np.zeros((1, 4))
                label_2 = -np.ones((1,))
                gt_label_2 = -np.ones((1,))
                bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_2 = labels[rois_2_idx]
                gt_label_2 = gt_lables[rois_2_idx]
                bbox_targets_2 = bbox_targets[rois_2_idx]
                bbox_weights_2 = bbox_weights[rois_2_idx]
            rois_3_idx = np.where(feat_id == 3)[0]
            rois_3 = im_rois[rois_3_idx]
            if len(rois_3) == 0:
                rois_3 = np.zeros((1, 4))
                label_3 = -np.ones((1,))
                gt_label_3 = -np.ones((1,))
                bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_3 = labels[rois_3_idx]
                gt_label_3 = gt_lables[rois_3_idx]
                bbox_targets_3 = bbox_targets[rois_3_idx]
                bbox_weights_3 = bbox_weights[rois_3_idx]
            # stack batch index
            rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))
            rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))
            rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))
            rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))
            # labels/targets are re-ordered to match the level-0..3 concatenation
            labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)
            gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)
            bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)
            bbox_weights = np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)
        else:
            rois = im_rois
            batch_index = im_i * np.ones((rois.shape[0], 1))
            rois_array_this_image = np.hstack((batch_index, rois))
            rois_array.append(rois_array_this_image)
        # add labels
        gt_labels_array.append(gt_lables)
        labels_array.append(labels)
        bbox_targets_array.append(bbox_targets)
        bbox_weights_array.append(bbox_weights)
    gt_labels_array = np.array(gt_labels_array)
    nongt_index_array = np.where(gt_labels_array == 0)[1]
    labels_array = np.array(labels_array)
    bbox_targets_array = np.array(bbox_targets_array)
    bbox_weights_array = np.array(bbox_weights_array)
    if cfg.network.USE_NONGT_INDEX:
        label = {'label': labels_array,
                 'nongt_index': nongt_index_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}
    else:
        label = {'label': labels_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}
    if cfg.network.ROIDispatch:
        rois_array_0 = np.array(rois_array_0)
        rois_array_1 = np.array(rois_array_1)
        rois_array_2 = np.array(rois_array_2)
        rois_array_3 = np.array(rois_array_3)
        data = {'data': im_array,
                'rois_0': rois_array_0,
                'rois_1': rois_array_1,
                'rois_2': rois_array_2,
                'rois_3': rois_array_3}
    else:
        rois_array = np.array(rois_array)
        data = {'data': im_array,
                'rois': rois_array}
    if cfg.TRAIN.LEARN_NMS:
        # im info
        im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
        # gt_boxes
        if roidb[0]['gt_classes'].size > 0:
            gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
            gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
            gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
            gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
        else:
            gt_boxes = np.empty((0, 5), dtype=np.float32)
        data['im_info'] = im_info
        data['gt_boxes'] = gt_boxes
    return data, label
def sample_rois_v2(rois, num_classes, cfg,
                   labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):
    """
    keep ALL rois (no subsampling) and produce labels/regression targets for them
    :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
    :param num_classes: number of classes
    :param cfg: config providing TRAIN.* thresholds and bbox normalization settings
    :param labels: maybe precomputed
    :param overlaps: maybe precomputed (max_overlaps)
    :param bbox_targets: maybe precomputed
    :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
    :return: (rois, labels, bbox_targets, bbox_weights)
    """
    if labels is None:
        # fix: builtin float replaces np.float (alias removed in NumPy 1.24);
        # identical dtype (float64)
        overlaps = bbox_overlaps(rois[:, 1:].astype(float), gt_boxes[:, :4].astype(float))
        gt_assignment = overlaps.argmax(axis=1)
        overlaps = overlaps.max(axis=1)
        labels = gt_boxes[gt_assignment, 4]
    # set labels of bg_rois to be 0
    bg_ind = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]
    labels[bg_ind] = 0
    # load or compute bbox_target
    if bbox_targets is not None:
        bbox_target_data = bbox_targets
    else:
        targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment, :4])
        if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
            targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
                       / np.array(cfg.TRAIN.BBOX_STDS))
        bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
    bbox_targets, bbox_weights = \
        expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
    return rois, labels, bbox_targets, bbox_weights
def sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                labels=None, overlaps=None, bbox_targets=None, gt_boxes=None, gt_lables=None):
    """
    generate random sample of ROIs comprising foreground and background examples
    :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
    :param fg_rois_per_image: foreground roi number
    :param rois_per_image: total roi number
    :param num_classes: number of classes
    :param cfg: config providing TRAIN.* thresholds
    :param labels: maybe precomputed
    :param overlaps: maybe precomputed (max_overlaps)
    :param bbox_targets: maybe precomputed
    :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
    :param gt_lables: per-roi gt indicator; subsampled alongside the rois
    :return: (rois, labels, bbox_targets, bbox_weights, gt_lables)
    """
    if labels is None:
        # fix: builtin float replaces np.float (alias removed in NumPy 1.24);
        # identical dtype (float64)
        overlaps = bbox_overlaps(rois[:, 1:].astype(float), gt_boxes[:, :4].astype(float))
        gt_assignment = overlaps.argmax(axis=1)
        overlaps = overlaps.max(axis=1)
        labels = gt_boxes[gt_assignment, 4]
    # foreground RoI with FG_THRESH overlap
    fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
    fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
    # Sample foreground regions without replacement
    if len(fg_indexes) > fg_rois_per_this_image:
        fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
    # Sample background regions without replacement
    if len(bg_indexes) > bg_rois_per_this_image:
        bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
    # indexes selected
    keep_indexes = np.append(fg_indexes, bg_indexes)
    # pad more (with replacement across iterations) to ensure a fixed minibatch size
    while keep_indexes.shape[0] < rois_per_image:
        gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
        gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
        keep_indexes = np.append(keep_indexes, gap_indexes)
    # select gt_labels
    gt_lables = gt_lables[keep_indexes]
    # select labels
    labels = labels[keep_indexes]
    # set labels of bg_rois to be 0
    bg_ind = np.where(overlaps[keep_indexes] < cfg.TRAIN.BG_THRESH_HI)[0]
    labels[bg_ind] = 0
    rois = rois[keep_indexes]
    # load or compute bbox_target
    if bbox_targets is not None:
        bbox_target_data = bbox_targets[keep_indexes, :]
    else:
        targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
        if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
            targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
                       / np.array(cfg.TRAIN.BBOX_STDS))
        bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
    bbox_targets, bbox_weights = \
        expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
    return rois, labels, bbox_targets, bbox_weights, gt_lables
| 41.921671 | 115 | 0.607686 | """
Fast R-CNN:
data =
{'data': [num_images, c, h, w],
'rois': [num_rois, 5]}
label =
{'label': [num_rois],
'bbox_target': [num_rois, 4 * num_classes],
'bbox_weight': [num_rois, 4 * num_classes]}
roidb extended format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
import numpy as np
import numpy.random as npr
from bbox.bbox_regression import expand_bbox_regression_targets
from bbox.bbox_transform import bbox_overlaps, bbox_transform
from utils.image import get_image, tensor_vstack
def get_rcnn_testbatch(roidb, cfg):
    """
    return a dict of testbatch
    :param roidb: ['image', 'flipped'] + ['boxes']
    :return: data, label, im_info
    """
    # assert len(roidb) == 1, 'Single batch only'
    imgs, roidb = get_image(roidb, cfg)
    im_array = imgs
    im_info = [np.array([roidb[idx]['im_info']], dtype=np.float32) for idx in range(len(roidb))]
    im_rois = [roidb[idx]['boxes'] for idx in range(len(roidb))]
    data = []
    if cfg.network.ROIDispatch:
        for idx, rois in enumerate(im_rois):
            # route each roi to one of 4 feature levels according to its scale
            widths = rois[:, 2] - rois[:, 0] + 1
            heights = rois[:, 3] - rois[:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(widths * heights) / 224)), 0, 3).astype(int)
            batch = {'data': im_array[idx]}
            for level in range(4):
                level_rois = rois[np.where(feat_id == level)]
                if len(level_rois) == 0:
                    # never feed an empty roi blob; pad with a single dummy box
                    level_rois = np.zeros((1, 4))
                # prepend the (all-zero, single-image) batch-index column
                batch['rois_%d' % level] = np.hstack((0 * np.ones((level_rois.shape[0], 1)), level_rois))
            if cfg.TEST.LEARN_NMS:
                batch['im_info'] = im_info[idx]
            data.append(batch)
    else:
        for idx in range(len(roidb)):
            rois = im_rois[idx]
            batch = {'data': im_array[idx],
                     'rois': np.hstack((0 * np.ones((rois.shape[0], 1)), rois))}
            if cfg.TEST.LEARN_NMS:
                batch['im_info'] = im_info[idx]
            data.append(batch)
    label = {}
    return data, label, im_info
def get_rcnn_batch(roidb, cfg):
    """
    return a dict of multiple images
    :param roidb: a list of dict, whose length controls batch size
    ['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
    :return: data, label
    """
    num_images = len(roidb)
    imgs, roidb = get_image(roidb, cfg)
    im_array = tensor_vstack(imgs)
    assert cfg.TRAIN.BATCH_ROIS == -1 or cfg.TRAIN.BATCH_ROIS % cfg.TRAIN.BATCH_IMAGES == 0, \
        'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(cfg.TRAIN.BATCH_IMAGES, cfg.TRAIN.BATCH_ROIS)
    if cfg.TRAIN.BATCH_ROIS == -1:
        # keep every proposal; no subsampling in this mode
        rois_per_image = np.sum([iroidb['boxes'].shape[0] for iroidb in roidb])
        fg_rois_per_image = rois_per_image
    else:
        # fix: floor division keeps the sample size an int under Python 3
        # (exact division is guaranteed by the assert above); downstream
        # npr.choice(size=...) rejects float sizes
        rois_per_image = cfg.TRAIN.BATCH_ROIS // cfg.TRAIN.BATCH_IMAGES
        fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
    if cfg.network.ROIDispatch:
        rois_array_0 = list()
        rois_array_1 = list()
        rois_array_2 = list()
        rois_array_3 = list()
    else:
        rois_array = list()
    gt_labels_array = list()
    labels_array = list()
    bbox_targets_array = list()
    bbox_weights_array = list()
    for im_i in range(num_images):
        roi_rec = roidb[im_i]
        # infer num_classes from gt_overlaps
        num_classes = roi_rec['gt_overlaps'].shape[1]
        # label = class RoI has max overlap with
        rois = roi_rec['boxes']
        labels = roi_rec['max_classes']
        overlaps = roi_rec['max_overlaps']
        bbox_targets = roi_rec['bbox_targets']
        gt_lables = roi_rec['is_gt']
        if cfg.TRAIN.BATCH_ROIS == -1:
            im_rois, labels_t, bbox_targets, bbox_weights = \
                sample_rois_v2(rois, num_classes, cfg, labels=labels, overlaps=overlaps, bbox_targets=bbox_targets,
                               gt_boxes=None)
            # v2 keeps every roi, so rois/labels must come back unchanged
            assert np.abs(im_rois - rois).max() < 1e-3
            assert np.abs(labels_t - labels).max() < 1e-3
        else:
            im_rois, labels, bbox_targets, bbox_weights, gt_lables = \
                sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                            labels, overlaps, bbox_targets, gt_lables=gt_lables)
        # project im_rois
        # do not round roi
        if cfg.network.ROIDispatch:
            # dispatch each sampled roi to one of 4 feature levels by its scale
            w = im_rois[:, 2] - im_rois[:, 0] + 1
            h = im_rois[:, 3] - im_rois[:, 1] + 1
            feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, 3).astype(int)
            rois_0_idx = np.where(feat_id == 0)[0]
            rois_0 = im_rois[rois_0_idx]
            if len(rois_0) == 0:
                # empty level: pad with one dummy roi labeled -1 so it is ignored
                rois_0 = np.zeros((1, 4))
                label_0 = -np.ones((1,))
                gt_label_0 = -np.ones((1,))
                bbox_targets_0 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_0 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_0 = labels[rois_0_idx]
                gt_label_0 = gt_lables[rois_0_idx]
                bbox_targets_0 = bbox_targets[rois_0_idx]
                bbox_weights_0 = bbox_weights[rois_0_idx]
            rois_1_idx = np.where(feat_id == 1)[0]
            rois_1 = im_rois[rois_1_idx]
            if len(rois_1) == 0:
                rois_1 = np.zeros((1, 4))
                label_1 = -np.ones((1,))
                gt_label_1 = -np.ones((1,))
                bbox_targets_1 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_1 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_1 = labels[rois_1_idx]
                gt_label_1 = gt_lables[rois_1_idx]
                bbox_targets_1 = bbox_targets[rois_1_idx]
                bbox_weights_1 = bbox_weights[rois_1_idx]
            # fix: take [0] like levels 0/1 (np.where returns a 1-tuple; indexing
            # with the tuple happened to behave identically — changed for consistency)
            rois_2_idx = np.where(feat_id == 2)[0]
            rois_2 = im_rois[rois_2_idx]
            if len(rois_2) == 0:
                rois_2 = np.zeros((1, 4))
                label_2 = -np.ones((1,))
                gt_label_2 = -np.ones((1,))
                bbox_targets_2 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_2 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_2 = labels[rois_2_idx]
                gt_label_2 = gt_lables[rois_2_idx]
                bbox_targets_2 = bbox_targets[rois_2_idx]
                bbox_weights_2 = bbox_weights[rois_2_idx]
            rois_3_idx = np.where(feat_id == 3)[0]
            rois_3 = im_rois[rois_3_idx]
            if len(rois_3) == 0:
                rois_3 = np.zeros((1, 4))
                label_3 = -np.ones((1,))
                gt_label_3 = -np.ones((1,))
                bbox_targets_3 = np.zeros((1, bbox_targets.shape[1]))
                bbox_weights_3 = np.zeros((1, bbox_weights.shape[1]))
            else:
                label_3 = labels[rois_3_idx]
                gt_label_3 = gt_lables[rois_3_idx]
                bbox_targets_3 = bbox_targets[rois_3_idx]
                bbox_weights_3 = bbox_weights[rois_3_idx]
            # stack batch index
            rois_array_0.append(np.hstack((im_i * np.ones((rois_0.shape[0], 1)), rois_0)))
            rois_array_1.append(np.hstack((im_i * np.ones((rois_1.shape[0], 1)), rois_1)))
            rois_array_2.append(np.hstack((im_i * np.ones((rois_2.shape[0], 1)), rois_2)))
            rois_array_3.append(np.hstack((im_i * np.ones((rois_3.shape[0], 1)), rois_3)))
            # labels/targets are re-ordered to match the level-0..3 concatenation
            labels = np.concatenate([label_0, label_1, label_2, label_3], axis=0)
            gt_lables = np.concatenate([gt_label_0, gt_label_1, gt_label_2, gt_label_3], axis=0)
            bbox_targets = np.concatenate([bbox_targets_0, bbox_targets_1, bbox_targets_2, bbox_targets_3], axis=0)
            bbox_weights = np.concatenate([bbox_weights_0, bbox_weights_1, bbox_weights_2, bbox_weights_3], axis=0)
        else:
            rois = im_rois
            batch_index = im_i * np.ones((rois.shape[0], 1))
            rois_array_this_image = np.hstack((batch_index, rois))
            rois_array.append(rois_array_this_image)
        # add labels
        gt_labels_array.append(gt_lables)
        labels_array.append(labels)
        bbox_targets_array.append(bbox_targets)
        bbox_weights_array.append(bbox_weights)
    gt_labels_array = np.array(gt_labels_array)
    nongt_index_array = np.where(gt_labels_array == 0)[1]
    labels_array = np.array(labels_array)
    bbox_targets_array = np.array(bbox_targets_array)
    bbox_weights_array = np.array(bbox_weights_array)
    if cfg.network.USE_NONGT_INDEX:
        label = {'label': labels_array,
                 'nongt_index': nongt_index_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}
    else:
        label = {'label': labels_array,
                 'bbox_target': bbox_targets_array,
                 'bbox_weight': bbox_weights_array}
    if cfg.network.ROIDispatch:
        rois_array_0 = np.array(rois_array_0)
        rois_array_1 = np.array(rois_array_1)
        rois_array_2 = np.array(rois_array_2)
        rois_array_3 = np.array(rois_array_3)
        data = {'data': im_array,
                'rois_0': rois_array_0,
                'rois_1': rois_array_1,
                'rois_2': rois_array_2,
                'rois_3': rois_array_3}
    else:
        rois_array = np.array(rois_array)
        data = {'data': im_array,
                'rois': rois_array}
    if cfg.TRAIN.LEARN_NMS:
        # im info
        im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
        # gt_boxes
        if roidb[0]['gt_classes'].size > 0:
            gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
            gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
            gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
            gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
        else:
            gt_boxes = np.empty((0, 5), dtype=np.float32)
        data['im_info'] = im_info
        data['gt_boxes'] = gt_boxes
    return data, label
def sample_rois_v2(rois, num_classes, cfg,
                   labels=None, overlaps=None, bbox_targets=None, gt_boxes=None):
    """
    keep ALL rois (no subsampling) and produce labels/regression targets for them
    :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
    :param num_classes: number of classes
    :param cfg: config providing TRAIN.* thresholds and bbox normalization settings
    :param labels: maybe precomputed
    :param overlaps: maybe precomputed (max_overlaps)
    :param bbox_targets: maybe precomputed
    :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
    :return: (rois, labels, bbox_targets, bbox_weights)
    """
    if labels is None:
        # fix: builtin float replaces np.float (alias removed in NumPy 1.24);
        # identical dtype (float64)
        overlaps = bbox_overlaps(rois[:, 1:].astype(float), gt_boxes[:, :4].astype(float))
        gt_assignment = overlaps.argmax(axis=1)
        overlaps = overlaps.max(axis=1)
        labels = gt_boxes[gt_assignment, 4]
    # set labels of bg_rois to be 0
    bg_ind = np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0]
    labels[bg_ind] = 0
    # load or compute bbox_target
    if bbox_targets is not None:
        bbox_target_data = bbox_targets
    else:
        targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment, :4])
        if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
            targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
                       / np.array(cfg.TRAIN.BBOX_STDS))
        bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
    bbox_targets, bbox_weights = \
        expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
    return rois, labels, bbox_targets, bbox_weights
def sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes, cfg,
                labels=None, overlaps=None, bbox_targets=None, gt_boxes=None, gt_lables=None):
    """
    generate random sample of ROIs comprising foreground and background examples
    :param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
    :param fg_rois_per_image: foreground roi number
    :param rois_per_image: total roi number
    :param num_classes: number of classes
    :param cfg: config providing TRAIN.* thresholds
    :param labels: maybe precomputed
    :param overlaps: maybe precomputed (max_overlaps)
    :param bbox_targets: maybe precomputed
    :param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
    :param gt_lables: per-roi gt indicator; subsampled alongside the rois
    :return: (rois, labels, bbox_targets, bbox_weights, gt_lables)
    """
    if labels is None:
        # fix: builtin float replaces np.float (alias removed in NumPy 1.24);
        # identical dtype (float64)
        overlaps = bbox_overlaps(rois[:, 1:].astype(float), gt_boxes[:, :4].astype(float))
        gt_assignment = overlaps.argmax(axis=1)
        overlaps = overlaps.max(axis=1)
        labels = gt_boxes[gt_assignment, 4]
    # foreground RoI with FG_THRESH overlap
    fg_indexes = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
    # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
    fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
    # Sample foreground regions without replacement
    if len(fg_indexes) > fg_rois_per_this_image:
        fg_indexes = npr.choice(fg_indexes, size=fg_rois_per_this_image, replace=False)
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_indexes = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
    # Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
    bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
    bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_indexes.size)
    # Sample background regions without replacement
    if len(bg_indexes) > bg_rois_per_this_image:
        bg_indexes = npr.choice(bg_indexes, size=bg_rois_per_this_image, replace=False)
    # indexes selected
    keep_indexes = np.append(fg_indexes, bg_indexes)
    # pad more (with replacement across iterations) to ensure a fixed minibatch size
    while keep_indexes.shape[0] < rois_per_image:
        gap = np.minimum(len(rois), rois_per_image - keep_indexes.shape[0])
        gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
        keep_indexes = np.append(keep_indexes, gap_indexes)
    # select gt_labels
    gt_lables = gt_lables[keep_indexes]
    # select labels
    labels = labels[keep_indexes]
    # set labels of bg_rois to be 0
    bg_ind = np.where(overlaps[keep_indexes] < cfg.TRAIN.BG_THRESH_HI)[0]
    labels[bg_ind] = 0
    rois = rois[keep_indexes]
    # load or compute bbox_target
    if bbox_targets is not None:
        bbox_target_data = bbox_targets[keep_indexes, :]
    else:
        targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4])
        if cfg.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
            targets = ((targets - np.array(cfg.TRAIN.BBOX_MEANS))
                       / np.array(cfg.TRAIN.BBOX_STDS))
        bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
    bbox_targets, bbox_weights = \
        expand_bbox_regression_targets(bbox_target_data, num_classes, cfg)
    return rois, labels, bbox_targets, bbox_weights, gt_lables
| 0 | 0 | 0 |
7cbfc364b8c5913e09428b8051b575c6d9ddf730 | 1,177 | py | Python | tests/medium/plugins/test_file_output.py | hyacker/eventgen | 2d33536e5b830b4bccb620b5239f25609c647c1a | [
"Apache-2.0"
] | null | null | null | tests/medium/plugins/test_file_output.py | hyacker/eventgen | 2d33536e5b830b4bccb620b5239f25609c647c1a | [
"Apache-2.0"
] | null | null | null | tests/medium/plugins/test_file_output.py | hyacker/eventgen | 2d33536e5b830b4bccb620b5239f25609c647c1a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import os
import sys
from mock import MagicMock, patch
from splunk_eventgen.__main__ import parse_args
from splunk_eventgen.eventgen_core import EventGenerator
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
| 30.179487 | 111 | 0.654206 | #!/usr/bin/env python
# encoding: utf-8
import os
import sys
from mock import MagicMock, patch
from splunk_eventgen.__main__ import parse_args
from splunk_eventgen.eventgen_core import EventGenerator
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
class TestFileOutputPlugin(object):
    """Medium test for eventgen's file output plugin."""
    def test_output_data_to_file(self):
        """Run eventgen with the fileoutput config and check the written result file.

        Expects 5 "WINDBAG Event i of 5" lines in test_file_output.result.
        """
        configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.fileoutput"
        testargs = ["eventgen", "generate", configfile]
        # run the real CLI argument parser against a patched sys.argv
        with patch.object(sys, 'argv', testargs):
            pargs = parse_args()
            assert pargs.subcommand == 'generate'
            assert pargs.configfile == configfile
            eventgen = EventGenerator(args=pargs)
            eventgen.start()
            # the config writes to the repo root (3 levels above this test dir)
            # -- assumed from the path arithmetic; confirm against the .conf file
            file_output_path = os.path.abspath(os.path.join(FILE_DIR, '..', '..', '..', 'test_file_output.result'))
            assert os.path.isfile(file_output_path)
            f = open(file_output_path, 'r')
            line_count = 1
            for output_line in f:
                # stop after verifying the 5 expected events (or on an empty line)
                if not output_line or line_count == 6:
                    break
                assert "WINDBAG Event {} of 5".format(line_count) in output_line
                line_count += 1
| 852 | 14 | 50 |
e8caf316aff1c240c2bd8b5750f5fa64ff151a5c | 994 | py | Python | documentstore_migracao/utils/files.py | robertatakenaka/document-store-migracao | 54e8dc279b336f40a772e204de3142a7bb715382 | [
"BSD-2-Clause"
] | null | null | null | documentstore_migracao/utils/files.py | robertatakenaka/document-store-migracao | 54e8dc279b336f40a772e204de3142a7bb715382 | [
"BSD-2-Clause"
] | null | null | null | documentstore_migracao/utils/files.py | robertatakenaka/document-store-migracao | 54e8dc279b336f40a772e204de3142a7bb715382 | [
"BSD-2-Clause"
] | null | null | null | """ module to utils methods to file """
import os
import shutil
import logging
from documentstore_migracao import config
logger = logging.getLogger(__name__)
| 19.88 | 70 | 0.65493 | """ module to utils methods to file """
import os
import shutil
import logging
from documentstore_migracao import config
logger = logging.getLogger(__name__)
def setup_processing_folder():
    """Create every directory listed in ``config.INITIAL_PATH`` that is missing."""
    paths = config.INITIAL_PATH
    for path in paths:
        if not os.path.exists(path):
            logger.debug("Criando pasta : %s", path)
            # exist_ok guards against a race where the folder appears between
            # the exists() check above and this call
            os.makedirs(path, exist_ok=True)
def move_xml_conversion2success(xml_file):
    """Move *xml_file* from the conversion folder to the success folder."""
    source = os.path.join(config.get("CONVERSION_PATH"), xml_file)
    destination = os.path.join(config.get("SUCCESS_PROCESSING_PATH"), xml_file)
    shutil.move(source, destination)
def list_dir(path):
    """Return the names of the ``.xml`` entries directly under *path*."""
    entries = os.listdir(path)
    return [entry for entry in entries if entry.endswith(".xml")]
def read_file(path):
    """Return the full contents of the UTF-8 text file at *path*."""
    logger.debug("Lendo arquivo: %s", path)
    # context manager guarantees the handle is closed even if read() raises
    with open(path, "r", encoding="utf-8") as file:
        return file.read()
def write_file(path, source):
    """Write *source* to *path* as UTF-8 text, replacing any existing file."""
    logger.debug("Gravando arquivo: %s", path)
    # context manager guarantees the handle is flushed/closed even on error
    with open(path, "w", encoding="utf-8") as file:
        file.write(source)
| 713 | 0 | 115 |
340dfb07bbad6b3d413744279dd2ff9f7f8e7bd0 | 1,059 | py | Python | docs/reST/ext/customversion.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | 2 | 2021-04-03T20:01:35.000Z | 2021-09-09T23:42:21.000Z | docs/reST/ext/customversion.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | docs/reST/ext/customversion.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | 1 | 2021-06-06T17:52:54.000Z | 2021-06-06T17:52:54.000Z | from sphinx.domains.changeset import versionlabels, VersionChange
from sphinx.locale import _ # just to suppress warnings
try:
from sphinx.domains.changeset import versionlabel_classes
except ImportError:
# versionlabel_classes doesn't exist in old Sphinx versions.
UPDATE_VERIONLABEL_CLASSES = False
else:
UPDATE_VERIONLABEL_CLASSES = True
labels = ('versionadded', 'versionchanged', 'deprecated', 'versionextended')
| 32.090909 | 89 | 0.734655 | from sphinx.domains.changeset import versionlabels, VersionChange
from sphinx.locale import _ # just to suppress warnings
try:
from sphinx.domains.changeset import versionlabel_classes
except ImportError:
# versionlabel_classes doesn't exist in old Sphinx versions.
UPDATE_VERIONLABEL_CLASSES = False
else:
UPDATE_VERIONLABEL_CLASSES = True
labels = ('versionadded', 'versionchanged', 'deprecated', 'versionextended')
def set_version_formats(app, config):
    """'config-inited' handler: install each '<label>_format' config value as the directive's display text."""
    for label in labels:
        versionlabels[label] = \
            _(getattr(config, '{}_format'.format(label)))
def setup(app):
    """Sphinx extension entry point: add the 'versionextended' directive and configurable label formats."""
    app.add_directive('versionextended', VersionChange)
    versionlabels['versionextended'] = 'Extended in pygame %s'
    if UPDATE_VERIONLABEL_CLASSES:
        # newer Sphinx also keys a CSS class name per version-change directive
        versionlabel_classes['versionextended'] = 'extended'
    # expose '<label>_format' conf.py values so projects can override the text
    for label in ('versionadded', 'versionchanged', 'deprecated', 'versionextended'):
        app.add_config_value('{}_format'.format(label), str(versionlabels[label]), 'env')
    app.connect('config-inited', set_version_formats)
| 574 | 0 | 46 |
36831c603a00768a85c8ba8b8274bcd3f0371013 | 1,945 | py | Python | AspDec/SSCL/word2vec.py | dumpmemory/AspDecSSCL | 004d73d3248e8fdee1336cfc6490ef4872583665 | [
"MIT"
] | 23 | 2020-12-16T15:50:07.000Z | 2022-03-17T05:51:20.000Z | AspDec/SSCL/word2vec.py | dumpmemory/AspDecSSCL | 004d73d3248e8fdee1336cfc6490ef4872583665 | [
"MIT"
] | 3 | 2021-02-01T07:24:35.000Z | 2021-11-24T09:28:35.000Z | AspDec/SSCL/word2vec.py | dumpmemory/AspDecSSCL | 004d73d3248e8fdee1336cfc6490ef4872583665 | [
"MIT"
] | 4 | 2021-05-12T06:49:43.000Z | 2021-10-04T04:41:08.000Z | '''
@author Tian Shi
Please contact tshi@vt.edu
'''
import json
import os
import random
import gensim
import numpy as np
from tqdm import tqdm
def run_word2vec(args):
    '''
    Run word2vec.

    Reads one JSON record per line from ``args.file_train_w2v`` (field
    ``text_uae`` holds pre-tokenized text), shuffles the sentences and trains
    a gensim word2vec model saved to ../cluster_results/w2v_embedding.
    '''
    cluster_dir = '../cluster_results'
    if not os.path.exists(cluster_dir):
        os.mkdir(cluster_dir)
    if not os.path.exists('../nats_results'):
        os.mkdir('../nats_results')
    sentences = []
    # context manager closes the file even if a line fails to parse
    with open(os.path.join(args.data_dir, args.file_train_w2v), 'r') as fp:
        for line in tqdm(fp):
            itm = json.loads(line)
            sentences.append(itm['text_uae'].split())
    random.shuffle(sentences)
    print('-'*50)
    print('Number of sentences: {}'.format(len(sentences)))
    print('Begin to train word2vec...')
    model = gensim.models.Word2Vec(
        sentences,
        size=args.emb_size,
        window=args.window,
        min_count=args.min_count,
        workers=args.workers)
    model.save(os.path.join(cluster_dir, 'w2v_embedding'))
    # fix: message typo ("Taining")
    print('Training Done.')
    print('-'*50)
def convert_vectors(args):
    '''
    convert vectors and vocab.

    Loads the trained word2vec model, dumps the embedding matrix to
    ``vectors_w2v.npy`` and the sorted vocabulary (one "word index" pair per
    line) to ``vocab.txt`` under ../cluster_results.
    '''
    cluster_dir = '../cluster_results'
    file_vocab = 'vocab.txt'
    file_wordvec = 'vectors_w2v'
    model = gensim.models.Word2Vec.load(
        os.path.join(cluster_dir, 'w2v_embedding'))
    lexicon = {}
    for word in model.wv.vocab:
        # skip whitespace-only tokens
        if word.strip() == '':
            continue
        lexicon[word] = model.wv[word]
    vocab = sorted(lexicon)
    vec = np.zeros([len(lexicon), args.emb_size])
    for k, wd in enumerate(vocab):
        vec[k] = lexicon[wd]
    print('Vocabulary size: {}'.format(vec.shape[0]))
    np.save(os.path.join(cluster_dir, file_wordvec), vec)
    # context manager closes vocab.txt even if a write fails
    with open(os.path.join(cluster_dir, file_vocab), 'w') as fout:
        for k, itm in enumerate(vocab):
            fout.write(' '.join([itm, str(k)]) + '\n')
| 24.935897 | 68 | 0.60874 | '''
@author Tian Shi
Please contact tshi@vt.edu
'''
import json
import os
import random
import gensim
import numpy as np
from tqdm import tqdm
def run_word2vec(args):
    '''
    Run word2vec.

    Reads one JSON object per line from args.file_train_w2v (the
    'text_uae' field holds whitespace-tokenized text), shuffles the
    sentences, trains a gensim word2vec model, and saves it to
    ../cluster_results/w2v_embedding.
    '''
    cluster_dir = '../cluster_results'
    # Create the output directories on first run.
    if not os.path.exists(cluster_dir):
        os.mkdir(cluster_dir)
    if not os.path.exists('../nats_results'):
        os.mkdir('../nats_results')
    fp = open(os.path.join(args.data_dir, args.file_train_w2v), 'r')
    sentences = []
    for line in tqdm(fp):
        itm = json.loads(line)
        # 'text_uae' is pre-tokenized; split on whitespace.
        sentences.append(itm['text_uae'].split())
    fp.close()
    # Shuffle so training does not see the corpus in file order.
    random.shuffle(sentences)
    print('-'*50)
    print('Number of sentences: {}'.format(len(sentences)))
    print('Begin to train word2vec...')
    # NOTE(review): 'size=' is the gensim < 4.0 keyword (4.x renamed it
    # to 'vector_size') -- confirm the pinned gensim version.
    model = gensim.models.Word2Vec(
        sentences,
        size=args.emb_size,
        window=args.window,
        min_count=args.min_count,
        workers=args.workers)
    model.save(os.path.join(cluster_dir, 'w2v_embedding'))
    print('Taining Done.')
    print('-'*50)
def convert_vectors(args):
    '''
    convert vectors and vocab.

    Loads the trained word2vec model, drops blank tokens, and writes
    two artifacts to ../cluster_results: 'vectors_w2v.npy' (a
    |V| x emb_size matrix whose rows follow the sorted vocabulary) and
    'vocab.txt' ("word index" per line, matching the matrix rows).
    '''
    cluster_dir = '../cluster_results'
    file_vocab = 'vocab.txt'
    file_wordvec = 'vectors_w2v'
    model = gensim.models.Word2Vec.load(
        os.path.join(cluster_dir, 'w2v_embedding'))
    lexicon = {}
    # Keep every non-blank token with its embedding vector.
    for word in model.wv.vocab:
        if word.strip() == '':
            continue
        lexicon[word] = model.wv[word]
    vocab = []
    for wd in lexicon:
        vocab.append(wd)
    # Sorting makes the vocab order (and matrix row order) stable.
    vocab = sorted(vocab)
    vec = np.zeros([len(lexicon), args.emb_size])
    for k, wd in enumerate(vocab):
        vec[k] = lexicon[wd]
    print('Vocabulary size: {}'.format(vec.shape[0]))
    np.save(os.path.join(cluster_dir, file_wordvec), vec)
    fout = open(os.path.join(cluster_dir, file_vocab), 'w')
    # One "word index" pair per line; the index is the matrix row.
    for k, itm in enumerate(vocab):
        itm = [itm, str(k)]
        fout.write(' '.join(itm) + '\n')
    fout.close()
| 0 | 0 | 0 |
c2d27542ebbc9981ab52262f7a073f4fa9ee3436 | 303 | py | Python | GenerateDatapack.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | GenerateDatapack.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | GenerateDatapack.py | edgecdec/TobaccoAwarenessMC | 2fce9181ae3bb8b65dd4dcbfb4f3363e42e3dd90 | [
"MIT"
] | null | null | null | from GenerateFolders import generateFolders
from GenerateFiles import generateFiles
from AddictionHelperGenerator import addictionHelperGenerator
import shutil
from Constants import *  # presumably provides rootdir -- TODO confirm
# Build the datapack in dependency order: folder tree first, then the
# data files, then the addiction-helper functions.
generateFolders()
generateFiles()
addictionHelperGenerator()
shutil.make_archive('tobacco_awareness', 'zip', rootdir) | 27.545455 | 61 | 0.864686 | from GenerateFolders import generateFolders
from GenerateFiles import generateFiles
from AddictionHelperGenerator import addictionHelperGenerator
import shutil
from Constants import *  # presumably provides rootdir -- TODO confirm
# Build the datapack in dependency order: folder tree first, then the
# data files, then the addiction-helper functions.
generateFolders()
generateFiles()
addictionHelperGenerator()
shutil.make_archive('tobacco_awareness', 'zip', rootdir) | 0 | 0 | 0 |
933c5eecbd0001ecf9e6633f65b9d333411d7767 | 1,391 | py | Python | demo_data_format.py | chrisoldnall/PHS-Code | a37bdb10821b40c40ece21451a29b1327ce4585d | [
"MIT"
] | null | null | null | demo_data_format.py | chrisoldnall/PHS-Code | a37bdb10821b40c40ece21451a29b1327ce4585d | [
"MIT"
] | null | null | null | demo_data_format.py | chrisoldnall/PHS-Code | a37bdb10821b40c40ece21451a29b1327ce4585d | [
"MIT"
] | null | null | null | from final_code.fcts_data_formatting import day_to_month, day_to_quarter, import_datasets, time_interval, add_categories, \
HB_to_areas, extract_data, day_to_quarter, month_to_quarter
import numpy as np
import matplotlib.pyplot as plt
# Load the raw tables; names suggest NHS cancer waiting-time /
# operations / diagnostics / covid datasets -- TODO confirm schemas.
data31, data62, operations, diag, covid = import_datasets(['31DayData', '62DayData', 'cancellations_by_board_november_2021', \
    'diagnostics_by_board_september_2021', 'covid_2022'])
print(covid)
# Restrict the 31-day data to 2018Q1-2020Q1 and map health boards to areas.
data31 = time_interval(data31, ['2018Q1', '2020Q1'])
data31 = HB_to_areas(data31)
# Custom groupings: selected cancer types and all regions combined.
groupings = {'new_CT':['Breast', 'Cervical'], 'all_reg':['NCA','SCAN','WOSCAN']}
data31 = add_categories(data31, groupings)
print(data31.index.names)
data31.info()
d31 = extract_data(data31, ('all_reg', 'all_reg','new_CT'), ['HB', 'HBT','CancerType'], ['NumberOfEligibleReferrals31DayStandard'])
# Aggregate daily covid figures to quarters.
covid = day_to_quarter(covid)
print(covid)
# Operations data: monthly window, aggregated to area level.
operations = time_interval(operations, ['201807', '202107'])
operations = HB_to_areas(operations)
print(operations.index.names)
operations.info()
op1, op2 = extract_data(operations, 'NCA', 'HBT', ['TotalOperations', 'TotalCancelled'])
# Plot total operations over time, keeping every 4th x tick label.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(op1[0,:],op1[1,:])
every_nth = 4
for n, label in enumerate(ax.xaxis.get_ticklabels()):
    if n % every_nth != 0:
        label.set_visible(False)
plt.show() | 39.742857 | 131 | 0.698778 | from final_code.fcts_data_formatting import day_to_month, day_to_quarter, import_datasets, time_interval, add_categories, \
HB_to_areas, extract_data, day_to_quarter, month_to_quarter
import numpy as np
import matplotlib.pyplot as plt
# Load the raw tables; names suggest NHS cancer waiting-time /
# operations / diagnostics / covid datasets -- TODO confirm schemas.
data31, data62, operations, diag, covid = import_datasets(['31DayData', '62DayData', 'cancellations_by_board_november_2021', \
    'diagnostics_by_board_september_2021', 'covid_2022'])
print(covid)
# Restrict the 31-day data to 2018Q1-2020Q1 and map health boards to areas.
data31 = time_interval(data31, ['2018Q1', '2020Q1'])
data31 = HB_to_areas(data31)
# Custom groupings: selected cancer types and all regions combined.
groupings = {'new_CT':['Breast', 'Cervical'], 'all_reg':['NCA','SCAN','WOSCAN']}
data31 = add_categories(data31, groupings)
print(data31.index.names)
data31.info()
d31 = extract_data(data31, ('all_reg', 'all_reg','new_CT'), ['HB', 'HBT','CancerType'], ['NumberOfEligibleReferrals31DayStandard'])
# Aggregate daily covid figures to quarters.
covid = day_to_quarter(covid)
print(covid)
# Operations data: monthly window, aggregated to area level.
operations = time_interval(operations, ['201807', '202107'])
operations = HB_to_areas(operations)
print(operations.index.names)
operations.info()
op1, op2 = extract_data(operations, 'NCA', 'HBT', ['TotalOperations', 'TotalCancelled'])
# Plot total operations over time, keeping every 4th x tick label.
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(op1[0,:],op1[1,:])
every_nth = 4
for n, label in enumerate(ax.xaxis.get_ticklabels()):
    if n % every_nth != 0:
        label.set_visible(False)
plt.show() | 0 | 0 | 0 |
245415851fb2a753e9f96d775498e919aa2a04d2 | 940 | py | Python | src/utils.py | afshinbigboy/imt-rl-mcmc | 4c541c32566979d527b608fbbe36a4403398ebb3 | [
"MIT"
] | 1 | 2021-05-01T16:59:49.000Z | 2021-05-01T16:59:49.000Z | src/utils.py | afshinbigboy/imt-rl-mcmc | 4c541c32566979d527b608fbbe36a4403398ebb3 | [
"MIT"
] | null | null | null | src/utils.py | afshinbigboy/imt-rl-mcmc | 4c541c32566979d527b608fbbe36a4403398ebb3 | [
"MIT"
] | null | null | null | import sys
| 32.413793 | 70 | 0.552128 | import sys
class ColorPrint:
    """Static helpers for ANSI-colored terminal output.

    Failures and warnings go to stderr; pass/info/bold go to stdout.
    Each method accepts any number of positional values, joins them
    with single spaces, strips surrounding whitespace, wraps the text
    in an ANSI SGR color, and appends *end* (default: newline).
    """

    # ANSI "reset all attributes" sequence appended after every message.
    _RESET = '\x1b[0m'

    @staticmethod
    def _write(stream, color, messages, end):
        """Shared formatter: join, strip, colorize, then write."""
        msg = ' '.join(str(m) for m in messages)
        stream.write(color + msg.strip() + ColorPrint._RESET + end)

    @staticmethod
    def print_fail(*messages, end = '\n'):
        """Write *messages* in bold red to stderr."""
        ColorPrint._write(sys.stderr, '\x1b[1;31m', messages, end)

    @staticmethod
    def print_pass(*messages, end = '\n'):
        """Write *messages* in bold green to stdout."""
        ColorPrint._write(sys.stdout, '\x1b[1;32m', messages, end)

    @staticmethod
    def print_warn(*messages, end = '\n'):
        """Write *messages* in bold yellow to stderr."""
        ColorPrint._write(sys.stderr, '\x1b[1;33m', messages, end)

    @staticmethod
    def print_info(*messages, end = '\n'):
        """Write *messages* in bold blue to stdout."""
        ColorPrint._write(sys.stdout, '\x1b[1;34m', messages, end)

    @staticmethod
    def print_bold(*messages, end = '\n'):
        """Write *messages* in bold white to stdout."""
        ColorPrint._write(sys.stdout, '\x1b[1;37m', messages, end)
09f57377b2f200c0b923b75e399a536a35973e22 | 152 | py | Python | LinkedClips/__init__.py | NSUSpray/LinkedClips | 8cd0974c42632e9be15a60934d7b3e5fd270c214 | [
"MIT"
] | 5 | 2019-11-27T15:44:28.000Z | 2022-03-21T03:54:44.000Z | LinkedClips/__init__.py | NSUSpray/LinkedClips | 8cd0974c42632e9be15a60934d7b3e5fd270c214 | [
"MIT"
] | null | null | null | LinkedClips/__init__.py | NSUSpray/LinkedClips | 8cd0974c42632e9be15a60934d7b3e5fd270c214 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
| 25.333333 | 40 | 0.802632 | from __future__ import absolute_import
def create_instance (c_instance):
    """Entry point called by Ableton Live to build the control surface.

    Args:
        c_instance: the wrapper object Live hands to every control
            surface script.

    Returns:
        A new ``LinkedClips`` control-surface instance.
    """
    # Imported lazily so the package can be loaded before Live is ready.
    from .LinkedClips import LinkedClips
    return LinkedClips (c_instance)
| 89 | 0 | 23 |
c4f014e481ebe1253497317b5cde2d3e85a86be6 | 33,527 | py | Python | graphsense/api/addresses_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | null | null | null | graphsense/api/addresses_api.py | iknaio/graphsense-python | b61c66b6ec0bb9720036ae61777e90ce63a971cc | [
"MIT"
] | 1 | 2022-02-24T11:21:49.000Z | 2022-02-24T11:21:49.000Z | graphsense/api/addresses_api.py | INTERPOL-Innovation-Centre/GraphSense-Maltego-transform | 2a9b352289ab64903a7012c5d84cb4c6d8172ade | [
"MIT"
] | null | null | null | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.api_client import ApiClient, Endpoint as _Endpoint
from graphsense.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from graphsense.model.address import Address
from graphsense.model.address_tags import AddressTags
from graphsense.model.address_txs import AddressTxs
from graphsense.model.entity import Entity
from graphsense.model.links import Links
from graphsense.model.neighbors import Neighbors
class AddressesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
| 36.323944 | 211 | 0.440272 | """
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.api_client import ApiClient, Endpoint as _Endpoint
from graphsense.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from graphsense.model.address import Address
from graphsense.model.address_tags import AddressTags
from graphsense.model.address_txs import AddressTxs
from graphsense.model.entity import Entity
from graphsense.model.links import Links
from graphsense.model.neighbors import Neighbors
class AddressesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __get_address(
self,
currency,
address,
**kwargs
):
"""Get an address, optionally with tags # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_address(currency, address, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
Keyword Args:
include_tags (bool): Whether to include the first page of tags. Use the respective /tags endpoint to retrieve more if needed.. [optional] if omitted the server will use the default value of False
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Address
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
return self.call_with_http_info(**kwargs)
self.get_address = _Endpoint(
settings={
'response_type': (Address,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}',
'operation_id': 'get_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'include_tags',
],
'required': [
'currency',
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'include_tags':
(bool,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'include_tags': 'include_tags',
},
'location_map': {
'currency': 'path',
'address': 'path',
'include_tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_address
)
def __get_address_entity(
self,
currency,
address,
**kwargs
):
"""Get the entity of an address # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_address_entity(currency, address, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
Keyword Args:
include_tags (bool): Whether to include the first page of tags. Use the respective /tags endpoint to retrieve more if needed.. [optional] if omitted the server will use the default value of False
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Entity
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
return self.call_with_http_info(**kwargs)
self.get_address_entity = _Endpoint(
settings={
'response_type': (Entity,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}/entity',
'operation_id': 'get_address_entity',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'include_tags',
],
'required': [
'currency',
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'include_tags':
(bool,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'include_tags': 'include_tags',
},
'location_map': {
'currency': 'path',
'address': 'path',
'include_tags': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_address_entity
)
def __list_address_links(
self,
currency,
address,
neighbor,
**kwargs
):
"""Get outgoing transactions between two addresses # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_address_links(currency, address, neighbor, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
neighbor (str): Neighbor address
Keyword Args:
page (str): Resumption token for retrieving the next page. [optional]
pagesize (int): Number of items returned in a single page. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Links
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
kwargs['neighbor'] = \
neighbor
return self.call_with_http_info(**kwargs)
self.list_address_links = _Endpoint(
settings={
'response_type': (Links,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}/links',
'operation_id': 'list_address_links',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'neighbor',
'page',
'pagesize',
],
'required': [
'currency',
'address',
'neighbor',
],
'nullable': [
],
'enum': [
],
'validation': [
'pagesize',
]
},
root_map={
'validations': {
('pagesize',): {
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'neighbor':
(str,),
'page':
(str,),
'pagesize':
(int,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'neighbor': 'neighbor',
'page': 'page',
'pagesize': 'pagesize',
},
'location_map': {
'currency': 'path',
'address': 'path',
'neighbor': 'query',
'page': 'query',
'pagesize': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__list_address_links
)
def __list_address_neighbors(
self,
currency,
address,
direction,
**kwargs
):
"""Get an addresses' neighbors in the address graph # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_address_neighbors(currency, address, direction, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
direction (str): Incoming or outgoing neighbors
Keyword Args:
include_labels (bool): Whether to include labels of first page of tags. [optional] if omitted the server will use the default value of False
page (str): Resumption token for retrieving the next page. [optional]
pagesize (int): Number of items returned in a single page. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Neighbors
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
kwargs['direction'] = \
direction
return self.call_with_http_info(**kwargs)
self.list_address_neighbors = _Endpoint(
settings={
'response_type': (Neighbors,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}/neighbors',
'operation_id': 'list_address_neighbors',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'direction',
'include_labels',
'page',
'pagesize',
],
'required': [
'currency',
'address',
'direction',
],
'nullable': [
],
'enum': [
'direction',
],
'validation': [
'pagesize',
]
},
root_map={
'validations': {
('pagesize',): {
'inclusive_minimum': 1,
},
},
'allowed_values': {
('direction',): {
"IN": "in",
"OUT": "out"
},
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'direction':
(str,),
'include_labels':
(bool,),
'page':
(str,),
'pagesize':
(int,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'direction': 'direction',
'include_labels': 'include_labels',
'page': 'page',
'pagesize': 'pagesize',
},
'location_map': {
'currency': 'path',
'address': 'path',
'direction': 'query',
'include_labels': 'query',
'page': 'query',
'pagesize': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__list_address_neighbors
)
def __list_address_txs(
self,
currency,
address,
**kwargs
):
"""Get all transactions an address has been involved in # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_address_txs(currency, address, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
Keyword Args:
page (str): Resumption token for retrieving the next page. [optional]
pagesize (int): Number of items returned in a single page. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
AddressTxs
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
return self.call_with_http_info(**kwargs)
self.list_address_txs = _Endpoint(
settings={
'response_type': (AddressTxs,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}/txs',
'operation_id': 'list_address_txs',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'page',
'pagesize',
],
'required': [
'currency',
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
'pagesize',
]
},
root_map={
'validations': {
('pagesize',): {
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'page':
(str,),
'pagesize':
(int,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'page': 'page',
'pagesize': 'pagesize',
},
'location_map': {
'currency': 'path',
'address': 'path',
'page': 'query',
'pagesize': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__list_address_txs
)
def __list_tags_by_address(
self,
currency,
address,
**kwargs
):
"""Get attribution tags for a given address # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tags_by_address(currency, address, async_req=True)
>>> result = thread.get()
Args:
currency (str): The cryptocurrency code (e.g., btc)
address (str): The cryptocurrency address
Keyword Args:
page (str): Resumption token for retrieving the next page. [optional]
pagesize (int): Number of items returned in a single page. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
AddressTags
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['currency'] = \
currency
kwargs['address'] = \
address
return self.call_with_http_info(**kwargs)
self.list_tags_by_address = _Endpoint(
settings={
'response_type': (AddressTags,),
'auth': [
'api_key'
],
'endpoint_path': '/{currency}/addresses/{address}/tags',
'operation_id': 'list_tags_by_address',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'currency',
'address',
'page',
'pagesize',
],
'required': [
'currency',
'address',
],
'nullable': [
],
'enum': [
],
'validation': [
'pagesize',
]
},
root_map={
'validations': {
('pagesize',): {
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'currency':
(str,),
'address':
(str,),
'page':
(str,),
'pagesize':
(int,),
},
'attribute_map': {
'currency': 'currency',
'address': 'address',
'page': 'page',
'pagesize': 'pagesize',
},
'location_map': {
'currency': 'path',
'address': 'path',
'page': 'query',
'pagesize': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__list_tags_by_address
)
| 32,574 | 0 | 27 |
69378bc6d7659c9fe346d6602e04664de994dad4 | 3,665 | py | Python | bftool/__main__.py | shoriwe/bftool | cb5c84e08f3703ebc86d56ad1cf6a755f7e8e82d | [
"MIT"
] | 2 | 2020-06-16T06:11:19.000Z | 2020-08-26T01:50:19.000Z | bftool/__main__.py | shoriwe/bftool | cb5c84e08f3703ebc86d56ad1cf6a755f7e8e82d | [
"MIT"
] | 18 | 2020-04-27T17:11:35.000Z | 2021-02-21T02:18:17.000Z | bftool/__main__.py | shoriwe/bftool | cb5c84e08f3703ebc86d56ad1cf6a755f7e8e82d | [
"MIT"
] | null | null | null | import argparse
import collections
import json
import random
import string
import sys
import types
import bftool
# Default argument capture for the main function
def _get_arguments() -> argparse.Namespace:
    """Parse bftool's command-line options.

    Returns:
        argparse.Namespace: the parsed configuration (concurrency
        limits, wordlist/bruteforce JSON strings, the success/check
        hook expressions, the optional script path and the target
        expression).
    """
    argument_parser = argparse.ArgumentParser()
    # Concurrency limits.
    argument_parser.add_argument("-mt", "--max-threads", type=int, default=1,
                                 help="Maximum number of threads per process")
    argument_parser.add_argument("-mp", "--max-processes", type=int, default=1,
                                 help="Maximum number of process to have active at the same time")
    # Input sources, passed as JSON strings.
    argument_parser.add_argument("-w", "--wordlist", default="{}",
                                 help="File wordlist to use based on "
                                      "\"{'argument_1': FILE_PATH, ...}\"")
    # BUG FIX: the original help text used a backslash line continuation
    # *inside* the string literal, which embedded ~30 spaces of source
    # indentation into the runtime string; it is now built with normal
    # adjacent-literal concatenation.
    argument_parser.add_argument("-b", "--bruteforce", default="{}",
                                 help="Generate a virtual wordlist based on rules "
                                      "\"{'argument_1': {'elements': [element_1, ...], "
                                      "'minlength': INT, 'maxlength': INT, "
                                      "'string-join': BOOL}, ...}\"")
    # Result-handling hooks, given as Python expressions.
    argument_parser.add_argument("-sf", "--success-function",
                                 default="lambda output: print(f\"[+] {output}\\n\", end='')",
                                 help="Function to pass the success result to (default is custom 'print')")
    argument_parser.add_argument("-cf", "--check-function",
                                 default="lambda output: output",
                                 help="Function useful to check the output (default is 'lambda output: output')")
    argument_parser.add_argument("-sp", "--script_path", type=str, default=None,
                                 help="Python script to import")
    argument_parser.add_argument("expression",
                                 help="expression that will result in a callable")
    return argument_parser.parse_args()
if __name__ == "__main__":
    # Present a stable program name in argparse usage/error output.
    sys.argv[0] = "bftool"
    parsed_arguments = _get_arguments()
    # Evaluate the user-supplied expressions into callables, optionally
    # inside the namespace of the --script_path module.
    function_ = import_function(parsed_arguments.expression, parsed_arguments.script_path)
    success_function = import_function(parsed_arguments.success_function, parsed_arguments.script_path)
    check_function = import_function(parsed_arguments.check_function, parsed_arguments.script_path)
    # Wordlist files and bruteforce rules arrive as JSON strings.
    function_arguments = bftool.Arguments(
        function_=function_,
        files=json.loads(parsed_arguments.wordlist),
        bruteforce_rules=json.loads(parsed_arguments.bruteforce),
    )
    # Fan the argument space out over processes/threads and run.
    bftool.Pool(
        function_,
        function_arguments=function_arguments,
        check_function=check_function,
        success_function=success_function,
        max_processes=parsed_arguments.max_processes,
        max_threads=parsed_arguments.max_threads
    ).run()
| 46.987179 | 119 | 0.635198 | import argparse
import collections
import json
import random
import string
import sys
import types
import bftool
# Default argument capture for the main function
def _get_arguments() -> argparse.Namespace:
    """Parse the bftool command-line options.

    Returns:
        argparse.Namespace: the raw CLI options.  (The previous docstring
        claimed a ``bftool.Arguments`` was returned; that conversion actually
        happens in the ``__main__`` block below.)
    """
    argument_parser = argparse.ArgumentParser()
    # Concurrency limits.
    argument_parser.add_argument(
        "-mt", "--max-threads", default=1, type=int,
        help="Maximum number of threads per process")
    argument_parser.add_argument(
        "-mp", "--max-processes", default=1, type=int,
        help="Maximum number of process to have active at the same time")
    # Wordlist sources: JSON mapping argument names to wordlist files on
    # disk, or bruteforce-generation rules.
    argument_parser.add_argument(
        "-w", "--wordlist", default="{}",
        help="File wordlist to use based on"
             " \"{'argument_1': FILE_PATH, ...}\"")
    # BUG FIX: the original -b help string used a backslash line-continuation
    # *inside* the literal, leaking source indentation into the help text;
    # implicit string concatenation yields the intended clean message.
    argument_parser.add_argument(
        "-b", "--bruteforce", default="{}",
        help="Generate a virtual wordlist based on rules"
             " \"{'argument_1': {'elements': [element_1, ...], 'minlength': INT, 'maxlength': "
             "INT, 'string-join': BOOL}, ...}\"")
    # Hook expressions, later compiled to callables by `import_function`.
    argument_parser.add_argument(
        "-sf", "--success-function",
        default="lambda output: print(f\"[+] {output}\\n\", end='')",
        help="Function to pass the success result to (default is custom 'print')")
    argument_parser.add_argument(
        "-cf", "--check-function", default="lambda output: output",
        help="Function useful to check the output (default is 'lambda output: output')")
    argument_parser.add_argument(
        "-sp", "--script_path", default=None, type=str,
        help="Python script to import")
    argument_parser.add_argument(
        "expression", help="expression that will result in a callable")
    return argument_parser.parse_args()
def random_name(length: int = 32) -> str:
    """Return a random lowercase ASCII identifier.

    Used by `import_function` to create a collision-unlikely temporary name
    to bind the compiled expression to inside a module namespace.

    Args:
        length: number of characters to generate (default 32, matching the
            original fixed-size behavior).

    Returns:
        A string of ``length`` random lowercase letters.
    """
    # random.choices draws all characters in one call instead of one
    # random.choice per character.
    return "".join(random.choices(string.ascii_lowercase, k=length))
def import_function(expression: str, path: str = None) -> collections.abc.Callable:
    """Evaluate ``expression`` and return the resulting callable.

    The expression is assigned to a random temporary name (see
    `random_name`) inside either the module loaded from ``path`` or a fresh
    anonymous module, so the expression can reference that module's globals.

    NOTE: ``exec`` on a user-supplied expression is arbitrary code
    execution; that is intentional for this CLI tool, but never feed it
    untrusted input.
    """
    func_name = random_name()
    if path is not None:
        # Evaluate inside the user's module so the expression can use its names.
        module = bftool.import_module_from_path(path)
        exec(f"{func_name} = {expression}", module.__dict__, module.__dict__)
        return module.__getattribute__(func_name)
    # No script given: evaluate in a throwaway module namespace.
    definitions = types.ModuleType("definitions")
    exec(f"{func_name} = {expression}", definitions.__dict__, definitions.__dict__)
    return definitions.__getattribute__(func_name)
if __name__ == "__main__":
    # Present the tool as "bftool" in argparse usage/help output.
    sys.argv[0] = "bftool"
    parsed_arguments = _get_arguments()
    # Compile the user-supplied expressions into callables (optionally inside
    # the namespace of the module given via --script_path).
    function_ = import_function(parsed_arguments.expression, parsed_arguments.script_path)
    success_function = import_function(parsed_arguments.success_function, parsed_arguments.script_path)
    check_function = import_function(parsed_arguments.check_function, parsed_arguments.script_path)
    # Wordlist/bruteforce options arrive as JSON strings on the CLI.
    function_arguments = bftool.Arguments(
        function_=function_,
        files=json.loads(parsed_arguments.wordlist),
        bruteforce_rules=json.loads(parsed_arguments.bruteforce),
    )
    # Fan the wordlist out over the requested processes/threads and run.
    bftool.Pool(
        function_,
        function_arguments=function_arguments,
        check_function=check_function,
        success_function=success_function,
        max_processes=parsed_arguments.max_processes,
        max_threads=parsed_arguments.max_threads
    ).run()
| 558 | 0 | 46 |
f7ebbd284107fe437f704970e715d15f4b7ea07d | 631 | py | Python | samples/conanfile.py | SiennaSaito/libtcod | 97984740f92758864d2ae9181617a33d2d3c7813 | [
"BSD-3-Clause"
] | 686 | 2018-07-01T15:49:10.000Z | 2022-03-30T14:13:40.000Z | samples/conanfile.py | SiennaSaito/libtcod | 97984740f92758864d2ae9181617a33d2d3c7813 | [
"BSD-3-Clause"
] | 99 | 2018-10-23T17:02:08.000Z | 2022-03-29T18:47:47.000Z | samples/conanfile.py | SiennaSaito/libtcod | 97984740f92758864d2ae9181617a33d2d3c7813 | [
"BSD-3-Clause"
] | 76 | 2018-07-29T03:51:42.000Z | 2022-03-26T03:10:10.000Z | from conans import ConanFile, CMake
| 27.434783 | 53 | 0.554675 | from conans import ConanFile, CMake
class LibtcodSamplesConan(ConanFile):
    """Conan recipe for building the libtcod samples.

    Pulls libtcod and SDL2 as shared libraries, builds with CMake, and
    copies the runtime artifacts next to the produced binaries.
    """

    settings = ("os", "compiler", "build_type", "arch")
    generators = "cmake"
    requires = (
        "libtcod/[~=1.15]@hexdecimal/stable",
        "sdl2/[~=2.0.5]@bincrafters/stable",
    )
    default_options = {"*:shared": True}

    def build(self):
        """Configure and run the CMake build."""
        builder = CMake(self)
        builder.configure()
        builder.build()

    def imports(self):
        """Copy dependency runtime libraries into ./bin."""
        # (glob pattern, source folder inside the dependency package)
        runtime_artifacts = (
            ("*.dll", "bin"),
            ("*.pdb", "bin"),
            ("*.dylib*", "lib"),
            ("*.so*", "lib"),
        )
        for pattern, src_folder in runtime_artifacts:
            self.copy(pattern, dst="bin", src=src_folder)
| 267 | 304 | 23 |
07ed54bfb2c231467fae4f929f7219986db1ccf4 | 6,998 | py | Python | scripts/visualize.py | Karthikprabhu22/lynx | 2b0124085cc7a5f1eb3d06a89f6c56f02a087e12 | [
"MIT"
] | null | null | null | scripts/visualize.py | Karthikprabhu22/lynx | 2b0124085cc7a5f1eb3d06a89f6c56f02a087e12 | [
"MIT"
] | 4 | 2020-05-06T20:14:01.000Z | 2020-05-07T07:45:12.000Z | scripts/visualize.py | Karthikprabhu22/lynx | 2b0124085cc7a5f1eb3d06a89f6c56f02a087e12 | [
"MIT"
] | 1 | 2020-08-15T05:24:16.000Z | 2020-08-15T05:24:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import sys
import click
import h5py
import yaml
import lynx
import hoover
import pymaster as nmt
from scipy.optimize import minimize
import emcee
import healpy as hp
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import pandas as pd
import numpy as np
from scipy import stats
from lynx import Masking
_logger = logging.getLogger(__name__)
@click.command()
@click.option('-d', '--data_path', 'data_path', required=True,
type=click.Path(exists=True), help='path to data configuration')
@click.option('-m', '--model_path', 'model_path', required=True,
type=click.Path(exists=False), help='path to model configuration')
@click.option('-p', '--mask_path', 'mask_path', required=True,
type=click.Path(exists=False), help='path to masking configuration')
@click.option('--quiet', 'log_level', flag_value=logging.WARNING, default=True)
@click.option('-v', '--verbose', 'log_level', flag_value=logging.INFO)
@click.option('-vv', '--very-verbose', 'log_level', flag_value=logging.DEBUG)
@click.version_option(lynx.__version__)
if __name__ == '__main__':
main() | 35.165829 | 97 | 0.615462 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import sys
import click
import h5py
import yaml
import lynx
import hoover
import pymaster as nmt
from scipy.optimize import minimize
import emcee
import healpy as hp
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import pandas as pd
import numpy as np
from scipy import stats
from lynx import Masking
_logger = logging.getLogger(__name__)
@click.command()
@click.option('-d', '--data_path', 'data_path', required=True,
type=click.Path(exists=True), help='path to data configuration')
@click.option('-m', '--model_path', 'model_path', required=True,
type=click.Path(exists=False), help='path to model configuration')
@click.option('-p', '--mask_path', 'mask_path', required=True,
type=click.Path(exists=False), help='path to masking configuration')
@click.option('--quiet', 'log_level', flag_value=logging.WARNING, default=True)
@click.option('-v', '--verbose', 'log_level', flag_value=logging.INFO)
@click.option('-vv', '--very-verbose', 'log_level', flag_value=logging.DEBUG)
@click.version_option(lynx.__version__)
def main(data_path: Path, mask_path: Path, model_path: Path, log_level: int):
    """Average per-realization component maps and display them.

    For each fitting mask, sums the amplitude and spectral-parameter maps
    stored in each Monte Carlo realization's HDF5 record, divides by the
    number of realizations, and shows Mollweide views of the mean 'cmb'
    amplitude and mean 'beta_d' parameter.
    """
    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        datefmt='%Y-%m-%d %H:%M',
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    masking = lynx.Masking(mask_path)
    fitting_masks = list(masking.get_fitting_indices())
    model_identifier, lnP = hoover.LogProb.load_model_from_yaml(model_path)
    # The simulation configuration is stored as a YAML string in the file
    # attributes of the data HDF5 file.
    with h5py.File(data_path, 'r') as f:
        sky_config = yaml.load(f.attrs['config'], Loader=yaml.FullLoader)
        nmc = sky_config['monte_carlo']
        frequencies = np.array(sky_config['frequencies'])
        nside = sky_config['nside']
    # Amplitudes carry a leading axis of size 2 per pixel map
    # (assumed (Q, U) polarization -- TODO confirm); parameters are scalar maps.
    amplitude_output_shape = (2, hp.nside2npix(nside))
    parameter_output_shape = (hp.nside2npix(nside),)
    for fitting_name, fitting_parameters in fitting_masks:
        # Running sums over Monte Carlo realizations.
        T_bar = {comp: np.zeros(amplitude_output_shape) for comp in lnP._components}
        par = {par: np.zeros(parameter_output_shape) for par in lnP.free_parameters}
        for imc in range(nmc):
            logging.info(r"""
            Working on Monte Carlo realization: {:d}
            """.format(imc))
            hdf5_record = str(Path(model_identifier) / fitting_name / 'mc{:04d}'.format(imc))
            logging.info(r"""
            Working on fitting scheme: {:s}
            """.format(fitting_name))
            with h5py.File(data_path, 'a') as f:
                # Create a group which contains results for this sky
                # patch, model, and MC realization.
                opt = f[hdf5_record]
                # Create a dataset for the whole sky, and log the
                # results for this patch in the corresponding indices.
                # Do the same for the spectral parameters.
                for component in lnP._components:
                    T_bar[component] += opt[component][...]
                for parameter in lnP.free_parameters:
                    par[parameter] += opt[parameter][...]
        # Sum -> mean over the nmc realizations.
        T_bar = {key: value / float(nmc) for key, value in T_bar.items()}
        par = {key: value / float(nmc) for key, value in par.items()}
        hp.mollview(T_bar['cmb'][0])
        hp.mollview(par['beta_d'])
        plt.show()
def plot_fisher(mean, cov, truth=None, fpath=None, xlabel=None, ylabel=None):
    """Corner-style plot of a 2-parameter Gaussian (Fisher) constraint.

    Draws the 2-D confidence ellipses in the main panel and the two 1-D
    marginal curves on the attached bottom/left axes.  ``truth`` (a 2-tuple)
    adds dashed reference lines; ``fpath`` saves the figure when given.
    """
    nb = 128
    fig = plt.figure(figsize=(6, 6))
    grid = plt.GridSpec(4, 4, hspace=0., wspace=0.)
    main_ax = fig.add_subplot(grid[:-1, 1:])
    y_ax = fig.add_subplot(grid[:-1, 0], xticklabels=[], sharey=main_ax)
    x_ax = fig.add_subplot(grid[-1, 1:], yticklabels=[], sharex=main_ax)
    # scatter points on the main axes
    plot_fisher_2d(mean, cov, main_ax)
    if truth is not None:
        main_ax.axvline(truth[0], color='gray', linestyle='--')
        main_ax.axhline(truth[1], color='gray', linestyle='--')
    # histogram on the attached axes
    # 1-D marginal of parameter 0 on a 128-point grid spanning +/-4 sigma.
    sigma = np.sqrt(cov[0, 0])
    x_arr = mean[0] - 4 * sigma + 8 * sigma * np.arange(nb) / (nb - 1.)
    p_arr = np.exp(- (x_arr - mean[0]) ** 2 / (2 * sigma ** 2))
    x_ax.plot(x_arr, p_arr, color='tan', label='Validation')
    if truth is not None:
        # NOTE(review): label=True renders as the legend text "True" --
        # looks like a placeholder for a real string label; confirm intent.
        x_ax.axvline(truth[0], color='gray', linestyle='--', label=True)
    x_ax.invert_yaxis()
    l, h = x_ax.get_legend_handles_labels(legend_handler_map=None)
    # NOTE(review): this invisible subplot is created but never used.
    label = fig.add_subplot(grid[0, 0], visible=False)
    main_ax.legend(l, h, frameon=False, loc='upper right', bbox_to_anchor=(1., 1.))
    # 1-D marginal of parameter 1, drawn sideways on the left axis.
    sigma = np.sqrt(cov[1, 1])
    y_arr = mean[1] - 4 * sigma + 8 * sigma * np.arange(nb) / (nb - 1.)
    p_arr = np.exp(- (y_arr - mean[1]) ** 2 / (2 * sigma ** 2))
    y_ax.plot(p_arr, y_arr, color='tan')
    if truth is not None:
        y_ax.axhline(truth[1], color='gray', linestyle='--')
    y_ax.invert_xaxis()
    x_ax.set_xlabel(xlabel)
    y_ax.set_ylabel(ylabel)
    # Hide all tick machinery; only the curves/ellipses and labels remain.
    x_ax.get_yaxis().set_visible(False)
    y_ax.get_xaxis().set_visible(False)
    main_ax.get_xaxis().set_visible(False)
    main_ax.get_yaxis().set_visible(False)
    if fpath is not None:
        fig.savefig(fpath, bbox_inches='tight')
    return
def plot_fisher_1d(mean, cov, i, ax, labels=None):
    """Draw the unnormalized 1-D Gaussian of parameter *i* on *ax*.

    The curve is evaluated on a 128-point grid spanning +/-4 sigma around
    the mean; the visible x-range is clipped to +/-3 sigma.  ``labels`` is
    accepted for interface compatibility but currently unused.
    """
    n_grid = 128
    tick_fontsize = 16 - 2
    center = mean[i]
    sigma = np.sqrt(cov[i, i])
    grid = center - 4 * sigma + 8 * sigma * np.arange(n_grid) / (n_grid - 1.)
    density = np.exp(- (grid - center) ** 2 / (2 * sigma ** 2))
    ax.plot(grid, density)
    ax.set_xlim([center - 3. * sigma, center + 3. * sigma])
    for tick in ax.get_yticklabels() + ax.get_xticklabels():
        tick.set_fontsize(tick_fontsize)
    return
def plot_fisher_2d(mean, cov, ax, labels=None):
    """Draw 1- and 2-sigma confidence ellipses of a 2x2 covariance on *ax*.

    2.3 and 6.17 are the chi-square levels enclosing ~68.3% / ~95.4%
    probability for two degrees of freedom.  ``labels`` is accepted for
    interface compatibility but currently unused.
    """
    eigvals, eigvecs = np.linalg.eigh(cov)
    # Orientation (degrees) of the principal axis from its eigenvector.
    tilt = 180. * np.arctan2(eigvecs[1, 0], eigvecs[0, 0]) / np.pi
    # One solid inner ellipse, one translucent outer ellipse.
    for level, opacity in ((2.3, None), (6.17, 0.5)):
        semi_a = np.sqrt(level * eigvals[0])
        semi_b = np.sqrt(level * eigvals[1])
        extra = {} if opacity is None else {"alpha": opacity}
        ax.add_artist(Ellipse(xy=mean, width=2 * semi_a, height=2 * semi_b,
                              angle=tilt, color='tan', **extra))
    sd_x = np.sqrt(cov[0, 0])
    sd_y = np.sqrt(cov[1, 1])
    ax.set_xlim([mean[0] - 3 * sd_x, mean[0] + 3 * sd_x])
    ax.set_ylim([mean[1] - 3 * sd_y, mean[1] + 3 * sd_y])
    for tick in ax.get_yticklabels() + ax.get_xticklabels():
        tick.set_fontsize(16 - 2)
    return
def compute_mean_cov(arr):
    """Compute the sample mean and population covariance of MC samples.

    Parameters
    ----------
    arr : ndarray, shape (nmc, ndim)
        One Monte Carlo realization per row.

    Returns
    -------
    mean : ndarray, shape (ndim,)
        Per-column mean.
    cov : ndarray, shape (ndim, ndim)
        Covariance normalized by nmc (not nmc - 1), matching the original
        implementation.
    """
    assert arr.ndim == 2
    nmc = float(arr.shape[0])
    mean = np.mean(arr, axis=0)
    diff = arr - mean[None, :]
    # diff.T @ diff gives the same (ndim, ndim) sum of outer products as the
    # previous (nmc, ndim, ndim) broadcast, without materializing that large
    # intermediate array.
    cov = diff.T @ diff / nmc
    return mean, cov
if __name__ == '__main__':
main() | 5,659 | 0 | 115 |
6763751da420b8743657612a663c583a88edff6e | 1,978 | py | Python | manage.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | 4 | 2018-11-06T16:27:11.000Z | 2021-11-12T12:23:54.000Z | manage.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | null | null | null | manage.py | lgoodridge/Asteroids-AI | bf6abd2b42db1b13667060c30de13a53aaa05110 | [
"MIT"
] | 1 | 2022-02-25T10:35:34.000Z | 2022-02-25T10:35:34.000Z | """
Script for running management commands for the Asteroids Game / AI.
Usage: python manage.py [--help]
"""
from ai.experiment import merge_experiments
import click
import settings
class TransparentGroup(click.Group):
"""
A Click Group class that passes all provided
arguments to its subcommands without processing them.
"""
@click.group(cls=TransparentGroup)
@click.pass_context
@manage.command(short_help='Merges experiments into a new experiment',
context_settings=dict(ignore_unknown_options=True,
allow_extra_args=True,))
@click.argument('parent_dirs', nargs=-1)
@click.argument('output_dir')
@click.pass_context
def merge(ctx, parent_dirs, output_dir):
"""
Merges the best brains of the parent experment directories
into a new directory, and initializes (but does not run)
that experiment:
\b
The settings passed to this command will be used to initialize
and perform the initial evaluation of the merged experiment.
\b
Arguments:
parent_dirs - Directories of parent experiments to merge.
output_dir - Directory to place the merged experiment into.
"""
# Remove all options from the directory arguments
parent_dirs = [x for x in list(parent_dirs) if not x.startswith("--")]
if output_dir.startswith("--"):
output_dir = parent_dirs.pop() if len(parent_dirs) > 0 else ""
# Configure settings, then actually merge the experiments
settings.configure_settings()
merge_experiments(parent_dirs, output_dir)
@manage.command('settings', short_help='View configurable settings')
@click.pass_context
def view_settings(ctx):
"""
View the configurable settings for the other commands.
"""
click.echo(settings.cli_configure_settings.get_help(ctx))
if __name__ == "__main__":
manage()
| 30.90625 | 74 | 0.71638 | """
Script for running management commands for the Asteroids Game / AI.
Usage: python manage.py [--help]
"""
from ai.experiment import merge_experiments
import click
import settings
class TransparentGroup(click.Group):
    """Click group that forwards raw arguments to its subcommands.

    The unparsed argument list is stashed on ``ctx.obj`` as a tuple so that
    subcommands can inspect everything the user typed, even tokens Click
    would not normally hand through.
    """

    def invoke(self, ctx):
        """Record the raw args on the context, then dispatch normally."""
        ctx.obj = tuple(ctx.args)
        super().invoke(ctx)
@click.group(cls=TransparentGroup)
@click.pass_context
def manage(ctx):
    """Root CLI group; subcommands receive the raw args via TransparentGroup."""
    # NOTE(review): `args` is bound but never used here -- the raw argument
    # tuple is consumed by the subcommands instead.
    args = ctx.obj
@manage.command(short_help='Merges experiments into a new experiment',
                context_settings=dict(ignore_unknown_options=True,
                                      allow_extra_args=True,))
@click.argument('parent_dirs', nargs=-1)
@click.argument('output_dir')
@click.pass_context
def merge(ctx, parent_dirs, output_dir):
    """
    Merges the best brains of the parent experiment directories
    into a new directory, and initializes (but does not run)
    that experiment:
    \b
    The settings passed to this command will be used to initialize
    and perform the initial evaluation of the merged experiment.
    \b
    Arguments:
        parent_dirs - Directories of parent experiments to merge.
        output_dir - Directory to place the merged experiment into.
    """
    # Strip option-style tokens ("--...") that Click passed through, keeping
    # only the positional directory arguments.  (parent_dirs is already an
    # iterable tuple; the redundant list() wrapper is gone.)
    parent_dirs = [x for x in parent_dirs if not x.startswith("--")]
    # If the output slot captured an option token, recover the real output
    # directory from the end of parent_dirs, or fall back to "".
    if output_dir.startswith("--"):
        output_dir = parent_dirs.pop() if parent_dirs else ""
    # Configure settings, then actually merge the experiments
    settings.configure_settings()
    merge_experiments(parent_dirs, output_dir)
@manage.command('settings', short_help='View configurable settings')
@click.pass_context
def view_settings(ctx):
    """
    View the configurable settings for the other commands.
    """
    # Render the help text of the shared settings command from `settings`.
    click.echo(settings.cli_configure_settings.get_help(ctx))
if __name__ == "__main__":
    # Entry point: dispatch to the Click group defined above.
    manage()
| 99 | 0 | 48 |
b02bcec7907a8cc2bcaf89b751f1bbc626c8b99d | 7,002 | py | Python | bigfastapi/tutorial.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | 1 | 2022-03-20T21:46:05.000Z | 2022-03-20T21:46:05.000Z | bigfastapi/tutorial.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null | bigfastapi/tutorial.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null | from operator import or_
import sqlalchemy
from fastapi import APIRouter, HTTPException, status
from fastapi.param_functions import Depends
from fastapi.responses import JSONResponse
from fastapi import APIRouter
import fastapi as _fastapi
import sqlalchemy.orm as _orm
from bigfastapi.db.database import get_db
from bigfastapi.schemas import plan_schema, tutorial_schema
from bigfastapi.models import plan_model, tutorial_model, user_models
from uuid import uuid4
from bigfastapi import db, users
from typing import List
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
import datetime as _dt
app = APIRouter(tags=["Tutorials"])
# SAVE TUTORIAL ENDPOINT
@app.post('/tutorial', response_model=tutorial_schema.TutorialSingleRes)
# GET TUTORIALS - Can be filtered by category, title or both
@app.get('/tutorials', response_model=tutorial_schema.TutorialListRes)
# GET TUTORIALS IN GROUPED OF CATEGORIES- Return result as groups of categories
@app.get('/tutorials/group/categories')
# GET A LIST OF ALL TUTORIAL CATEGORIES
@app.get('/tutorials/categories')
# SEARCH TUTORIAL BY MATCHING KEYWORDS
@app.get('/tutorials/search/{keyword}', response_model=tutorial_schema.TutorialListRes)
# UPDATE TUTORIAL DETAILS
@app.put('/tutorials/{itemId}')
@app.delete('/tutorials/{itemId}/user/{userId}')
# --------------------------------------------------------------------------------------------------#
# HELPER FUNCTIONS SECION
# --------------------------------------------------------------------------------------------------#
# SKIP and OFFSET
# SAVE A NEW TUTORIA
# PAGINATION LOGIC
# RUN QUERY
# BUID CATEGORY LIST
# GENERIC STRUCTURED RESPONSE BUILDER
| 37.848649 | 107 | 0.688803 | from operator import or_
import sqlalchemy
from fastapi import APIRouter, HTTPException, status
from fastapi.param_functions import Depends
from fastapi.responses import JSONResponse
from fastapi import APIRouter
import fastapi as _fastapi
import sqlalchemy.orm as _orm
from bigfastapi.db.database import get_db
from bigfastapi.schemas import plan_schema, tutorial_schema
from bigfastapi.models import plan_model, tutorial_model, user_models
from uuid import uuid4
from bigfastapi import db, users
from typing import List
from sqlalchemy.exc import IntegrityError
from sqlalchemy import func
import datetime as _dt
app = APIRouter(tags=["Tutorials"])
# SAVE TUTORIAL ENDPOINT
@app.post('/tutorial', response_model=tutorial_schema.TutorialSingleRes)
async def store(newTutorial: tutorial_schema.TutorialRequest, db: _orm.Session = _fastapi.Depends(get_db)):
    """Create a tutorial; only superusers may add one.

    Raises 401 when the `added_by` user lacks super admin access and 404
    when that user does not exist.
    """
    try:
        tutorial = await saveNewTutorial(newTutorial, db)
        return tutorial_model.buildSuccessRes(tutorial, False)
    except PermissionError as exception:
        raise HTTPException(status_code=401, detail=str(exception))
    except LookupError as exception:
        raise HTTPException(status_code=404, detail=str(exception))
# GET TUTORIALS - Can be filtered by category, title or both
@app.get('/tutorials', response_model=tutorial_schema.TutorialListRes)
async def getTutorials(
    category: str = None, title: str = None,
    page_size: int = 10, page: int = 1,
    db: _orm.Session = _fastapi.Depends(get_db)):
    """Paginated tutorial listing, optionally filtered by title/category."""
    rowCount = await tutorial_model.getRowCount(db)
    skip = getSkip(page, page_size)
    # Filter dispatch lives in runFetchQuery (see its review note).
    tutorials = await runFetchQuery(category, title, page_size, skip, rowCount, db)
    return buildSuccessRes(
        tutorials, True, page_size, rowCount,
        getPagination(page, page_size, rowCount, '/tutorials'))
# GET TUTORIALS IN GROUPED OF CATEGORIES- Return result as groups of categories
@app.get('/tutorials/group/categories')
async def getGroup(
    page_size: int = 10, page: int = 1,
    db: _orm.Session = _fastapi.Depends(get_db)):
    """Return tutorials grouped by category, with pagination metadata."""
    rowCount = await tutorial_model.getRowCount(db)
    skip = getSkip(page, page_size)
    groupedTutorials = await tutorial_model.groupByCategory(db, skip, page_size)
    pagination = getPagination(
        page, page_size, rowCount, '/tutorials/group/categories')
    return {"data": groupedTutorials, "total": rowCount, "count": page_size, "pagination": pagination}
# GET A LIST OF ALL TUTORIAL CATEGORIES
@app.get('/tutorials/categories')
async def getCategoryLsit(page_size: int = 10, page: int = 1,
                          db: _orm.Session = _fastapi.Depends(get_db)):
    """List the categories of the tutorials on the requested page.

    NOTE(review): the misspelled name ("Lsit") is kept because it is this
    module's public attribute; renaming could break importers.
    """
    skip = getSkip(page, page_size)
    tutorials = await tutorial_model.groupByCategory(db, skip, page_size)
    categories = buildCategoryList(tutorials)
    return {"data": categories}
# SEARCH TUTORIAL BY MATCHING KEYWORDS
@app.get('/tutorials/search/{keyword}', response_model=tutorial_schema.TutorialListRes)
async def searchByKeyWord(
    keyword: str, page_size: int = 10, page: int = 1,
    db: _orm.Session = _fastapi.Depends(get_db)):
    """Keyword search over tutorials, with pagination.

    NOTE(review): the pagination endpoint below is *not* an f-string, so the
    generated next/previous links contain the literal text "{keyword}" --
    confirm whether clients are expected to substitute it themselves.
    """
    rowCount = await tutorial_model.getRowCount(db)
    skip = getSkip(page, page_size)
    pagination = getPagination(
        page, page_size, rowCount, '/tutorials/search/{keyword}')
    tutorials = await tutorial_model.searchWithAll(keyword, db, skip, page_size)
    return buildSuccessRes(tutorials, True, page_size, rowCount, pagination)
# UPDATE TUTORIAL DETAILS
@app.put('/tutorials/{itemId}')
async def update(
    itemId: str, newTutorial: tutorial_schema.TutorialRequest,
    db: _orm.Session = _fastapi.Depends(get_db)):
    """Update a tutorial's details.

    Raises 401 when `added_by` lacks super admin access and 404 when the
    tutorial or user cannot be found.
    """
    try:
        tutorial = await tutorial_model.update(newTutorial, itemId, newTutorial.added_by, db)
        return tutorial_model.buildSuccessRes(tutorial, False)
    # BUG FIX: HTTPException takes `detail=`, not `details=`; the old keyword
    # raised a TypeError instead of returning the intended error response
    # (cf. the `store` endpoint above, which already used `detail=`).
    except PermissionError as exception:
        raise HTTPException(status_code=401, detail=str(exception))
    except LookupError as exception:
        raise HTTPException(status_code=404, detail=str(exception))
@app.delete('/tutorials/{itemId}/user/{userId}')
async def delete(itemId: str, userId: str, db: _orm.Session = _fastapi.Depends(get_db)):
    """Delete a tutorial on behalf of `userId`.

    Raises 401 when the user may not delete it and 404 when the tutorial
    or user cannot be found.
    """
    try:
        dbResponse = await tutorial_model.delete(itemId, userId, db)
        return {'data': dbResponse}
    # BUG FIX: `details=` is not a valid HTTPException keyword; it raised a
    # TypeError instead of the intended 401/404 response (cf. `store`).
    except PermissionError as exception:
        raise HTTPException(status_code=401, detail=str(exception))
    except LookupError as exception:
        raise HTTPException(status_code=404, detail=str(exception))
# --------------------------------------------------------------------------------------------------#
# HELPER FUNCTIONS SECION
# --------------------------------------------------------------------------------------------------#
# SKIP and OFFSET
def getSkip(page: int, pageSize: int):
    """Translate a 1-based page number into a zero-based row offset."""
    return pageSize * (page - 1)
# SAVE A NEW TUTORIA
async def saveNewTutorial(newTutorial: tutorial_schema.TutorialRequest, db: _orm.Session):
    """Persist a new tutorial after checking the author is a superuser.

    Raises:
        LookupError: no user matches ``newTutorial.added_by``.
        PermissionError: the user exists but is not a superuser.
    """
    user = await tutorial_model.getUser(newTutorial.added_by, db)
    # Guard clauses replace the nested if/else; `is None` instead of
    # the original `!= None` (PEP 8).
    if user is None:
        raise LookupError('Could not find user')
    if not user.is_superuser:
        raise PermissionError("Lacks super admin access")
    return await tutorial_model.store(newTutorial, db)
# PAGINATION LOGIC
def getPagination(page: int, pageSize: int, count: int, endpoint: str):
    """Build next/previous page links for a paginated listing.

    Args:
        page: current 1-based page number.
        pageSize: rows per page.
        count: total number of rows.
        endpoint: base URL path for the links.

    Returns:
        dict with keys 'next' and 'previous'; each is a URL string, or
        None when there is no page in that direction.
    """
    # The previous-link rule was duplicated identically in both branches of
    # the original if/else; compute it once.
    if page > 1:
        previous = f"{endpoint}?page={page-1}&page_size={pageSize}"
    else:
        previous = None
    # skip + pageSize == page * pageSize, i.e. rows consumed through this page.
    has_next = page * pageSize < count
    next_link = f"{endpoint}?page={page+1}&page_size={pageSize}" if has_next else None
    return {'next': next_link, 'previous': previous}
# RUN QUERY
async def runFetchQuery(
    category: str, title: str, page_size: int, skip: int,
    rowCount: int, db: _orm.Session = _fastapi.Depends(get_db)):
    """Dispatch the tutorial listing query based on which filters are set.

    NOTE(review): the (category set, title unset) combination falls through
    every branch and implicitly returns None, so a category-only filter
    yields no rows even though the /tutorials endpoint advertises it --
    confirm whether a category-only query is missing from tutorial_model.
    """
    if category is None and title is None:
        return await tutorial_model.fetchAll(db, skip, page_size)
    if category is None and title != None:
        return await tutorial_model.getBytitle(title, db, skip, page_size)
    if category != None and title != None:
        return await tutorial_model.getByCatByTitle(category, title, db, skip, page_size)
# BUID CATEGORY LIST
def buildCategoryList(tutorials: "List[tutorial_model.Tutorial]"):
    """Return the category of each tutorial, in input order.

    The annotation is quoted (lazy) so it is no longer evaluated at
    definition time; the function's runtime behavior is unchanged.
    """
    # List comprehension replaces the manual append loop.
    return [tutorial.category for tutorial in tutorials]
# GENERIC STRUCTURED RESPONSE BUILDER
def buildSuccessRes(resData, isList: bool, pageSize: int, totalCount: int, pagination: dict):
    """Wrap query results in the appropriate response schema.

    List results get the paginated TutorialListRes envelope; single results
    get the plain TutorialSingleRes envelope.
    """
    if not isList:
        return tutorial_schema.TutorialSingleRes(data=resData)
    return tutorial_schema.TutorialListRes(
        data=resData, total=totalCount, count=pageSize, pagination=pagination)
| 4,983 | 0 | 286 |
34e86699e57eaae95439d97b066c5ec26b48ef0f | 1,194 | py | Python | tyc_area.py | skymap/tyc | ba5175c7fa1e45e9c0ea4881d7211ae2ad6d0cc9 | [
"MIT"
] | null | null | null | tyc_area.py | skymap/tyc | ba5175c7fa1e45e9c0ea4881d7211ae2ad6d0cc9 | [
"MIT"
] | null | null | null | tyc_area.py | skymap/tyc | ba5175c7fa1e45e9c0ea4881d7211ae2ad6d0cc9 | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw
w = 7200
h = 3600
i = 1
j = 0
k = 0
c6 = [(255, 153, 204), (255, 255, 153), (153, 255, 153), (153, 204, 255)]
black = (0, 0, 0)
white = (255, 255, 255)
im = Image.new('RGB', (w, h), white)
draw = ImageDraw.Draw(im)
r = open('index_src.dat', 'r')
src = r.read()
r.close
src = src.replace(' ', '')
rows = src.split('\n')
for row in rows:
d = row.split('|')
if len(d) == 6:
if len(d[2]) > 0 and len(d[3]) > 0 and len(d[4]) > 0 and len(d[5]) > 0:
ra0 = int((360 - float(d[2])) * 20)
ra1 = int((360 - float(d[3])) * 20)
ra2 = int((ra0 + ra1) / 2)
de0 = int((90 - float(d[4])) * 20)
de1 = int((90 - float(d[5])) * 20)
de2 = int((de0 + de1) / 2)
if i > 4662:
if de2 < k - 3:
j = 0 if j > 2 else (j + 1)
else:
if de2 > k + 3:
j = 0 if j > 2 else (j + 1)
draw.rectangle((ra0, de0, ra1, de1), fill=c6[j], outline=black)
draw.text((ra2, de2), str(i), fill=black)
k = de2
i = i + 1
im.save('tyc_area.png') | 32.27027 | 80 | 0.421273 | from PIL import Image, ImageDraw
# Render the catalogue area index as a 7200x3600 equirectangular map:
# 20 px per degree, RA mapped (mirrored) to x, Dec (+90..-90) to y.
w = 7200
h = 3600
i = 1          # running area number drawn inside each rectangle
j = 0          # index into the 4-colour palette below
k = 0          # y-centre (Dec) of the previously drawn area
c6 = [(255, 153, 204), (255, 255, 153), (153, 255, 153), (153, 204, 255)]
black = (0, 0, 0)
white = (255, 255, 255)
im = Image.new('RGB', (w, h), white)
draw = ImageDraw.Draw(im)
# BUG FIX: the original did `r.close` (attribute access, never called),
# leaking the file handle; the with-block closes it deterministically.
with open('index_src.dat', 'r') as r:
    src = r.read()
src = src.replace(' ', '')
rows = src.split('\n')
for row in rows:
    d = row.split('|')
    # Only rows with exactly 6 pipe-separated fields and non-empty bounds
    # in fields 2..5 describe a drawable area.
    if len(d) == 6:
        if len(d[2]) > 0 and len(d[3]) > 0 and len(d[4]) > 0 and len(d[5]) > 0:
            # Degree bounds -> pixel coordinates (20 px/degree); the RA axis
            # is mirrored via (360 - RA).
            ra0 = int((360 - float(d[2])) * 20)
            ra1 = int((360 - float(d[3])) * 20)
            ra2 = int((ra0 + ra1) / 2)
            de0 = int((90 - float(d[4])) * 20)
            de1 = int((90 - float(d[5])) * 20)
            de2 = int((de0 + de1) / 2)
            # Cycle the fill colour (j: 0->1->2->3->0) whenever the areas
            # step to a new Dec band; the comparison direction flips after
            # area 4662 (presumably where the scan direction reverses --
            # TODO confirm against the catalogue layout).
            if i > 4662:
                if de2 < k - 3:
                    j = 0 if j > 2 else (j + 1)
            else:
                if de2 > k + 3:
                    j = 0 if j > 2 else (j + 1)
            draw.rectangle((ra0, de0, ra1, de1), fill=c6[j], outline=black)
            draw.text((ra2, de2), str(i), fill=black)
            k = de2
            i = i + 1
im.save('tyc_area.png') | 0 | 0 | 0 |
d5eb4b2832c31aa9dcedcb5cb4cc3df3b93096ce | 832 | py | Python | rst/sphinx_ext/wordcount.py | beebus/CPUfactory3 | 3331e63f2996542a6bc34d455e3b8d17d733d520 | [
"BSD-3-Clause"
] | 1 | 2019-04-16T14:14:49.000Z | 2019-04-16T14:14:49.000Z | rst/sphinx_ext/wordcount.py | rblack42/GitBuilder | 1944ef2d6d3c6eaee44ffb663e6c20477046dd9c | [
"BSD-3-Clause"
] | null | null | null | rst/sphinx_ext/wordcount.py | rblack42/GitBuilder | 1944ef2d6d3c6eaee44ffb663e6c20477046dd9c | [
"BSD-3-Clause"
] | 3 | 2019-04-16T00:18:39.000Z | 2019-08-30T18:20:46.000Z |
from docutils import nodes
from docutils.parsers.rst import Directive
| 24.470588 | 83 | 0.66226 | def setup(app):
app.add_node(wordcount)
app.add_directive('wordcount',WordcountDirective)
app.connect('doctree-resolved', process_wordcount_nodes)
from docutils import nodes
class wordcount(nodes.General, nodes.Element):
    """Placeholder doctree node marking where the word count is inserted."""
    pass
from docutils.parsers.rst import Directive
class WordcountDirective(Directive):
    """reST directive ``.. wordcount::`` that emits a placeholder node."""
    # Content under the directive is allowed, though run() does not read it.
    has_content = True
    def run(self):
        # The bare placeholder is replaced at doctree-resolved time.
        return [wordcount('')]
def process_wordcount_nodes(app, doctree, fromdocname):
    """Replace each `wordcount` placeholder with a read-time summary.

    Counts the words in every paragraph of the resolved doctree, then
    rewrites every wordcount node as "Read time: M minutes (W words)",
    assuming a reading speed of 250 words per minute.
    """
    count = 0
    for node in doctree.traverse(nodes.paragraph):
        text = node.astext()
        # BUG FIX: the original computed `tt.split(" ")`, discarded the
        # result, and added len(tt) -- i.e. it counted *characters*, not
        # words.  str.split() with no argument counts whitespace-separated
        # words.  (The unused `env = app.builder.env` local was removed.)
        count += len(text.split())
    for node in doctree.traverse(wordcount):
        para = nodes.paragraph()
        para += nodes.Text("Read time: %d minutes (%d words)" % (count/250, count))
        node.replace_self([para])
| 551 | 99 | 91 |
bcf1b868a4a8b6c143f44a61213975d0cf88b939 | 1,839 | py | Python | mail_coupons.py | Coiling-Dragon/Email_scraper | 22dfa6726164bb0f318a883bf48c0198646b5142 | [
"Apache-2.0"
] | null | null | null | mail_coupons.py | Coiling-Dragon/Email_scraper | 22dfa6726164bb0f318a883bf48c0198646b5142 | [
"Apache-2.0"
] | null | null | null | mail_coupons.py | Coiling-Dragon/Email_scraper | 22dfa6726164bb0f318a883bf48c0198646b5142 | [
"Apache-2.0"
] | null | null | null | import imaplib
import email
from email.header import decode_header
import pandas as pd
mails_df = pd.read_csv('mails.csv')
csv_values = mails_df.values
c = 1
with open('mails_with_coupons.csv', 'w', encoding='utf-8') as f:
out_row = 'EMAIL,PASS,COUPONS\n'
f.write(out_row)
for each in csv_values:
user = each[0]
password = each[1]
# Mailbox interaction
M = imaplib.IMAP4_SSL('imap.mail.com')
M.login(user, password)
M.select('Inbox')
typ, data = M.search(None, 'ALL')
ids = data[0]
id_list = ids.split()
# get the most recent email id
latest_email_id = int(id_list[-1])
COUPON_AMOUNT = '15'
# iterate through 15 messages in descending order starting with latest_email_id
# the '-1' dictates reverse looping order
for i in range(latest_email_id, latest_email_id - 15, -1):
typ, data = M.fetch(str(i), '(RFC822)')
for response_part in data:
if isinstance(response_part, tuple):
mail_bytes = response_part[1].decode('UTF-8')
msg = email.message_from_string(mail_bytes)
varSubject = msg['subject']
varFrom = msg['from']
varSubject = decode_header(varSubject)[0][0]
if f'$coupon' in str(varSubject):
print(f'{c} Mail: {user}\n Subject: {varSubject}\n')
with open('mails_with_coupons.csv', 'a') as f:
row = f'{user},{password},"${COUPON_AMOUNT}"\n'
f.write(row)
c += 1
data_frame = pd.read_csv('mails_with_coupons.csv', encoding="utf-8").drop_duplicates(
subset='EMAIL', keep='first', inplace=False)
data_frame.to_csv('mails_with_coupons.csv', index=False, encoding="utf-8")
| 32.263158 | 86 | 0.588363 | import imaplib
# Scan each account listed in mails.csv over IMAP and record accounts whose
# recent mail mentions "$coupon" into mails_with_coupons.csv.
import email
from email.header import decode_header
import pandas as pd
# NOTE(review): account credentials are read from and re-written to plain
# text CSV files.
mails_df = pd.read_csv('mails.csv')
csv_values = mails_df.values
c = 1
# Write the output header once, truncating any previous run's file.
with open('mails_with_coupons.csv', 'w', encoding='utf-8') as f:
    out_row = 'EMAIL,PASS,COUPONS\n'
    f.write(out_row)
for each in csv_values:
    user = each[0]
    password = each[1]
    # Mailbox interaction
    M = imaplib.IMAP4_SSL('imap.mail.com')
    M.login(user, password)
    M.select('Inbox')
    typ, data = M.search(None, 'ALL')
    ids = data[0]
    id_list = ids.split()
    # get the most recent email id
    latest_email_id = int(id_list[-1])
    COUPON_AMOUNT = '15'
    # iterate through 15 messages in descending order starting with latest_email_id
    # the '-1' dictates reverse looping order
    # NOTE(review): assumes every mailbox holds at least 15 messages;
    # fetching ids below 1 on smaller mailboxes would fail -- confirm.
    for i in range(latest_email_id, latest_email_id - 15, -1):
        typ, data = M.fetch(str(i), '(RFC822)')
        for response_part in data:
            if isinstance(response_part, tuple):
                mail_bytes = response_part[1].decode('UTF-8')
                msg = email.message_from_string(mail_bytes)
                varSubject = msg['subject']
                varFrom = msg['from']
                varSubject = decode_header(varSubject)[0][0]
                # The f-prefix is inert (no placeholders): this matches the
                # literal text "$coupon" in the decoded subject.
                if f'$coupon' in str(varSubject):
                    print(f'{c} Mail: {user}\n Subject: {varSubject}\n')
                    with open('mails_with_coupons.csv', 'a') as f:
                        row = f'{user},{password},"${COUPON_AMOUNT}"\n'
                        f.write(row)
                    c += 1
# Collapse duplicate accounts, keeping each address's first coupon row.
data_frame = pd.read_csv('mails_with_coupons.csv', encoding="utf-8").drop_duplicates(
    subset='EMAIL', keep='first', inplace=False)
data_frame.to_csv('mails_with_coupons.csv', index=False, encoding="utf-8")
| 0 | 0 | 0 |
5da81609aa9b605da4cf8c44c6852fa7d339ba4e | 3,883 | py | Python | Air_PnP/serializers.py | BranBer/AirPnP | d6ee9cad5d57af222192eaa02fb3267ed2749975 | [
"MIT"
] | 1 | 2020-02-12T05:07:21.000Z | 2020-02-12T05:07:21.000Z | Air_PnP/serializers.py | BranBer/AirPnP | d6ee9cad5d57af222192eaa02fb3267ed2749975 | [
"MIT"
] | null | null | null | Air_PnP/serializers.py | BranBer/AirPnP | d6ee9cad5d57af222192eaa02fb3267ed2749975 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth import authenticate
from rest_framework import exceptions
from Air_PnP.models import * | 39.222222 | 184 | 0.666753 | from rest_framework import serializers
from django.contrib.auth import authenticate
from rest_framework import exceptions
from Air_PnP.models import *
class Payment_Info_Serializer(serializers.ModelSerializer):
    """Serializes Payment_Info rows, exposing the 'email' and 'user' fields."""

    class Meta:
        model = Payment_Info
        fields = ['email', 'user']
class Invoices_Serializer(serializers.ModelSerializer):
    """Serializes Invoices rows: amount, date, and the payer/payee relations."""

    class Meta:
        model = Invoices
        fields = ['amount', 'date', 'payer', 'payee']
class Ratings_Serializer(serializers.ModelSerializer):
    """Serializes a bathroom rating: author, target bathroom, score and text."""

    class Meta:
        model = Ratings
        fields = ['user', 'bathroom_id', 'score', 'title', 'description']
class TimesAvailable_Serializer(serializers.ModelSerializer):
    """Serializes an availability window (weekday plus open/close times)."""

    class Meta:
        model = TimesAvailable
        fields = ['week_day', 'open_time', 'close_time', 'users']
class DayAvailable_Serializer(serializers.ModelSerializer):
    """Serializes a DayAvailable row with its nested time window (read-only)."""

    # Nested, read-only representation of the related TimesAvailable record.
    timesAvailable = TimesAvailable_Serializer(many = False, read_only = True)

    class Meta:
        model = DayAvailable
        fields = ['bathroom_id', 'week_day', 'timesAvailable']
class PricingOption_Serializer(serializers.ModelSerializer):
    """Serializes a pricing option: bathroom, billing period, and amount."""

    class Meta:
        model = PricingOption
        fields = ['bathroom_id', 'timePeriod', 'amount']
class BathroomPost_Serializer(serializers.ModelSerializer):
    """Write-oriented serializer for Bathrooms exposing every model field."""

    class Meta:
        model = Bathrooms
        # '__all__' keeps this in sync with the model automatically;
        # contrast with the curated field list in Bathrooms_Serializer.
        fields = '__all__'
class Bathrooms_Serializer(serializers.ModelSerializer):
    """Read serializer for Bathrooms with nested ratings and one level of
    related-object expansion (depth = 1)."""

    # Nested, read-only list of this bathroom's ratings.
    ratings = Ratings_Serializer(many = True, read_only = True)
    #pricing = PricingOption_Serializer(many = True, read_only = True)
    #avgRatings = Bathroom_Score_Avg_Serializer(many = True, read_only = True)

    class Meta:
        model = Bathrooms
        depth = 1
        fields = ['id', 'address_id', 'has_shower', 'has_bath', 'has_sink', 'has_fem_products', 'num_of_toilets', 'has_toilet_paper', 'ratings', 'image1', 'image2', 'image3', 'image4']
class Scheduler_Serializer(serializers.ModelSerializer):
    """Serializes a booking: which user reserved which bathroom on which date."""

    class Meta:
        model = Scheduler
        fields = ['user', 'bathroom', 'date']
class Addresses_Serializer(serializers.ModelSerializer):
    """Serializes an address with its nested bathrooms (read-only)."""

    # Nested, read-only list of bathrooms registered at this address.
    bathrooms = Bathrooms_Serializer(many = True, read_only = True)

    class Meta:
        model = Addresses
        fields = ['id', 'user', 'address_line1', 'address_line2', 'city', 'state', 'zip', 'longitude', 'latitude', 'bathrooms']
class Users_Serializer(serializers.ModelSerializer):
    """Public profile serializer for Users, including the user's addresses."""

    # Nested, read-only list of addresses owned by the user.
    addresses = Addresses_Serializer(many = True, read_only = True)

    class Meta:
        model = Users
        fields = ['username', 'personalEmail', 'user_image', 'first_name', 'last_name', 'home_address', 'addresses']
class Registration_Serializer(serializers.ModelSerializer):
    """Sign-up serializer for Users.

    Accepts the profile fields plus a password and its confirmation
    (``password2``); ``save`` creates the Users row with a properly
    hashed password.
    """

    # Confirmation field; write-only so it is never echoed back.
    password2 = serializers.CharField(style={'input_type': 'password'}, write_only=True)

    class Meta:
        model = Users
        fields = ['username', 'personalEmail', 'first_name', 'user_image', 'last_name', 'home_address', 'home_city', 'home_state', 'home_zip', 'password', 'password2']
        extra_kwargs = {
            'password': {'write_only': True}
        }

    def save(self):
        """Validate the password confirmation, then create and return the user.

        Raises:
            serializers.ValidationError: if the two passwords differ.
        """
        password = self.validated_data['password']
        password2 = self.validated_data['password2']
        # Fix: check the confirmation BEFORE constructing the model instance,
        # so invalid requests do no model work (the original built the Users
        # object — including the int() conversion of home_zip — first).
        if password != password2:
            raise serializers.ValidationError({'password': 'Passwords must match.'})
        user = Users(
            username=self.validated_data['username'],
            personalEmail=self.validated_data['personalEmail'],
            first_name=self.validated_data['first_name'],
            last_name=self.validated_data['last_name'],
            home_address=self.validated_data['home_address'],
            home_state=self.validated_data['home_state'],
            home_city=self.validated_data['home_city'],
            home_zip=int(self.validated_data['home_zip']),
        )
        # NOTE(review): 'user_image' is accepted by the serializer but not
        # copied onto the instance here — confirm whether that is intended.
        user.set_password(password)  # store a hash, never the raw password
        user.save()
        return user
2d32035a799216dbf33bc6c52112336dd2e122a5 | 303 | py | Python | utils/date.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | utils/date.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | utils/date.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | [
"MIT"
] | null | null | null | import datetime
import re
"""
Input is supposed to be in the format yyyy-mm-dd
if it is not then return false
""" | 23.307692 | 83 | 0.613861 | import datetime
import re
"""
Input is supposed to be in the format yyyy-mm-dd
if it is not then return false
"""
def check_format(input):
    """Return True if *input* is a date object or a string of the exact
    shape yyyy-mm-dd; otherwise return False.

    Note: this checks shape only, not calendar validity ("9999-99-99"
    passes), matching the original contract.
    """
    # `input` shadows the builtin, but the name is kept for backward
    # compatibility with keyword callers.
    if isinstance(input, datetime.date):
        return True
    # Fix: re.match anchors only at the start, so strings with trailing
    # garbage such as "2020-01-01abc" were accepted; fullmatch requires
    # the whole string to match.
    return bool(re.fullmatch(r"[0-9]{4}-[0-9]{2}-[0-9]{2}", input))
ac6774b8af1cc5195c08280dd7ecab780a9d29fe | 2,322 | py | Python | qs_backend/qs_backend/dal/tests/userstockpref_tests/test_unit_select_userstockpref.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | 2 | 2016-12-28T18:08:23.000Z | 2017-04-01T18:09:55.000Z | qs_backend/qs_backend/dal/tests/userstockpref_tests/test_unit_select_userstockpref.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | null | null | null | qs_backend/qs_backend/dal/tests/userstockpref_tests/test_unit_select_userstockpref.py | Praneesh/quickstocks | 2ad4f985b7cc11721209cc81c36937e9cf25fb60 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# __author__ = "Praneesh Kataru"
# __credits__ = []
# __version__ = "0.1.1"
# __maintainer__ = "Praneesh Kataru"
# __email__ = "pranuvitmsse05@gmail.com"
# __status__ = "Prototype"
import unittest
from pprint import pprint
from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL
class UserStockPrefSelectTests(unittest.TestCase):
"""
Unit Test Case for Validating ``UserStockPrefs`` table Selects
""" | 38.065574 | 114 | 0.717054 | #! /usr/bin/env python3
# __author__ = "Praneesh Kataru"
# __credits__ = []
# __version__ = "0.1.1"
# __maintainer__ = "Praneesh Kataru"
# __email__ = "pranuvitmsse05@gmail.com"
# __status__ = "Prototype"
import unittest
from pprint import pprint
from qs_backend.dal.user_stock_pref_dal import UserStockPrefDAL
class UserStockPrefSelectTests(unittest.TestCase):
    """
    Unit Test Case for Validating ``UserStockPrefs`` table Selects
    """

    def setUp(self):
        # Fixture keys used by the lookup tests below.
        self.user_id = 'praneesh'
        self.stock_key = 'HON'
        # NOTE(review): insert_key is never used by these select tests —
        # presumably left over from a companion insert test case.
        self.insert_key = 'YHOO'

    def tearDown(self):
        # No per-test cleanup needed; selects do not modify the table.
        pass

    def test_db_user_stock_pref_select_all(self):
        """All user preferences can be fetched and at least one row exists."""
        user_stock_pref_obj = UserStockPrefDAL()
        print("Executing Test Case : Fetch All User Preferences")
        # DAL methods return (exception_or_None, result) pairs.
        select_exception, all_user_prefs = user_stock_pref_obj.get_all_user_preferences()
        pprint(all_user_prefs)
        self.assertEqual(select_exception, None)
        self.assertGreater(all_user_prefs.__len__(), 0, msg='Some records received')

    def test_db_get_all_stock_preferences(self):
        """All stock keys can be fetched and at least one exists."""
        user_stock_pref_obj = UserStockPrefDAL()
        print("Executing Test Case : Fetch All Users Stock Keys")
        select_exception, all_stock_keys = user_stock_pref_obj.get_all_stock_preferences()
        pprint(all_stock_keys)
        self.assertEqual(select_exception, None)
        self.assertGreater(all_stock_keys.__len__(), 0, msg='Some records received')

    def test_db_fetch_stock_pref_by_user_id(self):
        """Preferences looked up by user id belong to that user."""
        user_stock_pref_obj = UserStockPrefDAL()
        print("Executing Test Case : Fetch Stock Preferences By User ID: {}".format(self.user_id))
        select_exception, stock_prefs = user_stock_pref_obj.get_stock_preferences_by_user_id(user_id=self.user_id)
        pprint(stock_prefs)
        self.assertEqual(stock_prefs['userID'], self.user_id)
        self.assertEqual(select_exception, None)

    def test_db_fetch_users_by_stock_key(self):
        """Users can be looked up by a stock key without error."""
        user_stock_pref_obj = UserStockPrefDAL()
        print("Executing Test Case : Fetch Stock Preferences By Stock Key : {}".format(self.stock_key))
        select_exception, user_prefs = user_stock_pref_obj.get_users_by_stock_preference(stock_key=self.stock_key)
        pprint(user_prefs)
        self.assertEqual(select_exception, None)
98c4e07e264eb503bc873002e824f308fbf7f99b | 20,838 | py | Python | deprecated_code/workflows/mpi/imaging-pipelines-mpi.py | ska-telescope/algorithm-reference-library | 1b2c8d6079249202864abf8c60cdea40f0f123cb | [
"Apache-2.0"
] | 22 | 2016-12-14T11:20:07.000Z | 2021-08-13T15:23:41.000Z | deprecated_code/workflows/mpi/imaging-pipelines-mpi.py | ska-telescope/algorithm-reference-library | 1b2c8d6079249202864abf8c60cdea40f0f123cb | [
"Apache-2.0"
] | 30 | 2017-06-27T09:15:38.000Z | 2020-09-11T18:16:37.000Z | deprecated_code/workflows/mpi/imaging-pipelines-mpi.py | SKA-ScienceDataProcessor/algorithm-reference-library | 1b2c8d6079249202864abf8c60cdea40f0f123cb | [
"Apache-2.0"
] | 20 | 2017-07-02T03:45:49.000Z | 2019-12-11T17:19:01.000Z |
# coding: utf-8
# # Pipeline processing using serial workflows.
#
# This is a serial unrolled version of the predict step
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
from mpi4py import MPI
results_dir = './results/mpi'
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
#from matplotlib import pyplot as plt
from data_models.polarisation import PolarisationFrame
from wrappers.serial.calibration.calibration import solve_gaintable
from wrappers.serial.calibration.operations import apply_gaintable
from wrappers.serial.calibration.calibration_control import create_calibration_controls
from wrappers.serial.visibility.base import create_blockvisibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.deconvolution import deconvolve_cube
#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image
from wrappers.serial.image.operations import export_image_to_fits, qa_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow, deconvolve_list_serial_workflow
from workflows.serial.simulation.simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow
from workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow
from workflows.mpi.pipelines.pipeline_mpi import continuum_imaging_list_mpi_workflow, ical_list_mpi_workflow
from workflows.mpi.imaging.imaging_mpi import predict_list_mpi_workflow, invert_list_mpi_workflow, deconvolve_list_mpi_workflow
import time
import pprint
# Uncomment this line if profiling with extrae/paraver toolset
#import pyextrae.mpi as pyextrae
pp = pprint.PrettyPrinter()
import logging
import argparse
log = init_logging()
parser = argparse.ArgumentParser(description='Imaging pipelines in MPI.')
parser.add_argument('--nfreqwin', type=int, nargs='?', default=7,
help='The number of frequency windows')
args = parser.parse_args()
# In[2]:
# ################### Rationale of data distribution: ################### #
# In this version all data resides at rank0 and needs to be distributed #
# at every function when needed. #
# TODO: Pass on the comm parameter!
# vis_list -> rank0 #
# vis_slices, npixel, cellsize -> rep #
# gleam_model -> rank0 (later rep) #
# predicted_vis -> rank0 (later dist) #
# model_list ->rank0 (later rep)
# disrty_list psf_list -> rank0 (later dist)
# continuum_imaging_list -> rank0
# ####################################################################### #
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# Set up MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All over parameters are determined from this number.
# In[3]:
#nfreqwin=7
nfreqwin=args.nfreqwin
ntimes=5
rmax=300.0
frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
#ntimes=11
#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
# One channel per frequency window, all with equal bandwidth.
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
# Hour angles sampled symmetrically about transit.
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
log.info("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d" %(size,nfreqwin,ntimes))
print("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d"
      %(size,nfreqwin,ntimes),flush=True)
log.debug('%d: frequency len %d frequency list:'%(rank,len(frequency)))
#print(frequency,flush=True)
# Only rank 0 simulates the visibilities (see the data-distribution
# rationale above); other ranks carry an empty list.
if rank == 0:
    bvis_list=simulate_list_serial_workflow('LOWBD2',
                                            frequency=frequency,
                                            channel_bandwidth=channel_bandwidth,
                                            times=times,
                                            phasecentre=phasecentre,
                                            order='frequency',
                                            rmax=rmax, format='blockvis')
else:
    bvis_list=list()
vis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]
log.debug('%d: %d elements in vis_list' % (rank,len(vis_list)))
#log.handlers[0].flush()
#print(vis_list
# In[4]:
# Rank 0 derives the gridding parameters from the lowest/highest
# frequency visibilities, then broadcasts them to every rank.
if rank == 0:
    wprojection_planes=1
    advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
                                 wprojection_planes=wprojection_planes)
    advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
                                  wprojection_planes=wprojection_planes)
    vis_slices = advice_low['vis_slices']
    npixel=advice_high['npixels2']
    cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
else:
    vis_slices = 0
    npixel = 0
    cellsize = 0
(vis_slices,npixel,cellsize) = comm.bcast((vis_slices,npixel,cellsize),root=0)
log.debug('%d: After advice: vis_slices %d npixel %d cellsize %d' % (rank,vis_slices, npixel, cellsize))
# Now make a graph to fill with a model drawn from GLEAM
# In[ ]:
log.info('%d:About to make GLEAM model' %(rank))
sub_frequency = numpy.array_split(frequency, size)
sub_channel_bandwidth = numpy.array_split(channel_bandwidth,size)
sub_gleam_model = [create_low_test_image_from_gleam(npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(sub_frequency[rank])]
# NOTE: We could do an allgather here to avoid bcast of
# each freqw during predict, it would safe time but use more space
gleam_model=comm.gather(sub_gleam_model,root=0)
if rank==0:
gleam_model=numpy.concatenate(gleam_model)
else:
gleam_model=list()
# In[ ]:
original_predict=False
if original_predict:
if rank==0:
log.info('About to run predict to get predicted visibility')
predicted_vislist = predict_list_serial_workflow(vis_list, gleam_model,
context='wstack', vis_slices=vis_slices)
else:
log.info('%d: About to run predict to get predicted visibility'%(rank))
print('%d: About to run predict to get predicted visibility'%(rank),flush=True)
start=time.time()
# All procs call the function but only rank=0 gets the predicted_vislist
predicted_vislist = predict_list_mpi_workflow(vis_list, gleam_model,
context='wstack',
vis_slices=vis_slices)
end=time.time()
#log.info('About to run corrupt to get corrupted visibility')
#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)
# Get the LSM. This is currently blank.
# In[ ]:
### I need to scatter vis_list cause worker don't have it
## frequency and channel_bandwidth are replicated and they have already
## been split
log.info('%d: predict finished in %f seconds'%(rank,end-start))
print('%d: predict finished in %f seconds'%(rank,end-start),flush=True)
log.info('%d: About create image from visibility'%(rank))
sub_vis_list= numpy.array_split(vis_list, size)
sub_vis_list=comm.scatter(sub_vis_list,root=0)
sub_model_list = [create_image_from_visibility(sub_vis_list[f],
npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(sub_frequency[rank])]
# NOTE: We could do allgather here, if enough memory space
model_list=comm.gather(sub_model_list,root=0)
if rank==0:
#model_list=numpy.concatenate(model_list)
model_list=concat_tuples(model_list)
# In[ ]:
else:
model_list=list()
log.debug('%d model_list len %d' %(rank,len(model_list)))
log.info('%d: About to start invert'%(rank))
print('%d: About to start invert'%(rank),flush=True)
start=time.time()
original_invert=False
if original_invert:
if rank==0:
dirty_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
else:
dirty_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
# Create and execute graphs to make the dirty image and PSF
# In[ ]:
end=time.time()
log.info('%d: invert finished'%(rank))
print('%d: invert finished in %f seconds'%(rank,end-start),flush=True)
if rank==0:
#print("sumwts",flush=True)
#print(dirty_list[0][1])
log.info('After invert to get dirty image')
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('After invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[ ]:
log.info('%d: About to run deconvolve'%(rank))
print('%d: About to run deconvolve'%(rank),flush=True)
start=time.time()
original_deconv=False
if original_deconv:
if rank==0:
deconvolved,_ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
else:
print(" types of dirty list",type(dirty_list)," and psf_list",type(psf_list))
deconvolved = deconvolve_list_mpi_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
end=time.time()
log.info('%d: After deconvolve'%(rank))
print('%d: deconvolve finished in %f sec'%(rank,end-start))
# In[ ]:
log.info('%d: About to run continuum imaging'%(rank))
print('%d: About to run continuum imaging'%(rank),flush=True)
start=time.time()
original_continuumimaging=False
if original_continuumimaging:
if rank==0:
continuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
else:
continuum_imaging_list = continuum_imaging_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[ ]:
end=time.time()
log.info('%d: continuum imaging finished'%(rank))
print('%d: continuum imaging finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
deconvolved = continuum_imaging_list[0][0]
residual = continuum_imaging_list[1][0]
restored = continuum_imaging_list[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
# cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
%(results_dir))
if rank==0:
for chan in range(nfreqwin):
residual = continuum_imaging_list[1][chan]
#show_image(residual[0], title='Channel %d' % chan, cm='Greys',
# vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
controls = create_calibration_controls()
controls['T']['first_selfcal'] = 1
controls['G']['first_selfcal'] = 3
controls['B']['first_selfcal'] = 4
controls['T']['timeslice'] = 'auto'
controls['G']['timeslice'] = 'auto'
controls['B']['timeslice'] = 1e5
pp.pprint(controls)
# In[ ]:
# TODO I change this to predicted_vislist to make it deterministic, I hope it makes
# sense :)
#ical_list = ical_list_serial_workflow(corrupted_vislist,
log.info('%d: About to run ical'%(rank))
print('%d: About to run ical'%(rank),flush=True)
start=time.time()
original_ical=False
if original_ical:
if rank==0:
ical_list = ical_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
else:
ical_list = ical_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
# In[ ]:
end=time.time()
log.info('%d: ical finished '%(rank))
print('%d: ical finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
log.info('After ical')
deconvolved = ical_list[0][0]
residual = ical_list[1][0]
restored = ical_list[2][0]
#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(deconvolved, context='Clean image'))
#plt.show()
#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0,
# vmin=-0.1)
print(qa_image(restored, context='Restored clean image'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits'
%(results_dir))
| 39.61597 | 160 | 0.584029 |
# coding: utf-8
# # Pipeline processing using serial workflows.
#
# This is a serial unrolled version of the predict step
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
from mpi4py import MPI
results_dir = './results/mpi'
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
#from matplotlib import pyplot as plt
from data_models.polarisation import PolarisationFrame
from wrappers.serial.calibration.calibration import solve_gaintable
from wrappers.serial.calibration.operations import apply_gaintable
from wrappers.serial.calibration.calibration_control import create_calibration_controls
from wrappers.serial.visibility.base import create_blockvisibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.deconvolution import deconvolve_cube
#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image
from wrappers.serial.image.operations import export_image_to_fits, qa_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow, deconvolve_list_serial_workflow
from workflows.serial.simulation.simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow
from workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow
from workflows.mpi.pipelines.pipeline_mpi import continuum_imaging_list_mpi_workflow, ical_list_mpi_workflow
from workflows.mpi.imaging.imaging_mpi import predict_list_mpi_workflow, invert_list_mpi_workflow, deconvolve_list_mpi_workflow
import time
import pprint
# Uncomment this line if profiling with extrae/paraver toolset
#import pyextrae.mpi as pyextrae
pp = pprint.PrettyPrinter()
import logging
import argparse
def init_logging():
    """Configure root logging to a file under results_dir and return this
    module's logger.

    NOTE(review): level is ERROR, so the script's many log.info/log.debug
    calls are suppressed; lower the level when diagnosing runs.
    """
    log = logging.getLogger(__name__)
    logging.basicConfig(filename='%s/imaging-predict.log' % results_dir,
                        filemode='w',  # 'a' for append
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.ERROR)  # DEBUG INFO WARNING ERROR CRITICAL
    # an attempt to flush the output and output in stdout
    # don't know how to flush to a file ...
    #h = logging.StreamHandler(sys.stdout)
    #h.flush = sys.stdout.flush
    #log.addHandler(h)
    return log
log = init_logging()
parser = argparse.ArgumentParser(description='Imaging pipelines in MPI.')
parser.add_argument('--nfreqwin', type=int, nargs='?', default=7,
help='The number of frequency windows')
args = parser.parse_args()
# In[2]:
# ################### Rationale of data distribution: ################### #
# In this version all data resides at rank0 and needs to be distributed #
# at every function when needed. #
# TODO: Pass on the comm parameter!
# vis_list -> rank0 #
# vis_slices, npixel, cellsize -> rep #
# gleam_model -> rank0 (later rep) #
# predicted_vis -> rank0 (later dist) #
# model_list ->rank0 (later rep)
# disrty_list psf_list -> rank0 (later dist)
# continuum_imaging_list -> rank0
# ####################################################################### #
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# Set up MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All over parameters are determined from this number.
# In[3]:
#nfreqwin=7
nfreqwin=args.nfreqwin
ntimes=5
rmax=300.0
frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
#ntimes=11
#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
log.info("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d" %(size,nfreqwin,ntimes))
print("Starting imaging-pipeline with %d MPI processes nfreqwin %d ntimes %d"
%(size,nfreqwin,ntimes),flush=True)
log.debug('%d: frequency len %d frequency list:'%(rank,len(frequency)))
#print(frequency,flush=True)
if rank == 0:
bvis_list=simulate_list_serial_workflow('LOWBD2',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre,
order='frequency',
rmax=rmax, format='blockvis')
else:
bvis_list=list()
vis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]
log.debug('%d: %d elements in vis_list' % (rank,len(vis_list)))
#log.handlers[0].flush()
#print(vis_list
# In[4]:
if rank == 0:
wprojection_planes=1
advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel=advice_high['npixels2']
cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
else:
vis_slices = 0
npixel = 0
cellsize = 0
(vis_slices,npixel,cellsize) = comm.bcast((vis_slices,npixel,cellsize),root=0)
log.debug('%d: After advice: vis_slices %d npixel %d cellsize %d' % (rank,vis_slices, npixel, cellsize))
# Now make a graph to fill with a model drawn from GLEAM
# In[ ]:
log.info('%d:About to make GLEAM model' %(rank))
sub_frequency = numpy.array_split(frequency, size)
sub_channel_bandwidth = numpy.array_split(channel_bandwidth,size)
sub_gleam_model = [create_low_test_image_from_gleam(npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(sub_frequency[rank])]
# NOTE: We could do an allgather here to avoid bcast of
# each freqw during predict, it would safe time but use more space
gleam_model=comm.gather(sub_gleam_model,root=0)
if rank==0:
gleam_model=numpy.concatenate(gleam_model)
else:
gleam_model=list()
# In[ ]:
original_predict=False
if original_predict:
if rank==0:
log.info('About to run predict to get predicted visibility')
predicted_vislist = predict_list_serial_workflow(vis_list, gleam_model,
context='wstack', vis_slices=vis_slices)
else:
log.info('%d: About to run predict to get predicted visibility'%(rank))
print('%d: About to run predict to get predicted visibility'%(rank),flush=True)
start=time.time()
# All procs call the function but only rank=0 gets the predicted_vislist
predicted_vislist = predict_list_mpi_workflow(vis_list, gleam_model,
context='wstack',
vis_slices=vis_slices)
end=time.time()
#log.info('About to run corrupt to get corrupted visibility')
#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)
# Get the LSM. This is currently blank.
# In[ ]:
### I need to scatter vis_list cause worker don't have it
## frequency and channel_bandwidth are replicated and they have already
## been split
log.info('%d: predict finished in %f seconds'%(rank,end-start))
print('%d: predict finished in %f seconds'%(rank,end-start),flush=True)
log.info('%d: About create image from visibility'%(rank))
sub_vis_list= numpy.array_split(vis_list, size)
sub_vis_list=comm.scatter(sub_vis_list,root=0)
sub_model_list = [create_image_from_visibility(sub_vis_list[f],
npixel=npixel,
frequency=[sub_frequency[rank][f]],
channel_bandwidth=[sub_channel_bandwidth[rank][f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(sub_frequency[rank])]
def concat_tuples(list_of_tuples):
    """Concatenate a list of same-typed sequences (lists or tuples) into one.

    Fixes two defects in the original:
    * a single-element input returned the *outer* list unflattened
      (``[[1, 2]]`` -> ``[[1, 2]]``), which corrupted ``model_list`` when
      the job ran on a single MPI rank;
    * ``result_list += l`` extended the caller's first sub-list in place.

    Returns an empty list for empty input; the result type otherwise
    follows the first element (list in, list out; tuple in, tuple out).
    """
    if not list_of_tuples:
        return []
    # Shallow-copy the first element so callers' data is never mutated and
    # a one-element input still yields the flattened sequence.
    result = list_of_tuples[0][:]
    for seq in list_of_tuples[1:]:
        result += seq
    return result
# NOTE: We could do allgather here, if enough memory space
model_list=comm.gather(sub_model_list,root=0)
if rank==0:
#model_list=numpy.concatenate(model_list)
model_list=concat_tuples(model_list)
# In[ ]:
else:
model_list=list()
log.debug('%d model_list len %d' %(rank,len(model_list)))
log.info('%d: About to start invert'%(rank))
print('%d: About to start invert'%(rank),flush=True)
start=time.time()
original_invert=False
if original_invert:
if rank==0:
dirty_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
else:
dirty_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_mpi_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
# Create and execute graphs to make the dirty image and PSF
# In[ ]:
end=time.time()
log.info('%d: invert finished'%(rank))
print('%d: invert finished in %f seconds'%(rank,end-start),flush=True)
if rank==0:
#print("sumwts",flush=True)
#print(dirty_list[0][1])
log.info('After invert to get dirty image')
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('After invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[ ]:
log.info('%d: About to run deconvolve'%(rank))
print('%d: About to run deconvolve'%(rank),flush=True)
start=time.time()
original_deconv=False
if original_deconv:
if rank==0:
deconvolved,_ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
else:
print(" types of dirty list",type(dirty_list)," and psf_list",type(psf_list))
deconvolved = deconvolve_list_mpi_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
end=time.time()
log.info('%d: After deconvolve'%(rank))
print('%d: deconvolve finished in %f sec'%(rank,end-start))
# In[ ]:
log.info('%d: About to run continuum imaging'%(rank))
print('%d: About to run continuum imaging'%(rank),flush=True)
start=time.time()
original_continuumimaging=False
if original_continuumimaging:
if rank==0:
continuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
else:
continuum_imaging_list = continuum_imaging_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[ ]:
end=time.time()
log.info('%d: continuum imaging finished'%(rank))
print('%d: continuum imaging finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
deconvolved = continuum_imaging_list[0][0]
residual = continuum_imaging_list[1][0]
restored = continuum_imaging_list[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
# cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
%(results_dir))
if rank==0:
for chan in range(nfreqwin):
residual = continuum_imaging_list[1][chan]
#show_image(residual[0], title='Channel %d' % chan, cm='Greys',
# vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
controls = create_calibration_controls()
controls['T']['first_selfcal'] = 1
controls['G']['first_selfcal'] = 3
controls['B']['first_selfcal'] = 4
controls['T']['timeslice'] = 'auto'
controls['G']['timeslice'] = 'auto'
controls['B']['timeslice'] = 1e5
pp.pprint(controls)
# In[ ]:
# TODO I change this to predicted_vislist to make it deterministic, I hope it makes
# sense :)
#ical_list = ical_list_serial_workflow(corrupted_vislist,
log.info('%d: About to run ical'%(rank))
print('%d: About to run ical'%(rank),flush=True)
start=time.time()
original_ical=False
if original_ical:
if rank==0:
ical_list = ical_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
else:
ical_list = ical_list_mpi_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
# In[ ]:
end=time.time()
log.info('%d: ical finished '%(rank))
print('%d: ical finished in %f sec.'%(rank,end-start),flush=True)
if rank==0:
log.info('After ical')
deconvolved = ical_list[0][0]
residual = ical_list[1][0]
restored = ical_list[2][0]
#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(deconvolved, context='Clean image'))
#plt.show()
#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0,
# vmin=-0.1)
print(qa_image(restored, context='Restored clean image'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits'
%(results_dir))
| 810 | 0 | 46 |
9994d71687ae7eefbb738f2c4ac54a5aaed706d5 | 1,133 | py | Python | logconfig.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | 1 | 2020-08-03T16:24:06.000Z | 2020-08-03T16:24:06.000Z | logconfig.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | null | null | null | logconfig.py | neitzke/stokes-numerics | 8845aef7598ca245d095cca690bf48568758a8c9 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import logging
# to change log level globally, use eg logconfig.loglevel(logging.WARN)
# to change level for an individual module, eg logconfig.loglevel(logging.DEBUG, "framedata")
| 37.766667 | 130 | 0.700794 | from __future__ import absolute_import
import logging
def logconfig(console=True, filename='/tmp/harmonic.log'):
    """Configure the root logger with an optional file and/or console handler.

    Sets the root level to INFO.  A FileHandler is attached unless
    *filename* is falsy; a StreamHandler (stderr) is attached when
    *console* is true.  Repeated calls add additional handlers.
    """
    fmt = logging.Formatter(
        '[%(asctime)s] [%(processName)s] [%(name)s] [%(levelname)s] %(message)s',
        datefmt='%m-%d %H:%M')
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.INFO)
    if filename:
        # File handler, skipped entirely when filename is empty/None.
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(fmt)
        root_logger.addHandler(file_handler)
    if console:
        # Console handler writing to sys.stderr.
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(fmt)
        root_logger.addHandler(stream_handler)
# to change log level globally, use eg logconfig.loglevel(logging.WARN)
# to change level for an individual module, eg logconfig.loglevel(logging.DEBUG, "framedata")
def loglevel(level, name = ""):
    """Set the log level for logger *name* (the root logger by default)."""
    target = logging.getLogger(name)
    target.setLevel(level)
| 867 | 0 | 45 |
51dc1e8064d7b160d9261339c4c0f6d7a7c1fc43 | 851 | py | Python | src/genie/libs/parser/iosxe/tests/ShowRunPolicyMap/cli/equal/golden_output4_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowRunPolicyMap/cli/equal/golden_output4_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowRunPolicyMap/cli/equal/golden_output4_expected.py | nielsvanhooy/genieparser | 9a1955749697a6777ca614f0af4d5f3a2c254ccd | [
"Apache-2.0"
] | null | null | null | expected_output = {
'policy_map': {
'policy-cbwfq-1': {'class': {
'class-gold': {'bandwidth_percent': '40',
'random_detect': ['dscp-based', 'ecn']},
'class-silver': {'bandwidth_percent': '20',
'random_detect': ['dscp-based', 'ecn']},
'class-bronze': {'bandwidth_percent': '10',
'random_detect': ['dscp-based', 'ecn']},
'management-traffic': {'bandwidth_percent': '1',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'af21'}},
'class-default': {'bandwidth_percent': '29',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'default'}}}}
}
} | 50.058824 | 74 | 0.415981 | expected_output = {
'policy_map': {
'policy-cbwfq-1': {'class': {
'class-gold': {'bandwidth_percent': '40',
'random_detect': ['dscp-based', 'ecn']},
'class-silver': {'bandwidth_percent': '20',
'random_detect': ['dscp-based', 'ecn']},
'class-bronze': {'bandwidth_percent': '10',
'random_detect': ['dscp-based', 'ecn']},
'management-traffic': {'bandwidth_percent': '1',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'af21'}},
'class-default': {'bandwidth_percent': '29',
'random_detect': ['dscp-based', 'ecn'],
'qos_set': {'dscp': 'default'}}}}
}
} | 0 | 0 | 0 |
148aa55044dfa89d96bc3711e30b04ff24054650 | 251 | py | Python | hw4/4.5.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | null | null | null | hw4/4.5.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 40 | 2021-12-30T15:57:10.000Z | 2022-01-26T16:44:24.000Z | hw4/4.5.py | ArtemNikolaev/gb-hw | b82403e39dc1ca530dc438309fc98ba89ce4337b | [
"Unlicense"
] | 1 | 2022-03-12T19:17:26.000Z | 2022-03-12T19:17:26.000Z | # https://github.com/ArtemNikolaev/gb-hw/issues/26
from functools import reduce
print(multiply())
| 17.928571 | 54 | 0.673307 | # https://github.com/ArtemNikolaev/gb-hw/issues/26
from functools import reduce
def even_100_1000():
return (i for i in range(100, 1001) if i % 2 == 0)
def multiply():
return reduce(lambda a, b: a * b, even_100_1000())
print(multiply())
| 103 | 0 | 46 |
a8dce581227cd102c0aaac141e019905d5830e17 | 1,460 | py | Python | Array/Merge_Without_Extra_Space.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Array/Merge_Without_Extra_Space.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Array/Merge_Without_Extra_Space.py | Pratik110/Python | 033ade0dff3dc3bee91eefb53d7eb87a4f4f003d | [
"MIT"
] | null | null | null | Link = "https://practice.geeksforgeeks.org/problems/merge-two-sorted-arrays-1587115620/1"
Description = "Given two sorted arrays arr1[] and arr2[] of sizes n and m in non-decreasing order." \
"Merge them in sorted order without using any extra space. Modify arr1 so that it" \
"contains the first N elements and modify arr2 so that it contains the last M elements."
Examples = "Input: " \
"n = 4, arr1[] = [1 3 5 7] " \
"m = 5, arr2[] = [0 2 6 8 9]" \
"Output: " \
"arr1[] = [0 1 2 3]" \
"arr2[] = [5 6 7 8 9]" \
"Explanation: After merging the two non-decreasing arrays, we get, 0 1 2 3 5 6 7 8 9."
arr1 = [1,36,39,105,146,154,168,170,204,206,217,219,225,227,272,282,293,300,312,323,328,328,334,335,359,370,383,392,395,396,403,413,422,437,443,448,462,463,465,479,492,496]
arr2 = [7,22,30,36,38,38,39,41,42,48,49,83,85,102,107,116,119,124,127,130,140,142,145,149,159,163,165,174,174,191,205,212,224,230,242,246,254,257,258,265,279,289,306,307,309,317,324,334,341,343,351,360,369,371,377,387,391,394,430,431,432,440,443,445,447,455,467,478]
n = 42
m = 68
# Approach 1
print(Solution1().merge(arr1, arr2, n, m))
| 50.344828 | 266 | 0.595205 | Link = "https://practice.geeksforgeeks.org/problems/merge-two-sorted-arrays-1587115620/1"
Description = "Given two sorted arrays arr1[] and arr2[] of sizes n and m in non-decreasing order." \
"Merge them in sorted order without using any extra space. Modify arr1 so that it" \
"contains the first N elements and modify arr2 so that it contains the last M elements."
Examples = "Input: " \
"n = 4, arr1[] = [1 3 5 7] " \
"m = 5, arr2[] = [0 2 6 8 9]" \
"Output: " \
"arr1[] = [0 1 2 3]" \
"arr2[] = [5 6 7 8 9]" \
"Explanation: After merging the two non-decreasing arrays, we get, 0 1 2 3 5 6 7 8 9."
arr1 = [1,36,39,105,146,154,168,170,204,206,217,219,225,227,272,282,293,300,312,323,328,328,334,335,359,370,383,392,395,396,403,413,422,437,443,448,462,463,465,479,492,496]
arr2 = [7,22,30,36,38,38,39,41,42,48,49,83,85,102,107,116,119,124,127,130,140,142,145,149,159,163,165,174,174,191,205,212,224,230,242,246,254,257,258,265,279,289,306,307,309,317,324,334,341,343,351,360,369,371,377,387,391,394,430,431,432,440,443,445,447,455,467,478]
n = 42
m = 68
# Approach 1
class Solution1:
    """Merge two sorted arrays without an auxiliary array.

    Walks arr1; whenever an element exceeds the current minimum of arr2
    the two are swapped and arr2 is re-sorted, so both arrays stay sorted
    throughout the pass.
    """
    def merge(self, arr1, arr2, n, m):
        """Merge arr2 into arr1 and return arr1 extended by arr2.

        After the pass arr1[:n] holds the n smallest values and arr2 the
        m largest; arr1 is then extended in place with arr2 and returned.
        """
        for idx in range(n):
            if arr1[idx] > arr2[0]:
                # Swap the offending element with arr2's minimum, then
                # restore arr2's ordering.
                arr1[idx], arr2[0] = arr2[0], arr1[idx]
                arr2.sort()
        arr1 += arr2
        return arr1
print(Solution1().merge(arr1, arr2, n, m))
| 212 | -5 | 48 |
7ef419f19fd5dd3e6ac9cd2c444b90048270e5da | 825 | py | Python | aws_embedded_metrics/logger/metric.py | 02strich/aws-embedded-metrics-python | 4c5718e580dfb12529673a6b54810adfbfd54242 | [
"Apache-2.0"
] | 130 | 2019-11-18T19:39:55.000Z | 2022-03-28T09:56:25.000Z | aws_embedded_metrics/logger/metric.py | 02strich/aws-embedded-metrics-python | 4c5718e580dfb12529673a6b54810adfbfd54242 | [
"Apache-2.0"
] | 50 | 2019-11-18T22:22:33.000Z | 2022-02-06T11:03:31.000Z | aws_embedded_metrics/logger/metric.py | 02strich/aws-embedded-metrics-python | 4c5718e580dfb12529673a6b54810adfbfd54242 | [
"Apache-2.0"
] | 23 | 2019-11-19T00:06:41.000Z | 2021-12-09T02:01:40.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates.
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 37.5 | 74 | 0.718788 | # Copyright 2019 Amazon.com, Inc. or its affiliates.
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Metric(object):
def __init__(self, value: float, unit: str = None):
self.values = [value]
self.unit = unit or "None"
def add_value(self, value: float) -> None:
self.values.append(value)
| 150 | 0 | 76 |
21a25efbf47ea04a49cca1dc67f47104e5dcf1e5 | 747 | py | Python | setup.py | SPRCSY/tomago-sdk-py | cbeef646d28f29a3dc7c1d48be1e882383948647 | [
"Apache-2.0"
] | null | null | null | setup.py | SPRCSY/tomago-sdk-py | cbeef646d28f29a3dc7c1d48be1e882383948647 | [
"Apache-2.0"
] | null | null | null | setup.py | SPRCSY/tomago-sdk-py | cbeef646d28f29a3dc7c1d48be1e882383948647 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import io
setup(
name='tomago-sdk-py',
version='1.5.1',
description="Python SDKs for Blockchain.",
long_description=io.open('README.md', encoding='utf-8').read(),
url='https://github.com/arxanchain/tomago-sdk-py/',
download_url='https://github.com/arxanchain/tomago-sdk-py/',
packages=find_packages(),
platforms='any',
install_requires=[
"mock==2.0.0",
"requests==2.18.4",
"six==1.11.0",
"urllib3==1.22",
"py-common==v1.5.1"
],
dependency_links=[
"git+git://github.com/arxanchain/py-common.git@v1.5.1#egg=py-common-v1.5.1"
],
include_package_data=True,
zip_safe=False,
)
| 26.678571 | 83 | 0.617135 | #!/usr/bin/env python
from setuptools import setup, find_packages
import io
# Packaging metadata for the tomago-sdk-py distribution.
setup(
    name='tomago-sdk-py',
    version='1.5.1',
    description="Python SDKs for Blockchain.",
    # Long description is read from README.md (UTF-8) at build time.
    long_description=io.open('README.md', encoding='utf-8').read(),
    url='https://github.com/arxanchain/tomago-sdk-py/',
    download_url='https://github.com/arxanchain/tomago-sdk-py/',
    packages=find_packages(),
    platforms='any',
    install_requires=[
        "mock==2.0.0",
        "requests==2.18.4",
        "six==1.11.0",
        "urllib3==1.22",
        "py-common==v1.5.1"
    ],
    # py-common is fetched straight from GitHub rather than PyPI.
    dependency_links=[
        "git+git://github.com/arxanchain/py-common.git@v1.5.1#egg=py-common-v1.5.1"
    ],
    include_package_data=True,
    zip_safe=False,
)
| 0 | 0 | 0 |
a3e15ba80a2347e1590fe3cb20ee82eff35f9dda | 10,519 | py | Python | tools/archive.py | ganadist/r8 | 850b5a4725954b677103a3a575239d0f330c0b0f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tools/archive.py | ganadist/r8 | 850b5a4725954b677103a3a575239d0f330c0b0f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tools/archive.py | ganadist/r8 | 850b5a4725954b677103a3a575239d0f330c0b0f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2017, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import create_maven_release
import gradle
import jdk
import optparse
import os
try:
import resource
except ImportError:
# Not a Unix system. Do what Gandalf tells you not to.
pass
import shutil
import subprocess
import sys
import toolhelper
import utils
import zipfile
from build_r8lib import build_r8lib
ARCHIVE_BUCKET = 'r8-releases'
if __name__ == '__main__':
sys.exit(Main())
| 40.148855 | 93 | 0.681719 | #!/usr/bin/env python
# Copyright (c) 2017, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import create_maven_release
import gradle
import jdk
import optparse
import os
try:
import resource
except ImportError:
# Not a Unix system. Do what Gandalf tells you not to.
pass
import shutil
import subprocess
import sys
import toolhelper
import utils
import zipfile
from build_r8lib import build_r8lib
ARCHIVE_BUCKET = 'r8-releases'
def ParseOptions():
  """Parse command-line flags; returns optparse's (options, args) pair."""
  parser = optparse.OptionParser()
  parser.add_option(
      '--dry-run', '--dry_run',
      help='Build only, no upload.',
      default=False, action='store_true')
  parser.add_option(
      '--dry-run-output', '--dry_run_output',
      help='Output directory for \'build only, no upload\'.',
      type="string", action="store")
  return parser.parse_args()
def GetToolVersion(jar_path):
  """Return the first line printed by `java -jar <jar_path> --version`."""
  # TODO(mkroghj) This would not work for r8-lib, maybe use utils.getR8Version.
  cmd = [jdk.GetJavaExecutable(), '-jar', jar_path, '--version']
  first_line = subprocess.check_output(cmd).splitlines()[0]
  return first_line.strip()
def GetVersion():
  """Return the version string shared by the built R8 and D8 jars."""
  r8_version = GetToolVersion(utils.R8_JAR)
  d8_version = GetToolVersion(utils.D8_JAR)
  # The version printed is "D8 vVERSION_NUMBER" and "R8 vVERSION_NUMBER"
  # Sanity check that versions match.
  if d8_version.split()[1] != r8_version.split()[1]:
    raise Exception(
        'Version mismatch: \n%s\n%s' % (d8_version, r8_version))
  return d8_version.split()[1]
def GetGitBranches():
  # %d prints the ref names decorating HEAD, e.g. ' (HEAD, origin/master)'.
  return subprocess.check_output(['git', 'show', '-s', '--pretty=%d', 'HEAD'])
def GetGitHash():
  # Full SHA-1 of HEAD, with the trailing newline stripped.
  return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
def IsMaster(version):
  """Return True if HEAD should be archived as a master (hash-based) build.

  Raises if the remote-branch decoration of HEAD contradicts *version*
  (i.e. a 'master' version off origin/master, or vice versa).
  """
  branches = subprocess.check_output(['git', 'branch', '-r', '--contains',
                                      'HEAD'])
  # CL runs from gerrit does not have a branch, we always treat them as master
  # commits to archive these to the hash based location
  if len(branches) == 0:
    return True
  if not version == 'master':
    # Sanity check, we don't want to archive on top of release builds EVER
    # Note that even though we branch, we never push the bots to build the same
    # commit as master on a branch since we always change the version to
    # not be just 'master' (or we crash here :-)).
    if 'origin/master' in branches:
      raise Exception('We are seeing origin/master in a commit that '
                      'don\'t have \'master\' as version')
    return False
  if not 'origin/master' in branches:
    raise Exception('We are not seeing origin/master '
                    'in a commit that have \'master\' as version')
  return True
def GetStorageDestination(storage_prefix,
                          version_or_path,
                          file_name,
                          is_master):
  """Return the full storage path of file_name for the given version."""
  # We archive master commits under raw/master instead of directly under raw
  version_dir = GetVersionDestination(storage_prefix,
                                      version_or_path,
                                      is_master)
  return '%s/%s' % (version_dir, file_name)
def GetVersionDestination(storage_prefix, version_or_path, is_master):
  """Return the (prefixed) directory holding this version's artifacts."""
  archive_dir = 'raw/master' if is_master else 'raw'
  return '%s%s/%s/%s' % (storage_prefix, ARCHIVE_BUCKET,
                         archive_dir, version_or_path)
def GetUploadDestination(version_or_path, file_name, is_master):
  """Return the gs:// upload destination for file_name."""
  return GetStorageDestination('gs://', version_or_path, file_name, is_master)
def GetUrl(version_or_path, file_name, is_master):
  """Return the public https URL at which file_name will be served."""
  return GetStorageDestination('https://storage.googleapis.com/',
                               version_or_path, file_name, is_master)
def GetMavenUrl(is_master):
  """Return the public https URL of the maven repository root."""
  return GetVersionDestination('https://storage.googleapis.com/', '', is_master)
def SetRLimitToMax():
  """Raise this process's soft open-file limit to its hard limit."""
  _, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
  resource.setrlimit(resource.RLIMIT_NOFILE, (hard_limit, hard_limit))
def PrintResourceInfo():
  """Print the current open-file soft and hard limits."""
  soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
  print('INFO: Open files soft limit: %s' % soft_limit)
  print('INFO: Open files hard limit: %s' % hard_limit)
def Main():
(options, args) = ParseOptions()
if not utils.is_bot() and not options.dry_run:
raise Exception('You are not a bot, don\'t archive builds. '
+ 'Use --dry-run to test locally')
if (options.dry_run_output and
(not os.path.exists(options.dry_run_output) or
not os.path.isdir(options.dry_run_output))):
raise Exception(options.dry_run_output
+ ' does not exist or is not a directory')
if utils.is_bot() and not utils.IsWindows():
SetRLimitToMax()
if not utils.IsWindows():
PrintResourceInfo()
# Create maven release which uses a build that exclude dependencies.
create_maven_release.generate_r8_maven_zip(utils.MAVEN_ZIP)
create_maven_release.generate_r8_maven_zip(
utils.MAVEN_ZIP_LIB, is_r8lib=True)
# Generate and copy a full build without dependencies.
gradle.RunGradleExcludeDeps([utils.R8, utils.R8_SRC])
shutil.copyfile(utils.R8_JAR, utils.R8_FULL_EXCLUDE_DEPS_JAR)
# Ensure all archived artifacts has been built before archiving.
# The target tasks postfixed by 'lib' depend on the actual target task so
# building it invokes the original task first.
# The '-Pno_internal' flag is important because we generate the lib based on uses in tests.
gradle.RunGradle([
utils.R8,
utils.D8,
utils.R8LIB,
utils.R8LIB_NO_DEPS,
utils.LIBRARY_DESUGAR_CONVERSIONS,
'-Pno_internal'
])
# Create maven release of the desuage_jdk_libs configuration. This require
# an r8.jar with dependencies to have been built.
create_maven_release.generate_desugar_configuration_maven_zip(
utils.DESUGAR_CONFIGURATION_MAVEN_ZIP)
version = GetVersion()
is_master = IsMaster(version)
if is_master:
# On master we use the git hash to archive with
print 'On master, using git hash for archiving'
version = GetGitHash()
destination = GetVersionDestination('gs://', version, is_master)
if utils.cloud_storage_exists(destination) and not options.dry_run:
raise Exception('Target archive directory %s already exists' % destination)
with utils.TempDir() as temp:
# Create pom file for our maven repository that we build for testing.
default_pom_file = os.path.join(temp, 'r8.pom')
create_maven_release.write_default_r8_pom_file(default_pom_file, version)
version_file = os.path.join(temp, 'r8-version.properties')
with open(version_file,'w') as version_writer:
version_writer.write('version.sha=' + GetGitHash() + '\n')
if not os.environ.get('SWARMING_BOT_ID') and not options.dry_run:
raise Exception('Environment variable SWARMING_BOT_ID not set')
releaser = \
("<local developer build>" if options.dry_run
else 'releaser=go/r8bot ('
+ os.environ.get('SWARMING_BOT_ID') + ')\n')
version_writer.write(releaser)
version_writer.write('version-file.version.code=1\n')
for file in [
utils.D8_JAR,
utils.R8_JAR,
utils.R8LIB_JAR,
utils.R8LIB_JAR + '.map',
utils.R8_SRC_JAR,
utils.R8_FULL_EXCLUDE_DEPS_JAR,
utils.R8LIB_EXCLUDE_DEPS_JAR,
utils.R8LIB_EXCLUDE_DEPS_JAR + '.map',
utils.MAVEN_ZIP,
utils.MAVEN_ZIP_LIB,
utils.DESUGAR_CONFIGURATION,
utils.DESUGAR_CONFIGURATION_MAVEN_ZIP,
utils.GENERATED_LICENSE,
]:
file_name = os.path.basename(file)
tagged_jar = os.path.join(temp, file_name)
shutil.copyfile(file, tagged_jar)
if file_name.endswith('.jar') and not file_name.endswith('-src.jar'):
with zipfile.ZipFile(tagged_jar, 'a') as zip:
zip.write(version_file, os.path.basename(version_file))
destination = GetUploadDestination(version, file_name, is_master)
print('Uploading %s to %s' % (tagged_jar, destination))
if options.dry_run:
if options.dry_run_output:
dry_run_destination = os.path.join(options.dry_run_output, file_name)
print('Dry run, not actually uploading. Copying to '
+ dry_run_destination)
shutil.copyfile(tagged_jar, dry_run_destination)
else:
print('Dry run, not actually uploading')
else:
utils.upload_file_to_cloud_storage(tagged_jar, destination)
print('File available at: %s' % GetUrl(version, file_name, is_master))
# Upload R8 to a maven compatible location.
if file == utils.R8_JAR:
maven_dst = GetUploadDestination(utils.get_maven_path('r8', version),
'r8-%s.jar' % version, is_master)
maven_pom_dst = GetUploadDestination(
utils.get_maven_path('r8', version),
'r8-%s.pom' % version, is_master)
if options.dry_run:
print('Dry run, not actually creating maven repo for R8')
else:
utils.upload_file_to_cloud_storage(tagged_jar, maven_dst)
utils.upload_file_to_cloud_storage(default_pom_file, maven_pom_dst)
print('Maven repo root available at: %s' % GetMavenUrl(is_master))
# Upload desugar_jdk_libs configuration to a maven compatible location.
if file == utils.DESUGAR_CONFIGURATION:
jar_name = 'desugar_jdk_libs_configuration-%s.jar' % version
maven_dst = GetUploadDestination(
utils.get_maven_path('desugar_jdk_libs_configuration', version),
jar_name, is_master)
with utils.TempDir() as tmp_dir:
desugar_jdk_libs_configuration_jar = os.path.join(tmp_dir, jar_name)
create_maven_release.generate_jar_with_desugar_configuration(
utils.DESUGAR_CONFIGURATION,
utils.LIBRARY_DESUGAR_CONVERSIONS_ZIP,
desugar_jdk_libs_configuration_jar)
if options.dry_run:
print('Dry run, not actually creating maven repo for '
+ 'desugar configuration.')
if options.dry_run_output:
shutil.copyfile(
desugar_jdk_libs_configuration_jar,
os.path.join(options.dry_run_output, jar_name))
else:
utils.upload_file_to_cloud_storage(
desugar_jdk_libs_configuration_jar, maven_dst)
print('Maven repo root available at: %s' % GetMavenUrl(is_master))
if __name__ == '__main__':
sys.exit(Main())
| 9,574 | 0 | 322 |
ef8d700f6c034510e4036e7f091c9874efee9ad5 | 1,320 | py | Python | model_engine/inference.py | OsmosisAI/ModelEngine | e1024a2e384791b623d58383aa93ca566e191092 | [
"Apache-2.0"
] | null | null | null | model_engine/inference.py | OsmosisAI/ModelEngine | e1024a2e384791b623d58383aa93ca566e191092 | [
"Apache-2.0"
] | null | null | null | model_engine/inference.py | OsmosisAI/ModelEngine | e1024a2e384791b623d58383aa93ca566e191092 | [
"Apache-2.0"
] | 1 | 2021-09-22T15:29:30.000Z | 2021-09-22T15:29:30.000Z | # Copyright 2020 BlueChasm LLC dba OsmosisAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import dataclass
from typing import List
@dataclass
@dataclass
@dataclass
| 25.384615 | 83 | 0.702273 | # Copyright 2020 BlueChasm LLC dba OsmosisAI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import dataclass
from typing import List
@dataclass
class Result(object):
    """Base type for a single inference result."""
    pass
@dataclass
class Box(object):
    """Axis-aligned bounding box (min/max corner coordinates)."""
    __slots__ = ['x_min', 'y_min', 'x_max', 'y_max']
    x_min: int
    y_min: int
    x_max: int
    y_max: int
@dataclass
class BoundingBoxResult(object):
    """A detected object: class label, confidence score and its box."""
    __slots__ = ['label', 'confidence', 'coordinates']
    label: str
    confidence: float
    coordinates: Box
class BaseModelInference(object):
    """Interface that concrete model-inference back-ends implement."""
    def inference(self, image) -> List[Result]:
        """Run the model on *image* and return its results (stub)."""
        pass
    @contextmanager
    def inference_session(self, labels_path, weights_path) -> 'BaseModelInference':
        """Yield an inference session as a context manager.

        Fix: a @contextmanager-decorated function must be a generator
        that yields exactly once; the original `return self` made
        `with ... as s:` fail at runtime.
        """
        try:
            yield self
        finally:
            pass
| 164 | 328 | 89 |
c4106d42cdb85aecc71332cd737e27926f2f5e32 | 3,814 | py | Python | SpinorBECSimulation/CoherentStateChebyshev/chebyshev_functions.py | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | SpinorBECSimulation/CoherentStateChebyshev/chebyshev_functions.py | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | SpinorBECSimulation/CoherentStateChebyshev/chebyshev_functions.py | ZachGlassman/SpinorBECSimulation | 8821a8bc150eda2aa36ce6b39ff178a3ddc99df1 | [
"MIT"
] | null | null | null | import numpy as np
import math
def alpha_help(a, n):
    """Return ln(a**n / sqrt(n!)) for a coherent-state amplitude.

    Parameters
    ----------
    a : complex
        Coherent-state amplitude.
    n : int
        Fock-state index.

    Returns
    -------
    ln : complex
        n*ln(a) - ln(n!)/2; for n >= 300 ln(n!) is evaluated with
        Stirling's approximation to avoid enormous factorials.
    """
    if a.real == 0 and a.imag == 0:
        # a == 0: the coefficient is 1 for n == 0 (ln = 0) and 0 otherwise
        # (ln = -inf, represented by a huge negative real part).
        # Note: `np.complex` was removed in NumPy 1.24; it was merely an
        # alias for the builtin `complex`, which is used here instead.
        return complex(0, 0) if n == 0 else complex(-1e200, 0)
    if n >= 300:
        # Stirling: ln(n!) ~ n*ln(n) - n + ln(2*pi*n)/2
        log_fact = n * np.log(n) - n + np.log(2 * np.pi * n) / 2
        return n * np.log(a) - log_fact / 2
    return n * np.log(a) - math.log(math.factorial(int(n))) / 2
def find_norm(z):
"""find complex norm^2 of a vector of complex numbers"""
k = 0
for i in z:
k = k + (i * np.conj(i)).real
return k
def setup_scaled_H(q, c, n, m, nmaxfinal):
"""function to setup tridigonal Hamiltonian if first, return d,e
Parameters
----------
q : float
quadratic zeeman shift
c : float
c_2n, spinor interaction rate
n : int
number of particles
m : int
magnetization
nmaxfinal : int
deprecated
Returns
-------
e_min : float
minimum eigenvalue
e_max : float
maximum eigenvalue
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
first_n0 : int
n-|m| % 2
"""
first_n0 = np.mod(n-abs(m), 2)
n0 = np.mod((n-abs(m)), 2)
nmax = int((n-abs(m)-n0)/2 + 1)
#create arrays
e = np.zeros(int(nmax)-1)
d = np.zeros(int(nmax))
c_local = c/n
#matrix elements of hamiltonian
nm = (n - n0 - m)/2
npp = (n - n0 + m)/2
for j in range(int(nmax)):
d[j] = (n-n0)*(q+0.5*c_local*(2*n0-1))
if j < (nmax-1):
e[j] = c_local*np.sqrt(nm*npp*(n0+2)*(n0+1))
nm = nm - 1
npp = npp - 1
n0 = n0 + 2
#estimate based on Gershgorin's circle theorem
radius = abs(e[0])
e_min = d[0] - radius
e_max = d[0] + radius
for j in range(2,int(nmax)-1):
radius = abs(e[j-2]) + abs(e[j-1])
e_min = min(e_min, d[j-1] - radius)
e_max = max(e_max, d[j-1] + radius)
radius = abs(e[nmax-2])
e_min = min(e_min, d[nmax-1] - radius)
e_max = max(e_max, d[nmax-1] + radius)
radius = (e_max + e_min)/2
for i in range(int(nmax)):
d[i] = d[i] - radius
radius = 2/(e_max-e_min)
d = np.multiply(radius,d)
e = np.multiply(radius,e)
return e_min, e_max ,d ,e, first_n0
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w
def moments(wave, n):
"""mean and variance of wavefunction
Parameters
----------
wave : np.array(complex)
wavefunction
n : int
number of atoms
Returns
-------
x : float
mean of wavefunction
x2 : float
variance of wavefunction
"""
nn = np.arange(n, n+2*len(wave), 2)
Y = (wave * np.conj(wave)).real
x = np.sum(Y * nn)
x2 = np.sum(Y * nn * nn)
return x, x2 | 23.115152 | 68 | 0.520713 | import numpy as np
import math
def find_nmax(tot, m):
first = np.mod(tot - abs(m), 2)
return (tot - abs(m) - first) / 2 + 1
def alpha_help(a,n):
"""function to compute some approximations
Parameters
----------
a : complex
number
n : int
number
Returns
ln : complex
approximation
"""
if a.real == 0 and a.imag == 0:
if n == 0:
ln = np.complex(0,0)
else:
ln = np.complex(-1e200,0)
elif n >= 300:
ln = n *np.log(a)- (n*np.log(n)-n + np.log(2*np.pi*n)/2)/2
else:
ln = n * np.log(a) - math.log(math.factorial(int(n)))/2
return ln
def find_norm(z):
"""find complex norm^2 of a vector of complex numbers"""
k = 0
for i in z:
k = k + (i * np.conj(i)).real
return k
def setup_scaled_H(q, c, n, m, nmaxfinal):
"""function to setup tridigonal Hamiltonian if first, return d,e
Parameters
----------
q : float
quadratic zeeman shift
c : float
c_2n, spinor interaction rate
n : int
number of particles
m : int
magnetization
nmaxfinal : int
deprecated
Returns
-------
e_min : float
minimum eigenvalue
e_max : float
maximum eigenvalue
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
first_n0 : int
n-|m| % 2
"""
first_n0 = np.mod(n-abs(m), 2)
n0 = np.mod((n-abs(m)), 2)
nmax = int((n-abs(m)-n0)/2 + 1)
#create arrays
e = np.zeros(int(nmax)-1)
d = np.zeros(int(nmax))
c_local = c/n
#matrix elements of hamiltonian
nm = (n - n0 - m)/2
npp = (n - n0 + m)/2
for j in range(int(nmax)):
d[j] = (n-n0)*(q+0.5*c_local*(2*n0-1))
if j < (nmax-1):
e[j] = c_local*np.sqrt(nm*npp*(n0+2)*(n0+1))
nm = nm - 1
npp = npp - 1
n0 = n0 + 2
#estimate based on Gershgorin's circle theorem
radius = abs(e[0])
e_min = d[0] - radius
e_max = d[0] + radius
for j in range(2,int(nmax)-1):
radius = abs(e[j-2]) + abs(e[j-1])
e_min = min(e_min, d[j-1] - radius)
e_max = max(e_max, d[j-1] + radius)
radius = abs(e[nmax-2])
e_min = min(e_min, d[nmax-1] - radius)
e_max = max(e_max, d[nmax-1] + radius)
radius = (e_max + e_min)/2
for i in range(int(nmax)):
d[i] = d[i] - radius
radius = 2/(e_max-e_min)
d = np.multiply(radius,d)
e = np.multiply(radius,e)
return e_min, e_max ,d ,e, first_n0
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w
def moments(wave, n):
"""mean and variance of wavefunction
Parameters
----------
wave : np.array(complex)
wavefunction
n : int
number of atoms
Returns
-------
x : float
mean of wavefunction
x2 : float
variance of wavefunction
"""
nn = np.arange(n, n+2*len(wave), 2)
Y = (wave * np.conj(wave)).real
x = np.sum(Y * nn)
x2 = np.sum(Y * nn * nn)
return x, x2 | 79 | 0 | 23 |
4a045d6909aa525ec2c6af87ff3fdbf9a8b3aef3 | 4,996 | py | Python | tests/test_result.py | shawnbrown/squint | a9d326ff8edb2e2b740c4355fd953edd2c0cf114 | [
"Apache-2.0"
] | 3 | 2020-01-11T23:29:15.000Z | 2020-05-30T09:39:15.000Z | tests/test_result.py | shawnbrown/squint | a9d326ff8edb2e2b740c4355fd953edd2c0cf114 | [
"Apache-2.0"
] | 3 | 2019-10-15T13:23:31.000Z | 2020-07-23T22:13:11.000Z | tests/test_result.py | shawnbrown/squint | a9d326ff8edb2e2b740c4355fd953edd2c0cf114 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
from .common import unittest
from squint._compatibility.itertools import islice
from squint._utils import IterItems
from squint.result import Result
| 33.986395 | 87 | 0.571257 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
from .common import unittest
from squint._compatibility.itertools import islice
from squint._utils import IterItems
from squint.result import Result
class TestFetch(unittest.TestCase):
def test_nonmappings(self):
"""Check collection types (i.e., sized, iterable containers)."""
result = Result([1, 2, 3], list)
self.assertEqual(result.fetch(), [1, 2, 3])
result = Result([1, 2, 3], set)
self.assertEqual(result.fetch(), set([1, 2, 3]))
result = Result(iter([1, 2, 3]), set)
self.assertEqual(result.fetch(), set([1, 2, 3]))
def test_mappings(self):
result = Result({'a': 1, 'b': 2}, dict)
self.assertEqual(result.fetch(), {'a': 1, 'b': 2})
result = Result(IterItems([('a', 1), ('b', 2)]), dict)
self.assertEqual(result.fetch(), {'a': 1, 'b': 2})
result = Result(iter([iter(['a', 1]), iter(['b', 2])]), dict)
self.assertEqual(result.fetch(), {'a': 1, 'b': 2})
with self.assertRaises(ValueError):
result = Result([('a', 1), 'b'], dict)
result.fetch() # <- Fails late (on fetch, only)
def test_bad_evaltype(self):
regex = 'evaltype must be a type, found instance of list'
with self.assertRaisesRegex(TypeError, regex):
typed = Result([1, 2, 3], [1])
class TestSharedIterator(unittest.TestCase):
def test_shared_iterator(self):
"""Dict result should not assume independent source iterators."""
def generate_items(): # <- Generator that reads from single iterator.
shared = iter([
'x', 1, 1, 1, 2, 2, 2, 3, 3, 3,
'y', 4, 4, 4, 5, 5, 5, 6, 6, 6,
])
yield next(shared), Result(islice(shared, 9), evaltype=list)
yield next(shared), Result(islice(shared, 9), evaltype=list)
result = Result(generate_items(), evaltype=dict)
expected = {
'x': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'y': [4, 4, 4, 5, 5, 5, 6, 6, 6],
}
self.assertEqual(result.fetch(), expected)
class TestClosing(unittest.TestCase):
def setUp(self):
self.log = []
def closefunc():
self.log.append('closed')
self.closefunc = closefunc
def test_explicit_close(self):
result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
self.assertEqual(self.log, [], msg='verify log is empty')
result.close()
self.assertEqual(self.log, ['closed'], msg='see if close was called')
result.close() # <- Second call.
self.assertEqual(self.log, ['closed'], msg='multiple calls pass without error')
def test_stopiteration(self):
""""Should call close() method when iterable is exhausted."""
result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
self.assertEqual(self.log, [], msg='verify log is empty')
list(result) # Exhaust iterable.
self.assertEqual(self.log, ['closed'])
def test_delete(self):
""""Should call close() when object is garbage collected."""
result = Result(iter([1, 2, 3]), set, closefunc=self.closefunc)
self.assertEqual(self.log, [], msg='verify log is empty')
result.__del__() # Call __del__() directly.
self.assertEqual(self.log, ['closed'])
class TestGetCache(unittest.TestCase):
def test_tuple(self):
result = Result(iter([1, 2, 3, 4]), evaltype=tuple)
self.assertEqual(result._get_cache(), ())
result._next_cache()
self.assertEqual(result._get_cache(), (1,))
result._next_cache()
result._next_cache()
result._next_cache()
self.assertEqual(result._get_cache(), (1, 2, 3, 4))
with self.assertRaises(StopIteration):
result._next_cache()
self.assertEqual(result.fetch(), (1, 2, 3, 4))
def test_mapping(self):
iterable = IterItems([
('a', Result(iter([1, 2]), list)),
('b', Result(iter([3, 4]), list)),
('c', Result(iter([5, 6]), list)),
])
result = Result(iterable, dict)
self.assertEqual(result._get_cache(), {})
result._next_cache()
self.assertEqual(result._cache[0][0], 'a')
self.assertEqual(result._cache[0][1]._cache[0], 1)
self.assertEqual(result._get_cache(), {'a': [1]})
result._next_cache()
self.assertEqual(result._get_cache(), {'a': [1, 2]})
result._next_cache()
self.assertEqual(result._get_cache(), {'a': [1, 2], 'b': [3]})
result._next_cache()
result._next_cache()
result._next_cache()
self.assertEqual(result._get_cache(), {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
with self.assertRaises(StopIteration):
result._next_cache()
self.assertEqual(result.fetch(), {'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
| 3,005 | 1,619 | 145 |
74f169df072f3ba8e22ae7cfeb29fb01c317f0cb | 21,254 | py | Python | p2/multiagent/multiAgents.py | patrickmcgrory/cs188 | e5c5995ad187e8edfaf5446dd40e84497461ae90 | [
"BSD-3-Clause"
] | 22 | 2016-03-31T23:04:51.000Z | 2021-11-06T08:45:56.000Z | p2/multiagent/multiAgents.py | naderm/cs188 | e5c5995ad187e8edfaf5446dd40e84497461ae90 | [
"BSD-3-Clause"
] | null | null | null | p2/multiagent/multiAgents.py | naderm/cs188 | e5c5995ad187e8edfaf5446dd40e84497461ae90 | [
"BSD-3-Clause"
] | 39 | 2015-04-12T12:07:06.000Z | 2021-12-01T21:55:04.000Z | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from __future__ import division
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
food_left = sum(int(j) for i in newFood for j in i)
if food_left > 0:
food_distances = [manhattanDistance(newPos, (x, y))
for x, row in enumerate(newFood)
for y, food in enumerate(row)
if food]
shortest_food = min(food_distances)
else:
shortest_food = 0
if newGhostStates:
ghost_distances = [manhattanDistance(ghost.getPosition(), newPos)
for ghost in newGhostStates]
shortest_ghost = min(ghost_distances)
if shortest_ghost == 0:
shortest_ghost = -2000
else:
shortest_ghost = -5 / shortest_ghost
else:
shortest_ghost = 0
return -2 * shortest_food + shortest_ghost - 40 * food_left
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
return max(
gameState.getLegalActions(0),
key = lambda x: search_depth(gameState.generateSuccessor(0, x), 1, 1)
)
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
Your minimax agent with alpha-beta pruning (question 3)
"""
def getAction(self, gameState):
"""
Returns the minimax action using self.depth and self.evaluationFunction
"""
val, alpha, beta, best = None, None, None, None
for action in gameState.getLegalActions(0):
val = max(val, min_val(gameState.generateSuccessor(0, action), 1, 1, alpha, beta))
# if val >= beta: return action
if alpha is None:
alpha, best = val, action
else:
alpha, best = max(val, alpha), action if val > alpha else best
return best
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
Your expectimax agent (question 4)
"""
def getAction(self, gameState):
"""
Returns the expectimax action using self.depth and self.evaluationFunction
All ghosts should be modeled as choosing uniformly at random from their
legal moves.
"""
return max(
gameState.getLegalActions(0),
key = lambda x: search_depth(gameState.generateSuccessor(0, x), 1, 1)
)
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
visited = set()
p_queue = util.PriorityQueue()
p_queue.push((problem.getStartState(), []), 0)
while not p_queue.isEmpty():
state, actions = p_queue.pop()
if state in visited:
continue
visited.add(state)
if problem.isGoalState(state):
return actions
for successor, action, stepCost in problem.getSuccessors(state):
if successor not in visited:
p_queue.push(
(successor, actions + [action]),
stepCost + problem.getCostOfActions(actions) +
heuristic(successor, problem = problem))
from game import Actions
class PositionSearchProblem:
"""
A search problem defines the state space, start state, goal test,
successor function and cost function. This search problem can be
used to find paths to a particular point on the pacman board.
The state space consists of (x,y) positions in a pacman game.
Note: this search problem is fully specified; you should NOT change it.
"""
def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
"""
Stores the start and goal.
gameState: A GameState object (pacman.py)
costFn: A function from a search state (tuple) to a non-negative number
goal: A position in the gameState
"""
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
if start != None: self.startState = start
self.goal = goal
self.costFn = costFn
self.visualize = visualize
if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
print 'Warning: this does not look like a regular search maze'
# For display purposes
self._visited, self._visitedlist, self._expanded = {}, [], 0
def getSuccessors(self, state):
"""
Returns successor states, the actions they require, and a cost of 1.
As noted in search.py:
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
successors = []
for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
x,y = state
dx, dy = Actions.directionToVector(action)
nextx, nexty = int(x + dx), int(y + dy)
if not self.walls[nextx][nexty]:
nextState = (nextx, nexty)
cost = self.costFn(nextState)
successors.append( ( nextState, action, cost) )
# Bookkeeping for display purposes
self._expanded += 1
if state not in self._visited:
self._visited[state] = True
self._visitedlist.append(state)
return successors
def getCostOfActions(self, actions):
"""
Returns the cost of a particular sequence of actions. If those actions
include an illegal move, return 999999
"""
if actions == None: return 999999
x,y= self.getStartState()
cost = 0
for action in actions:
# Check figure out the next state and see whether its' legal
dx, dy = Actions.directionToVector(action)
x, y = int(x + dx), int(y + dy)
if self.walls[x][y]: return 999999
cost += self.costFn((x,y))
return cost
class AnyFoodSearchProblem(PositionSearchProblem):
"""
A search problem for finding a path to any food.
This search problem is just like the PositionSearchProblem, but
has a different goal test, which you need to fill in below. The
state space and successor function do not need to be changed.
The class definition above, AnyFoodSearchProblem(PositionSearchProblem),
inherits the methods of the PositionSearchProblem.
You can use this search problem to help you fill in
the findPathToClosestDot method.
"""
def __init__(self, gameState):
"Stores information from the gameState. You don't need to change this."
# Store the food for later reference
self.food = gameState.getFood()
# Store info for the PositionSearchProblem (no need to change this)
self.walls = gameState.getWalls()
self.startState = gameState.getPacmanPosition()
self.costFn = lambda x: 1
self._visited, self._visitedlist, self._expanded = {}, [], 0
def isGoalState(self, state):
"""
The state is Pacman's position. Fill this in with a goal test
that will complete the problem definition.
"""
x,y = state
return self.food[x][y]
def manhattanHeuristic(position, problem, info={}):
"The Manhattan distance heuristic for a PositionSearchProblem"
xy1 = position
xy2 = problem.goal
return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
evaluation function (question 5).
DESCRIPTION: This function evaluates a state based on the sum of
six weighted variables:
- Distance of path to nearest food pellet
- Manhattan distance to closest offensive ghost
- Manhattan distance to closest power pellet
- Number of power pellets left
- Number of food pellets left
- Manhattan distance to closest scared ghost
For some of the variables, the reciprocal was taken based on the
following methodology:
- The reciprocal of the distance to closest food pellet
- A close food pellet is a good thing, but we want grabbing
one to have a limited value on the change in score
- The score drop due to the increased distance to the next
nearest pellet should be less than the score gain from
eating the pellet.
- The negative reciprocal of the distance to the closest ghost
- A close ghost makes the state less desirable, but variances
in ghosts far away should have little impact
- The reciprocal of the distance to the closest power pellet
- Same reasoning as food pellets
"""
pos = currentGameState.getPacmanPosition()
food = currentGameState.getFood()
ghosts = currentGameState.getGhostStates()
capsules = currentGameState.getCapsules()
food_left = sum(int(j) for i in food for j in i)
# Nom them foods
problem = AnyFoodSearchProblem(currentGameState)
shortest_food = aStarSearch(problem, heuristic = nearest_food_heuristic)
if shortest_food:
shortest_food = 1 / len(shortest_food)
else:
shortest_food = 1000
# if food_left > 0:
# food_distances = [
# manhattanDistance(pos, (x, y))
# for x, row in enumerate(food)
# for y, food_bool in enumerate(row)
# if food_bool
# ]
# shortest_food = 1 / min(food_distances)
# else:
# shortest_food = -200000
scared = [ghost for ghost in ghosts if ghost.scaredTimer > 0]
ghosts = [ghost for ghost in ghosts if ghost.scaredTimer == 0]
# Don't let the ghost nom you
if ghosts:
ghost_distances = [manhattanDistance(ghost.getPosition(), pos)
for ghost in ghosts]
shortest_ghost = min(ghost_distances)
if shortest_ghost == 0:
shortest_ghost = 200000
else:
shortest_ghost = 1 / shortest_ghost
else:
shortest_ghost = 0
# Nom them scared ones
shortest_scared = 0
if scared:
scared_distances = [manhattanDistance(ghost.getPosition(), pos)
for ghost in scared]
scared_distances = [distance
for ghost, distance in zip(scared, scared_distances)
if distance <= ghost.scaredTimer]
if scared_distances:
shortest_scared = min(scared_distances)
if shortest_scared == 0:
shortest_scared = 10
else:
shortest_scared = 1 / shortest_scared
# Nom them capsules
capsules_left = len(capsules)
if capsules:
capsule_distances = [manhattanDistance(capsule, pos)
for capsule in capsules]
shortest_capsule = 1 / min(capsule_distances)
else:
shortest_capsule = 0
weights = [5, 10, -5, -50, -100, 10]
scores = [shortest_food, shortest_capsule, shortest_ghost,
food_left, capsules_left, shortest_scared]
score = sum(i * j for i, j in zip(scores, weights))
# print "pos\t\t\t", pos
# print "shortest food\t\t", shortest_food
# print "food_left\t\t", food_left
# print "shortest_capsule\t", shortest_capsule
# print "score\t\t\t", score
# print
return score
# Abbreviation
better = betterEvaluationFunction
class ContestAgent(MultiAgentSearchAgent):
"""
Your agent for the mini-contest
"""
def getAction(self, gameState):
"""
Returns an action. You can use any method you want and search to any depth you want.
Just remember that the mini-contest is timed, so you have to trade off speed and computation.
Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
just make a beeline straight towards Pacman (or away from him if they're scared!)
"""
"*** YOUR CODE HERE ***"
util.raiseNotDefined()
| 36.08489 | 107 | 0.622847 | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from __future__ import division
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide. You are welcome to change
it in any way you see fit, so long as you don't touch our method
headers.
"""
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
getAction chooses among the best options according to the evaluation function.
Just like in the previous project, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}
"""
# Collect legal moves and successor states
legalMoves = gameState.getLegalActions()
# Choose one of the best actions
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best
"Add more of your code here if you want to"
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current and proposed successor
GameStates (pacman.py) and returns a number, where higher numbers are better.
The code below extracts some useful information from the state, like the
remaining food (newFood) and Pacman position after moving (newPos).
newScaredTimes holds the number of moves that each ghost will remain
scared because of Pacman having eaten a power pellet.
Print out these variables to see what you're getting, then combine them
to create a masterful evaluation function.
"""
# Useful information you can extract from a GameState (pacman.py)
successorGameState = currentGameState.generatePacmanSuccessor(action)
newPos = successorGameState.getPacmanPosition()
newFood = successorGameState.getFood()
newGhostStates = successorGameState.getGhostStates()
newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
food_left = sum(int(j) for i in newFood for j in i)
if food_left > 0:
food_distances = [manhattanDistance(newPos, (x, y))
for x, row in enumerate(newFood)
for y, food in enumerate(row)
if food]
shortest_food = min(food_distances)
else:
shortest_food = 0
if newGhostStates:
ghost_distances = [manhattanDistance(ghost.getPosition(), newPos)
for ghost in newGhostStates]
shortest_ghost = min(ghost_distances)
if shortest_ghost == 0:
shortest_ghost = -2000
else:
shortest_ghost = -5 / shortest_ghost
else:
shortest_ghost = 0
return -2 * shortest_food + shortest_ghost - 40 * food_left
def scoreEvaluationFunction(currentGameState):
"""
This default evaluation function just returns the score of the state.
The score is the same one displayed in the Pacman GUI.
This evaluation function is meant for use with adversarial search agents
(not reflex agents).
"""
return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
"""
This class provides some common elements to all of your
multi-agent searchers. Any methods defined here will be available
to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
You *do not* need to make any changes here, but you can if you want to
add functionality to all your adversarial search agents. Please do not
remove anything, however.
Note: this is an abstract class: one that should not be instantiated. It's
only partially specified, and designed to be extended. Agent (game.py)
is another abstract class.
"""
def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
self.index = 0 # Pacman is always agent index 0
self.evaluationFunction = util.lookup(evalFn, globals())
self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
"""
Your minimax agent (question 2)
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
def search_depth(state, depth, agent):
if agent == state.getNumAgents():
if depth == self.depth:
return self.evaluationFunction(state)
else:
return search_depth(state, depth + 1, 0)
else:
actions = state.getLegalActions(agent)
if len(actions) == 0:
return self.evaluationFunction(state)
next_states = (
search_depth(state.generateSuccessor(agent, action),
depth, agent + 1)
for action in actions
)
return (max if agent == 0 else min)(next_states)
return max(
gameState.getLegalActions(0),
key = lambda x: search_depth(gameState.generateSuccessor(0, x), 1, 1)
)
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """
    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        # NOTE(review): alpha/beta/val use None as an "unset" sentinel and are
        # then fed to max()/min() against numbers (e.g. max(None, x)).  This
        # relies on Python 2 ordering, where None compares less than any
        # number; under Python 3 those comparisons raise TypeError -- verify
        # the intended interpreter before porting.
        def min_val(state, depth, agent, alpha, beta):
            # Minimizing node for ghost `agent`; once every ghost has moved,
            # hand control back to the maximizer one ply deeper.
            if agent == state.getNumAgents():
                return max_val(state, depth + 1, 0, alpha, beta)
            val = None
            for action in state.getLegalActions(agent):
                successor = min_val(state.generateSuccessor(agent, action), depth, agent + 1, alpha, beta)
                val = successor if val is None else min(val, successor)
                # Prune: the maximizer above already has an option worth >= alpha.
                if alpha is not None and val < alpha:
                    return val
                beta = val if beta is None else min(beta, val)
            if val is None:
                # No legal actions: terminal state, fall back to evaluation.
                return self.evaluationFunction(state)
            return val
        def max_val(state, depth, agent, alpha, beta):
            # Maximizing node; only Pacman (agent 0) ever maximizes.
            assert agent == 0
            if depth > self.depth:
                return self.evaluationFunction(state)
            val = None
            for action in state.getLegalActions(agent):
                successor = min_val(state.generateSuccessor(agent, action), depth, agent + 1, alpha, beta)
                val = max(val, successor)
                # Prune: the minimizer above already has an option worth <= beta.
                if beta is not None and val > beta:
                    return val
                alpha = max(alpha, val)
            if val is None:
                return self.evaluationFunction(state)
            return val
        # Root layer: run one maximizing step by hand so we can remember
        # which concrete action produced the best value.
        val, alpha, beta, best = None, None, None, None
        for action in gameState.getLegalActions(0):
            val = max(val, min_val(gameState.generateSuccessor(0, action), 1, 1, alpha, beta))
            # if val >= beta: return action
            if alpha is None:
                alpha, best = val, action
            else:
                # `val > alpha` is evaluated against the pre-update alpha, so
                # `best` only changes when this action strictly improves it.
                alpha, best = max(val, alpha), action if val > alpha else best
        return best
def average(lst):
    """Return the arithmetic mean of the values in *lst*.

    The input is materialised into a list first, so generators and other
    one-shot iterables are handled safely.  Raises ZeroDivisionError on an
    empty input, like the original.
    """
    values = list(lst)
    total = sum(values)
    return total / len(values)
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """

    def getAction(self, gameState):
        """
        Return the expectimax action searched to self.depth plies using
        self.evaluationFunction.

        Pacman (agent 0) maximizes; every ghost is modeled as choosing
        uniformly at random among its legal moves, so ghost nodes score
        as the mean of their children.
        """
        def expectimax_value(state, depth, agent):
            # Every agent has moved this ply: evaluate at the depth limit,
            # otherwise begin the next ply with Pacman.
            if agent == state.getNumAgents():
                if depth == self.depth:
                    return self.evaluationFunction(state)
                return expectimax_value(state, depth + 1, 0)

            legal = state.getLegalActions(agent)
            # Terminal state: nothing to expand.
            if not legal:
                return self.evaluationFunction(state)

            scores = [
                expectimax_value(state.generateSuccessor(agent, move), depth, agent + 1)
                for move in legal
            ]
            return max(scores) if agent == 0 else average(scores)

        return max(
            gameState.getLegalActions(0),
            key=lambda move: expectimax_value(gameState.generateSuccessor(0, move), 1, 1)
        )
def nullHeuristic(state, problem=None):
    """
    A trivial admissible heuristic: estimates the cost from *state* to the
    nearest goal of *problem* as zero, which makes A* behave like
    uniform-cost search.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first.

    Returns the list of actions reaching the first goal popped, or None
    when no goal is reachable.

    The cumulative path cost g is carried alongside each queue entry so a
    successor's priority is computed in O(1).  The original called
    problem.getCostOfActions(actions) on every push, re-walking the whole
    path from the start and making each expansion cost O(path length);
    since getCostOfActions sums the same per-step costs reported by
    getSuccessors, tracking g incrementally yields identical priorities.
    """
    visited = set()
    p_queue = util.PriorityQueue()
    # Each entry: (state, actions-to-reach-state, g = cost of those actions).
    p_queue.push((problem.getStartState(), [], 0), 0)
    while not p_queue.isEmpty():
        state, actions, cost_so_far = p_queue.pop()
        if state in visited:
            continue
        visited.add(state)
        if problem.isGoalState(state):
            return actions
        for successor, action, stepCost in problem.getSuccessors(state):
            if successor not in visited:
                new_cost = cost_so_far + stepCost
                # Priority f = g + h; the heuristic must be admissible
                # for the returned path to be optimal.
                p_queue.push(
                    (successor, actions + [action], new_cost),
                    new_cost + heuristic(successor, problem=problem))
from game import Actions
class PositionSearchProblem:
    """
    A search problem defines the state space, start state, goal test,
    successor function and cost function.  This search problem can be
    used to find paths to a particular point on the pacman board.

    The state space consists of (x,y) positions in a pacman game.

    Note: this search problem is fully specified; you should NOT change it.
    """

    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.

        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        # Warn when the board does not look like a single-dot search maze.
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print 'Warning: this does not look like a regular search maze'

        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0

    def getStartState(self):
        # Pacman's initial (x, y) position, or the explicit `start` override.
        return self.startState

    def isGoalState(self, state):
        """Return True when *state* equals the configured goal position."""
        isGoal = state == self.goal

        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable

        return isGoal

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        As noted in search.py:
            For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            # Moves into walls are simply omitted from the successor list.
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )

        # Bookkeeping for display purposes
        self._expanded += 1
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    Identical to PositionSearchProblem except for the goal test: any
    position that still holds a food pellet counts as a goal.  The state
    space and successor function are inherited unchanged, so this problem
    can back the findPathToClosestDot method directly.
    """

    def __init__(self, gameState):
        "Stores information from the gameState. You don't need to change this."
        # Snapshot of the maze layout and Pacman's starting square.
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        # Remember the food grid so isGoalState can consult it.
        self.food = gameState.getFood()
        # Uniform step cost of 1 per move.
        self.costFn = lambda x: 1
        # Display/bookkeeping state used by the inherited machinery.
        self._visited, self._visitedlist, self._expanded = {}, [], 0

    def isGoalState(self, state):
        """
        The state is Pacman's (x, y) position; it is a goal whenever that
        square still contains food.
        """
        col, row = state
        return self.food[col][row]
def manhattanHeuristic(position, problem, info={}):
    "The Manhattan distance heuristic for a PositionSearchProblem"
    goal = problem.goal
    return abs(position[0] - goal[0]) + abs(position[1] - goal[1])
def nearest_food_heuristic(pos, problem, info={}):
    """Admissible heuristic for AnyFoodSearchProblem: the Manhattan
    distance from *pos* to the closest remaining food pellet, or 0 when
    no food is left."""
    best = None
    for x, row in enumerate(problem.food):
        for y, has_food in enumerate(row):
            if has_food:
                d = manhattanDistance(pos, (x, y))
                if best is None or d < best:
                    best = d
    return best if best is not None else 0
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: This function evaluates a state based on the sum of
    six weighted variables:
      - Distance of path to nearest food pellet
      - Manhattan distance to closest offensive ghost
      - Manhattan distance to closest power pellet
      - Number of power pellets left
      - Number of food pellets left
      - Manhattan distance to closest scared ghost

    For some of the variables, the reciprocal was taken based on the
    following methodology:
      - The reciprocal of the distance to closest food pellet
        - A close food pellet is a good thing, but we want grabbing
          one to have a limited value on the change in score
        - The score drop due to the increased distance to the next
          nearest pellet should be less than the score gain from
          eating the pellet.
      - The negative reciprocal of the distance to the closest ghost
        - A close ghost makes the state less desirable, but variances
          in ghosts far away should have little impact
      - The reciprocal of the distance to the closest power pellet
        - Same reasoning as food pellets
    """
    pos = currentGameState.getPacmanPosition()
    food = currentGameState.getFood()
    ghosts = currentGameState.getGhostStates()
    capsules = currentGameState.getCapsules()
    food_left = sum(int(j) for i in food for j in i)
    # Nom them foods
    # True maze (A*) distance to the closest pellet, not just Manhattan.
    problem = AnyFoodSearchProblem(currentGameState)
    shortest_food = aStarSearch(problem, heuristic = nearest_food_heuristic)
    if shortest_food:
        # NOTE(review): under Python 2, 1 / len(...) is integer division and
        # truncates to 0 for any path longer than one step unless the file
        # does `from __future__ import division` above this view -- verify.
        shortest_food = 1 / len(shortest_food)
    else:
        shortest_food = 1000
    # if food_left > 0:
    #     food_distances = [
    #         manhattanDistance(pos, (x, y))
    #         for x, row in enumerate(food)
    #         for y, food_bool in enumerate(row)
    #         if food_bool
    #     ]
    #     shortest_food = 1 / min(food_distances)
    # else:
    #     shortest_food = -200000
    # Split ghosts into edible (scared) and dangerous ones.
    scared = [ghost for ghost in ghosts if ghost.scaredTimer > 0]
    ghosts = [ghost for ghost in ghosts if ghost.scaredTimer == 0]
    # Don't let the ghost nom you
    if ghosts:
        ghost_distances = [manhattanDistance(ghost.getPosition(), pos)
                           for ghost in ghosts]
        shortest_ghost = min(ghost_distances)
        if shortest_ghost == 0:
            # Standing on a ghost: huge value, heavily penalized by the
            # negative weight below.
            shortest_ghost = 200000
        else:
            shortest_ghost = 1 / shortest_ghost
    else:
        shortest_ghost = 0
    # Nom them scared ones
    shortest_scared = 0
    if scared:
        scared_distances = [manhattanDistance(ghost.getPosition(), pos)
                            for ghost in scared]
        # Only chase ghosts reachable before their scared timer runs out.
        scared_distances = [distance
                            for ghost, distance in zip(scared, scared_distances)
                            if distance <= ghost.scaredTimer]
        if scared_distances:
            shortest_scared = min(scared_distances)
            if shortest_scared == 0:
                shortest_scared = 10
            else:
                shortest_scared = 1 / shortest_scared
    # Nom them capsules
    capsules_left = len(capsules)
    if capsules:
        capsule_distances = [manhattanDistance(capsule, pos)
                             for capsule in capsules]
        shortest_capsule = 1 / min(capsule_distances)
    else:
        shortest_capsule = 0
    # Weighted linear combination; `weights` pairs positionally with `scores`.
    weights = [5, 10, -5, -50, -100, 10]
    scores = [shortest_food, shortest_capsule, shortest_ghost,
              food_left, capsules_left, shortest_scared]
    score = sum(i * j for i, j in zip(scores, weights))
    # print "pos\t\t\t", pos
    # print "shortest food\t\t", shortest_food
    # print "food_left\t\t", food_left
    # print "shortest_capsule\t", shortest_capsule
    # print "score\t\t\t", score
    # print
    return score
# Abbreviation
# Short alias for betterEvaluationFunction (presumably referenced as
# `better` by the course autograder's evalFn option -- verify).
better = betterEvaluationFunction
class ContestAgent(MultiAgentSearchAgent):
    """
    Your agent for the mini-contest
    """

    def getAction(self, gameState):
        """
        Returns an action.  You can use any method you want and search to any depth you want.
        Just remember that the mini-contest is timed, so you have to trade off speed and computation.

        Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
        just make a beeline straight towards Pacman (or away from him if they're scared!)
        """
        "*** YOUR CODE HERE ***"
        # Unimplemented stub: always raises until the contest agent is written.
        util.raiseNotDefined()
| 3,642 | 0 | 248 |
60c7aab9263493986520396ca1327fe5b14d50e6 | 4,151 | py | Python | res_mods/mods/packages/xvm_battle/python/replay.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | res_mods/mods/packages/xvm_battle/python/replay.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | 1 | 2016-04-03T13:31:39.000Z | 2016-04-03T16:48:26.000Z | res_mods/mods/packages/xvm_battle/python/replay.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | """ XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# imports
import simplejson
import traceback
import BigWorld
import game
from Avatar import PlayerAvatar
from BattleReplay import BattleReplay, g_replayCtrl
from PlayerEvents import g_playerEvents
from gui.shared import g_eventBus, events
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.logger import *
import xvm_main.python.minimap_circles as minimap_circles
import xvm_main.python.utils as utils
from consts import *
#####################################################################
# handlers
_xvm_record_data = None
_xvm_play_data = None
@registerEvent(PlayerAvatar, 'onBecomePlayer')
# record
g_eventBus.addListener(XVM_BATTLE_EVENT.XMQP_MESSAGE, onXmqpMessage)
@registerEvent(game, 'fini')
@overrideMethod(BattleReplay, 'stop')
# play
| 34.02459 | 115 | 0.607564 | """ XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# imports
import simplejson
import traceback
import BigWorld
import game
from Avatar import PlayerAvatar
from BattleReplay import BattleReplay, g_replayCtrl
from PlayerEvents import g_playerEvents
from gui.shared import g_eventBus, events
from xfw import *
import xvm_main.python.config as config
from xvm_main.python.logger import *
import xvm_main.python.minimap_circles as minimap_circles
import xvm_main.python.utils as utils
from consts import *
#####################################################################
# handlers
_xvm_record_data = None
_xvm_play_data = None
@registerEvent(PlayerAvatar, 'onBecomePlayer')
def _PlayerAvatar_onBecomePlayer(self):
    """Initialize XVM replay data when a battle starts.

    Recording: start a fresh _xvm_record_data payload (format version 1.0)
    that _BattleReplay_stop later embeds into the replay's arena info.
    Playback: restore recorded minimap circles and queue the recorded
    timing events for re-dispatch via next_data_timing().
    """
    try:
        if not g_replayCtrl.isPlaying:
            global _xvm_record_data
            _xvm_record_data = {
                'ver': '1.0',
                'global': {
                    'minimap_circles': minimap_circles.getMinimapCirclesData()
                },
                'timing': []
            }
        else:
            #log('play: ' + str(fileName))
            xvm_data = simplejson.loads(g_replayCtrl._BattleReplay__replayCtrl.getArenaInfoStr()).get('xvm', None)
            if xvm_data:
                xvm_data = unicode_to_ascii(xvm_data)
                # Only payload format version 1.0 is understood.
                if xvm_data.get('ver', None) == '1.0':
                    minimap_circles.setMinimapCirclesData(xvm_data['global']['minimap_circles'])
                    global _xvm_play_data
                    _xvm_play_data = {
                        'timing': xvm_data['timing'],
                        'value': None,
                        'period': -1
                    }
                    g_playerEvents.onArenaPeriodChange += onArenaPeriodChange
                    next_data_timing()
    except Exception as ex:
        err(traceback.format_exc())
# record
def onXmqpMessage(e):
    """Append an incoming XMQP message -- stamped with the current arena
    period and replay timestamp -- to the recording payload so it can be
    re-dispatched during playback."""
    try:
        if g_replayCtrl.isRecording:
            global _xvm_record_data
            if _xvm_record_data:
                period = g_replayCtrl._BattleReplay__arenaPeriod
                _xvm_record_data['timing'].append({
                    'p': period,
                    't': float("{0:.3f}".format(g_replayCtrl.currentTime)),
                    'm': 'XMQP',
                    'd': e.ctx
                })
    except Exception as ex:
        err(traceback.format_exc())

g_eventBus.addListener(XVM_BATTLE_EVENT.XMQP_MESSAGE, onXmqpMessage)
@registerEvent(game, 'fini')
def fini():
    # Detach the XMQP listener on game shutdown.
    g_eventBus.removeListener(XVM_BATTLE_EVENT.XMQP_MESSAGE, onXmqpMessage)
@overrideMethod(BattleReplay, 'stop')
def _BattleReplay_stop(base, self, rewindToTime = None, delete = False):
    """Before a recording stops, embed the collected XVM timing data into
    the replay's arena-info JSON under the "xvm" key, then defer to the
    original stop implementation."""
    try:
        if self.isRecording:
            global _xvm_record_data
            if _xvm_record_data:
                arenaInfo = simplejson.loads(self._BattleReplay__replayCtrl.getArenaInfoStr())
                arenaInfo.update({"xvm":utils.pretty_floats(_xvm_record_data)})
                self._BattleReplay__replayCtrl.setArenaInfoStr(simplejson.dumps(arenaInfo))
                # Reset so the payload cannot be written twice.
                _xvm_record_data = None
    except Exception as ex:
        err(traceback.format_exc())
    return base(self, rewindToTime, delete)
# play
def onArenaPeriodChange(period, periodEndTime, periodLength, periodAdditionalInfo):
    # Track the current arena period and try to dispatch any events that
    # were held back waiting for it.
    global _xvm_play_data
    _xvm_play_data['period'] = period
    next_data_timing()
def next_data_timing():
    """Dispatch the pending recorded event (if any), then schedule the next
    one relative to the replay clock.

    Events recorded for a later arena period are held back until
    onArenaPeriodChange advances _xvm_play_data['period'].
    """
    global _xvm_play_data
    if _xvm_play_data['value']:
        if _xvm_play_data['value']['m'] == 'XMQP':
            g_eventBus.handleEvent(events.HasCtxEvent(XVM_BATTLE_EVENT.XMQP_MESSAGE, _xvm_play_data['value']['d']))
        _xvm_play_data['value'] = None
    if _xvm_play_data['timing']:
        # Next event belongs to a later arena period: wait for it.
        if _xvm_play_data['period'] < _xvm_play_data['timing'][0]['p']:
            return
        _xvm_play_data['value'] = _xvm_play_data['timing'].pop(0)
        if _xvm_play_data['period'] > _xvm_play_data['value']['p']:
            # Event is overdue (its period already passed): fire immediately.
            BigWorld.callback(0, next_data_timing)
        else:
            BigWorld.callback(_xvm_play_data['value']['t'] - g_replayCtrl.currentTime, next_data_timing)
| 3,112 | 0 | 135 |
dbc0023dae7400b46fc4a3f80a594c5bdf65519c | 3,184 | py | Python | pretrain/pri3d/common/solver.py | kudo1026/Pri3D | 8bf8a3ec4393db3da6c0662ff49d5788ea188e20 | [
"MIT"
] | 103 | 2021-07-06T17:05:03.000Z | 2022-03-30T06:10:04.000Z | pretrain/pri3d/common/solver.py | kudo1026/Pri3D | 8bf8a3ec4393db3da6c0662ff49d5788ea188e20 | [
"MIT"
] | 5 | 2021-10-13T14:35:32.000Z | 2022-03-31T23:40:44.000Z | pretrain/pri3d/common/solver.py | kudo1026/Pri3D | 8bf8a3ec4393db3da6c0662ff49d5788ea188e20 | [
"MIT"
] | 3 | 2021-09-24T09:00:44.000Z | 2021-10-14T19:17:01.000Z | import logging
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR
from torch import nn
class PolyLR(LambdaLR):
"""DeepLab learning rate policy"""
| 35.377778 | 106 | 0.65201 | import logging
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR
from torch import nn
def initialize_optimizer(params, config):
    """Build the optimizer named by config.optimizer.optimizer over *params*.

    Supports 'SGD' and 'Adam'; any other name is logged as an error and
    raises ValueError.
    """
    opt_cfg = config.optimizer
    name = opt_cfg.optimizer
    if name == 'SGD':
        return SGD(params,
                   lr=opt_cfg.lr,
                   momentum=opt_cfg.sgd_momentum,
                   dampening=opt_cfg.sgd_dampening,
                   weight_decay=opt_cfg.weight_decay)
    if name == 'Adam':
        return Adam(params,
                    lr=opt_cfg.lr,
                    betas=(opt_cfg.adam_beta1, opt_cfg.adam_beta2),
                    weight_decay=opt_cfg.weight_decay)
    logging.error('Optimizer type not supported')
    raise ValueError('Optimizer type not supported')
class PolyLR(LambdaLR):
    """DeepLab learning rate policy: lr is scaled by (1 - step/(max_iter+1))**power."""

    def __init__(self, optimizer, max_iter, power=0.9, last_epoch=-1):
        def poly_factor(step):
            # Multiplicative factor applied to the base learning rate.
            return (1 - step / (max_iter + 1)) ** power

        super(PolyLR, self).__init__(optimizer, poly_factor, last_epoch)
def initialize_scheduler(optimizer, config, epoch_size, last_epoch=-1):
    """Build the LR scheduler named by config.scheduler.scheduler.

    Epoch-based settings in the config are converted to iteration counts
    via *epoch_size* (iterations per epoch).

    Raises:
        ValueError: if the scheduler type is not supported.  The original
        only logged an error here and implicitly returned None, unlike the
        sibling initialize_optimizer which raises; failing loudly prevents
        a silent None scheduler downstream.
    """
    sched_cfg = config.scheduler
    if sched_cfg.scheduler == 'StepLR':
        return StepLR(optimizer,
                      step_size=sched_cfg.decay_epochs * epoch_size,
                      gamma=sched_cfg.lr_decay,
                      last_epoch=last_epoch)
    elif sched_cfg.scheduler == 'MultiStepLR':
        return MultiStepLR(optimizer,
                           milestones=[epoch * epoch_size for epoch in sched_cfg.decay_epochs],
                           gamma=sched_cfg.lr_decay,
                           last_epoch=last_epoch)
    elif sched_cfg.scheduler == 'PolyLR':
        return PolyLR(optimizer,
                      max_iter=sched_cfg.max_epochs * epoch_size,
                      power=sched_cfg.poly_power,
                      last_epoch=last_epoch)
    else:
        logging.error('Scheduler not supported')
        raise ValueError('Scheduler not supported')
def set_bn_momentum_default(bn_momentum):
    """Return a callable for nn.Module.apply() that sets `momentum` to
    *bn_momentum* on every 1d/2d/3d batch-norm layer it visits."""
    # TODO: verify whether .parameters() also needs adjusting for the model.
    def apply_fn(module):
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            module.momentum = bn_momentum

    return apply_fn
class BNMomentumScheduler(object):
    """Epoch-driven scheduler for batch-norm momentum.

    Every step(epoch) pushes bn_lambda(epoch) into *model* via *setter*
    (which wraps model.apply with a momentum-assigning function).
    """

    def __init__(self, model, bn_lambda, last_epoch=-1,
                 setter=set_bn_momentum_default):
        if not isinstance(model, nn.Module):
            raise RuntimeError(
                "Class '{}' is not a PyTorch nn Module".format(
                    type(model).__name__
                )
            )
        self.model = model
        self.setter = setter
        self.lmbd = bn_lambda
        # Prime the model with the momentum for the first epoch.
        self.step(last_epoch + 1)
        self.last_epoch = last_epoch

    def step(self, epoch=None):
        # Default: advance one epoch past the last recorded one.
        target = self.last_epoch + 1 if epoch is None else epoch
        self.last_epoch = target
        momentum_fn = self.setter(self.lmbd(target))
        self.model.apply(momentum_fn)
def initialize_bnm_scheduler(model):
    """Create a BNMomentumScheduler that halves batch-norm momentum every
    20 epochs, starting from 0.5 and clamped below at 0.001."""
    init, floor, step, rate = 0.5, 0.001, 20, 0.5

    def momentum_at(it):
        decayed = init * rate ** (int(it / step))
        return max(decayed, floor)

    return BNMomentumScheduler(model, bn_lambda=momentum_at, last_epoch=-1)
| 2,774 | 13 | 193 |
e2546d8c561e8bcc5bfeb467a3ebf005076c7ed6 | 1,410 | py | Python | 2020/day17.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | 2020/day17.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | 2020/day17.py | tobeannouncd/AdventOfCode | b4fe6e9b10a0dc191429a90ab351806df03bca10 | [
"MIT"
] | null | null | null | from collections import Counter
from itertools import product
import numpy as np
import advent
if __name__ == '__main__':
main()
| 26.111111 | 91 | 0.573759 | from collections import Counter
from itertools import product
import numpy as np
import advent
def main():
    # Fetch the 2020 day-17 puzzle input via the advent helper and print
    # the answers for both parts.
    data = advent.get_input(2020, 17)
    print(solve(data, part=1))
    print(solve(data, part=2))
def solve(data, part: int):
    """Run six cycles of the Conway-cube automaton and count active cells.

    Part 1 simulates in three dimensions; part 2 adds a fourth singleton
    axis so the same evolve() rules run in 4-D.
    """
    rows = [list(line) for line in data.strip().splitlines()]
    grid = np.array([rows])
    if part == 2:
        grid = grid[..., np.newaxis]
    for _ in range(6):
        grid = evolve(grid)
    return np.count_nonzero(grid == '#')
def pad(grid: np.ndarray) -> np.ndarray:
    """Return *grid* surrounded by a one-cell border of inactive ('.') cells
    on every axis."""
    return np.pad(grid, 1, mode='constant', constant_values='.')
def evolve(grid: np.ndarray) -> np.ndarray:
    """Apply one step of the activation rules, growing the grid by one cell
    on every side so the active region can expand outward."""
    grid = pad(grid)
    result = grid.copy()
    for pos in np.ndindex(*grid.shape):
        active = sum(1 for cell in neighbors(grid, pos) if cell == '#')
        # Active cells survive only with 2 or 3 active neighbors; inactive
        # cells activate with exactly 3.
        if grid[pos] == '#' and active not in (2, 3):
            result[pos] = '.'
        elif grid[pos] == '.' and active == 3:
            result[pos] = '#'
    return result
def neighbors(grid: np.ndarray, pos):
    """Yield the values of every in-bounds cell adjacent to *pos*
    (all diagonals included, *pos* itself excluded)."""
    for offset in product((-1, 0, 1), repeat=len(pos)):
        if not any(offset):
            continue  # skip the cell itself
        candidate = [p + d for p, d in zip(pos, offset)]
        if all(0 <= c < s for c, s in zip(candidate, grid.shape)):
            yield grid.item(*candidate)
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 1,154 | 0 | 115 |
5457809fc46eebc92588721ee5a6c91bd2068792 | 3,533 | py | Python | mypackage/_mypyromodule.py | camlab-bioml/scvi-tools-skeleton | 2819e20bba9b7db9301f0d8bab33381093d6edc1 | [
"BSD-3-Clause"
] | 8 | 2021-02-16T10:21:59.000Z | 2022-01-14T10:48:37.000Z | mypackage/_mypyromodule.py | camlab-bioml/scvi-tools-skeleton | 2819e20bba9b7db9301f0d8bab33381093d6edc1 | [
"BSD-3-Clause"
] | 10 | 2021-01-26T00:01:47.000Z | 2022-03-01T03:39:05.000Z | mypackage/_mypyromodule.py | camlab-bioml/scvi-tools-skeleton | 2819e20bba9b7db9301f0d8bab33381093d6edc1 | [
"BSD-3-Clause"
] | 4 | 2021-05-17T17:27:33.000Z | 2022-01-26T16:19:22.000Z | import pyro
import pyro.distributions as dist
import torch
from scvi import _CONSTANTS
from scvi.module.base import PyroBaseModuleClass, auto_move_data
from scvi.nn import DecoderSCVI, Encoder
class MyPyroModule(PyroBaseModuleClass):
"""
Skeleton Variational auto-encoder Pyro model.
Here we implement a basic version of scVI's underlying VAE [Lopez18]_.
This implementation is for instructional purposes only.
Parameters
----------
n_input
Number of input genes
n_latent
Dimensionality of the latent space
n_hidden
Number of nodes per hidden layer
n_layers
Number of hidden layers used for encoder and decoder NNs
"""
@staticmethod
@torch.no_grad()
@auto_move_data
| 36.42268 | 90 | 0.613926 | import pyro
import pyro.distributions as dist
import torch
from scvi import _CONSTANTS
from scvi.module.base import PyroBaseModuleClass, auto_move_data
from scvi.nn import DecoderSCVI, Encoder
class MyPyroModule(PyroBaseModuleClass):
    """
    Skeleton Variational auto-encoder Pyro model.

    Here we implement a basic version of scVI's underlying VAE [Lopez18]_.
    This implementation is for instructional purposes only.

    Parameters
    ----------
    n_input
        Number of input genes
    n_latent
        Dimensionality of the latent space
    n_hidden
        Number of nodes per hidden layer
    n_layers
        Number of hidden layers used for encoder and decoder NNs
    """

    def __init__(self, n_input: int, n_latent: int, n_hidden: int, n_layers: int):
        super().__init__()
        self.n_input = n_input
        self.n_latent = n_latent
        # Small constant used below to keep the NB logits numerically stable.
        self.epsilon = 5.0e-3
        # z encoder goes from the n_input-dimensional data to an n_latent-d
        # latent space representation
        self.encoder = Encoder(
            n_input,
            n_latent,
            n_layers=n_layers,
            n_hidden=n_hidden,
            dropout_rate=0.1,
        )
        # decoder goes from n_latent-dimensional space to n_input-d data
        self.decoder = DecoderSCVI(
            n_latent,
            n_input,
            n_layers=n_layers,
            n_hidden=n_hidden,
        )
        # This gene-level parameter modulates the variance of the observation distribution
        self.px_r = torch.nn.Parameter(torch.ones(self.n_input))

    @staticmethod
    def _get_fn_args_from_batch(tensor_dict):
        # Unpack a data batch into the (args, kwargs) expected by
        # model()/guide(): the count matrix plus per-cell log library size.
        x = tensor_dict[_CONSTANTS.X_KEY]
        log_library = torch.log(torch.sum(x, dim=1, keepdim=True) + 1e-6)
        return (x, log_library), {}

    def model(self, x, log_library):
        """Generative model p(x | z): standard-normal prior on z decoded
        into a zero-inflated negative binomial likelihood over counts."""
        # register PyTorch module `decoder` with Pyro
        pyro.module("scvi", self)
        with pyro.plate("data", x.shape[0]):
            # setup hyperparameters for prior p(z)
            z_loc = x.new_zeros(torch.Size((x.shape[0], self.n_latent)))
            z_scale = x.new_ones(torch.Size((x.shape[0], self.n_latent)))
            # sample from prior (value will be sampled by guide when computing the ELBO)
            z = pyro.sample("latent", dist.Normal(z_loc, z_scale).to_event(1))
            # decode the latent code z
            px_scale, _, px_rate, px_dropout = self.decoder("gene", z, log_library)
            # build count distribution
            nb_logits = (px_rate + self.epsilon).log() - (
                self.px_r.exp() + self.epsilon
            ).log()
            x_dist = dist.ZeroInflatedNegativeBinomial(
                gate_logits=px_dropout, total_count=self.px_r.exp(), logits=nb_logits
            )
            # score against actual counts
            pyro.sample("obs", x_dist.to_event(1), obs=x)

    def guide(self, x, log_library):
        """Variational posterior q(z | x): the encoder network outputs the
        mean and scale of a diagonal Normal over the latent code."""
        # define the guide (i.e. variational distribution) q(z|x)
        pyro.module("scvi", self)
        with pyro.plate("data", x.shape[0]):
            # use the encoder to get the parameters used to define q(z|x)
            x_ = torch.log(1 + x)
            z_loc, z_scale, _ = self.encoder(x_)
            # sample the latent code z
            pyro.sample("latent", dist.Normal(z_loc, z_scale).to_event(1))

    @torch.no_grad()
    @auto_move_data
    def get_latent(self, tensors):
        # Posterior mean of z for a batch (no sampling, no gradients).
        x = tensors[_CONSTANTS.X_KEY]
        x_ = torch.log(1 + x)
        z_loc, _, _ = self.encoder(x_)
        return z_loc
| 2,640 | 0 | 133 |
9b203be1cdf9d808ea319102ec9e1767a80807ba | 6,471 | py | Python | examples/example_compute_virtualmachine.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | 1 | 2020-09-04T14:38:13.000Z | 2020-09-04T14:38:13.000Z | examples/example_compute_virtualmachine.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | null | null | null | examples/example_compute_virtualmachine.py | zikalino/AzurePythonExamples | 23f9c173f0736f4e7ff66dde0402ef88da4ccc8f | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
#--------------------------------------------------------------------------
# credentials from environment
#--------------------------------------------------------------------------
# Service-principal credentials come from the environment; the script fails
# fast with a KeyError if any variable is missing.
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
TENANT_ID = os.environ['AZURE_TENANT']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_SECRET']
#--------------------------------------------------------------------------
# variables
#--------------------------------------------------------------------------
# Names of the Azure resources created/used by this example.
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
VM_NAME = "myVm"
NETWORK_INTERFACE_NAME = "myNetworkInterface"
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mySubnet"
#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
# Authenticate once and share the credentials across all management clients.
credentials = ServicePrincipalCredentials(
    client_id=CLIENT_ID,
    secret=CLIENT_SECRET,
    tenant=TENANT_ID
)
mgmt_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
from azure.mgmt.network import NetworkManagementClient
network_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
# Idempotent: create_or_update succeeds even if the group already exists.
resource_client.resource_groups.create_or_update(resource_group_name=RESOURCE_GROUP, parameters={ 'location': AZURE_LOCATION })
#--------------------------------------------------------------------------
# virtual network (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Virtual Network")
# Create a VNet with a single /16 address space, then a /24 subnet inside
# it.  Both calls return long-running-operation pollers; .result() blocks
# until each operation completes.
azure_operation_poller = network_client.virtual_networks.create_or_update(
    RESOURCE_GROUP,
    VIRTUAL_NETWORK_NAME,
    {
        'location': AZURE_LOCATION,
        'address_space': {
            'address_prefixes': ['10.0.0.0/16']
        }
    },
)
result_create = azure_operation_poller.result()
async_subnet_creation = network_client.subnets.create_or_update(
    RESOURCE_GROUP,
    VIRTUAL_NETWORK_NAME,
    SUBNET_NAME,
    {'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
#--------------------------------------------------------------------------
# network interface (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Network Interface")
# The NIC gets a single IP configuration bound to the subnet created above.
async_nic_creation = network_client.network_interfaces.create_or_update(
    RESOURCE_GROUP,
    NETWORK_INTERFACE_NAME,
    {
        'location': AZURE_LOCATION,
        'ip_configurations': [{
            'name': 'MyIpConfig',
            'subnet': {
                'id': subnet_info.id
            }
        }]
    }
)
nic_info = async_nic_creation.result()
#--------------------------------------------------------------------------
# /VirtualMachines/put/Create a vm with password authentication.[put]
#--------------------------------------------------------------------------
print("Create a vm with password authentication.")
# NOTE(review): sample-only credentials below; never ship a hard-coded
# admin password -- prefer SSH keys or a secret store in real code.
BODY = {
    "location": AZURE_LOCATION,
    "hardware_profile": {
        "vm_size": "Standard_D1_v2"
    },
    "storage_profile": {
        "image_reference": {
            "sku": "2016-Datacenter",
            "publisher": "MicrosoftWindowsServer",
            "version": "latest",
            "offer": "WindowsServer"
        },
        "os_disk": {
            "caching": "ReadWrite",
            "managed_disk": {
                "storage_account_type": "Standard_LRS"
            },
            "name": "myVMosdisk",
            "create_option": "FromImage"
        }
    },
    "os_profile": {
        "admin_username": "myuser",
        "computer_name": "myVM",
        "admin_password": "Password123!!!"
    },
    "network_profile": {
        "network_interfaces": [
            {
                # Attach the NIC created above by its full ARM resource id.
                "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/networkInterfaces/" + NETWORK_INTERFACE_NAME,
                "properties": {
                    "primary": True
                }
            }
        ]
    }
}
# create_or_update returns a poller; .result() blocks until the VM exists.
result = mgmt_client.virtual_machines.create_or_update(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME, parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get Virtual Machine Instance View.[get]
#--------------------------------------------------------------------------
# Read-only queries against the VM just created; each call below
# overwrites `result` and the values are not otherwise used.
print("Get Virtual Machine Instance View.")
result = mgmt_client.virtual_machines.instance_view(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all available virtual machine sizes to which the specified virtual machine can be resized[get]
#--------------------------------------------------------------------------
print("Lists all available virtual machine sizes to which the specified virtual machine can be resized")
result = mgmt_client.virtual_machines.list_available_sizes(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get a Virtual Machine.[get]
#--------------------------------------------------------------------------
print("Get a Virtual Machine.")
result = mgmt_client.virtual_machines.get(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all the virtual machines under the specified subscription for the specified location.[get]
#--------------------------------------------------------------------------
print("Lists all the virtual machines under the specified subscription for the specified location.")
result = mgmt_client.virtual_machines.list_by_location(location=AZURE_LOCATION)
| 38.064706 | 165 | 0.532066 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# Demo/sample script: provisions a resource group, virtual network + subnet,
# and a network interface, then creates a password-authenticated Windows VM
# and exercises several read-only VirtualMachines APIs. All resource names
# are hard-coded demo values; credentials come from environment variables.
import os
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.common.credentials import ServicePrincipalCredentials
#--------------------------------------------------------------------------
# credentials from environment
#--------------------------------------------------------------------------
SUBSCRIPTION_ID = os.environ['AZURE_SUBSCRIPTION_ID']
TENANT_ID = os.environ['AZURE_TENANT']
CLIENT_ID = os.environ['AZURE_CLIENT_ID']
CLIENT_SECRET = os.environ['AZURE_SECRET']
#--------------------------------------------------------------------------
# variables
#--------------------------------------------------------------------------
AZURE_LOCATION = 'eastus'
RESOURCE_GROUP = "myResourceGroup"
VM_NAME = "myVm"
NETWORK_INTERFACE_NAME = "myNetworkInterface"
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mySubnet"
#--------------------------------------------------------------------------
# management clients
#--------------------------------------------------------------------------
credentials = ServicePrincipalCredentials(
    client_id=CLIENT_ID,
    secret=CLIENT_SECRET,
    tenant=TENANT_ID
)
mgmt_client = ComputeManagementClient(credentials, SUBSCRIPTION_ID)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
from azure.mgmt.network import NetworkManagementClient
network_client = NetworkManagementClient(credentials, SUBSCRIPTION_ID)
#--------------------------------------------------------------------------
# resource group (prerequisite)
#--------------------------------------------------------------------------
print("Creating Resource Group")
resource_client.resource_groups.create_or_update(resource_group_name=RESOURCE_GROUP, parameters={ 'location': AZURE_LOCATION })
#--------------------------------------------------------------------------
# virtual network (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Virtual Network")
# Long-running operations return pollers; .result() blocks until completion.
azure_operation_poller = network_client.virtual_networks.create_or_update(
    RESOURCE_GROUP,
    VIRTUAL_NETWORK_NAME,
    {
        'location': AZURE_LOCATION,
        'address_space': {
            'address_prefixes': ['10.0.0.0/16']
        }
    },
)
result_create = azure_operation_poller.result()
async_subnet_creation = network_client.subnets.create_or_update(
    RESOURCE_GROUP,
    VIRTUAL_NETWORK_NAME,
    SUBNET_NAME,
    {'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
#--------------------------------------------------------------------------
# network interface (prerequisite)
#--------------------------------------------------------------------------
print("Prerequisite - Creating Network Interface")
async_nic_creation = network_client.network_interfaces.create_or_update(
    RESOURCE_GROUP,
    NETWORK_INTERFACE_NAME,
    {
        'location': AZURE_LOCATION,
        'ip_configurations': [{
            'name': 'MyIpConfig',
            'subnet': {
                'id': subnet_info.id
            }
        }]
    }
)
nic_info = async_nic_creation.result()
#--------------------------------------------------------------------------
# /VirtualMachines/put/Create a vm with password authentication.[put]
#--------------------------------------------------------------------------
print("Create a vm with password authentication.")
# VM definition: Windows Server 2016 image, managed OS disk, attached to the
# NIC created above. NOTE(review): demo-only plaintext admin password.
BODY = {
  "location": AZURE_LOCATION,
  "hardware_profile": {
    "vm_size": "Standard_D1_v2"
  },
  "storage_profile": {
    "image_reference": {
      "sku": "2016-Datacenter",
      "publisher": "MicrosoftWindowsServer",
      "version": "latest",
      "offer": "WindowsServer"
    },
    "os_disk": {
      "caching": "ReadWrite",
      "managed_disk": {
        "storage_account_type": "Standard_LRS"
      },
      "name": "myVMosdisk",
      "create_option": "FromImage"
    }
  },
  "os_profile": {
    "admin_username": "myuser",
    "computer_name": "myVM",
    "admin_password": "Password123!!!"
  },
  "network_profile": {
    "network_interfaces": [
      {
        "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/networkInterfaces/" + NETWORK_INTERFACE_NAME,
        "properties": {
          "primary": True
        }
      }
    ]
  }
}
result = mgmt_client.virtual_machines.create_or_update(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME, parameters=BODY)
result = result.result()
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get Virtual Machine Instance View.[get]
#--------------------------------------------------------------------------
print("Get Virtual Machine Instance View.")
result = mgmt_client.virtual_machines.instance_view(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all available virtual machine sizes to which the specified virtual machine can be resized[get]
#--------------------------------------------------------------------------
print("Lists all available virtual machine sizes to which the specified virtual machine can be resized")
result = mgmt_client.virtual_machines.list_available_sizes(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Get a Virtual Machine.[get]
#--------------------------------------------------------------------------
print("Get a Virtual Machine.")
result = mgmt_client.virtual_machines.get(resource_group_name=RESOURCE_GROUP, vm_name=VM_NAME)
#--------------------------------------------------------------------------
# /VirtualMachines/get/Lists all the virtual machines under the specified subscription for the specified location.[get]
#--------------------------------------------------------------------------
print("Lists all the virtual machines under the specified subscription for the specified location.")
result = mgmt_client.virtual_machines.list_by_location(location=AZURE_LOCATION)
| 0 | 0 | 0 |
b7b05e24047747d5379002c2e983d449318e3256 | 1,692 | py | Python | showcase/migrations/0007_auto_20190112_1627.py | aseufert/sporttechiq | 90812142bedf63fed9d1e5f3b246b78299aa45f7 | [
"MIT"
] | null | null | null | showcase/migrations/0007_auto_20190112_1627.py | aseufert/sporttechiq | 90812142bedf63fed9d1e5f3b246b78299aa45f7 | [
"MIT"
] | null | null | null | showcase/migrations/0007_auto_20190112_1627.py | aseufert/sporttechiq | 90812142bedf63fed9d1e5f3b246b78299aa45f7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2019-01-12 16:27
from django.db import migrations, models
import showcase.file_size_validator
| 37.6 | 134 | 0.634752 | # Generated by Django 2.0 on 2019-01-12 16:27
from django.db import migrations, models
import showcase.file_size_validator
class Migration(migrations.Migration):
    """Add Station.scorecard_name and attach a file-size validator to Station media fields."""
    # Auto-generated by Django (see file header); once applied in production,
    # this migration should not be altered, only documented.
    dependencies = [
        ('showcase', '0006_auto_20190110_0159'),
    ]
    operations = [
        # New field linking a station to the name of its matching scorecard field.
        migrations.AddField(
            model_name='station',
            name='scorecard_name',
            field=models.CharField(blank=True, help_text='Name matching scorecard field', max_length=100),
        ),
        # The AlterField operations below add showcase.file_size_validator.file_size
        # as an upload-size validator on each of Station's media fields.
        migrations.AlterField(
            model_name='station',
            name='animation',
            field=models.FileField(blank=True, null=True, upload_to='stations', validators=[showcase.file_size_validator.file_size]),
        ),
        migrations.AlterField(
            model_name='station',
            name='diagram',
            field=models.FileField(blank=True, null=True, upload_to='stations', validators=[showcase.file_size_validator.file_size]),
        ),
        migrations.AlterField(
            model_name='station',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='stations', validators=[showcase.file_size_validator.file_size]),
        ),
        migrations.AlterField(
            model_name='station',
            name='scorecard_diagram',
            field=models.FileField(blank=True, null=True, upload_to='stations', validators=[showcase.file_size_validator.file_size]),
        ),
        migrations.AlterField(
            model_name='station',
            name='webm_animation',
            field=models.FileField(blank=True, null=True, upload_to='stations', validators=[showcase.file_size_validator.file_size]),
        ),
    ]
| 0 | 1,544 | 23 |
f83dea9758f213b0e8e2e9abfa0e77a70c0d6852 | 620 | py | Python | analyzer/migrations/0013_auto_20191118_1544.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | 1 | 2020-09-10T11:26:05.000Z | 2020-09-10T11:26:05.000Z | analyzer/migrations/0013_auto_20191118_1544.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | analyzer/migrations/0013_auto_20191118_1544.py | elvinaqa/Amazon-Review-Analyzer-Summarizer-Python-NLP-ML- | 6c70e84ffbcb8c8fd65a7fe0847e1f0eb779f759 | [
"Unlicense"
] | null | null | null | # Generated by Django 2.2.6 on 2019-11-18 11:44
import datetime
from django.db import migrations, models
| 24.8 | 123 | 0.593548 | # Generated by Django 2.2.6 on 2019-11-18 11:44
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Product.user to p_user and give Product.analyze_date a string default."""
    dependencies = [
        ('analyzer', '0012_auto_20191118_1543'),
    ]
    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='user',
            new_name='p_user',
        ),
        migrations.AlterField(
            model_name='product',
            name='analyze_date',
            # NOTE(review): verbose_name here is a datetime instance -- almost
            # certainly an auto-generation accident (verbose_name is normally a
            # human-readable label). Left as-is because this migration has shipped.
            field=models.DateField(default='1990-10-10', verbose_name=datetime.datetime(2019, 11, 18, 15, 44, 36, 125683)),
        ),
    ]
| 0 | 490 | 23 |
1fc8ea8d797d815dbafa96ffd0013142703a6de9 | 50,533 | py | Python | app/libs/opasDocPermissions.py | Psychoanalytic-Electronic-Publishing/OpenPubArchive-Content-Server | 031b79b8e0dd5e1c22e2a51394cab846763a451a | [
"Apache-2.0"
] | null | null | null | app/libs/opasDocPermissions.py | Psychoanalytic-Electronic-Publishing/OpenPubArchive-Content-Server | 031b79b8e0dd5e1c22e2a51394cab846763a451a | [
"Apache-2.0"
] | 115 | 2020-09-02T20:01:26.000Z | 2022-03-30T11:47:23.000Z | app/libs/opasDocPermissions.py | Psychoanalytic-Electronic-Publishing/OpenPubArchive-Content-Server | 031b79b8e0dd5e1c22e2a51394cab846763a451a | [
"Apache-2.0"
] | 2 | 2020-10-15T13:52:10.000Z | 2020-10-20T13:42:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Document/session permission support for the OPAS server: wraps the PaDS
# authorization service (login, session info, per-document permit checks)
# and persists session state via opasCentralDBLib.
import requests
import datetime
import time
import opasConfig
import models
import logging
import localsecrets
# import urllib.parse
# import json
import sys
# from opasAPISupportLib import save_opas_session_cookie
sys.path.append("..") # Adds higher directory to python modules path.
from config.opasConfig import OPASSESSIONID
logger = logging.getLogger(__name__)
# for this module
# logger.setLevel(logging.DEBUG)
if 0:
    # Disabled console debug-logging setup, kept as a manual developer toggle.
    # create console handler and set level to debug
    ch = logging.StreamHandler()
    # create formatter
    formatter = logging.Formatter(opasConfig.FORMAT)
    # add formatter to ch
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG)
    logger.addHandler(ch)
from starlette.responses import Response
from starlette.requests import Request
import starlette.status as httpCodes
# import localsecrets
from localsecrets import PADS_BASE_URL, PADS_TEST_ID, PADS_TEST_PW, PADS_BASED_CLIENT_IDS
# Base URL of the PaDS authorization server (local-dev override commented below).
base = PADS_BASE_URL
# base = "http://development.org:9300"
import opasCentralDBLib
# Shared DB handle used for session persistence and PaDS call logging.
ocd = opasCentralDBLib.opasCentralDB()
def find_client_session_id(request: Request,
                           response: Response,
                           client_session: str=None
                           ):
    """
    Resolve the client session ID for this request; ALWAYS returns a session ID or None.

    Resolution order (first hit wins):
      1) the client_session argument (unless None or the literal string 'None')
      2) the client-session request header
      3) the client-session query parameter
      4) the client-session cookie
      5) the stored OPASSESSIONID cookie

    Side effect: when a session ID is resolved and a stored OPASSESSIONID cookie
    exists with a different value, the cookie is refreshed with the resolved ID.
    """
    ret_val = None
    # Fetch the stored OPAS session cookie up front so the refresh check at the
    # bottom is always defined. (Bug fix: previously this was assigned only in
    # the fallback branch, so the header path raised UnboundLocalError at the
    # final check.)
    opas_session_cookie = request.cookies.get(opasConfig.OPASSESSIONID, None)
    if client_session is None or client_session == 'None':
        client_session = request.headers.get(opasConfig.CLIENTSESSIONID, None)
    if client_session is not None:
        ret_val = client_session
        #msg = f"client-session from header: {ret_val} "
        #logger.debug(msg)
    else:
        #Won't work unless they expose cookie to client, so don't waste time
        #pepweb_session_cookie = request.cookies.get("pepweb_session", None)
        client_session_qparam = request.query_params.get(opasConfig.CLIENTSESSIONID, None)
        client_session_cookie = request.cookies.get(opasConfig.CLIENTSESSIONID, None)
        if client_session_qparam is not None:
            ret_val = client_session_qparam
            msg = f"client-session from param: {ret_val}. URL: {request.url}"
            logger.info(msg)
        elif client_session_cookie is not None:
            ret_val = client_session_cookie
            msg = f"client-session from client-session cookie: {ret_val}. URL: {request.url}"
            logger.info(msg)
        elif opas_session_cookie is not None and opas_session_cookie != 'None':
            msg = f"client-session from stored OPASSESSION cookie {opas_session_cookie}. URL: {request.url} "
            logger.info(msg)
            ret_val = opas_session_cookie
        else:
            msg = f"No dependency client-session ID found. Returning None. URL: {request.url}"
            logger.info(msg)
            ret_val = None
    if ret_val is not None and opas_session_cookie is not None and opas_session_cookie != ret_val:
        # overwrite the saved cookie with the session ID actually resolved.
        # (Bug fix: previously wrote f"{client_session}", which is the string
        # "None" whenever the ID came from a query param or cookie.)
        logger.debug("Saved OpasSessionID Cookie")
        response.set_cookie(
            OPASSESSIONID,
            value=f"{ret_val}",
            domain=localsecrets.COOKIE_DOMAIN
        )
    return ret_val
def get_user_ip(request: Request):
    """
    Return the user's originating IP from the X-Forwarded-For header, or None
    when the request is missing or carries no such header.
    """
    if request is None:
        return None
    forwarded_ip = request.headers.get(opasConfig.X_FORWARDED_FOR, None)
    if forwarded_ip is not None:
        # request.url access can raise on some request objects; log defensively.
        try:
            msg = f"X-Forwarded-For from header: {forwarded_ip}. URL: {request.url}"
            logger.debug(msg)
        except Exception as e:
            logger.error(f"Error: {e}")
    return forwarded_ip
def get_authserver_session_info(session_id,
                                client_id=opasConfig.NO_CLIENT_ID,
                                pads_session_info=None,
                                request=None):
    """
    Return a filled-in SessionInfo object from several PaDS calls
    Saves the session information to the SQL database (or updates it)

    If pads_session_info is not supplied (or session_id is None), the PaDS
    session record is fetched first; when the session is a valid logon and a
    request object is present, a second PaDS call retrieves the user record
    and the result is persisted via save_session_info_to_db.

    >>> session_info = get_authserver_session_info(None, "4")
    >>> session_info.username == "NotLoggedIn"
    True
    >>> pads_session_info = pads_login()
    >>> session_id = pads_session_info.SessionId
    >>> session_info = get_authserver_session_info(session_id, "4", pads_session_info=pads_session_info)
    >>> session_info.authorized_peparchive == True
    True
    >>> session_info = get_authserver_session_info("7F481226-9AF1-47BC-8E26-F07DB8C3E78D", "4")
    >>> print (session_info)
    session_id='7F481226-9AF1-47BC-8E26-F07DB8C3E78D' user_id=0 username='NotLoggedIn' ...
    >>> session_info.username == "NotLoggedIn"
    True
    """
    ts = time.time()  # for the elapsed-time debug log at the end
    caller_name = "get_authserver_session_info"
    #make sure it's ok, this is causing problems on production
    #see if it's an int?
    client_id = validate_client_id(client_id, caller_name=caller_name)
    if pads_session_info is None or session_id is None:
        # not supplied, so fetch
        try:
            logger.debug(f"{caller_name}: calling PaDS")
            pads_session_info = get_pads_session_info(session_id=session_id,
                                                      client_id=client_id,
                                                      retry=False,
                                                      request=request)
            try:
                session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
            except Exception as e:
                msg = f"{caller_name}: Error {e}. SessID: {session_id} client_id: {client_id} req: {request}"
                if opasConfig.LOCAL_TRACE:
                    print (msg)
                logger.error(msg)
                session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
            else:
                # adopt the session id PaDS actually assigned/confirmed
                session_id = session_info.session_id
        except Exception as e:
            logger.error(f"{caller_name}: Error getting pads_session_info {e}")
            client_id_type = type(client_id)
            if client_id_type == int:
                session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
            else:
                session_info = models.SessionInfo(session_id="unknown", api_client_id=opasConfig.NO_CLIENT_ID)
    #else:
        #session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
    # This section is causing errors--I believe it's because PaDS is calling the API without real user info
    if pads_session_info is not None:
        if pads_session_info.SessionId is not None:
            session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
        else:
            session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
        start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
        # Each PaDS field is copied defensively: a partial PaDS response may
        # omit an attribute, which raises here and falls back to a safe default.
        try:
            session_info.has_subscription = pads_session_info.HasSubscription
        except Exception as e:
            logger.error(f"{caller_name}: HasSubscription not supplied by PaDS")
            session_info.has_subscription = False
        try:
            session_info.is_valid_login = pads_session_info.IsValidLogon
            session_info.authenticated = pads_session_info.IsValidLogon
        except Exception as e:
            logger.error(f"{caller_name}: IsValidLogon not supplied by PaDS")
            session_info.is_valid_login = False
        try:
            session_info.is_valid_username = pads_session_info.IsValidUserName
        except Exception as e:
            logger.error(f"{caller_name}: IsValidUsername not supplied by PaDS")
            session_info.is_valid_username = False
        # session_info.confirmed_unauthenticated = False
        session_info.session_start = start_time
        session_info.session_expires_time = start_time + datetime.timedelta(seconds=pads_session_info.SessionExpires)
        session_info.pads_session_info = pads_session_info
        user_logged_in_bool = pads_session_info.IsValidLogon
        # either continue an existing session, or start a new one
        if request is not None:
            if user_logged_in_bool or pads_session_info.IsValidLogon:
                # valid logon: fetch the user record to complete session_info
                pads_user_info, status_code = get_authserver_session_userinfo(session_id, client_id, addl_log_info=" (complete session_record)")
                session_info.pads_user_info = pads_user_info
                if status_code == 401: # could be just no session_id, but also could have be returned by PaDS if it doesn't recognize it
                    if session_info.pads_session_info.pads_status_response > 500:
                        msg = f"{caller_name}: PaDS error or PaDS unavailable - user cannot be logged in and no session_id assigned"
                        logger.error(msg)
                    if session_id is not None:
                        logger.warning(f"{session_id} call to pads produces 401 error. Setting user_logged_in to False")
                        user_logged_in_bool = False
                    # session is not logged in
                    # session_info.confirmed_unauthenticated = True
                    # these are defaults so commented out
                    # session_info.authenticated = False
                    # session_info.user_id = 0
                    # session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
                    # session_info.user_type = "Unknown"
                    # session_info.admin = False
                    # session_info.authorized_peparchive = False
                    # session_info.authorized_pepcurrent = False
                else:
                    start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
                    if pads_user_info is not None:
                        # NOTE(review): the chained assignment below also creates
                        # an unused local 'userID'; looks unintentional but harmless.
                        session_info.user_id = userID=pads_user_info.UserId
                        session_info.username = pads_user_info.UserName
                        session_info.user_type = pads_user_info.UserType
                        session_info.admin = pads_user_info.UserType=="Admin"
                        session_info.authorized_peparchive = pads_user_info.HasArchiveAccess
                        session_info.authorized_pepcurrent = pads_user_info.HasCurrentAccess
    logger.debug("PaDS returned user info. Saving to DB")
    unused_val = save_session_info_to_db(session_info)
    if session_info.user_type is None:
        session_info.user_type = "Unknown"
    if session_info.username is None:
        session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
    # print (f"SessInfo: {session_info}")
    logger.debug(f"***authent: {session_info.authenticated} - get_full_session_info total time: {time.time() - ts}***")
    return session_info
def get_authserver_session_userinfo(session_id, client_id, addl_log_info=""):
    """
    Ask PaDS whether this session ID is associated with a (logged-in) user yet.

    Returns a (PadsUserInfo-or-None, HTTP status code) tuple; the status code
    defaults to 401 when session_id is None or the PaDS call fails.
    """
    caller_name = "get_authserver_session_userinfo"
    user_info = None
    status_code = 401
    msg = f"for session {session_id} from client {client_id}"
    if session_id is None:
        return user_info, status_code
    full_URL = base + f"/v1/Users" + f"?SessionID={session_id}"
    try:
        pads_response = requests.get(full_URL, headers={"Content-Type":"application/json"}) # Call PaDS
        ocd.temp_pads_log_call(caller=caller_name, reason=caller_name + addl_log_info, session_id=session_id, pads_call=full_URL, return_status_code=pads_response.status_code) # Log Call PaDS
    except Exception as e:
        logger.error(f"{caller_name}: Error from auth server user info call: {e}. Non-logged in user {msg}")
        return user_info, status_code
    status_code = pads_response.status_code
    pads_payload = pads_response.json()
    if pads_response.ok:
        # normalize any invalid None values before model construction
        pads_payload = fix_userinfo_invalid_nones(pads_payload)
        user_info = models.PadsUserInfo(**pads_payload)
    else:
        logger.debug(f"Non-logged in user {msg}. Info from PaDS: {pads_payload}")
    return user_info, status_code
def authserver_login(username=PADS_TEST_ID,
                     password=PADS_TEST_PW,
                     session_id=None,
                     client_id=opasConfig.NO_CLIENT_ID,
                     retry=True):
    """
    Login directly via the auth server (e.g., in this case PaDS)
    If session_id is included, the idea is that the logged in entity will keep that constant.
    -- #TODO but that's not implemented in this server itself, if logged in through there, yet!

    Args:
        username, password: credentials forwarded to PaDS (defaults are the test account).
        session_id: optional existing session to authenticate under.
        client_id: calling client application id (used for the retry call only).
        retry: on a >403 response, retry once without the session id.

    Returns:
        models.PadsSessionInfo -- always a model instance; on failure a
        default-constructed one, possibly annotated with pads_status_response
        and pads_disposition.
    """
    msg = ""
    caller_name = "authserver_login"
    logger.info(f"Logging in user {username} with session_id {session_id}")
    if session_id is not None:
        full_URL = base + f"/v1/Authenticate/?SessionId={session_id}"
    else:
        full_URL = base + f"/v1/Authenticate/"
    try:
        pads_response = requests.post(full_URL, headers={"Content-Type":"application/json"}, json={"UserName":f"{username}", "Password":f"{password}"})
        ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, return_status_code=pads_response.status_code, params=username) # Log Call PaDS
    except Exception as e:
        msg = f"{caller_name}: Authorization server not available. {e}"
        logger.error(msg)
        if opasConfig.LOCAL_TRACE: print (f"****WATCH_THIS****: {msg}")
        # set up response with default model
        pads_session_info = models.PadsSessionInfo()
        if session_id is not None:
            pads_session_info.SessionId = session_id
        #session_info = models.SessionInfo()
    else:
        status_code = pads_response.status_code # save it for a bit (pads_response is replaced by its payload below)
        if pads_response.ok:
            pads_response = pads_response.json()
            pads_response = fix_pydantic_invalid_nones(pads_response, caller_name="AuthserverLogin")
            if isinstance(pads_response, str):
                # PaDS returned an error string instead of a session record
                pads_session_info = models.PadsSessionInfo()
                logger.error(f"{caller_name}: returned error string: {pads_response}")
            else:
                try:
                    pads_session_info = models.PadsSessionInfo(**pads_response)
                except Exception as e:
                    logger.error(f"{caller_name}: return assignment error: {e}")
                    pads_session_info = models.PadsSessionInfo()
        elif status_code > 403:
            if retry == True:
                # try once without the session ID
                msg = f"{caller_name}: Login returned {status_code}. Trying without session id."
                logger.error(msg)
                pads_session_info = authserver_login(username=username, password=password, client_id=client_id, retry=False)
            else:
                msg = f"{caller_name}: Auth System Issue. Login returned {status_code}. Retry (failed), or Retry not selected."
                logger.error(msg)
                pads_session_info = models.PadsSessionInfo()
                pads_session_info.pads_status_response = status_code
                pads_session_info.pads_disposition = msg
        else:
            # status <= 403 and not ok (e.g., bad credentials); PaDS may still
            # have returned a parsable body describing the failed session.
            try:
                pads_response = pads_response.json()
                pads_response = fix_pydantic_invalid_nones(pads_response)
                if isinstance(pads_response, str):
                    pads_session_info = models.PadsSessionInfo()
                    msg = f"{caller_name}: Returned error string: {pads_response}"
                    logger.error(msg)
                else:
                    try:
                        pads_session_info = models.PadsSessionInfo(**pads_response)
                    except Exception as e:
                        msg = f"{caller_name}: Return assignment error: {e}"
                        logger.error(msg)
                        pads_session_info = models.PadsSessionInfo()
            except Exception as e:
                logger.error(f"{caller_name}: Response processing error {e}")
                # Bug fix: this previously read models.PadsSessionInfo(**pads_session_info),
                # which raised NameError here (pads_session_info is unbound when
                # .json() fails) and would be a TypeError even if it were bound.
                pads_session_info = models.PadsSessionInfo()
            pads_session_info.pads_status_response = status_code
            pads_session_info.pads_disposition = msg
    return pads_session_info
def get_access_limitations(doc_id,
                           classification, # document classification, e.g., free, current, archive, undefined, offsite, toc
                           session_info, # updated in code below
                           year=None,
                           doi=None,
                           documentListItem: models.DocumentListItem=None, # deprecated, not used
                           fulltext_request:bool=None,
                           request=None):
    """
    Based on the classification of the document (archive, current [embargoed],
    free, offsite), and the users permissions in session_info, determine whether
    this user has access to the full-text of the document, and fill out permissions
    in accessLimitations (ret_val) structure for document doc_id.

    Returns a models.AccessLimitations instance (never None).

    Side effects: session_info may be updated (authenticated,
    authorized_peparchive, authorized_pepcurrent, user identity) from the PaDS
    permission response; an authorized full-text request may also update the
    session record in the database.

    20210428 - removed documentListItem and update side effects, caller should copy access
    There are still side effects on session_info
    """
    caller_name = "get_access_limitations"
    try:
        open_access = False
        ret_val = models.AccessLimitations()
        ret_val.doi = doi
        ret_val.accessLimitedPubLink = None
        ret_val.accessLimitedCode = 200 # default (for now)
        # USE THESE DEFAULTS, only set below if different
        # default, turned on if classification below is opasConfig.DOCUMENT_ACCESS_EMBARGOED
        ret_val.accessLimited = True # no access by default, may be changed below.
        ret_val.accessChecked = False # Same as default, for better clarity here
        ret_val.accessLimitedClassifiedAsCurrentContent = False
        if session_info is None:
            # logger.warning(f"Document permissions for {doc_id} -- no session info")
            ret_val.accessLimitedCode = 401 # no session
            session_id = "No Session Info"
            # not logged in
            # use all the defaults above, log error below.
        else:
            # for debugging display at return
            try:
                session_id = session_info.session_id
            except:
                session_id = "No Session ID"
        if ret_val.doi is not None:
            publisherAccess = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO + opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
            # TODO: get the link we use to send users to publishers site when we don't have it, and no doi, and implement here.
            # for now, just doi
            ret_val.accessLimitedPubLink = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
        else:
            publisherAccess = "."
        # NOTE(review): `in (X)` below is not a tuple membership test -- (X) is
        # just X, so if the opasConfig constant is a string this is a substring
        # check; presumably classification always equals the constant exactly --
        # confirm against opasConfig.
        if classification in (opasConfig.DOCUMENT_ACCESS_FREE):
            # free can be for anyone!!!! Change accessLimited
            open_access = True
            ret_val.accessLimited = False
            ret_val.accessChecked = True
            ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
            #"This content is currently free to all users."
            ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
        elif classification in (opasConfig.DOCUMENT_ACCESS_OFFSITE):
            # we only allow reading abstracts for offsite, accessLimited is True
            ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
            #"This content is currently completely limited to all users."
            ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_OFFSITE + publisherAccess # limited...get it elsewhere
        elif classification in (opasConfig.DOCUMENT_ACCESS_EMBARGOED): # PEPCurrent
            ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
            ret_val.accessLimitedClassifiedAsCurrentContent = True
            ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_DESCRIPTION + opasConfig.ACCESS_SUMMARY_EMBARGOED + publisherAccess # limited...get it elsewhere
            if session_info is not None:
                try:
                    # #########################################################################################
                    # optimization...if authorized for PEPCurrent, don't check again this query, unless it's a full-text request
                    # #########################################################################################
                    if session_info.authorized_pepcurrent:
                        ret_val.accessLimited = False # you can access it!!!
                        ret_val.accessChecked = True
                        # "This current content is available for you to access"
                        ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_CURRENT_CONTENT_AVAILABLE
                        logger.debug("Optimization - session info used to authorize PEPCurrent document")
                except Exception as e:
                    logger.error(f"{caller_name}: PEPCurrent document permission: {e}")
        elif classification in (opasConfig.DOCUMENT_ACCESS_ARCHIVE):
            ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
            # ret_val.accessLimited = True # default is true
            ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_FORSUBSCRIBERS
            # #########################################################################################
            # optimization...if authorized, don't check again, unless it's a full-text request
            # #########################################################################################
            if session_info is not None:
                try:
                    if session_info.authorized_peparchive:
                        ret_val.accessLimited = False # you can access it!!!
                        ret_val.accessChecked = True
                        # "This content is available for you to access"
                        ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
                        logger.debug("Optimization - session info used to authorize PEPArchive document")
                except Exception as e:
                    logger.error(f"{caller_name}: PEPArchive document permission: {e}")
        elif classification in (opasConfig.DOCUMENT_ACCESS_TOC):
            open_access = True
            ret_val.accessLimited = False # you can access it!!! (All TOCs are open)
            ret_val.accessChecked = True
            # just like free for now
            ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
            #"This content is currently free to all users."
            ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
        else:
            logger.error(f"{caller_name}: Unknown classification: {classification}")
        # **************************************
        # Now check for access, or cached access
        # - always check for a full-text request so PaDS can track them.
        #   since we don't really always know about authentication, we need to check all requests that are otherwise rejected.
        # **************************************
        try:
            if not open_access:
                if (session_info.authenticated == True # Must be authenticated for this check
                    and (ret_val.accessLimited == True # if it's marked limited, then may need to check, it might be first one
                         or fulltext_request == True)): # or whenever full-text is requested.
                    # and session_info.api_client_session and session_info.api_client_id in PADS_BASED_CLIENT_IDS:
                    if fulltext_request:
                        reason_for_check = opasConfig.AUTH_DOCUMENT_VIEW_REQUEST
                    else:
                        reason_for_check = opasConfig.AUTH_ABSTRACT_VIEW_REQUEST
                    try:
                        pads_authorized, resp = authserver_permission_check(session_id=session_info.session_id,
                                                                            doc_id=doc_id,
                                                                            doc_year=year,
                                                                            reason_for_check=reason_for_check,
                                                                            request=request)
                    except Exception as e:
                        # PaDS could be down, local development
                        logger.error(f"{caller_name}: Access Exception: {e}")
                        if localsecrets.BASEURL == "development.org:9100":
                            resp = models.PadsPermitInfo(Permit=True, HasArchiveAccess=True, HasCurrentAccess=True)
                            # so it doesn't have to check this later
                            session_info.authorized_peparchive = True
                            session_info.authorized_pepcurrent = True
                        else:
                            session_info.authorized_peparchive = False
                            session_info.authorized_pepcurrent = False
                            resp = models.PadsPermitInfo(Permit=False, HasArchiveAccess=False, HasCurrentAccess=False)
                    finally:
                        # save PaDS code
                        # NOTE(review): when authserver_permission_check raised,
                        # pads_authorized is unbound here; the resulting NameError
                        # at the 'if pads_authorized' test below is swallowed by
                        # the enclosing except -- confirm this is intended.
                        ret_val.accessLimitedCode = resp.StatusCode
                        if resp.StatusCode == httpCodes.HTTP_401_UNAUTHORIZED: # or resp.ReasonStr == 'Session has not been authenticated':
                            # if this is True, then we can stop asking this time
                            # You would get the same return if
                            #   the session was not recognised on pads,
                            #   the session had been deleted from the database (should never happen…), or
                            #   the session simply never existed.
                            ret_val.accessLimited = True
                            session_info.authenticated = False
                            msg = f"Full text of {doc_id} unavailable. " + opasConfig.ACCESSLIMITED_401_UNAUTHORIZED
                            ret_val.accessLimitedReason = msg
                        else:
                            # set default again based on update from PaDS query
                            ret_val.accessLimited = True
                            if ret_val.accessLimitedClassifiedAsCurrentContent == True:
                                if resp.HasCurrentAccess == True:
                                    session_info.authorized_pepcurrent = True
                                    ret_val.accessLimited = False
                                    ret_val.accessChecked = True
                                else:
                                    ret_val.accessLimited = True
                            else: # not current content
                                if resp.HasArchiveAccess == True:
                                    session_info.authorized_peparchive = True
                                    ret_val.accessLimited = False
                                    ret_val.accessChecked = True
                            if fulltext_request and pads_authorized:
                                # let's make sure we know about this user.
                                if session_info.user_id == opasConfig.USER_NOT_LOGGED_IN_NAME:
                                    # We got this far, We need to find out who this is
                                    pads_user_info, status_code = get_authserver_session_userinfo(session_info.session_id, session_info.api_client_id, addl_log_info=" (user info not yet collected)")
                                    if pads_user_info is not None:
                                        session_info.user_id = pads_user_info.UserId
                                        session_info.username = pads_user_info.UserName
                                        session_info.user_type = pads_user_info.UserType # TODO - Add this to session table
                                        # session_info.session_expires_time = ?
                                        # ocd = opasCentralDBLib.opasCentralDB()
                                        ocd.update_session(session_info.session_id,
                                                           userID=session_info.user_id,
                                                           username=session_info.username,
                                                           authenticated=1,
                                                           authorized_peparchive=1 if session_info.authorized_peparchive == True else 0,
                                                           authorized_pepcurrent=1 if session_info.authorized_pepcurrent == True else 0,
                                                           session_end=session_info.session_expires_time,
                                                           api_client_id=session_info.api_client_id
                                                           )
                        if pads_authorized:
                            # "This content is available for you to access"
                            ret_val.accessLimited = False
                            ret_val.accessChecked = True
                            ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
                            ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
                            msg = f"Document {doc_id} available. Pads Reason: {resp.ReasonStr}. Opas Reason: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
                            logger.debug(msg)
                            ret_val.accessLimitedDebugMsg = msg
                        else:
                            # changed from warning to info 2021-06-02 to reduce normal logging
                            msg = f"Document {doc_id} unavailable. Pads Reason: {resp.ReasonStr} Opas: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
                            logger.info(msg) # limited...get it elsewhere
                            ret_val.accessLimitedDebugMsg = msg
                            ret_val.accessLimited = True
                            if ret_val.accessLimitedClassifiedAsCurrentContent:
                                # embargoed
                                ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_EMBARGOED
                            else:
                                # non embargoed, but no access.
                                ret_val.accessLimitedReason = f"{ret_val.accessLimitedDescription} {ret_val.accessLimitedReason}"
                else:
                    # not full-text OR (not authenticated or accessLimited==False)
                    msg = f"No PaDS check needed: Document {doc_id} accessLimited: {ret_val.accessLimited}. Authent: {session_info.authenticated}"
                    logger.debug(msg)
                    ret_val.accessLimitedDebugMsg = msg
            else: # It's open access!
                msg = f"No PaDS check needed: Document {doc_id} is open access"
                logger.debug(msg)
                ret_val.accessLimitedDebugMsg = msg
        except Exception as e:
            msg = f"{caller_name}: Issue checking document permission. Possibly not logged in {e}"
            logger.error(msg)
            ret_val.accessLimitedDebugMsg = msg
            pass # can't be checked, will be unauthorized.
    except Exception as e:
        msg = f"{caller_name}: General exception {e} trying ascertain access limitations."
        logger.error(msg)
        if ret_val is None:
            ret_val = models.AccessLimitations() # make sure there's defaults!
        ret_val.accessLimitedDebugMsg = msg
    if fulltext_request and ret_val.accessLimited:
        # happens anytime someone views an abstract in Document mode because they don't have an account. Perfectly legal. Changed to info (from error)
        msg = f"Full-text access for {doc_id} denied ({ret_val.accessLimitedCode}). Sess:{session_id}: Access:{ret_val.accessLimitedReason}"
        logger.info(msg)
        ret_val.accessLimitedDebugMsg = msg
    return ret_val
# ##################################################################################################################################################
#
# LOCAL ROUTUNES
#
# ##################################################################################################################################################
def get_pads_session_info(session_id=None,
                          client_id=opasConfig.NO_CLIENT_ID,
                          retry=True,
                          request=None):
    """
    Get the PaDS session model, and get a new session ID from the auth server if needed.

    Args:
        session_id: existing session ID to forward to PaDS, or None to let
                    PaDS assign a new one.
        client_id:  calling client app ID; defaults to opasConfig.NO_CLIENT_ID
                    (which is logged as a warning).
        retry:      if True, retry once *without* the session ID when PaDS
                    returns a status code > 403 (server-side failure).
        request:    optional Starlette Request; used to forward the caller's
                    IP (X-Forwarded-For header) and to log the calling URL.

    Returns:
        models.PadsSessionInfo -- always returns a model instance; on any
        PaDS failure a default instance is returned with
        pads_status_response / pads_disposition filled in.
    """
    caller_name = "get_pads_session_info"
    if client_id == opasConfig.NO_CLIENT_ID:
        logger.warning(f"{caller_name}: Session info call for Session ID: {session_id} Client ID was NO_CLIENT_ID ({opasConfig.NO_CLIENT_ID}).")

    if session_id is not None:
        full_URL = base + f"/v1/Authenticate/IP/" + f"?SessionID={session_id}"
    else:
        full_URL = base + f"/v1/Authenticate/IP/"

    req_url = "No request info."
    if request is not None:
        try: # just in case this generates an error
            req_url = request.url # to log caller url
        except Exception:
            pass

    user_ip = get_user_ip(request) # returns an IP if X_FORWARDED_FOR address is in header
    try:
        logger.debug(f"{caller_name}: calling PaDS")
        # truthiness covers both None and "" (the original `user_ip is not ''`
        # was an identity test against a literal -- a SyntaxWarning in py3.8+)
        if user_ip:
            headers = { opasConfig.X_FORWARDED_FOR: user_ip }
            # BUG FIX: headers must be passed by keyword; the second positional
            # argument of requests.get() is `params`, so the forwarded IP was
            # previously sent as a query parameter instead of a request header.
            pads_session_info = requests.get(full_URL, headers=headers) # Call PaDS
            logger.debug(f"{caller_name}: Session ID:{session_id}. X_FORWARDED_FOR from authenticateIP: {user_ip}. URL: {req_url} PaDS Session Info: {pads_session_info}")
        else:
            pads_session_info = requests.get(full_URL) # Call PaDS
    except Exception as e:
        # PaDS unreachable -- return an empty default model rather than raising
        logger.error(f"{caller_name}: Authorization server not available. {e}")
        pads_session_info = models.PadsSessionInfo()
    else:
        status_code = pads_session_info.status_code # save it for a bit (we replace pads_session_info below)
        ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, ip_address=user_ip, return_status_code=status_code) # Log Call PaDS
        if status_code > 403: # e.g., (httpCodes.HTTP_500_INTERNAL_SERVER_ERROR, httpCodes.HTTP_503_SERVICE_UNAVAILABLE):
            error_text = f"{caller_name}: PaDS session_info status_code is {status_code}"
            logger.error(error_text)
            if retry:
                # try once without the session ID
                pads_session_info = get_pads_session_info(client_id=client_id, retry=False, request=request)
                pads_session_info.pads_status_response = status_code
            else:
                pads_session_info = models.PadsSessionInfo()
                pads_session_info.pads_status_response = status_code
                pads_session_info.pads_disposition = error_text
        else:
            try:
                pads_session_info = pads_session_info.json()
                pads_session_info = fix_pydantic_invalid_nones(pads_session_info, caller_name=caller_name)
                pads_session_info = models.PadsSessionInfo(**pads_session_info)
                pads_session_info.pads_status_response = status_code
                logger.debug(f"PaDS Status Ok, Final IP Session Info: {pads_session_info} URL: {req_url}.")
            except Exception as e:
                msg = f"{caller_name}: Response processing error {e}"
                logger.error(msg)
                try:
                    # pads_session_info may be a partially valid dict...
                    pads_session_info = models.PadsSessionInfo(**pads_session_info)
                except Exception:
                    # ...or still the raw Response object if .json() itself
                    # failed, in which case fall back to an empty default
                    pads_session_info = models.PadsSessionInfo()
                pads_session_info.pads_status_response = status_code
                pads_session_info.pads_disposition = msg

    return pads_session_info
if __name__ == "__main__":
    import doctest
    import sys

    # Banner and interpreter version for the test run
    banner = 40 * "*"
    print(banner, "opasDocPermissionsTests", banner)
    major, minor = sys.version_info[0], sys.version_info[1]
    print(f"Running in Python {major}.{minor}")

    # Route this module's log output to the console while the doctests run.
    logger = logging.getLogger(__name__)
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter('%(asctime)s %(name)s %(lineno)d - %(levelname)s %(message)s'))
    logger.addHandler(console)

    doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    print("Fini. Tests complete.")
# -*- coding: utf-8 -*-
import requests
import datetime
import time
import opasConfig
import models
import logging
import localsecrets
# import urllib.parse
# import json
import sys
# from opasAPISupportLib import save_opas_session_cookie
sys.path.append("..") # Adds higher directory to python modules path.
from config.opasConfig import OPASSESSIONID
logger = logging.getLogger(__name__)
# for this module
# logger.setLevel(logging.DEBUG)
if 0:
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter(opasConfig.FORMAT)
# add formatter to ch
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
from starlette.responses import Response
from starlette.requests import Request
import starlette.status as httpCodes
# import localsecrets
from localsecrets import PADS_BASE_URL, PADS_TEST_ID, PADS_TEST_PW, PADS_BASED_CLIENT_IDS
base = PADS_BASE_URL
# base = "http://development.org:9300"
import opasCentralDBLib
ocd = opasCentralDBLib.opasCentralDB()
def user_logged_in_per_header(request, session_id=None, caller_name="unknown") -> bool:
if request == None:
logger.warning(f"No request supplied to check log-in. Returning False ({caller_name} / {session_id})")
ret_val = False
else:
ret_val = request.headers.get(key=localsecrets.AUTH_KEY_NAME, default=None)
# is logged in?
if ret_val == "true":
# logger.warning(f"Loggedin=True ({caller_name} / {session_id})") #TEMP diagnostic
ret_val = True
elif ret_val == "false":
logger.warning(f"Loggedin=False ({caller_name} / {session_id})") #TEMP diagnostic
# make sure session is ended in database
success = ocd.end_session(session_id=session_id)
ret_val = False
else:
the_session_info = ocd.get_session_from_db(session_id)
if the_session_info is not None:
try:
if the_session_info.authenticated:
if the_session_info.session_end is not None:
# session already ended
ret_val = False
else:
if datetime.datetime.now() > the_session_info.session_expires_time:
# End session
success = ocd.end_session(session_id=session_id)
ret_val = False
else:
# still logged in
ret_val = True
except Exception as e:
logger.debug(f"No login status in header ({caller_name} / {session_id}). Can't determine login status {e}")
ret_val = False
else: # no logged-in session found
logger.debug(f"No login status in header. Session not found in server database ({caller_name} / {session_id}).")
ret_val = False
return ret_val
def verify_header(request, caller_name):
# Double Check for missing header test--ONLY checks headers, not other avenues used by find
client_session_from_header = request.headers.get(opasConfig.CLIENTSESSIONID, None)
client_id_from_header = request.headers.get(opasConfig.CLIENTID, None)
if client_id_from_header == 2 or client_id_from_header == 3:
if client_session_from_header == None:
logger.warning(f"***{caller_name}*** - No client-session supplied. Client-id (from header): {client_id_from_header}.")
else:
logger.debug(f"***{caller_name}*** - Client-session found. Client-id (from header): {client_id_from_header}.")
return client_id_from_header, client_session_from_header
def find_client_session_id(request: Request,
response: Response,
client_session: str=None
):
"""
ALWAYS returns a session ID or None
Dependency for client_session id:
gets it from header;
if not there, gets it from query param;
if not there, gets it from a cookie
Otherwise, gets a new one from the auth server
"""
ret_val = None
if client_session is None or client_session == 'None':
client_session = request.headers.get(opasConfig.CLIENTSESSIONID, None)
if client_session is not None:
ret_val = client_session
#msg = f"client-session from header: {ret_val} "
#logger.debug(msg)
else:
#Won't work unless they expose cookie to client, so don't waste time
#pepweb_session_cookie = request.cookies.get("pepweb_session", None)
opas_session_cookie = request.cookies.get(opasConfig.OPASSESSIONID, None)
client_session_qparam = request.query_params.get(opasConfig.CLIENTSESSIONID, None)
client_session_cookie = request.cookies.get(opasConfig.CLIENTSESSIONID, None)
if client_session_qparam is not None:
ret_val = client_session_qparam
msg = f"client-session from param: {ret_val}. URL: {request.url}"
logger.info(msg)
elif client_session_cookie is not None:
ret_val = client_session_cookie
msg = f"client-session from client-session cookie: {ret_val}. URL: {request.url}"
logger.info(msg)
elif opas_session_cookie is not None and opas_session_cookie != 'None':
msg = f"client-session from stored OPASSESSION cookie {opas_session_cookie}. URL: {request.url} "
logger.info(msg)
ret_val = opas_session_cookie
else:
msg = f"No dependency client-session ID found. Returning None. URL: {request.url}"
logger.info(msg)
ret_val = None
if ret_val is not None and opas_session_cookie is not None and opas_session_cookie != ret_val:
# overwrite any saved cookie, if there is one
logger.debug("Saved OpasSessionID Cookie")
response.set_cookie(
OPASSESSIONID,
value=f"{client_session}",
domain=localsecrets.COOKIE_DOMAIN
)
return ret_val
def get_user_ip(request: Request):
"""
Returns a users IP if passed in the headers.
"""
ret_val = None
if request is not None:
ret_val = request.headers.get(opasConfig.X_FORWARDED_FOR, None)
if ret_val is not None:
try:
req_url = request.url
msg = f"X-Forwarded-For from header: {ret_val}. URL: {req_url}"
logger.debug(msg)
except Exception as e:
logger.error(f"Error: {e}")
return ret_val
def fix_userinfo_invalid_nones(response_data, caller_name="DocPermissionsError"):
try:
if response_data["UserName"] is None:
response_data["UserName"] = "NotLoggedIn"
except Exception as e:
logger.error(f"{caller_name}: PaDS UserName Data Exception: {e}")
try:
if response_data["UserType"] is None:
response_data["UserType"] = "Unknown"
except Exception as e:
logger.error(f"{caller_name}: PaDS UserType Data Exception: {e}")
return response_data
def fix_pydantic_invalid_nones(response_data, caller_name="DocPermissionsError"):
try:
if response_data["ReasonStr"] is None:
response_data["ReasonStr"] = ""
except Exception as e:
logger.error(f"{caller_name}: Exception: {e}")
return response_data
def validate_client_id(client_id, caller_name="DocPermissionsError"):
if client_id is None:
client_id = opasConfig.NO_CLIENT_ID
logger.error(f"{caller_name}: Error: Client ID is None")
else:
try:
if not isinstance(client_id, int):
if isinstance(client_id, str):
try:
client_id = int(client_id)
except:
logger.error(f"client_id is str, but not convertible to int. Default to NO_CLIENT_ID. Caller: {caller_name}")
client_id = opasConfig.NO_CLIENT_ID
else:
logger.error(f"client_id is not int or str. Type is {type(client_id)}. Default to NO_CLIENT_ID. Caller: {caller_name}")
client_id = opasConfig.NO_CLIENT_ID
except Exception as e:
logger.error(f"client_id instance check failed. {e}")
client_id = opasConfig.NO_CLIENT_ID
return client_id
def get_authserver_session_info(session_id,
client_id=opasConfig.NO_CLIENT_ID,
pads_session_info=None,
request=None):
"""
Return a filled-in SessionInfo object from several PaDS calls
Saves the session information to the SQL database (or updates it)
>>> session_info = get_authserver_session_info(None, "4")
>>> session_info.username == "NotLoggedIn"
True
>>> pads_session_info = pads_login()
>>> session_id = pads_session_info.SessionId
>>> session_info = get_authserver_session_info(session_id, "4", pads_session_info=pads_session_info)
>>> session_info.authorized_peparchive == True
True
>>> session_info = get_authserver_session_info("7F481226-9AF1-47BC-8E26-F07DB8C3E78D", "4")
>>> print (session_info)
session_id='7F481226-9AF1-47BC-8E26-F07DB8C3E78D' user_id=0 username='NotLoggedIn' ...
>>> session_info.username == "NotLoggedIn"
True
"""
ts = time.time()
caller_name = "get_authserver_session_info"
#make sure it's ok, this is causing problems on production
#see if it's an int?
client_id = validate_client_id(client_id, caller_name=caller_name)
if pads_session_info is None or session_id is None:
# not supplied, so fetch
try:
logger.debug(f"{caller_name}: calling PaDS")
pads_session_info = get_pads_session_info(session_id=session_id,
client_id=client_id,
retry=False,
request=request)
try:
session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
except Exception as e:
msg = f"{caller_name}: Error {e}. SessID: {session_id} client_id: {client_id} req: {request}"
if opasConfig.LOCAL_TRACE:
print (msg)
logger.error(msg)
session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
else:
session_id = session_info.session_id
except Exception as e:
logger.error(f"{caller_name}: Error getting pads_session_info {e}")
client_id_type = type(client_id)
if client_id_type == int:
session_info = models.SessionInfo(session_id="unknown", api_client_id=client_id)
else:
session_info = models.SessionInfo(session_id="unknown", api_client_id=opasConfig.NO_CLIENT_ID)
#else:
#session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
# This section is causing errors--I believe it's because PaDS is calling the API without real user info
if pads_session_info is not None:
if pads_session_info.SessionId is not None:
session_info = models.SessionInfo(session_id=pads_session_info.SessionId, api_client_id=client_id)
else:
session_info = models.SessionInfo(session_id=session_id, api_client_id=client_id)
start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
try:
session_info.has_subscription = pads_session_info.HasSubscription
except Exception as e:
logger.error(f"{caller_name}: HasSubscription not supplied by PaDS")
session_info.has_subscription = False
try:
session_info.is_valid_login = pads_session_info.IsValidLogon
session_info.authenticated = pads_session_info.IsValidLogon
except Exception as e:
logger.error(f"{caller_name}: IsValidLogon not supplied by PaDS")
session_info.is_valid_login = False
try:
session_info.is_valid_username = pads_session_info.IsValidUserName
except Exception as e:
logger.error(f"{caller_name}: IsValidUsername not supplied by PaDS")
session_info.is_valid_username = False
# session_info.confirmed_unauthenticated = False
session_info.session_start = start_time
session_info.session_expires_time = start_time + datetime.timedelta(seconds=pads_session_info.SessionExpires)
session_info.pads_session_info = pads_session_info
user_logged_in_bool = pads_session_info.IsValidLogon
# either continue an existing session, or start a new one
if request is not None:
if user_logged_in_bool or pads_session_info.IsValidLogon:
pads_user_info, status_code = get_authserver_session_userinfo(session_id, client_id, addl_log_info=" (complete session_record)")
session_info.pads_user_info = pads_user_info
if status_code == 401: # could be just no session_id, but also could have be returned by PaDS if it doesn't recognize it
if session_info.pads_session_info.pads_status_response > 500:
msg = f"{caller_name}: PaDS error or PaDS unavailable - user cannot be logged in and no session_id assigned"
logger.error(msg)
if session_id is not None:
logger.warning(f"{session_id} call to pads produces 401 error. Setting user_logged_in to False")
user_logged_in_bool = False
# session is not logged in
# session_info.confirmed_unauthenticated = True
# these are defaults so commented out
# session_info.authenticated = False
# session_info.user_id = 0
# session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
# session_info.user_type = "Unknown"
# session_info.admin = False
# session_info.authorized_peparchive = False
# session_info.authorized_pepcurrent = False
else:
start_time = pads_session_info.session_start_time if pads_session_info.session_start_time is not None else datetime.datetime.now()
if pads_user_info is not None:
session_info.user_id = userID=pads_user_info.UserId
session_info.username = pads_user_info.UserName
session_info.user_type = pads_user_info.UserType
session_info.admin = pads_user_info.UserType=="Admin"
session_info.authorized_peparchive = pads_user_info.HasArchiveAccess
session_info.authorized_pepcurrent = pads_user_info.HasCurrentAccess
logger.debug("PaDS returned user info. Saving to DB")
unused_val = save_session_info_to_db(session_info)
if session_info.user_type is None:
session_info.user_type = "Unknown"
if session_info.username is None:
session_info.username = opasConfig.USER_NOT_LOGGED_IN_NAME
# print (f"SessInfo: {session_info}")
logger.debug(f"***authent: {session_info.authenticated} - get_full_session_info total time: {time.time() - ts}***")
return session_info
def get_authserver_session_userinfo(session_id, client_id, addl_log_info=""):
"""
Send PaDS the session ID and see if that's associated with a user yet.
"""
ret_val = None
caller_name = "get_authserver_session_userinfo"
status_code = 401
msg = f"for session {session_id} from client {client_id}"
#logger.debug(msg)
if session_id is not None:
full_URL = base + f"/v1/Users" + f"?SessionID={session_id}"
try:
response = requests.get(full_URL, headers={"Content-Type":"application/json"}) # Call PaDS
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name + addl_log_info, session_id=session_id, pads_call=full_URL, return_status_code=response.status_code) # Log Call PaDS
except Exception as e:
logger.error(f"{caller_name}: Error from auth server user info call: {e}. Non-logged in user {msg}")
else:
status_code = response.status_code
padsinfo = response.json()
if response.ok:
padsinfo = fix_userinfo_invalid_nones(padsinfo)
ret_val = models.PadsUserInfo(**padsinfo)
else:
logger.debug(f"Non-logged in user {msg}. Info from PaDS: {padsinfo}") # 2021.08.08 back to debug...seems consistent.
return ret_val, status_code # padsinfo, status_code
def save_session_info_to_db(session_info):
# make sure the session is recorded.
session_id = session_info.session_id
# ocd = opasCentralDBLib.opasCentralDB()
db_session_info = ocd.get_session_from_db(session_id)
if db_session_info is None:
ret_val, saved_session_info = ocd.save_session(session_id, session_info)
logger.debug(f"Saving session info {session_id}")
else:
logger.debug(f"Session {session_id} already found in db. Updating...")
if session_info.username != db_session_info.username and db_session_info.username != opasConfig.USER_NOT_LOGGED_IN_NAME:
msg = f"AuthServerSessionInfoError: MISMATCH! Two Usernames with same session_id. OLD(DB): {db_session_info}; NEW(SESSION): {session_info}"
print (msg)
logger.error(msg)
logger.debug(f"Updating session info {session_id}")
ret_val = ocd.update_session(session_id,
userID=session_info.user_id,
username=session_info.username,
authenticated=1 if session_info.authenticated == True else 0,
authorized_peparchive=1 if session_info.authorized_peparchive == True else 0,
authorized_pepcurrent=1 if session_info.authorized_pepcurrent == True else 0,
session_end=session_info.session_expires_time,
api_client_id=session_info.api_client_id
)
return ret_val
def authserver_login(username=PADS_TEST_ID,
password=PADS_TEST_PW,
session_id=None,
client_id=opasConfig.NO_CLIENT_ID,
retry=True):
"""
Login directly via the auth server (e.g., in this case PaDS)
If session_id is included, the idea is that the logged in entity will keep that constant.
-- #TODO but that's not implemented in this server itself, if logged in through there, yet!
"""
msg = ""
caller_name = "authserver_login"
logger.info(f"Logging in user {username} with session_id {session_id}")
if session_id is not None:
full_URL = base + f"/v1/Authenticate/?SessionId={session_id}"
else:
full_URL = base + f"/v1/Authenticate/"
try:
pads_response = requests.post(full_URL, headers={"Content-Type":"application/json"}, json={"UserName":f"{username}", "Password":f"{password}"})
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, return_status_code=pads_response.status_code, params=username) # Log Call PaDS
except Exception as e:
msg = f"{caller_name}: Authorization server not available. {e}"
logger.error(msg)
if opasConfig.LOCAL_TRACE: print (f"****WATCH_THIS****: {msg}")
# set up response with default model
pads_session_info = models.PadsSessionInfo()
if session_id is not None:
pads_session_info.SessionId = session_id
#session_info = models.SessionInfo()
else:
status_code = pads_response.status_code # save it for a bit (we replace pads_session_info below)
if pads_response.ok:
pads_response = pads_response.json()
pads_response = fix_pydantic_invalid_nones(pads_response, caller_name="AuthserverLogin")
if isinstance(pads_response, str):
pads_session_info = models.PadsSessionInfo()
logger.error(f"{caller_name}: returned error string: {pads_response}")
else:
try:
pads_session_info = models.PadsSessionInfo(**pads_response)
except Exception as e:
logger.error(f"{caller_name}: return assignment error: {e}")
pads_session_info = models.PadsSessionInfo()
elif status_code > 403:
if retry == True:
# try once without the session ID
msg = f"{caller_name}: Login returned {status_code}. Trying without session id."
logger.error(msg)
pads_session_info = authserver_login(username=username, password=password, client_id=client_id, retry=False)
else:
msg = f"{caller_name}: Auth System Issue. Login returned {status_code}. Retry (failed), or Retry not selected."
logger.error(msg)
pads_session_info = models.PadsSessionInfo()
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = msg
else:
try:
pads_response = pads_response.json()
pads_response = fix_pydantic_invalid_nones(pads_response)
if isinstance(pads_response, str):
pads_session_info = models.PadsSessionInfo()
msg = f"{caller_name}: Returned error string: {pads_response}"
logger.error(msg)
else:
try:
pads_session_info = models.PadsSessionInfo(**pads_response)
except Exception as e:
msg = f"{caller_name}: Return assignment error: {e}"
logger.error(msg)
pads_session_info = models.PadsSessionInfo()
except Exception as e:
logger.error(f"{caller_name}: Response processing error {e}")
pads_session_info = models.PadsSessionInfo(**pads_session_info)
pads_session_info.pads_status_response = status_code
pads_session_info.pads_disposition = msg
return pads_session_info
def authserver_logout(session_id, request: Request=None, response: Response=None):
ret_val = False
caller_name = "authserver_logout"
if session_id is not None:
if response is not None:
response.delete_cookie(key=opasConfig.OPASSESSIONID,path="/",
domain=localsecrets.COOKIE_DOMAIN)
# call PaDS
full_URL = base + f"/v1/Users/Logout/?SessionId={session_id}"
response = requests.post(full_URL, headers={"Content-Type":"application/json"})
ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, return_status_code=response.status_code) # Log Call PaDS
if response.ok:
ret_val = True
else:
logger.error(f"{caller_name}: Error Logging out for sessionId: {session_id} from PaDS: {response.json()}")
else:
logger.error(f"{caller_name}: No SessionId supplied.")
return ret_val
def authserver_permission_check(session_id,
doc_id,
doc_year,
reason_for_check=None,
request=None):
ret_val = False
caller_name = "authserver_permission_check"
ret_resp = None
if reason_for_check is None:
logger.warning(f"{caller_name}: fulltext_request info not supplied")
full_URL = base + f"/v1/Permits?SessionId={session_id}&DocId={doc_id}&DocYear={doc_year}&ReasonForCheck={reason_for_check}"
user_ip = get_user_ip(request)
if user_ip is not None:
headers = { opasConfig.X_FORWARDED_FOR:user_ip }
else:
headers = None
try: # permit request to PaDS
response = requests.get(full_URL, headers=headers) # Call PaDS
ocd.temp_pads_log_call(caller=caller_name, reason=reason_for_check, session_id=session_id, pads_call=full_URL, return_status_code=response.status_code, params=doc_id) # Log Call PaDS
except Exception as e:
logger.error(f"{caller_name}: Request session {session_id} exception part 1: {full_URL}")
logger.error(f"{caller_name}: Request session {session_id} exception part 2: {response}")
logger.error(f"{caller_name}: Request session {session_id} exception part 3: {e}")
logger.error(f"{caller_name}: Request session {session_id} exception part 4: headers {headers}")
# just return no access
ret_resp = models.PadsPermitInfo(SessionId = session_id,
DocID = doc_id,
HasArchiveAccess=True,
HasCurrentAccess=False,
Permit=True,
ReasonId=0,
StatusCode=200,
ReasonStr=f"PaDS error {e}"
)
else:
try:
if response.status_code == 503:
# PaDS down, fake it for now
msg = f"{caller_name}: Permits response error {e}. Temporarily return data."
logger.error(msg)
ret_resp = models.PadsPermitInfo(SessionId = session_id,
DocID = doc_id,
HasArchiveAccess=True,
HasCurrentAccess=False,
Permit=True,
ReasonId=0,
StatusCode=200,
ReasonStr="PaDS not responding"
)
elif response.status_code == 401:
msg = response.json()
ret_val = False
ret_resp = models.PadsPermitInfo(SessionId = session_id,
DocID = doc_id,
HasArchiveAccess=False,
HasCurrentAccess=False,
Permit=False,
ReasonId=0,
StatusCode=401,
ReasonStr=msg
)
else:
ret_resp = response.json()
ret_resp = models.PadsPermitInfo(**ret_resp)
# returns 401 for a non-authenticated session
ret_resp.StatusCode = response.status_code
ret_val = ret_resp.Permit
if ret_resp.StatusCode != 200:
msg = f"PaDS returned a non-200 permit req status: {ret_resp.StatusCode}"
logger.info(msg)
except Exception as e:
msg = f"{caller_name}: Permits response error {e}. Composing no access response."
logger.error(msg)
ret_val = False
ret_resp = models.PadsPermitInfo(SessionId=session_id,
DocId=doc_id,
ReasonStr=msg)
return ret_val, ret_resp
def get_access_limitations(doc_id,
classification, # document classification, e.g., free, current, archive, undefined, offsite, toc
session_info, # updated in code below
year=None,
doi=None,
documentListItem: models.DocumentListItem=None, # deprecated, not used
fulltext_request:bool=None,
request=None):
"""
Based on the classification of the document (archive, current [embargoed],
free, offsite), and the users permissions in session_info, determine whether
this user has access to the full-text of the document, and fill out permissions
in accessLimitations (ret_val) structure for document doc_id
20210428 - removed documentListItem and update side effects, caller should copy access
There are still side effects on session_info
"""
caller_name = "get_access_limitations"
try:
open_access = False
ret_val = models.AccessLimitations()
ret_val.doi = doi
ret_val.accessLimitedPubLink = None
ret_val.accessLimitedCode = 200 # default (for now)
# USE THESE DEFAULTS, only set below if different
# default, turned on if classification below is opasConfig.DOCUMENT_ACCESS_EMBARGOED
ret_val.accessLimited = True # no access by default, may be changed below.
ret_val.accessChecked = False # Same as default, for better clarity here
ret_val.accessLimitedClassifiedAsCurrentContent = False
if session_info is None:
# logger.warning(f"Document permissions for {doc_id} -- no session info")
ret_val.accessLimitedCode = 401 # no session
session_id = "No Session Info"
# not logged in
# use all the defaults above, log error below.
else:
# for debugging display at return
try:
session_id = session_info.session_id
except:
session_id = "No Session ID"
if ret_val.doi is not None:
publisherAccess = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO + opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
# TODO: get the link we use to send users to publishers site when we don't have it, and no doi, and implement here.
# for now, just doi
ret_val.accessLimitedPubLink = opasConfig.ACCESS_SUMMARY_PUBLISHER_INFO_DOI_LINK % ret_val.doi
else:
publisherAccess = "."
if classification in (opasConfig.DOCUMENT_ACCESS_FREE):
# free can be for anyone!!!! Change accessLimited
open_access = True
ret_val.accessLimited = False
ret_val.accessChecked = True
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
#"This content is currently free to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
elif classification in (opasConfig.DOCUMENT_ACCESS_OFFSITE):
# we only allow reading abstracts for offsite, accessLimited is True
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
#"This content is currently completely limited to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_OFFSITE + publisherAccess # limited...get it elsewhere
elif classification in (opasConfig.DOCUMENT_ACCESS_EMBARGOED): # PEPCurrent
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
ret_val.accessLimitedClassifiedAsCurrentContent = True
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_DESCRIPTION + opasConfig.ACCESS_SUMMARY_EMBARGOED + publisherAccess # limited...get it elsewhere
if session_info is not None:
try:
# #########################################################################################
# optimization...if authorized for PEPCurrent, don't check again this query, unless it's a full-text request
# #########################################################################################
if session_info.authorized_pepcurrent:
ret_val.accessLimited = False # you can access it!!!
ret_val.accessChecked = True
# "This current content is available for you to access"
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_CURRENT_CONTENT_AVAILABLE
logger.debug("Optimization - session info used to authorize PEPCurrent document")
except Exception as e:
logger.error(f"{caller_name}: PEPCurrent document permission: {e}")
elif classification in (opasConfig.DOCUMENT_ACCESS_ARCHIVE):
ret_val.accessLimitedDescription = opasConfig.ACCESS_SUMMARY_DESCRIPTION
# ret_val.accessLimited = True # default is true
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_FORSUBSCRIBERS
# #########################################################################################
# optimization...if authorized, don't check again, unless it's a full-text request
# #########################################################################################
if session_info is not None:
try:
if session_info.authorized_peparchive:
ret_val.accessLimited = False # you can access it!!!
ret_val.accessChecked = True
# "This content is available for you to access"
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
logger.debug("Optimization - session info used to authorize PEPArchive document")
except Exception as e:
logger.error(f"{caller_name}: PEPArchive document permission: {e}")
elif classification in (opasConfig.DOCUMENT_ACCESS_TOC):
open_access = True
ret_val.accessLimited = False # you can access it!!! (All TOCs are open)
ret_val.accessChecked = True
# just like free for now
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
#"This content is currently free to all users."
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_FREE
else:
logger.error(f"{caller_name}: Unknown classification: {classification}")
# **************************************
# Now check for access, or cached access
# - always check for a full-text request so PaDS can track them.
# since we don't really always know about authentication, we need to check all requests that are otherwise rejected.
# **************************************
try:
if not open_access:
if (session_info.authenticated == True # Must be authenticated for this check
and (ret_val.accessLimited == True # if it's marked limited, then may need to check, it might be first one
or fulltext_request == True)): # or whenever full-text is requested.
# and session_info.api_client_session and session_info.api_client_id in PADS_BASED_CLIENT_IDS:
if fulltext_request:
reason_for_check = opasConfig.AUTH_DOCUMENT_VIEW_REQUEST
else:
reason_for_check = opasConfig.AUTH_ABSTRACT_VIEW_REQUEST
try:
pads_authorized, resp = authserver_permission_check(session_id=session_info.session_id,
doc_id=doc_id,
doc_year=year,
reason_for_check=reason_for_check,
request=request)
except Exception as e:
# PaDS could be down, local development
logger.error(f"{caller_name}: Access Exception: {e}")
if localsecrets.BASEURL == "development.org:9100":
resp = models.PadsPermitInfo(Permit=True, HasArchiveAccess=True, HasCurrentAccess=True)
# so it doesn't have to check this later
session_info.authorized_peparchive = True
session_info.authorized_pepcurrent = True
else:
session_info.authorized_peparchive = False
session_info.authorized_pepcurrent = False
resp = models.PadsPermitInfo(Permit=False, HasArchiveAccess=False, HasCurrentAccess=False)
finally:
# save PaDS code
ret_val.accessLimitedCode = resp.StatusCode
if resp.StatusCode == httpCodes.HTTP_401_UNAUTHORIZED: # or resp.ReasonStr == 'Session has not been authenticated':
# if this is True, then we can stop asking this time
# You would get the same return if
# the session was not recognised on pads,
# the session had been deleted from the database (should never happen…), or
# the session simply never existed.
ret_val.accessLimited = True
session_info.authenticated = False
msg = f"Full text of {doc_id} unavailable. " + opasConfig.ACCESSLIMITED_401_UNAUTHORIZED
ret_val.accessLimitedReason = msg
else:
# set default again based on update from PaDS query
ret_val.accessLimited = True
if ret_val.accessLimitedClassifiedAsCurrentContent == True:
if resp.HasCurrentAccess == True:
session_info.authorized_pepcurrent = True
ret_val.accessLimited = False
ret_val.accessChecked = True
else:
ret_val.accessLimited = True
else: # not current content
if resp.HasArchiveAccess == True:
session_info.authorized_peparchive = True
ret_val.accessLimited = False
ret_val.accessChecked = True
if fulltext_request and pads_authorized:
# let's make sure we know about this user.
if session_info.user_id == opasConfig.USER_NOT_LOGGED_IN_NAME:
# We got this far, We need to find out who this is
pads_user_info, status_code = get_authserver_session_userinfo(session_info.session_id, session_info.api_client_id, addl_log_info=" (user info not yet collected)")
if pads_user_info is not None:
session_info.user_id = pads_user_info.UserId
session_info.username = pads_user_info.UserName
session_info.user_type = pads_user_info.UserType # TODO - Add this to session table
# session_info.session_expires_time = ?
# ocd = opasCentralDBLib.opasCentralDB()
ocd.update_session(session_info.session_id,
userID=session_info.user_id,
username=session_info.username,
authenticated=1,
authorized_peparchive=1 if session_info.authorized_peparchive == True else 0,
authorized_pepcurrent=1 if session_info.authorized_pepcurrent == True else 0,
session_end=session_info.session_expires_time,
api_client_id=session_info.api_client_id
)
if pads_authorized:
# "This content is available for you to access"
ret_val.accessLimited = False
ret_val.accessChecked = True
ret_val.accessLimitedDescription = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
ret_val.accessLimitedReason = opasConfig.ACCESSLIMITED_DESCRIPTION_AVAILABLE
msg = f"Document {doc_id} available. Pads Reason: {resp.ReasonStr}. Opas Reason: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
else:
# changed from warning to info 2021-06-02 to reduce normal logging
msg = f"Document {doc_id} unavailable. Pads Reason: {resp.ReasonStr} Opas: {ret_val.accessLimitedDescription} - {ret_val.accessLimitedReason}"
logger.info(msg) # limited...get it elsewhere
ret_val.accessLimitedDebugMsg = msg
ret_val.accessLimited = True
if ret_val.accessLimitedClassifiedAsCurrentContent:
# embargoed
ret_val.accessLimitedReason = opasConfig.ACCESS_SUMMARY_EMBARGOED
else:
# non embargoed, but no access.
ret_val.accessLimitedReason = f"{ret_val.accessLimitedDescription} {ret_val.accessLimitedReason}"
else:
# not full-text OR (not authenticated or accessLimited==False)
msg = f"No PaDS check needed: Document {doc_id} accessLimited: {ret_val.accessLimited}. Authent: {session_info.authenticated}"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
else: # It's open access!
msg = f"No PaDS check needed: Document {doc_id} is open access"
logger.debug(msg)
ret_val.accessLimitedDebugMsg = msg
except Exception as e:
msg = f"{caller_name}: Issue checking document permission. Possibly not logged in {e}"
logger.error(msg)
ret_val.accessLimitedDebugMsg = msg
pass # can't be checked, will be unauthorized.
except Exception as e:
msg = f"{caller_name}: General exception {e} trying ascertain access limitations."
logger.error(msg)
if ret_val is None:
ret_val = models.AccessLimitations() # make sure there's defaults!
ret_val.accessLimitedDebugMsg = msg
if fulltext_request and ret_val.accessLimited:
# happens anytime someone views an abstract in Document mode because they don't have an account. Perfectly legal. Changed to info (from error)
msg = f"Full-text access for {doc_id} denied ({ret_val.accessLimitedCode}). Sess:{session_id}: Access:{ret_val.accessLimitedReason}"
logger.info(msg)
ret_val.accessLimitedDebugMsg = msg
return ret_val
# ##################################################################################################################################################
#
# LOCAL ROUTUNES
#
# ##################################################################################################################################################
def get_pads_session_info(session_id=None,
                          client_id=opasConfig.NO_CLIENT_ID,
                          retry=True,
                          request=None):
    """
    Get the PaDS session model, and get a new session ID from the auth server if needed.

    Args:
        session_id: existing PaDS session ID, or None to request a new one.
        client_id: calling client's ID; NO_CLIENT_ID is logged as suspicious.
        retry: if True, retry once without the session ID when PaDS returns
               a status code above 403.
        request: incoming request object, used for the caller URL (logging)
                 and for deriving the forwarded client IP.

    Returns:
        models.PadsSessionInfo (a default instance when PaDS is unreachable
        or returns an unusable response).
    """
    msg = ""
    caller_name = "get_pads_session_info"
    if client_id == opasConfig.NO_CLIENT_ID:
        logger.warning(f"{caller_name}: Session info call for Session ID: {session_id} Client ID was NO_CLIENT_ID ({opasConfig.NO_CLIENT_ID}).")
    if session_id is not None:
        full_URL = base + f"/v1/Authenticate/IP/" + f"?SessionID={session_id}"
    else:
        full_URL = base + f"/v1/Authenticate/IP/"
    req_url = "No request info."
    if request is not None:
        try: # just in case this generates an error
            req_url = request.url # to log caller url
        except Exception:
            pass
    user_ip = get_user_ip(request) # returns an IP if X_FORWARDED_FOR address is in header
    try:
        logger.debug(f"{caller_name}: calling PaDS")
        if user_ip: # truthiness check (was ``user_ip is not ''``, an identity-comparison bug)
            headers = { opasConfig.X_FORWARDED_FOR:user_ip }
            # BUGFIX: headers must be passed by keyword; the second positional
            # argument of requests.get() is ``params``, so the X-Forwarded-For
            # header was silently never sent to PaDS.
            pads_session_info = requests.get(full_URL, headers=headers) # Call PaDS
            logger.debug(f"{caller_name}: Session ID:{session_id}. X_FORWARDED_FOR from authenticateIP: {user_ip}. URL: {req_url} PaDS Session Info: {pads_session_info}")
        else:
            pads_session_info = requests.get(full_URL) # Call PaDS
    except Exception as e:
        logger.error(f"{caller_name}: Authorization server not available. {e}")
        pads_session_info = models.PadsSessionInfo()
    else:
        status_code = pads_session_info.status_code # save it for a bit (we replace pads_session_info below)
        ocd.temp_pads_log_call(caller=caller_name, reason=caller_name, session_id=session_id, pads_call=full_URL, ip_address=user_ip, return_status_code=status_code) # Log Call PaDS
        if status_code > 403: # e.g., (httpCodes.HTTP_500_INTERNAL_SERVER_ERROR, httpCodes.HTTP_503_SERVICE_UNAVAILABLE):
            error_text = f"{caller_name}: PaDS session_info status_code is {status_code}"
            logger.error(error_text)
            if retry:
                # try once without the session ID
                pads_session_info = get_pads_session_info(client_id=client_id, retry=False, request=request)
                pads_session_info.pads_status_response = status_code
            else:
                pads_session_info = models.PadsSessionInfo()
                pads_session_info.pads_status_response = status_code
                pads_session_info.pads_disposition = error_text
        else:
            try:
                pads_session_info = pads_session_info.json()
                pads_session_info = fix_pydantic_invalid_nones(pads_session_info, caller_name=caller_name)
                pads_session_info = models.PadsSessionInfo(**pads_session_info)
                pads_session_info.pads_status_response = status_code
                logger.debug(f"PaDS Status Ok, Final IP Session Info: {pads_session_info} URL: {req_url}.")
            except Exception as e:
                msg = f"{caller_name}: Response processing error {e}"
                logger.error(msg)
                # Only rebuild the model when the payload is actually a dict;
                # previously this re-ran the exact construction that had just
                # failed (and crashed outright when .json() itself raised,
                # because pads_session_info was still a Response object).
                if isinstance(pads_session_info, dict):
                    try:
                        pads_session_info = models.PadsSessionInfo(**pads_session_info)
                    except Exception:
                        pads_session_info = models.PadsSessionInfo()
                else:
                    pads_session_info = models.PadsSessionInfo()
                pads_session_info.pads_status_response = status_code
                pads_session_info.pads_disposition = msg
    return pads_session_info
if __name__ == "__main__":
    # Self-test entry point: configure console logging, then run the module doctests.
    import doctest
    import sys
    banner = 40 * "*"
    print(banner, "opasDocPermissionsTests", banner)
    print(f"Running in Python {sys.version_info[0]}.{sys.version_info[1]}")
    # Echo log records to the console with a timestamped format while testing.
    logger = logging.getLogger(__name__)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)s %(lineno)d - %(levelname)s %(message)s')
    )
    logger.addHandler(console_handler)
    doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    print("Fini. Tests complete.")
40fbb92c604af78ae8abdd9f0581a69abc062ce6 | 399 | py | Python | specdal/operators/__init__.py | EnSpec/specdal.github.io | 4a89f5de6d8feb9472813da9767eafb78c0fe19a | [
"MIT"
] | 13 | 2018-03-09T07:45:29.000Z | 2021-09-15T21:41:28.000Z | specdal/operators/__init__.py | EnSpec/specdal.github.io | 4a89f5de6d8feb9472813da9767eafb78c0fe19a | [
"MIT"
] | 8 | 2018-05-14T14:06:50.000Z | 2021-08-23T09:17:00.000Z | specdal/operators/__init__.py | EnSpec/specdal.github.io | 4a89f5de6d8feb9472813da9767eafb78c0fe19a | [
"MIT"
] | 12 | 2017-08-30T18:06:13.000Z | 2021-06-08T18:54:16.000Z | from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
from .proximal_join import proximal_join, get_column_types
from .interpolate import interpolate
from .stitch import stitch
from .jump_correct import jump_correct
from .derivative import derivative
| 33.25 | 93 | 0.784461 | from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
from .proximal_join import proximal_join, get_column_types
from .interpolate import interpolate
from .stitch import stitch
from .jump_correct import jump_correct
from .derivative import derivative
| 0 | 0 | 0 |
ac6b1ef8fdf9ff2381db86bc500d15f0ccda651f | 977 | py | Python | 04_jump_the_five/jump.py | dwidmaye/tiny_python_projects | 3b03a667ad2b73d19126732a4018580cfecb35ed | [
"MIT"
] | null | null | null | 04_jump_the_five/jump.py | dwidmaye/tiny_python_projects | 3b03a667ad2b73d19126732a4018580cfecb35ed | [
"MIT"
] | null | null | null | 04_jump_the_five/jump.py | dwidmaye/tiny_python_projects | 3b03a667ad2b73d19126732a4018580cfecb35ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : Derek Widmayer <dwidmaye@gmail.com>
Date : 2021-01-10
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
    """Parse and return the command-line arguments (one positional string)."""
    arg_parser = argparse.ArgumentParser(
        description='Jump the five',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument('str', metavar='str', help='Input text')
    return arg_parser.parse_args()
# --------------------------------------------------
def main():
    """Read the input text and print its jump-the-five encoding."""
    text = get_args().str
    # "Jump the five" digit substitution expressed as a translation table
    # (1<->9, 2<->8, 3<->7, 4<->6, 5<->0); non-digits pass through unchanged.
    jump_table = str.maketrans('1234567890', '9876043215')
    print(text.translate(jump_table))
# --------------------------------------------------
if __name__ == '__main__':
main()
| 22.72093 | 115 | 0.460594 | #!/usr/bin/env python3
"""
Author : Derek Widmayer <dwidmaye@gmail.com>
Date : 2021-01-10
Purpose: Rock the Casbah
"""
import argparse
# --------------------------------------------------
def get_args():
"""Jump the five"""
parser = argparse.ArgumentParser(
description='Jump the five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('str',
metavar='str',
help='Input text')
return parser.parse_args()
# --------------------------------------------------
def main():
"""Encode jump the five"""
text = get_args().str
encoding = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0', '6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
encoded_text = ""
for char in text:
encoded_text += encoding.get(char, char)
print(f'{encoded_text}')
# --------------------------------------------------
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
d414981108f85165d9d4ca5669ff02f44bd6cea3 | 8,029 | py | Python | models-tf1/preprocess_data.py | vlada-rozova/bladder-cancer | 05005600948eacf25cd1ae164a2f46ae15ce2cf3 | [
"MIT"
] | 1 | 2022-03-05T02:51:47.000Z | 2022-03-05T02:51:47.000Z | models-tf1/preprocess_data.py | vlada-rozova/bladder-cancer | 05005600948eacf25cd1ae164a2f46ae15ce2cf3 | [
"MIT"
] | null | null | null | models-tf1/preprocess_data.py | vlada-rozova/bladder-cancer | 05005600948eacf25cd1ae164a2f46ae15ce2cf3 | [
"MIT"
] | 1 | 2021-01-22T15:59:08.000Z | 2021-01-22T15:59:08.000Z | import os
import os.path
import glob
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
IMAGENET_MEAN_BGR = [103.939, 116.779, 123.68]
def load_images(data_path, image_height, image_width, plot=False):
    """
    Read an image in BGR,
    resize to image_height x image_width,
    subtract mean of ImageNet dataset

    Loads every *.jpg in ``data_path`` and returns a float32 array of shape
    (N, image_height, image_width, 3) in BGR channel order with the
    per-channel ImageNet mean (IMAGENET_MEAN_BGR) subtracted.
    NOTE(review): chdir()s into ``data_path`` and never restores the previous
    working directory — callers relying on the CWD should beware.
    """
    # Get a list of images in the folder
    os.chdir(data_path)
    list = glob.glob('*.jpg')  # NOTE(review): shadows the builtin ``list``
    N_images = len(list)
    # Create arrays to store data
    images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
    if plot:
        fig = plt.figure(figsize=(15,6))
    for i in range(0, N_images):
        # Load image
        image_name = list[i]
        image = cv2.imread(image_name)
        if plot:
            # Plot an image
            fig.add_subplot(1, N_images, i+1)
            plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            plt.axis('off')
            plt.show()
        # Resize to image_height x image_width
        images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
        # Subtract ImageNet mean
        images[i, :, :, :] -= IMAGENET_MEAN_BGR
    return images
def load_images_with_labels(data_path, labels_path, image_height, image_width):
    """
    Read an image in BGR,
    resize to image_height x image_width,
    subtract mean of ImageNet dataset.
    Assign a label to an image:
    1 if there is a tumour, 0 otherwise

    Loads every *.jpeg in ``data_path``; an image gets label 1 when a matching
    annotation file ``<image stem>.txt`` exists under ``labels_path``.

    Returns:
        (images, labels): float32 arrays of shape (N, H, W, 3) and (N, 1).
    """
    # Get a list of images in the folder
    # NOTE(review): chdir() mutates the process-wide working directory.
    os.chdir(data_path)
    file_list = glob.glob('*.jpeg')
    N_images = len(file_list)
    # BUGFIX: a stray early ``return N_images`` here made everything below
    # unreachable, contradicting the docstring and the function's name.
    # Create arrays to store data and labels; labels start at -1 so any
    # unset entry would be visible downstream.
    images = np.zeros((N_images, image_height, image_width, 3), dtype=np.float32)
    labels = -1 * np.ones((N_images, 1), dtype=np.float32)
    for i in range(N_images):
        # Load image in BGR (cv2.imread's default channel order)
        image_name = file_list[i]
        image = cv2.imread(image_name)
        # Resize to image_height x image_width
        images[i, :, :, :] = cv2.resize(image.astype(np.float32), (image_height, image_width))
        # Subtract ImageNet mean
        images[i, :, :, :] -= IMAGENET_MEAN_BGR
        # Assign a label to an image:
        # 1 if there is a tumour annotation file, 0 otherwise
        file_path = labels_path + image_name[:-5] + ".txt"
        labels[i] = 1 if os.path.isfile(file_path) else 0
    return images, labels
def load_images_with_masks(data_path, mask_path, image_height, image_width, binary=False, plot=False):
    """
    Read an image in BGR,
    resize to image_height x image_width,
    subtract mean of ImageNet dataset.
    Read the corresponding binary mask.

    A mask for ``foo.jpg`` is expected at ``<mask_path>/foo_mask.jpg``; images
    without a mask keep an all-zero mask. With ``binary=True`` mask pixels are
    thresholded at 128 into {0, 1}.
    NOTE(review): chdir()s back and forth between data_path and mask_path and
    never restores the original working directory.

    Returns:
        (images, masks): float32 arrays of shape (N, H, W, 3) and (N, H, W).
    """
    # Get the list of images
    os.chdir(data_path)
    image_list = glob.glob('*.jpg')
    N_images = len(image_list)
    # Get the list of masks
    os.chdir(mask_path)
    mask_list = glob.glob('*.jpg')
    # Create arrays to store data
    images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
    masks = np.zeros((N_images, image_height, image_width), dtype = np.float32)
    if plot:
        fig = plt.figure(figsize=(15,6))
    for i in range(0, N_images):
        # Load image
        image_name = image_list[i]
        os.chdir(data_path)
        image = cv2.imread(image_name)
        # Resize to image_height x image_width
        images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
        # Subtract ImageNet mean
        images[i, :, :, :] -= IMAGENET_MEAN_BGR
        # Check if there is a mask (naming convention: <stem>_mask.jpg)
        mask_name = image_name[:-4] + '_mask.jpg'
        if mask_name in mask_list:
            os.chdir(mask_path)
            mask = cv2.resize(plt.imread(mask_name).astype(np.float32), (image_height, image_width))
            if binary:
                # Threshold grayscale values at 128 into a {0, 1} mask.
                mask = 0 * (mask < 128.0) + 1 * (mask >= 128.0)
            masks[i, :, :] = mask
            if plot:
                # Plot image
                fig.add_subplot(N_images, 2, 2*i+1)
                plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
                plt.axis('off')
                # Plot mask
                fig.add_subplot(N_images, 2, 2*i+2)
                plt.imshow(mask)
                plt.axis('off')
                plt.show()
    return images, masks
def split_train_val(data, labels, train_ratio=0.8):
    """Randomly split ``data``/``labels`` into training and validation sets.

    The first ``round(train_ratio * n)`` shuffled indices form the training
    set; the remainder form the validation set.
    """
    n_samples = len(data)
    shuffled = list(range(n_samples))
    np.random.shuffle(shuffled)
    # Boundary between the training and validation portions of the shuffle.
    cut = round(train_ratio * n_samples)
    train_idx, val_idx = shuffled[:cut], shuffled[cut:]
    X_train = data[train_idx, :, :, :]
    y_train = labels[train_idx]
    X_val = data[val_idx, :, :, :]
    y_val = labels[val_idx]
    print("Training set:", X_train.shape, y_train.shape)
    print("Validation set:", X_val.shape, y_val.shape)
    return X_train, y_train, X_val, y_val
def stratified_train_val(data, labels, train_ratio=0.8, balance_classes=False):
    """
    Create stratified training and validation sets for binary data

    Splits positives (label 1) and negatives (label 0) separately so both
    classes appear in each split in roughly ``train_ratio`` proportion.
    With ``balance_classes=True`` the majority class is capped at the size
    of the minority class for the *training* slice sizes.
    NOTE(review): when balancing, the validation slices take *all* remaining
    indices of each class (everything past round(train_ratio * n)), so the
    validation set is not itself balanced — confirm this is intended.
    """
    # numbers of positive and negative samples in the dataset
    n_pos = int(sum(labels))
    n_neg = data.shape[0] - n_pos
    print('Number of negative samples: ', n_neg)
    print('Number of positive samples: ', n_pos)
    print('Fraction of positive samples: ', n_pos / data.shape[0] * 100, '%')
    # to fix class imbalance equalize
    # the numbers of negative and positive samples
    if balance_classes:
        if n_neg > n_pos:
            n_neg = n_pos
        else:
            n_pos = n_neg
    # print the numbers of negative/positive samples
    # in training and validation sets
    print('Positive samples:',
          round(train_ratio * n_pos), "in y_train,",
          round((1 - train_ratio) * n_pos), "in y_val")
    print('Negative samples:',
          round(train_ratio * n_neg), "in y_train,",
          round((1 - train_ratio) * n_neg), "in y_val")
    # extract, shuffle and split indeces of positive samples
    pos_indeces = (np.where(labels == 1))[0]
    np.random.shuffle(pos_indeces)
    pos_indeces_train = pos_indeces[:round(train_ratio * n_pos)]
    pos_indeces_val = pos_indeces[round(train_ratio * n_pos):]
    # extract, shuffle and split indeces of negative samples
    neg_indeces = (np.where(labels == 0))[0]
    np.random.shuffle(neg_indeces)
    neg_indeces_train = neg_indeces[:round(train_ratio * n_neg)]
    neg_indeces_val = neg_indeces[round(train_ratio * n_neg):]
    # create a training set (shuffle again so classes are interleaved)
    train_indeces = np.append(pos_indeces_train, neg_indeces_train, axis=0)
    np.random.shuffle(train_indeces)
    X_train = data[train_indeces, :, :, :]
    y_train = labels[train_indeces]
    # create a validation set
    val_indeces = np.append(pos_indeces_val, neg_indeces_val, axis = 0)
    np.random.shuffle(val_indeces)
    X_val = data[val_indeces, :, :, :]
    y_val = labels[val_indeces]
    print("Training set:", X_train.shape, y_train.shape)
    print("Validation set:", X_val.shape, y_val.shape)
    return X_train, y_train, X_val, y_val
| 32.905738 | 102 | 0.61116 | import os
import os.path
import glob
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
IMAGENET_MEAN_BGR = [103.939, 116.779, 123.68]
def load_images(data_path, image_height, image_width, plot=False):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset
"""
# Get a list of images in the folder
os.chdir(data_path)
list = glob.glob('*.jpg')
N_images = len(list)
# Create arrays to store data
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
if plot:
fig = plt.figure(figsize=(15,6))
for i in range(0, N_images):
# Load image
image_name = list[i]
image = cv2.imread(image_name)
if plot:
# Plot an image
fig.add_subplot(1, N_images, i+1)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
return images
def load_images_with_labels(data_path, labels_path, image_height, image_width):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset.
Assign a label to an image:
1 if there is a tumour, 0 otherwise
"""
# Get a list of images in the folder
os.chdir(data_path)
list = glob.glob('*.jpeg')
N_images = len(list)
return N_images
# Create arrays to store data and labels
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
labels = -1 * np.ones((N_images, 1), dtype = np.float32)
for i in range(0, N_images):
# Load image in BGR
image_name = list[i]
image = cv2.imread(image_name)
# Load image in RGB
# image = plt.imread(image_name)
# Convert RGB to BGR
#image = image[:, :, [2, 1, 0]]
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
# Assign a label to an image:
# 1 if there is a tumour, 0 otherwise
file_path = labels_path + image_name[:-5] + ".txt"
if os.path.isfile(file_path):
labels[i] = 1
else:
labels[i] = 0
return images, labels
def load_images_with_masks(data_path, mask_path, image_height, image_width, binary=False, plot=False):
"""
Read an image in BGR,
resize to image_height x image_width,
subtract mean of ImageNet dataset.
Read the corresponding binary mask.
"""
# Get the list of images
os.chdir(data_path)
image_list = glob.glob('*.jpg')
N_images = len(image_list)
# Get the list of masks
os.chdir(mask_path)
mask_list = glob.glob('*.jpg')
# Create arrays to store data
images = np.zeros((N_images, image_height, image_width, 3), dtype = np.float32)
masks = np.zeros((N_images, image_height, image_width), dtype = np.float32)
if plot:
fig = plt.figure(figsize=(15,6))
for i in range(0, N_images):
# Load image
image_name = image_list[i]
os.chdir(data_path)
image = cv2.imread(image_name)
# Resize to image_height x image_width
images[i, :, :, :] = cv2.resize(image.astype(np.float32),(image_height, image_width))
# Subtract ImageNet mean
images[i, :, :, :] -= IMAGENET_MEAN_BGR
# Check if there is a mask
mask_name = image_name[:-4] + '_mask.jpg'
if mask_name in mask_list:
os.chdir(mask_path)
mask = cv2.resize(plt.imread(mask_name).astype(np.float32), (image_height, image_width))
if binary:
mask = 0 * (mask < 128.0) + 1 * (mask >= 128.0)
masks[i, :, :] = mask
if plot:
# Plot image
fig.add_subplot(N_images, 2, 2*i+1)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
# Plot mask
fig.add_subplot(N_images, 2, 2*i+2)
plt.imshow(mask)
plt.axis('off')
plt.show()
return images, masks
def split_train_val(data, labels, train_ratio=0.8):
"""
Split data on training and validation sets
"""
# Shuffle indeces
n = len(data)
indeces = list(range(0, n))
np.random.shuffle(indeces)
# Create training set
train_indeces = indeces[:round(train_ratio * n)]
X_train = data[train_indeces, :, :, :]
y_train = labels[train_indeces]
# Create validation set
val_indeces = indeces[round(train_ratio * n):]
X_val = data[val_indeces, :, :, :]
y_val = labels[val_indeces]
print("Training set:", X_train.shape, y_train.shape)
print("Validation set:", X_val.shape, y_val.shape)
return X_train, y_train, X_val, y_val
def stratified_train_val(data, labels, train_ratio=0.8, balance_classes=False):
"""
Create stratified training and validation sets for binary data
"""
# numbers of positive and negative samples in the dataset
n_pos = int(sum(labels))
n_neg = data.shape[0] - n_pos
print('Number of negative samples: ', n_neg)
print('Number of positive samples: ', n_pos)
print('Fraction of positive samples: ', n_pos / data.shape[0] * 100, '%')
# to fix class imbalance equalize
# the numbers of negative and positive samples
if balance_classes:
if n_neg > n_pos:
n_neg = n_pos
else:
n_pos = n_neg
# print the numbers of negative/positive samples
# in training and validation sets
print('Positive samples:',
round(train_ratio * n_pos), "in y_train,",
round((1 - train_ratio) * n_pos), "in y_val")
print('Negative samples:',
round(train_ratio * n_neg), "in y_train,",
round((1 - train_ratio) * n_neg), "in y_val")
# extract, shuffle and split indeces of positive samples
pos_indeces = (np.where(labels == 1))[0]
np.random.shuffle(pos_indeces)
pos_indeces_train = pos_indeces[:round(train_ratio * n_pos)]
pos_indeces_val = pos_indeces[round(train_ratio * n_pos):]
# extract, shuffle and split indeces of negative samples
neg_indeces = (np.where(labels == 0))[0]
np.random.shuffle(neg_indeces)
neg_indeces_train = neg_indeces[:round(train_ratio * n_neg)]
neg_indeces_val = neg_indeces[round(train_ratio * n_neg):]
# create a training set
train_indeces = np.append(pos_indeces_train, neg_indeces_train, axis=0)
np.random.shuffle(train_indeces)
X_train = data[train_indeces, :, :, :]
y_train = labels[train_indeces]
# create a validation set
val_indeces = np.append(pos_indeces_val, neg_indeces_val, axis = 0)
np.random.shuffle(val_indeces)
X_val = data[val_indeces, :, :, :]
y_val = labels[val_indeces]
print("Training set:", X_train.shape, y_train.shape)
print("Validation set:", X_val.shape, y_val.shape)
return X_train, y_train, X_val, y_val
def standardize(X, train_mean=None, train_sd=None, training_set=False):
    """Standardize data using the mean and s.d. of the training set.

    When ``train_mean`` is None, the statistics are computed from ``X`` itself
    (sample s.d., ddof=1). With ``training_set=True`` the computed/used
    statistics are returned alongside the standardized array so they can be
    reapplied to validation/test data.
    """
    # BUGFIX: compare to None with ``is`` — ``== None`` is un-idiomatic and
    # misbehaves if a numpy array is ever passed as train_mean.
    if train_mean is None:
        train_mean = np.mean(X)
        train_sd = np.std(X, ddof = 1)
    X_std = (X - train_mean) / train_sd
    print('This set had mean', np.mean(X), 'and s.d.', np.std(X, ddof = 1))
    print('Standardized set has mean', np.mean(X_std), 'and s.d.', np.std(X_std, ddof = 1))
    if training_set:
        return X_std, train_mean, train_sd
    else:
        return X_std
| 512 | 0 | 23 |
6f45aea8b544b9ccd10b35307ac7d38d809c6c2e | 201 | py | Python | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/nn.py | mengkai94/training_results_v0.6 | 43dc3e250f8da47b5f8833197d74cb8cf1004fc9 | [
"Apache-2.0"
] | 64 | 2021-05-02T14:42:34.000Z | 2021-05-06T01:35:03.000Z | python/tvm/relay/nn.py | ganzhiliang/tvm | b076cad542524cb3744149d953c341b5815f6474 | [
"Apache-2.0"
] | 23 | 2019-07-29T05:21:52.000Z | 2020-08-31T18:51:42.000Z | python/tvm/relay/nn.py | ganzhiliang/tvm | b076cad542524cb3744149d953c341b5815f6474 | [
"Apache-2.0"
] | 51 | 2019-07-12T05:10:25.000Z | 2021-07-28T16:19:06.000Z | # pylint: disable=wildcard-import, unused-import, unused-wildcard-import
"""Neural network related operators."""
# Re-export in a specific file name so that autodoc can pick it up
from .op.nn import *
| 40.2 | 72 | 0.761194 | # pylint: disable=wildcard-import, unused-import, unused-wildcard-import
"""Neural network related operators."""
# Re-export in a specific file name so that autodoc can pick it up
from .op.nn import *
| 0 | 0 | 0 |
9ae9056d879bbb3f32bdea9b7d41bf7ae82b80b5 | 3,154 | py | Python | api/__init__.py | jieggii/witless | 5ed678bf5a7b8602d6052715a198a56c46f62d02 | [
"WTFPL"
] | 7 | 2021-11-14T13:54:50.000Z | 2022-03-05T06:06:41.000Z | api/__init__.py | jieggii/witless | 5ed678bf5a7b8602d6052715a198a56c46f62d02 | [
"WTFPL"
] | null | null | null | api/__init__.py | jieggii/witless | 5ed678bf5a7b8602d6052715a198a56c46f62d02 | [
"WTFPL"
] | null | null | null | from aiofile import AIOFile
from os import remove
from re import findall
from api import util
from os.path import getsize
from os import listdir
from random import randint
| 25.642276 | 83 | 0.553583 | from aiofile import AIOFile
from os import remove
from re import findall
from api import util
from os.path import getsize
from os import listdir
from random import randint
async def improve_result(result: str) -> str:
    """Randomly "improve" generated text: with probability 1/5 return it
    unchanged, with 1/5 shout it in upper case, otherwise sentence-case it
    (capitalize the first character and any character following '?', '.'
    or '!' plus a space)."""
    rnd = randint(0, 4)
    if rnd == 0:  # just for fun: occasionally the bot posts "post-ironically" (text left as-is)
        return result
    elif rnd == 1:
        return result.upper()  # just for fun: occasionally the bot SHOUTS
    else:
        improved_result = ""
        for i in range(len(result)):
            if i == 0:
                # Always capitalize the very first character.
                improved_result += result[i].upper()
            elif i > 1:
                # Capitalize a character that starts a new sentence,
                # i.e. preceded by one of "?.!" followed by a space.
                if result[i - 1] == " " and result[i - 2] in ["?", ".", "!"]:
                    improved_result += result[i].upper()
                else:
                    improved_result += result[i]
            else:
                # i == 1: too close to the start for the sentence check above.
                improved_result += result[i]
        return improved_result
class Stats:
    """Aggregate size statistics over the stored per-chat message corpora."""

    @staticmethod
    async def get(peer_id):
        """Return (number of chats, total corpus size in MB, this chat's size in MB).

        Sizes are rounded to two decimals; a chat without a corpus file
        reports a local size of 0.
        """
        # One ``messages/<peer_id>.raw`` corpus file per chat.
        # BUGFIX: the comprehension built the constant, non-existent path
        # "messages/(unknown)" for every entry (the loop variable was unused),
        # so getsize() below could never see the real files.
        files = [f"messages/{filename}" for filename in listdir("messages")]
        global_size = round(sum(getsize(file) for file in files) / 1048576, 2)
        try:
            local_size = round(getsize(f"messages/{peer_id}.raw") / 1048576, 2)
        except FileNotFoundError:
            # This chat has no corpus yet.
            local_size = 0
        return len(files), global_size, local_size
async def censor_result(result: str) -> str:
    """Censor bot output before sending: replace URL-like tokens with a
    placeholder and blank out blacklisted phrases/mentions with asterisks
    of the same length."""
    # Phrases and mass-mention keywords that must never be sent verbatim.
    blacklisted_tokens = [
        "сова никогда не спит",
        "#cинийкит",
        "#рaзбудименяв420",
        "all",
        "everyone",
    ]
    # Anything with a dot between two non-delimiter runs is treated as a link.
    links = util.remove_duplicates(
        findall(r"[^ (){\}\[\]\'\";]+\.[^ (){\}\[\]\'\";]+", result)
    )
    for link in links:
        # User-facing runtime string (Russian: "link removed") — must not change.
        result = result.replace(link, "[ссылка удалена]")
    for token in blacklisted_tokens:
        result = result.replace(token, "*" * len(token))
    return result
async def escape_string(string: str):
    """Escape raw ';' delimiters by prefixing each with a backslash."""
    return "\;".join(string.split(";"))
async def unescape_string(string: str):
    """Turn each escaped '\\;' sequence back into a literal ';'."""
    return ";".join(string.split("\;"))
async def parse_raw(raw: str) -> list:
    """Split a raw corpus string into individual messages.

    Messages are delimited by ';' characters that are NOT preceded by a
    backslash; each extracted message is then unescaped. Any text after the
    final ';' is dropped — the storage format always terminates each message
    with ';' (see MessagesStorage.push), so nothing is lost in practice.
    """
    result = []
    start = 0
    for i in range(len(raw)):
        if i != 0:
            # An unescaped ';' terminates the current message.
            if raw[i] == ";" and raw[i - 1] != "\\":
                result.append(raw[start:i])
                start = i + 1
    return [await unescape_string(message) for message in result]
class MessagesStorage:
    """File-backed store of chat messages: one ``messages/<peer_id>.raw``
    file per chat, holding a single ';'-delimited string in which literal
    semicolons are backslash-escaped (see escape_string / parse_raw)."""

    def __init__(self, peer_id: int):
        self.path = f"messages/{peer_id}.raw"

    async def wipe(self):
        """Delete this chat's corpus file; True on success, False otherwise."""
        try:
            remove(self.path)
            return True
        # Narrowed from a bare ``except:`` — only filesystem errors (missing
        # file, permissions) mean "nothing to wipe"; anything else should
        # propagate instead of being silently swallowed.
        except OSError:
            return False

    async def get(self):
        """Return the list of stored messages, or [] when no corpus exists."""
        try:
            async with AIOFile(self.path, "r", encoding="utf-8") as file:
                raw = await file.read()
                messages = await parse_raw(raw)
                # messages = messages[::-1][0:150]  # on air: the "EXperiments" segment... (disabled truncation)
                return messages
        except FileNotFoundError:
            return []

    async def push(self, messages: list):
        """Append ``messages`` to the corpus, escaping each and terminating it with ';'."""
        async with AIOFile(self.path, "a", encoding="utf-8") as file:
            line = ";".join([await escape_string(message) for message in messages])
            await file.write(line + ";")
| 2,824 | 36 | 268 |
b56bee90b4215634a95bf4e633d4903390ad51f0 | 15,864 | py | Python | tests/cli/publish/test_publish.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | tests/cli/publish/test_publish.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | tests/cli/publish/test_publish.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | import os
import secrets
import tarfile
import time
import zipfile
from collections import defaultdict
import httpx
import pytest
from hatch.config.constants import PublishEnvVars
from hatch.utils.ci import running_in_ci
PUBLISHER_TOKEN = os.environ.get('HATCH_CI_PUBLISHER_TOKEN')
pytestmark = [
pytest.mark.skipif(not PUBLISHER_TOKEN, reason='Publishing tests are only executed within CI environments'),
]
@pytest.fixture(autouse=True)
@pytest.fixture
| 32.84472 | 114 | 0.649962 | import os
import secrets
import tarfile
import time
import zipfile
from collections import defaultdict
import httpx
import pytest
from hatch.config.constants import PublishEnvVars
from hatch.utils.ci import running_in_ci
PUBLISHER_TOKEN = os.environ.get('HATCH_CI_PUBLISHER_TOKEN')
pytestmark = [
pytest.mark.skipif(not PUBLISHER_TOKEN, reason='Publishing tests are only executed within CI environments'),
]
@pytest.fixture(autouse=True)
def keyring_store(mocker):
    """Autouse fixture: patch ``keyring`` get/set onto an in-memory dict so the
    publish tests never read from (or pollute) the real OS credential store."""
    # system -> {user: auth}; yielded so tests can inspect what was stored.
    mock_store = defaultdict(dict)
    mocker.patch('keyring.get_password', side_effect=lambda system, user: mock_store[system].get(user))
    mocker.patch(
        'keyring.set_password', side_effect=lambda system, user, auth: mock_store[system].__setitem__(user, auth)
    )
    yield mock_store
@pytest.fixture
def published_project_name():
    """Semi-random project name used for TestPyPI uploads; the 0-99 suffix
    presumably reduces collisions between concurrent CI runs — confirm."""
    return f'c4880cdbe05de9a28415fbad{secrets.choice(range(100))}'
def wait_for_artifacts(project_name, *artifacts):
    """Poll the TestPyPI simple index until every artifact file name appears.

    Retries about once per second for up to 120 attempts (the index
    presumably lags the upload), raising when the artifacts never show up.
    """
    index_url = f'https://test.pypi.org/simple/{project_name}/'
    artifact_names = [artifact.name for artifact in artifacts]
    for _ in range(120):
        try:
            response = httpx.get(index_url)
            response.raise_for_status()
        except Exception: # no cov
            # Transient network/HTTP errors: just retry on the next tick.
            pass
        else:
            # Break out only once *every* expected name is in the index page.
            for artifact_name in artifact_names:
                if artifact_name not in response.text:
                    break
            else:
                break
        time.sleep(1)
    else: # no cov
        raise Exception(f'Could not find artifacts at {index_url}: {", ".join(artifact_names)}')
def remove_metadata_field(field: str, metadata_file_contents: str):
    """Return ``metadata_file_contents`` with every ``<field>: ...`` line removed.

    Matching is case-insensitive on both sides and line endings are preserved.
    """
    lines = metadata_file_contents.splitlines(True)
    # BUGFIX: lowercase the marker too — previously only the *line* was
    # lowercased, so a capitalized ``field`` (e.g. "License") never matched.
    field_marker = f'{field.lower()}: '
    # Collect matches first, then delete with a shifting offset so earlier
    # deletions don't invalidate the later indices.
    indices_to_remove = [i for i, line in enumerate(lines) if line.lower().startswith(field_marker)]
    for offset, index in enumerate(indices_to_remove):
        del lines[index - offset]
    return ''.join(lines)
def timestamp_to_version(timestamp):
    """Convert a float timestamp into a version string.

    Leading zeros in the fractional part become extra ``.0`` components so the
    result is a valid version (e.g. ``123.04`` -> ``123.0.4``).
    """
    major, minor = str(timestamp).split('.')
    if not minor.startswith('0'):
        return f'{major}.{minor}'
    normalized_minor = str(int(minor))
    leading_zeros = len(minor) - len(normalized_minor)
    if leading_zeros == 0:
        # minor is exactly '0' (e.g. 123.0); the old code produced a malformed
        # '123..0' here because the joined padding was empty.
        return f'{major}.{normalized_minor}'
    padding = '.'.join('0' for _ in range(leading_zeros))
    return f'{major}.{padding}.{normalized_minor}'
def test_timestamp_to_version():
    """Spot-check zero-padding behavior of timestamp_to_version."""
    cases = (
        (123.4, '123.4'),
        (123.04, '123.0.4'),
        (123.004, '123.0.0.4'),
    )
    for raw, expected in cases:
        assert timestamp_to_version(raw) == expected
def test_explicit_options(hatch, temp_dir):
    """The `pypi` publisher rejects raw `-o key=value` options."""
    with temp_dir.as_cwd():
        creation = hatch('new', 'My App')
    assert creation.exit_code == 0, creation.output
    project_dir = temp_dir / 'my-app'
    with project_dir.as_cwd():
        publication = hatch('publish', '-o', 'foo=bar')
    assert publication.exit_code == 1, publication.output
    expected = 'Use the standard CLI flags rather than passing explicit options when using the `pypi` plugin\n'
    assert publication.output == expected
def test_unknown_publisher(hatch, temp_dir):
    """Requesting a publisher plugin that does not exist fails clearly."""
    with temp_dir.as_cwd():
        creation = hatch('new', 'My App')
    assert creation.exit_code == 0, creation.output
    project_dir = temp_dir / 'my-app'
    with project_dir.as_cwd():
        publication = hatch('publish', '-p', 'foo')
    assert publication.exit_code == 1, publication.output
    assert publication.output == 'Unknown publisher: foo\n'
def test_missing_user(hatch, temp_dir):
    """Non-interactive publishing without a user option is an error."""
    with temp_dir.as_cwd():
        creation = hatch('new', 'My App')
    assert creation.exit_code == 0, creation.output
    project_dir = temp_dir / 'my-app'
    with project_dir.as_cwd():
        publication = hatch('publish', '-n')
    assert publication.exit_code == 1, publication.output
    assert publication.output == 'Missing required option: user\n'
def test_missing_auth(hatch, temp_dir):
    """Non-interactive publishing with a user but no credentials is an error."""
    with temp_dir.as_cwd():
        creation = hatch('new', 'My App')
    assert creation.exit_code == 0, creation.output
    project_dir = temp_dir / 'my-app'
    with project_dir.as_cwd():
        publication = hatch('publish', '-n', '--user', 'foo')
    assert publication.exit_code == 1, publication.output
    assert publication.output == 'Missing required option: auth\n'
def test_flags(hatch, temp_dir_cache, helpers, published_project_name):
    """Publish both build artifacts to test.pypi.org using explicit CLI flags."""
    # Scaffold a brand-new project to publish.
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
    assert result.exit_code == 0, result.output
    path = temp_dir_cache / published_project_name
    with path.as_cwd():
        # Timestamp-derived version keeps every CI run unique on the index.
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert result.exit_code == 0, result.output
        result = hatch('build')
        assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        # Credentials and repository are supplied entirely via flags here.
        result = hatch(
            'publish', '--user', '__token__', '--auth', PUBLISHER_TOKEN, '--repo', 'https://test.pypi.org/legacy/'
        )
    assert result.exit_code == 0, result.output
    # Exact CLI output: one "success" line per artifact plus the project URL.
    assert result.output == helpers.dedent(
        f"""
        {artifacts[0].relative_to(path)} ... success
        {artifacts[1].relative_to(path)} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
def test_plugin_config(hatch, temp_dir_cache, helpers, published_project_name, config_file):
    """Publish using credentials/repo stored in the plugin's config file instead of flags."""
    # Persist user/auth/repo in the `pypi` publisher configuration.
    config_file.model.publish['pypi']['user'] = '__token__'
    config_file.model.publish['pypi']['auth'] = PUBLISHER_TOKEN
    config_file.model.publish['pypi']['repo'] = 'test'
    config_file.save()
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
    assert result.exit_code == 0, result.output
    path = temp_dir_cache / published_project_name
    with path.as_cwd():
        # Remove the repo environment variable so only the config file applies.
        del os.environ[PublishEnvVars.REPO]
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert result.exit_code == 0, result.output
        result = hatch('build')
        assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        # No flags at all: everything must come from the saved configuration.
        result = hatch('publish')
    assert result.exit_code == 0, result.output
    assert result.output == helpers.dedent(
        f"""
        {artifacts[0].relative_to(path)} ... success
        {artifacts[1].relative_to(path)} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
def test_prompt(hatch, temp_dir_cache, helpers, published_project_name):
    """Interactive prompting: bad credentials fail, good ones are saved and reused."""
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
    assert result.exit_code == 0, result.output
    path = temp_dir_cache / published_project_name
    with path.as_cwd():
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert result.exit_code == 0, result.output
        result = hatch('build')
        assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        # Feed a bogus token via the prompt; the index rejects it with HTTP 403.
        result = hatch('publish', input='__token__\nfoo')
        assert result.exit_code == 1, result.output
        assert '403' in result.output
        assert 'Invalid or non-existent authentication information' in result.output
    # Ensure nothing is saved for errors
    with path.as_cwd():
        result = hatch('publish', '-n')
    assert result.exit_code == 1, result.output
    assert result.output == 'Missing required option: user\n'
    # Trigger save
    with path.as_cwd():
        result = hatch('publish', str(artifacts[0]), input=f'__token__\n{PUBLISHER_TOKEN}')
    assert result.exit_code == 0, result.output
    assert result.output == helpers.dedent(
        f"""
        Enter your username: __token__
        Enter your credentials:{' '}
        {artifacts[0].relative_to(path)} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
    # Use saved results
    with path.as_cwd():
        result = hatch('publish', str(artifacts[1]))
    assert result.exit_code == 0, result.output
    assert result.output == helpers.dedent(
        f"""
        {artifacts[1].relative_to(path)} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
def test_external_artifact_path(hatch, temp_dir_cache, helpers, published_project_name):
    """Publish artifacts from both the project's `dist` and an external directory."""
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
    assert result.exit_code == 0, result.output
    path = temp_dir_cache / published_project_name
    external_build_directory = temp_dir_cache / 'dist'
    with path.as_cwd():
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert result.exit_code == 0, result.output
        # sdist goes to a directory outside the project root...
        result = hatch('build', '-t', 'sdist', str(external_build_directory))
        assert result.exit_code == 0, result.output
        external_artifacts = list(external_build_directory.iterdir())
        # ...while the wheel is built into the default internal `dist`.
        result = hatch('build', '-t', 'wheel')
        assert result.exit_code == 0, result.output
        internal_build_directory = path / 'dist'
        internal_artifacts = list(internal_build_directory.iterdir())
        result = hatch(
            'publish', '--user', '__token__', '--auth', PUBLISHER_TOKEN, 'dist', str(external_build_directory)
        )
    assert result.exit_code == 0, result.output
    # Internal artifacts are reported relative to the project; external ones absolute.
    assert result.output == helpers.dedent(
        f"""
        {internal_artifacts[0].relative_to(path)} ... success
        {external_artifacts[0]} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
def test_already_exists(hatch, temp_dir_cache, helpers, published_project_name):
    """Re-publishing the same artifacts reports `already exists` instead of failing."""
    with temp_dir_cache.as_cwd():
        result = hatch('new', published_project_name)
    assert result.exit_code == 0, result.output
    path = temp_dir_cache / published_project_name
    with path.as_cwd():
        current_version = timestamp_to_version(helpers.get_current_timestamp())
        result = hatch('version', current_version)
        assert result.exit_code == 0, result.output
        result = hatch('build')
        assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        result = hatch('publish', '--user', '__token__', '--auth', PUBLISHER_TOKEN)
    assert result.exit_code == 0, result.output
    assert result.output == helpers.dedent(
        f"""
        {artifacts[0].relative_to(path)} ... success
        {artifacts[1].relative_to(path)} ... success
        [{published_project_name}]
        https://test.pypi.org/project/{published_project_name}/{current_version}/
        """
    )
    # Give the index time to register the upload before publishing again
    # (more patience in CI than locally).
    for _ in range(30 if running_in_ci() else 5):
        wait_for_artifacts(published_project_name, *artifacts)
        time.sleep(1)
    with path.as_cwd():
        result = hatch('publish', '--user', '__token__', '--auth', PUBLISHER_TOKEN)
    assert result.exit_code == 0, result.output
    assert result.output == helpers.dedent(
        f"""
        {artifacts[0].relative_to(path)} ... already exists
        {artifacts[1].relative_to(path)} ... already exists
        """
    )
def test_no_artifacts(hatch, temp_dir_cache, helpers, published_project_name):
    """Publishing paths that contain no build artifacts fails with a clear message."""
    with temp_dir_cache.as_cwd():
        scaffold = hatch('new', published_project_name)
    assert scaffold.exit_code == 0, scaffold.output
    project_root = temp_dir_cache / published_project_name
    with project_root.as_cwd():
        # One directory that does not exist ('dir1') and one that only holds a
        # non-artifact file.
        extra_dir = project_root / 'dir2'
        extra_dir.mkdir()
        (extra_dir / 'test.txt').touch()
        outcome = hatch('publish', 'dir1', 'dir2', '--user', '__token__', '--auth', 'foo')
    assert outcome.exit_code == 1, outcome.output
    assert outcome.output == helpers.dedent(
        """
        No artifacts found
        """
    )
class TestWheel:
    """Publishing rejects wheels whose core metadata lacks required fields."""
    @pytest.mark.parametrize('field', ['name', 'version'])
    def test_missing_required_metadata_field(self, hatch, temp_dir_cache, helpers, published_project_name, field):
        """Strip a required field from a wheel's METADATA and expect publish to abort."""
        with temp_dir_cache.as_cwd():
            result = hatch('new', published_project_name)
        assert result.exit_code == 0, result.output
        path = temp_dir_cache / published_project_name
        with path.as_cwd():
            current_version = timestamp_to_version(helpers.get_current_timestamp())
            result = hatch('version', current_version)
            assert result.exit_code == 0, result.output
            result = hatch('build', '-t', 'wheel')
            assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        artifact_path = str(artifacts[0])
        metadata_file_path = f'{published_project_name}-{current_version}.dist-info/METADATA'
        # Read METADATA out of the freshly built wheel...
        with zipfile.ZipFile(artifact_path, 'r') as zip_archive:
            with zip_archive.open(metadata_file_path, 'r') as metadata_file:
                metadata_file_contents = metadata_file.read().decode('utf-8')
        # ...then rewrite the archive with the field removed. NOTE: opening the
        # zip in 'w' mode truncates it, so the corrupted wheel ends up holding
        # only this METADATA entry — presumably sufficient since publish fails
        # on the metadata check before any upload happens.
        with zipfile.ZipFile(artifact_path, 'w') as zip_archive:
            with zip_archive.open(metadata_file_path, 'w') as metadata_file:
                metadata_file.write(remove_metadata_field(field, metadata_file_contents).encode('utf-8'))
        with path.as_cwd():
            result = hatch('publish', '--user', '__token__', '--auth', 'foo')
        assert result.exit_code == 1, result.output
        assert result.output == helpers.dedent(
            f"""
            Missing required field `{field}` in artifact: {artifact_path}
            """
        )
class TestSourceDistribution:
    """Publishing rejects sdists whose PKG-INFO lacks required fields."""
    @pytest.mark.parametrize('field', ['name', 'version'])
    def test_missing_required_metadata_field(self, hatch, temp_dir_cache, helpers, published_project_name, field):
        """Strip a required field from an sdist's PKG-INFO and expect publish to abort."""
        with temp_dir_cache.as_cwd():
            result = hatch('new', published_project_name)
        assert result.exit_code == 0, result.output
        path = temp_dir_cache / published_project_name
        with path.as_cwd():
            current_version = timestamp_to_version(helpers.get_current_timestamp())
            result = hatch('version', current_version)
            assert result.exit_code == 0, result.output
            result = hatch('build', '-t', 'sdist')
            assert result.exit_code == 0, result.output
        build_directory = path / 'dist'
        artifacts = list(build_directory.iterdir())
        artifact_path = str(artifacts[0])
        extraction_directory = path / 'extraction'
        # Unpack the sdist, edit PKG-INFO in place, and re-pack it.
        with tarfile.open(artifact_path, 'r:gz') as tar_archive:
            tar_archive.extractall(extraction_directory)
        metadata_file_path = extraction_directory / f'{published_project_name}-{current_version}' / 'PKG-INFO'
        metadata_file_path.write_text(remove_metadata_field(field, metadata_file_path.read_text()))
        # NOTE(review): adding the extraction directory itself changes member
        # paths relative to the original sdist layout; apparently fine here
        # since only the metadata scan matters — confirm if reused elsewhere.
        with tarfile.open(artifact_path, 'w:gz') as tar_archive:
            tar_archive.add(extraction_directory)
        with path.as_cwd():
            result = hatch('publish', '--user', '__token__', '--auth', 'foo')
        assert result.exit_code == 1, result.output
        assert result.output == helpers.dedent(
            f"""
            Missing required field `{field}` in artifact: {artifact_path}
            """
        )
| 14,797 | 173 | 412 |
5b6ffd45074769f3afaecfb79816ae32eb72fdb0 | 10,009 | py | Python | src/no_padding_unit_strides.py | rileyhannigan/DeepLearningVisualizations | b83c69ada5fb81acc42fde891dd6045a4eebe493 | [
"MIT"
] | null | null | null | src/no_padding_unit_strides.py | rileyhannigan/DeepLearningVisualizations | b83c69ada5fb81acc42fde891dd6045a4eebe493 | [
"MIT"
] | null | null | null | src/no_padding_unit_strides.py | rileyhannigan/DeepLearningVisualizations | b83c69ada5fb81acc42fde891dd6045a4eebe493 | [
"MIT"
] | null | null | null | from manim import *
import numpy as np
# creates lists of lists of squares, used for input, kernel, and output
# moves kernel around and displays output squares one at a time
# creates padding
| 57.522989 | 131 | 0.636527 | from manim import *
import numpy as np
class NoPaddingUnitStrides(Scene):
    """Manim scene animating a no-padding, unit-stride convolution and its transpose."""
    # creates lists of lists of squares, used for input, kernel, and output
    def create_squares(self, height, width, size, padding, c, up_shift, left_shift):
        """Build a height x width grid of Squares of side `size` and color `c`.

        The first square is placed via the up/left shifts; every other square is
        positioned relative to its upper or left neighbor using `padding` as the
        next_to spacing factor. Returns a list of rows of Squares.
        """
        total_squares = []
        for i in range(height):
            current_row = []
            for j in range(width):
                if i == 0 and j == 0:
                    current_row += Square(side_length=size,color=c).shift(UP*up_shift, LEFT*left_shift)
                elif j == 0:
                    current_row += Square(side_length=size,color=c).next_to(total_squares[i-1][0], DOWN*padding)
                else:
                    current_row += Square(side_length=size,color=c).next_to(current_row[j-1], RIGHT*padding)
            total_squares += [current_row]
        return total_squares
    # moves kernel around and displays output squares one at a time
    def do_convolution(self, output_squares, kernel_squares, speed):
        """Slide the kernel group over the input, revealing one output square per stop.

        At each new row the kernel jumps back left by 0.75*(cols-1) and down by
        0.75; within a row it steps right by 0.75 per position.
        """
        for i in range(len(output_squares)):
            for j in range(len(output_squares[i])):
                if j == 0 and i != 0:
                    self.play(ApplyMethod(kernel_squares.shift, LEFT*0.75*(len(output_squares[i])-1), DOWN*0.75, run_time=speed))
                elif j != 0 or i != 0:
                    self.play(ApplyMethod(kernel_squares.shift, RIGHT*0.75, run_time=speed))
                self.play(Create(output_squares[i][j], run_time=speed))
    # creates padding
    def create_padding(self, height, width, input_squares, size):
        """Build the zero-padding border around `input_squares`.

        Returns two parallel grids (orange Squares and "0" Texts) covering the
        padded area; cells overlapping the input region are made fully
        transparent so only the border is visible.
        """
        total_squares = []
        total_zeroes = []
        for i in range(len(input_squares)+(height*2)):
            current_row = []
            current_zeroes = []
            for j in range(len(input_squares[0])+(width*2)):
                if i == 0 and j == 0:
                    current_row += Square(side_length=size,color=ORANGE).shift(UP*3, LEFT*-1.5)
                    current_zeroes += Text("0", color=GREY_E).shift(UP*3, LEFT*-1.5)
                elif j == 0:
                    current_row += Square(side_length=size,color=ORANGE).next_to(total_squares[i-1][0], DOWN)
                    current_zeroes += Text("0", color=GREY_E).next_to(total_zeroes[i-1][0], DOWN*1.52)
                else:
                    # Border cells are drawn; interior cells get opacity 0.
                    if i < height or i >= len(input_squares)+height or j < width or j >= len(input_squares[0])+width:
                        current_row += Square(side_length=size,color=ORANGE).next_to(current_row[j-1], RIGHT)
                        current_zeroes += Text("0", color=GREY_E).next_to(current_zeroes[j-1], RIGHT*1.63)
                    else:
                        current_row += Square(side_length=size).next_to(current_row[j-1], RIGHT).set_opacity(0)
                        current_zeroes += Text("0", color=GREY_E).next_to(current_zeroes[j-1], RIGHT*1.63).set_opacity(0)
            total_squares += [current_row]
            total_zeroes += [current_zeroes]
        return total_squares, total_zeroes
    def construct(self):
        """Play the full animation: regular convolution, then the transposed one."""
        # regular convolution labels
        title = Text("No Padding, Unit", gradient=(BLUE, GREEN)).shift(UP*0.5)
        title2 = Text("Strides Convolution", gradient=(BLUE, GREEN)).next_to(title, DOWN)
        input_text = Text("Input: 5 x 5").shift(UP*3.0, LEFT*1.7).scale(0.7)
        padding_text = Text("Padding: 0 x 0", color=ORANGE).next_to(input_text,DOWN*0.35).scale(0.7)
        kernel_text = Text("Kernel: 3 x 3", color=BLUE).next_to(padding_text,DOWN*0.35).scale(0.7)
        stride_text = Text("Stride: 1 x 1", color=PURPLE).next_to(kernel_text,DOWN*0.35).scale(0.7)
        output_text = Text("Output: 3 x 3", color=YELLOW).next_to(stride_text,DOWN*0.35).scale(0.7)
        # regular input, kernel, and output squares
        input_squares = self.create_squares(5, 5, 0.5, 1, WHITE, 3.0, -1.25)
        kernel_squares = self.create_squares(3, 3, 0.7, 0.2, BLUE, 3.0, -1.25)
        output_squares = self.create_squares(3, 3, 0.5, 1, YELLOW, -1.0, 2.5)
        # regular input, kernel, output, and label groups
        input_squares_group = VGroup(*input_squares[0], *input_squares[1], *input_squares[2],
            *input_squares[3], *input_squares[4])
        kernel_squares_group = VGroup(*kernel_squares[0], *kernel_squares[1], *kernel_squares[2])
        output_squares_group = VGroup(*output_squares[0], *output_squares[1], *output_squares[2])
        label_group = Group(input_text, padding_text, kernel_text, stride_text, output_text)
        line = Rectangle(width=0.001, height=7.0).shift(LEFT*3.4)
        #display title
        self.play(Write(title), Write(title2))
        self.wait(2)
        self.play(ApplyMethod(title.scale, 0.7), ApplyMethod(title2.scale, 0.7))
        self.play(ApplyMethod(title.shift, DOWN*2.5, RIGHT*2.9), ApplyMethod(title2.shift, DOWN *2.2, RIGHT*2.9))
        # display input
        self.play(Write(input_text))
        self.play(Create(input_squares_group))
        # display padding (none)
        self.play(Write(padding_text))
        self.wait()
        # display kernel
        self.play(Write(kernel_text))
        self.play(Create(kernel_squares_group))
        # display and do strides
        self.play(Write(stride_text))
        self.do_convolution(output_squares, kernel_squares_group, 0.7)
        # display output result
        self.play(Write(output_text))
        self.wait()
        #prepare screen for transposed
        # Shrink the finished convolution and park it on the left half.
        self.play(ApplyMethod(label_group.scale, 0.6), ApplyMethod(input_squares_group.scale, 0.6),
            ApplyMethod(kernel_squares_group.scale, 0.6, {"about_point":np.array([2.75,1.45,1])}),
            ApplyMethod(output_squares_group.scale, 0.6), ApplyMethod(title.scale, 0.7), ApplyMethod(title2.scale, 0.7))
        self.play(ApplyMethod(label_group.shift, LEFT*3.5, UP*0.7), ApplyMethod(input_squares_group.shift, LEFT*8, DOWN*1.1),
            ApplyMethod(kernel_squares_group.shift, LEFT*8, DOWN*1.08), ApplyMethod(output_squares_group.shift, LEFT*3.5, UP*0.2),
            ApplyMethod(title.shift, LEFT*8.1, DOWN*0.5), ApplyMethod(title2.shift, LEFT*8.1, DOWN*0.35))
        self.play(Create(line))
        # transposed convolution labels
        title_trans = Text("No Padding, Unit Strides", gradient=(BLUE, GREEN)).shift(RIGHT, UP*0.5)
        title_trans1 = Text("Transposed Convolution", gradient=(BLUE, GREEN)).next_to(title_trans, DOWN)
        input_text_trans = Text("Input: 3 x 3", color=YELLOW).shift(UP*3.1, LEFT*1.3).scale(0.7)
        padding_text_trans_1 = Text("Padding: 0 x 0", color=ORANGE).next_to(input_text_trans,DOWN*0.35).scale(0.7)
        padding_text_trans_2 = Text("p' = Kernel - 1", color=ORANGE).next_to(padding_text_trans_1,DOWN*0.35).scale(0.7)
        padding_text_trans_3 = Text("p' = 2 x 2", color=ORANGE).next_to(padding_text_trans_1,DOWN*0.35).scale(0.7)
        kernel_text_trans = Text("Kernel: 3 x 3", color=BLUE).next_to(padding_text_trans_1,DOWN*0.35).scale(0.7)
        stride_text_trans = Text("Stride: 1 x 1", color=PURPLE).next_to(kernel_text_trans,DOWN*0.35).scale(0.7)
        output_text_trans = Text("Output: 5 x 5").next_to(stride_text_trans,DOWN*0.35).scale(0.7)
        # transposed input, kernel, and output squares
        input_squares_trans = self.create_squares(3, 3, 0.5, 1, YELLOW, 3, -3)
        kernel_squares_trans = self.create_squares(3, 3, 0.7, 0.2, BLUE, 3, -1.5)
        output_squares_trans = self.create_squares(5, 5, 0.5, 1, WHITE, 0, 2.7)
        padding_squares_trans, padding_zeroes_trans = self.create_padding(2, 2, input_squares_trans, 0.5)
        # transposed input, kernel, and output groups
        input_squares_group_trans = VGroup(*input_squares_trans[0], *input_squares_trans[1], *input_squares_trans[2])
        kernel_squares_group_trans = VGroup(*kernel_squares_trans[0], *kernel_squares_trans[1], *kernel_squares_trans[2])
        output_squares_group_trans = VGroup(*output_squares_trans[0], *output_squares_trans[1], *output_squares_trans[2],
            *output_squares_trans[3], *output_squares_trans[4])
        padding_squares_group_trans = VGroup(*padding_squares_trans[0], *padding_squares_trans[1], *padding_squares_trans[2],
            *padding_squares_trans[3], *padding_squares_trans[4], *padding_squares_trans[5], *padding_squares_trans[6])
        padding_zeroes_group_trans = VGroup(*padding_zeroes_trans[0], *padding_zeroes_trans[1], *padding_zeroes_trans[2],
            *padding_zeroes_trans[3], *padding_zeroes_trans[4], *padding_zeroes_trans[5], *padding_zeroes_trans[6])
        #display title
        self.play(Write(title_trans), Write(title_trans1))
        self.wait(2)
        self.play(ApplyMethod(title_trans.scale, 0.7), ApplyMethod(title_trans1.scale, 0.7))
        self.play(ApplyMethod(title_trans.shift, DOWN*2.8, RIGHT*2.8), ApplyMethod(title_trans1.shift, DOWN *2.5, RIGHT*2.8))
        # display input
        self.play(Write(input_text_trans))
        self.play(Create(input_squares_group_trans))
        # display padding
        self.play(Write(padding_text_trans_1))
        self.wait(0.5)
        self.play(Write(padding_text_trans_2))
        self.wait(0.5)
        self.play(Transform(padding_text_trans_2, padding_text_trans_3))
        self.wait(0.5)
        self.play(ApplyMethod(input_squares_group_trans.shift, DOWN*0.75*2))
        self.play(Create(padding_squares_group_trans), Create(padding_zeroes_group_trans))
        self.play(FadeOut(padding_text_trans_2))
        # display kernel
        self.play(Write(kernel_text_trans))
        self.play(Create(kernel_squares_group_trans))
        # display and do strides
        self.play(Write(stride_text_trans))
        self.do_convolution(output_squares_trans, kernel_squares_group_trans, 0.5)
        # display output result
        self.play(Write(output_text_trans))
        self.wait(3)
f294135649d390d81ded077a590de5d0b88465de | 350 | py | Python | feed/urls.py | DeanBiton/JobSeeker | ed60ec1ab4a047700d72b44d7a042dd834e43abd | [
"MIT"
] | null | null | null | feed/urls.py | DeanBiton/JobSeeker | ed60ec1ab4a047700d72b44d7a042dd834e43abd | [
"MIT"
] | null | null | null | feed/urls.py | DeanBiton/JobSeeker | ed60ec1ab4a047700d72b44d7a042dd834e43abd | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the feed app: home feed plus post detail/delete/create pages.
urlpatterns = [
    path('', views.feed, name='feed'),
    path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
    path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),
    path('post/new/', views.PostCreateView.as_view(), name='post-create'),
]
| 31.818182 | 86 | 0.668571 | from django.urls import path
from . import views
# URL routes for the feed app: home feed plus post detail/delete/create pages.
urlpatterns = [
    path('', views.feed, name='feed'),
    path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
    path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),
    path('post/new/', views.PostCreateView.as_view(), name='post-create'),
]
| 0 | 0 | 0 |
60b9ecd703c18e205ce332ad07cdc2632ae08239 | 1,554 | py | Python | button/demo_oled.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 3 | 2017-09-03T17:17:44.000Z | 2017-12-10T12:26:46.000Z | button/demo_oled.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | null | null | null | button/demo_oled.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 2 | 2017-10-01T01:10:55.000Z | 2018-07-15T19:49:29.000Z | ''' demo of reading a button
2017-0808 PePo - added OLED display to demo
Adafruit article:
https://learn.adafruit.com/micropython-hardware-digital-i-slash-o/digital-inputs
'''
import machine, time
import ssd1306
__LED_PIN = const(14) #GPIO14
__BUTTON_PIN = const(12) #GPIO12
#define led to be set on / off by button
led = machine.Pin(__LED_PIN, machine.Pin.OUT)
led.off()
# OPTIONAL: status of led: True=on, False=off
# led_status = False
# create i2c for OLED display
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4), freq=100000)
print('i2c.scan: ', i2c.scan()) #[60]
# OLED screen dimensions
__WIDTH = const(128)
__HEIGHT = const(32)
oled = ssd1306.SSD1306_I2C(__WIDTH, __HEIGHT, i2c)
# define button on Pin GPIO12
button = machine.Pin(__BUTTON_PIN, machine.Pin.IN, machine.Pin.PULL_UP)
# helper to refresh OLED display
# demo ...
# run demo
try:
print('Button demo, press button...')
refreshOLED('Press button!')
run()
except:
print('Done')
refreshOLED('Done!')
| 26.338983 | 81 | 0.647362 | ''' demo of reading a button
2017-0808 PePo - added OLED display to demo
Adafruit article:
https://learn.adafruit.com/micropython-hardware-digital-i-slash-o/digital-inputs
'''
import machine, time
import ssd1306
# MicroPython `const()` compile-time constants for the pin assignments.
__LED_PIN = const(14) #GPIO14
__BUTTON_PIN = const(12) #GPIO12
#define led to be set on / off by button
led = machine.Pin(__LED_PIN, machine.Pin.OUT)
led.off()
# OPTIONAL: status of led: True=on, False=off
# led_status = False
# create i2c for OLED display
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4), freq=100000)
print('i2c.scan: ', i2c.scan()) #[60]
# OLED screen dimensions
__WIDTH = const(128)
__HEIGHT = const(32)
oled = ssd1306.SSD1306_I2C(__WIDTH, __HEIGHT, i2c)
# define button on Pin GPIO12
# PULL_UP input: run() below treats a 1->0 transition as a press, so the
# button is presumably wired to ground — confirm on the actual board.
button = machine.Pin(__BUTTON_PIN, machine.Pin.IN, machine.Pin.PULL_UP)
# helper to refresh OLED display
def refreshOLED(msg):
    """Clear the OLED, redraw the demo header, and show `msg` on the second line."""
    oled.fill(0)
    # Header stays fixed; only the message line changes between refreshes.
    oled.text('Button demo', 0, 0)
    oled.text(msg, 0, 10)
    oled.show()
# demo ...
def run():
    """Poll the button forever, toggling the LED and OLED on press/release edges."""
    while True:
        # Two samples 10 ms apart act as a crude debounce / edge detector.
        before = button.value()
        time.sleep(0.01)
        after = button.value()
        if before and not after:
            # 1 -> 0 transition: button went down.
            print('Button pressed!')
            led.on()
            refreshOLED('LED: {0} '.format(led.value()))
        elif after and not before:
            # 0 -> 1 transition: button came back up.
            print('Button released!')
            led.off()
            refreshOLED('LED: {0} '.format(led.value()))
# run demo
try:
    print('Button demo, press button...')
    refreshOLED('Press button!')
    run()
# NOTE(review): bare `except` also swallows Ctrl-C (KeyboardInterrupt) and any
# programming error raised inside run(); presumably intentional so the demo
# always ends with a clean "Done!" screen — consider narrowing to
# `except KeyboardInterrupt` and confirm on the target board.
except:
    print('Done')
    refreshOLED('Done!')
| 504 | 0 | 44 |
0ed30c27d88286245f7f8350be353500778ba256 | 27,161 | py | Python | tests/ope/test_meta_slate.py | han20192019/newRL | 53598edab284b4364d127ec5662137de3f9c1206 | [
"Apache-2.0"
] | 387 | 2020-07-19T14:56:36.000Z | 2022-03-29T15:25:21.000Z | tests/ope/test_meta_slate.py | han20192019/newRL | 53598edab284b4364d127ec5662137de3f9c1206 | [
"Apache-2.0"
] | 89 | 2020-10-04T17:04:42.000Z | 2022-03-27T10:43:15.000Z | tests/ope/test_meta_slate.py | han20192019/newRL | 53598edab284b4364d127ec5662137de3f9c1206 | [
"Apache-2.0"
] | 53 | 2020-08-18T09:52:22.000Z | 2022-03-30T23:16:13.000Z | from copy import deepcopy
from dataclasses import dataclass
import itertools
import re
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from obp.ope import SlateIndependentIPS
from obp.ope import SlateOffPolicyEvaluation
from obp.ope import SlateRewardInteractionIPS
from obp.ope import SlateStandardIPS
from obp.types import BanditFeedback
from obp.utils import check_confidence_interval_arguments
# Canonical values every mock estimator below returns; the meta-OPE tests
# assert against these (optionally shifted by the SIPS mock's `eps`).
mock_policy_value = 0.5
mock_confidence_interval = {
    "mean": 0.5,
    "95.0% CI (lower)": 0.3,
    "95.0% CI (upper)": 0.7,
}
@dataclass
class SlateStandardIPSMock(SlateStandardIPS):
    """Mock of Slate Standard Inverse Propensity Scoring (SIPS).

    Returns the module-level mock values shifted by ``eps`` so tests can tell
    individual estimator instances apart.
    """

    estimator_name: str = "sips"
    eps: float = 0.1

    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore: np.ndarray,
        evaluation_policy_pscore: np.ndarray,
        **kwargs,
    ) -> float:
        """Return the fixed mock policy value shifted by ``eps``; inputs are ignored."""
        return self.eps + mock_policy_value

    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore: np.ndarray,
        evaluation_policy_pscore: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Return the fixed mock confidence interval shifted by ``eps``.

        The interval arguments are still validated, so invalid ``alpha`` or
        bootstrap settings raise exactly as in the real estimator.
        """
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        shifted = {}
        for key, value in mock_confidence_interval.items():
            shifted[key] = value + self.eps
        return shifted
@dataclass
class SlateIndependentIPSMock(SlateIndependentIPS):
    """Mock of Slate Independent Inverse Propensity Scoring (IIPS).

    Always returns the module-level mock values so meta-OPE plumbing can be
    tested without real estimation logic.
    """

    estimator_name: str = "iips"

    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_item_position: np.ndarray,
        evaluation_policy_pscore_item_position: np.ndarray,
        **kwargs,
    ) -> float:
        """Return the fixed mock policy value; all inputs are ignored."""
        return mock_policy_value

    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_item_position: np.ndarray,
        evaluation_policy_pscore_item_position: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Return a fresh copy of the fixed mock confidence interval.

        Interval arguments are still validated so invalid inputs raise.
        """
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        # Shallow copy replaces the redundant identity comprehension
        # `{k: v for k, v in ...}` (flake8-comprehensions C416); callers
        # still receive a distinct dict they can mutate safely.
        return dict(mock_confidence_interval)
@dataclass
class SlateRewardInteractionIPSMock(SlateRewardInteractionIPS):
    """Mock of Slate Reward Interaction (Recursive) Inverse Propensity Scoring (RIPS).

    Always returns the module-level mock values so meta-OPE plumbing can be
    tested without real estimation logic.
    """

    estimator_name: str = "rips"

    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_cascade: np.ndarray,
        evaluation_policy_pscore_cascade: np.ndarray,
        **kwargs,
    ) -> float:
        """Return the fixed mock policy value; all inputs are ignored."""
        return mock_policy_value

    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_cascade: np.ndarray,
        evaluation_policy_pscore_cascade: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Return a fresh copy of the fixed mock confidence interval.

        Interval arguments are still validated so invalid inputs raise.
        """
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        # Shallow copy replaces the redundant identity comprehension
        # `{k: v for k, v in ...}` (flake8-comprehensions C416).
        return dict(mock_confidence_interval)
# define Mock instances
# `sips` and `sips2` share the default estimator name, so when both are passed
# to SlateOffPolicyEvaluation only the latter survives registration (asserted
# in test_meta_post_init); `sips3` carries a distinct name and coexists.
sips = SlateStandardIPSMock(len_list=3)
sips2 = SlateStandardIPSMock(len_list=3, eps=0.02)
sips3 = SlateStandardIPSMock(len_list=3, estimator_name="sips3")
iips = SlateIndependentIPSMock(len_list=3)
rips = SlateRewardInteractionIPSMock(len_list=3)
def test_meta_post_init(synthetic_slate_bandit_feedback: BanditFeedback) -> None:
    """
    Test the __post_init__ function
    """
    # __post_init__ keeps only the latter estimator when two share a name
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips2]
    )
    assert ope_.ope_estimators_ == {
        "sips": sips2
    }, "__post_init__ returns a wrong value"
    # __post_init__ can handle the same estimator if the estimator names are different
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    assert ope_.ope_estimators_ == {
        "sips": sips,
        "sips3": sips3,
    }, "__post_init__ returns a wrong value"
    # __post_init__ raises RuntimeError when any necessary key is missing from
    # the bandit_feedback; try every non-empty combination of deleted keys.
    necessary_keys = ["slate_id", "position", "reward"]
    for i in range(len(necessary_keys)):
        for deleted_keys in itertools.combinations(necessary_keys, i + 1):
            invalid_bandit_feedback_dict = {key: "_" for key in necessary_keys}
            # delete the chosen subset of required keys
            for k in deleted_keys:
                del invalid_bandit_feedback_dict[k]
            with pytest.raises(RuntimeError, match=r"Missing key*"):
                _ = SlateOffPolicyEvaluation(
                    bandit_feedback=invalid_bandit_feedback_dict, ope_estimators=[sips]
                )
# Parametrization data for the _create_estimator_inputs tests below.
# evaluation_policy_pscore, description
invalid_input_of_create_estimator_inputs = [
    (
        None,
        "one of evaluation_policy_pscore, evaluation_policy_pscore_item_position, or evaluation_policy_pscore_cascade must be given",
    ),
]
# evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description
valid_input_of_create_estimator_inputs = [
    (
        np.ones(300),
        np.ones(300),
        np.ones(300),
        "deterministic evaluation policy",
    ),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, description",
    invalid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_invalid_input_data(
    evaluation_policy_pscore,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the _create_estimator_inputs using invalid data and a sips estimator
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
    )
    # expect ValueError when no evaluation policy pscore variant is given
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_._create_estimator_inputs(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    # _create_estimator_inputs function is called in the following functions,
    # so each public entry point must surface the same ValueError
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.estimate_policy_values(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.summarize_off_policy_estimates(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.evaluate_performance_of_estimators(
            ground_truth_policy_value=0.1,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.summarize_estimators_comparison(
            ground_truth_policy_value=0.1,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
    valid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the _create_estimator_inputs using valid data

    (Docstring corrected: this case feeds *valid* pscores and checks the
    returned key set, then smoke-tests every caller of _create_estimator_inputs.)
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
    )
    estimator_inputs = ope_._create_estimator_inputs(
        evaluation_policy_pscore=evaluation_policy_pscore
    )
    assert set(estimator_inputs.keys()) == set(
        [
            "reward",
            "pscore",
            "pscore_item_position",
            "pscore_cascade",
            "position",
            "evaluation_policy_pscore",
            "evaluation_policy_pscore_item_position",
            "evaluation_policy_pscore_cascade",
            "slate_id",
        ]
    ), f"Invalid response of _create_estimator_inputs (test case: {description})"
    # _create_estimator_inputs function is called in the following functions
    _ = ope_.estimate_policy_values(evaluation_policy_pscore=evaluation_policy_pscore)
    _ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
    _ = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore
    )
    _ = ope_.evaluate_performance_of_estimators(
        ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
    )
    _ = ope_.summarize_estimators_comparison(
        ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
    )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
    valid_input_of_create_estimator_inputs,
)
def test_meta_estimate_policy_values_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_policy_values using valid data

    The mocks return canned values, so the expected dict is exact:
    iips/rips return mock_policy_value, sips adds its eps offset.
    """
    # single ope estimator (iips)
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    assert ope_.estimate_policy_values(
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position
    ) == {
        "iips": mock_policy_value
    }, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS]) returns a wrong value"
    # multiple ope estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback,
        ope_estimators=[iips, sips, rips],
    )
    assert ope_.estimate_policy_values(
        evaluation_policy_pscore=evaluation_policy_pscore,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
    ) == {
        "iips": mock_policy_value,
        "sips": mock_policy_value + sips.eps,
        "rips": mock_policy_value,
    }, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS, SIPS, RIPS]) returns a wrong value"
# NOTE(review): a dangling `@pytest.mark.parametrize(...)` decorator used to
# sit here with nothing to decorate — a decorator must be immediately followed
# by a def/class, so the module failed to import with a SyntaxError. The test
# it belonged to is not present in this copy of the file; the stray decorator
# has been removed.

# Each tuple: (alpha, n_bootstrap_samples, random_state, err, description)
# `description` doubles as the expected error-message prefix in pytest.raises.
invalid_input_of_estimate_intervals = [
    (
        0.05,
        100,
        "s",
        ValueError,
        "'s' cannot be used to seed a numpy.random.RandomState instance",
    ),
    (0.05, -1, 1, ValueError, "`n_bootstrap_samples`= -1, must be >= 1"),
    (
        0.05,
        "s",
        1,
        TypeError,
        "`n_bootstrap_samples` must be an instance of <class 'int'>, not <class 'str'>",
    ),
    (-1.0, 1, 1, ValueError, "`alpha`= -1.0, must be >= 0.0"),
    (2.0, 1, 1, ValueError, "`alpha`= 2.0, must be <= 1.0"),
    (
        "0",
        1,
        1,
        TypeError,
        "`alpha` must be an instance of <class 'float'>, not <class 'str'>",
    ),
]
# Each tuple: (alpha, n_bootstrap_samples, random_state, description)
valid_input_of_estimate_intervals = [
    (0.05, 100, 1, "random_state is 1"),
    (0.05, 1, 1, "n_bootstrap_samples is 1"),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, err, description_2",
    invalid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_invalid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha,
    n_bootstrap_samples,
    random_state,
    err,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_intervals using invalid data

    Invalid bootstrap arguments must raise the parametrized error type both
    directly and via summarize_off_policy_estimates.
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.estimate_intervals(
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
    # estimate_intervals function is called in summarize_off_policy_estimates
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.summarize_off_policy_estimates(
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, description_2",
    valid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha: float,
    n_bootstrap_samples: int,
    random_state: int,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_intervals using valid data

    The mocks return the canned mock_confidence_interval (sips shifts all
    values by its eps), so exact dict equality can be asserted.
    """
    # single ope estimator
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    assert ope_.estimate_intervals(
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    ) == {
        "iips": mock_confidence_interval
    }, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS]) returns a wrong value"
    # multiple ope estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips, sips]
    )
    assert ope_.estimate_intervals(
        evaluation_policy_pscore=evaluation_policy_pscore,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    ) == {
        "iips": mock_confidence_interval,
        "sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
    }, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS, SIPS]) returns a wrong value"
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, description_2",
    valid_input_of_estimate_intervals,
)
def test_meta_summarize_off_policy_estimates(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha: float,
    n_bootstrap_samples: int,
    random_state: int,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of summarize_off_policy_estimates using valid data

    Checks the (value, interval) DataFrame pair against the canned mock
    outputs, including the relative-value column, and that the relative
    value becomes NaN when the observed rewards are all zero.
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    value, interval = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    expected_value = pd.DataFrame(
        {
            "sips": mock_policy_value + sips.eps,
            "sips3": mock_policy_value + sips3.eps,
        },
        index=["estimated_policy_value"],
    ).T
    # relative value = estimate / (mean reward per slate) of the logged data
    expected_value["relative_estimated_policy_value"] = expected_value[
        "estimated_policy_value"
    ] / (
        synthetic_slate_bandit_feedback["reward"].sum()
        / np.unique(synthetic_slate_bandit_feedback["slate_id"]).shape[0]
    )
    expected_interval = pd.DataFrame(
        {
            "sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
            "sips3": {k: v + sips3.eps for k, v in mock_confidence_interval.items()},
        }
    ).T
    # BUG FIX: these were written as `assert_frame_equal(...), "message"`,
    # which is a call followed by a dead tuple element (not an assert with a
    # message). assert_frame_equal raises on mismatch by itself, so the
    # misleading trailing strings are dropped.
    assert_frame_equal(value, expected_value)
    assert_frame_equal(interval, expected_interval)
    # check relative estimated policy value when the average of bandit_feedback["reward"] is zero
    zero_reward_bandit_feedback = deepcopy(synthetic_slate_bandit_feedback)
    zero_reward_bandit_feedback["reward"] = np.zeros(
        zero_reward_bandit_feedback["reward"].shape[0]
    )
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=zero_reward_bandit_feedback, ope_estimators=[sips, sips3]
    )
    value, _ = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    expected_value = pd.DataFrame(
        {
            "sips": mock_policy_value + sips.eps,
            "sips3": mock_policy_value + sips3.eps,
        },
        index=["estimated_policy_value"],
    ).T
    # division by a zero mean reward must surface as NaN, not raise
    expected_value["relative_estimated_policy_value"] = np.nan
    assert_frame_equal(value, expected_value)
# Each tuple: (metric, ground_truth_policy_value, err, description)
invalid_input_of_evaluation_performance_of_estimators = [
    ("foo", 0.3, ValueError, "metric must be either 'relative-ee' or 'se'"),
    (
        "se",
        1,
        TypeError,
        "`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'int'>.",
    ),
    (
        "se",
        "a",
        TypeError,
        "`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'str'>.",
    ),
    (
        "relative-ee",
        0.0,
        ValueError,
        "ground_truth_policy_value must be non-zero when metric is relative-ee",
    ),
]
# Each tuple: (metric, ground_truth_policy_value, description)
valid_input_of_evaluation_performance_of_estimators = [
    ("se", 0.0, "metric is se and ground_truth_policy_value is 0.0"),
    ("relative-ee", 1.0, "metric is relative-ee and ground_truth_policy_value is 1.0"),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "metric, ground_truth_policy_value, err, description_2",
    invalid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    metric,
    ground_truth_policy_value,
    err,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of evaluate_performance_of_estimators using invalid data

    Invalid metric / ground-truth combinations must raise the parametrized
    error both directly and via summarize_estimators_comparison.
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.evaluate_performance_of_estimators(
            ground_truth_policy_value=ground_truth_policy_value,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            metric=metric,
        )
    # estimate_intervals function is called in summarize_off_policy_estimates
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.summarize_estimators_comparison(
            ground_truth_policy_value=ground_truth_policy_value,
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            metric=metric,
        )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "metric, ground_truth_policy_value, description_2",
    valid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    metric,
    ground_truth_policy_value,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of evaluate_performance_of_estimators using valid data

    Recomputes the expected relative-ee / se metrics from the canned mock
    values and compares against both the dict and the DataFrame APIs.
    """
    if metric == "relative-ee":
        # calculate relative-ee
        eval_metric_ope_dict = {
            "sips": np.abs(
                (mock_policy_value + sips.eps - ground_truth_policy_value)
                / ground_truth_policy_value
            ),
            "sips3": np.abs(
                (mock_policy_value + sips3.eps - ground_truth_policy_value)
                / ground_truth_policy_value
            ),
        }
    else:
        # calculate se
        eval_metric_ope_dict = {
            "sips": (mock_policy_value + sips.eps - ground_truth_policy_value) ** 2,
            "sips3": (mock_policy_value + sips3.eps - ground_truth_policy_value) ** 2,
        }
    # check performance estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    performance = ope_.evaluate_performance_of_estimators(
        ground_truth_policy_value=ground_truth_policy_value,
        evaluation_policy_pscore=evaluation_policy_pscore,
        metric=metric,
    )
    for k, v in performance.items():
        assert k in eval_metric_ope_dict, "Invalid key of performance response"
        assert v == eval_metric_ope_dict[k], "Invalid value of performance response"
    performance_df = ope_.summarize_estimators_comparison(
        ground_truth_policy_value=ground_truth_policy_value,
        evaluation_policy_pscore=evaluation_policy_pscore,
        metric=metric,
    )
    # BUG FIX: this was `assert_frame_equal(...), "message"` — a call followed
    # by a dead tuple element, not an assert with a message. assert_frame_equal
    # raises on mismatch by itself, so the trailing string is dropped.
    assert_frame_equal(performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T)
| 35.644357 | 133 | 0.699385 | from copy import deepcopy
from dataclasses import dataclass
import itertools
import re
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from obp.ope import SlateIndependentIPS
from obp.ope import SlateOffPolicyEvaluation
from obp.ope import SlateRewardInteractionIPS
from obp.ope import SlateStandardIPS
from obp.types import BanditFeedback
from obp.utils import check_confidence_interval_arguments
# Canned value returned by every mock estimator's estimate_policy_value
# (the SIPS mock additionally adds its `eps` offset).
mock_policy_value = 0.5
# Canned confidence interval returned by every mock's estimate_interval.
mock_confidence_interval = {
    "mean": 0.5,
    "95.0% CI (lower)": 0.3,
    "95.0% CI (upper)": 0.7,
}
@dataclass
class SlateStandardIPSMock(SlateStandardIPS):
    """Slate Standard Inverse Propensity Scoring (SIPS) Mock.

    Overrides the real estimator's methods with canned return values so the
    meta-class (SlateOffPolicyEvaluation) can be tested in isolation.
    """
    # Name under which SlateOffPolicyEvaluation registers this estimator.
    estimator_name: str = "sips"
    # Offset added to every mocked return value; lets tests distinguish
    # several SIPS mocks registered side by side.
    eps: float = 0.1
    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore: np.ndarray,
        evaluation_policy_pscore: np.ndarray,
        **kwargs,
    ) -> float:
        """Estimate the policy value of evaluation policy.
        Returns
        ----------
        mock_policy_value: float
            Always ``mock_policy_value + self.eps``; all inputs are ignored.
        """
        return mock_policy_value + self.eps
    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore: np.ndarray,
        evaluation_policy_pscore: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.
        Returns
        ----------
        mock_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds,
            each shifted by ``self.eps``.
        """
        # Still run the real argument validation so tests of invalid
        # alpha / n_bootstrap_samples / random_state exercise it.
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        return {k: v + self.eps for k, v in mock_confidence_interval.items()}
@dataclass
class SlateIndependentIPSMock(SlateIndependentIPS):
    """Slate Independent Inverse Propensity Scoring (IIPS) Mock.

    Returns canned values (no eps offset) so the meta-class can be tested
    without running the real IIPS computation.
    """
    # Name under which SlateOffPolicyEvaluation registers this estimator.
    estimator_name: str = "iips"
    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_item_position: np.ndarray,
        evaluation_policy_pscore_item_position: np.ndarray,
        **kwargs,
    ) -> float:
        """Estimate the policy value of evaluation policy.
        Returns
        ----------
        mock_policy_value: float
            Always ``mock_policy_value``; all inputs are ignored.
        """
        return mock_policy_value
    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_item_position: np.ndarray,
        evaluation_policy_pscore_item_position: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.
        Returns
        ----------
        mock_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        # Still run the real argument validation so tests of invalid
        # alpha / n_bootstrap_samples / random_state exercise it.
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        # shallow copy so callers cannot mutate the shared constant
        return {k: v for k, v in mock_confidence_interval.items()}
@dataclass
class SlateRewardInteractionIPSMock(SlateRewardInteractionIPS):
    """Slate Recursive Inverse Propensity Scoring (RIPS) Mock."""
    # Name under which SlateOffPolicyEvaluation registers this estimator.
    estimator_name: str = "rips"
    def estimate_policy_value(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_cascade: np.ndarray,
        evaluation_policy_pscore_cascade: np.ndarray,
        **kwargs,
    ) -> float:
        """Return the canned policy value, ignoring every input.
        Returns
        ----------
        mock_policy_value: float
        """
        return mock_policy_value
    def estimate_interval(
        self,
        slate_id: np.ndarray,
        reward: np.ndarray,
        position: np.ndarray,
        pscore_cascade: np.ndarray,
        evaluation_policy_pscore_cascade: np.ndarray,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Validate the bootstrap arguments, then return the canned interval.
        Returns
        ----------
        mock_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        check_confidence_interval_arguments(
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
        # fresh shallow copy so callers cannot mutate the shared constant
        return dict(mock_confidence_interval)
# define Mock instances shared by all tests in this module:
# `sips2` reuses the default name "sips" (collision case), `sips3` has a
# distinct name so it can coexist with `sips`.
sips = SlateStandardIPSMock(len_list=3)
sips2 = SlateStandardIPSMock(len_list=3, eps=0.02)
sips3 = SlateStandardIPSMock(len_list=3, estimator_name="sips3")
iips = SlateIndependentIPSMock(len_list=3)
rips = SlateRewardInteractionIPSMock(len_list=3)
def test_meta_post_init(synthetic_slate_bandit_feedback: BanditFeedback) -> None:
    """
    Test the __post_init__ function
    """
    # When two estimators share a name, the one listed last wins.
    evaluator = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips2]
    )
    assert evaluator.ope_estimators_ == {
        "sips": sips2
    }, "__post_init__ returns a wrong value"
    # Identical estimator classes coexist as long as their names differ.
    evaluator = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    assert evaluator.ope_estimators_ == {
        "sips": sips,
        "sips3": sips3,
    }, "__post_init__ returns a wrong value"
    # Removing any non-empty subset of the required keys must raise RuntimeError.
    necessary_keys = ["slate_id", "position", "reward"]
    for n_removed in range(1, len(necessary_keys) + 1):
        for removed in itertools.combinations(necessary_keys, n_removed):
            broken_feedback = dict.fromkeys(necessary_keys, "_")
            for key in removed:
                del broken_feedback[key]
            with pytest.raises(RuntimeError, match=r"Missing key*"):
                _ = SlateOffPolicyEvaluation(
                    bandit_feedback=broken_feedback, ope_estimators=[sips]
                )
# evaluation_policy_pscore, description
# Invalid case: no evaluation-policy pscore of any kind is supplied.
invalid_input_of_create_estimator_inputs = [
    (
        None,
        "one of evaluation_policy_pscore, evaluation_policy_pscore_item_position, or evaluation_policy_pscore_cascade must be given",
    ),
]
# evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description
# All-ones pscores of length 300 — a deterministic evaluation policy
# (assumes the fixture's feedback has 300 rows; confirm against conftest).
valid_input_of_create_estimator_inputs = [
    (
        np.ones(300),
        np.ones(300),
        np.ones(300),
        "deterministic evaluation policy",
    ),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, description",
    invalid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_invalid_input_data(
    evaluation_policy_pscore,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the _create_estimator_inputs using invalid data and a sips estimator

    (Docstring corrected: this case feeds *invalid* pscores and expects a
    ValueError from every public entry point that calls _create_estimator_inputs.)
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
    )
    # raise ValueError when the shape of two arrays are different
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_._create_estimator_inputs(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    # _create_estimator_inputs function is called in the following functions
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.estimate_policy_values(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.summarize_off_policy_estimates(
            evaluation_policy_pscore=evaluation_policy_pscore
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.evaluate_performance_of_estimators(
            ground_truth_policy_value=0.1,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
    with pytest.raises(ValueError, match=f"{description}*"):
        _ = ope_.summarize_estimators_comparison(
            ground_truth_policy_value=0.1,
            evaluation_policy_pscore=evaluation_policy_pscore,
        )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
    valid_input_of_create_estimator_inputs,
)
def test_meta_create_estimator_inputs_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the _create_estimator_inputs using valid data

    (Docstring corrected: this case feeds *valid* pscores and checks the
    returned key set, then smoke-tests every caller of _create_estimator_inputs.)
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips]
    )
    estimator_inputs = ope_._create_estimator_inputs(
        evaluation_policy_pscore=evaluation_policy_pscore
    )
    assert set(estimator_inputs.keys()) == set(
        [
            "reward",
            "pscore",
            "pscore_item_position",
            "pscore_cascade",
            "position",
            "evaluation_policy_pscore",
            "evaluation_policy_pscore_item_position",
            "evaluation_policy_pscore_cascade",
            "slate_id",
        ]
    ), f"Invalid response of _create_estimator_inputs (test case: {description})"
    # _create_estimator_inputs function is called in the following functions
    _ = ope_.estimate_policy_values(evaluation_policy_pscore=evaluation_policy_pscore)
    _ = ope_.estimate_intervals(evaluation_policy_pscore=evaluation_policy_pscore)
    _ = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore
    )
    _ = ope_.evaluate_performance_of_estimators(
        ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
    )
    _ = ope_.summarize_estimators_comparison(
        ground_truth_policy_value=0.1, evaluation_policy_pscore=evaluation_policy_pscore
    )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
    valid_input_of_create_estimator_inputs,
)
def test_meta_estimate_policy_values_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_policy_values using valid data

    The mocks return canned values, so the expected dict is exact:
    iips/rips return mock_policy_value, sips adds its eps offset.
    """
    # single ope estimator (iips)
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    assert ope_.estimate_policy_values(
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position
    ) == {
        "iips": mock_policy_value
    }, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS]) returns a wrong value"
    # multiple ope estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback,
        ope_estimators=[iips, sips, rips],
    )
    assert ope_.estimate_policy_values(
        evaluation_policy_pscore=evaluation_policy_pscore,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
    ) == {
        "iips": mock_policy_value,
        "sips": mock_policy_value + sips.eps,
        "rips": mock_policy_value,
    }, "SlateOffPolicyEvaluation.estimate_policy_values ([IIPS, SIPS, RIPS]) returns a wrong value"
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description",
    valid_input_of_create_estimator_inputs,
)
def test_meta_estimate_policy_values_using_various_pscores(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test that estimate_policy_values fails loudly when the behavior-policy
    pscores needed by the registered estimators are missing from the
    bandit feedback, and tolerates missing pscores that no estimator uses.
    """
    necessary_keys = [
        "reward",
        "position",
        "evaluation_policy_pscore",
        "evaluation_policy_pscore_item_position",
        # BUG FIX: the next two entries were fused into a single string by
        # implicit concatenation ("...cascade" "slate_id" — missing comma);
        # they are now separate items as intended.
        "evaluation_policy_pscore_cascade",
        "slate_id",
    ]
    pscore_keys = [
        "pscore",
        "pscore_item_position",
        "pscore_cascade",
    ]
    # TypeError must be raised when required positional arguments are missing.
    # BUG FIX: the loop bound was range(len(necessary_keys)); combinations of
    # the 3-element pscore_keys are empty for sizes > 3, so the extra
    # iterations were dead — iterate over pscore_keys sizes instead.
    for i in range(len(pscore_keys)):
        for deleted_keys in itertools.combinations(pscore_keys, i + 1):
            copied_feedback = deepcopy(synthetic_slate_bandit_feedback)
            # delete
            for k in deleted_keys:
                del copied_feedback[k]
            with pytest.raises(
                TypeError,
                match=re.escape("estimate_policy_value() missing"),
            ):
                ope_ = SlateOffPolicyEvaluation(
                    bandit_feedback=copied_feedback,
                    ope_estimators=[sips, iips, rips],
                )
                _ = ope_.estimate_policy_values(
                    evaluation_policy_pscore=evaluation_policy_pscore,
                    evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
                    evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
                )
    # pscore_item_position and evaluation_policy_pscore_item_position are not necessary when iips is not evaluated
    copied_feedback = deepcopy(synthetic_slate_bandit_feedback)
    del copied_feedback["pscore_item_position"]
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=copied_feedback,
        ope_estimators=[sips, rips],
    )
    _ = ope_.estimate_policy_values(
        evaluation_policy_pscore=evaluation_policy_pscore,
        evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade,
    )
# alpha, n_bootstrap_samples, random_state, err, description
# `description` doubles as the expected error-message prefix in pytest.raises.
invalid_input_of_estimate_intervals = [
    (
        0.05,
        100,
        "s",
        ValueError,
        "'s' cannot be used to seed a numpy.random.RandomState instance",
    ),
    (0.05, -1, 1, ValueError, "`n_bootstrap_samples`= -1, must be >= 1"),
    (
        0.05,
        "s",
        1,
        TypeError,
        "`n_bootstrap_samples` must be an instance of <class 'int'>, not <class 'str'>",
    ),
    (-1.0, 1, 1, ValueError, "`alpha`= -1.0, must be >= 0.0"),
    (2.0, 1, 1, ValueError, "`alpha`= 2.0, must be <= 1.0"),
    (
        "0",
        1,
        1,
        TypeError,
        "`alpha` must be an instance of <class 'float'>, not <class 'str'>",
    ),
]
# alpha, n_bootstrap_samples, random_state, description
valid_input_of_estimate_intervals = [
    (0.05, 100, 1, "random_state is 1"),
    (0.05, 1, 1, "n_bootstrap_samples is 1"),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, err, description_2",
    invalid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_invalid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha,
    n_bootstrap_samples,
    random_state,
    err,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_intervals using invalid data

    Invalid bootstrap arguments must raise the parametrized error type both
    directly and via summarize_off_policy_estimates.
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.estimate_intervals(
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
    # estimate_intervals function is called in summarize_off_policy_estimates
    with pytest.raises(err, match=f"{description_2}*"):
        _ = ope_.summarize_off_policy_estimates(
            evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, description_2",
    valid_input_of_estimate_intervals,
)
def test_meta_estimate_intervals_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha: float,
    n_bootstrap_samples: int,
    random_state: int,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of estimate_intervals using valid data

    The mocks return the canned mock_confidence_interval (sips shifts all
    values by its eps), so exact dict equality can be asserted.
    """
    # single ope estimator
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    assert ope_.estimate_intervals(
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    ) == {
        "iips": mock_confidence_interval
    }, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS]) returns a wrong value"
    # multiple ope estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips, sips]
    )
    assert ope_.estimate_intervals(
        evaluation_policy_pscore=evaluation_policy_pscore,
        evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    ) == {
        "iips": mock_confidence_interval,
        "sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
    }, "SlateOffPolicyEvaluation.estimate_intervals ([IIPS, SIPS]) returns a wrong value"
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "alpha, n_bootstrap_samples, random_state, description_2",
    valid_input_of_estimate_intervals,
)
def test_meta_summarize_off_policy_estimates(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    alpha: float,
    n_bootstrap_samples: int,
    random_state: int,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of summarize_off_policy_estimates using valid data.

    Checks both the estimated policy values (absolute and relative to the
    on-policy mean reward per slate) and the bootstrap confidence intervals,
    then verifies the relative value degenerates to NaN when every reward is
    zero.

    BUGFIX: the original wrote ``assert_frame_equal(a, b), "msg"`` — the
    trailing string only built a throwaway tuple and was never used as an
    assertion message, since assert_frame_equal raises AssertionError itself.
    The dead strings were removed.
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    value, interval = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    expected_value = pd.DataFrame(
        {
            "sips": mock_policy_value + sips.eps,
            "sips3": mock_policy_value + sips3.eps,
        },
        index=["estimated_policy_value"],
    ).T
    # Relative value = estimate / (total reward / number of unique slates).
    expected_value["relative_estimated_policy_value"] = expected_value[
        "estimated_policy_value"
    ] / (
        synthetic_slate_bandit_feedback["reward"].sum()
        / np.unique(synthetic_slate_bandit_feedback["slate_id"]).shape[0]
    )
    expected_interval = pd.DataFrame(
        {
            "sips": {k: v + sips.eps for k, v in mock_confidence_interval.items()},
            "sips3": {k: v + sips3.eps for k, v in mock_confidence_interval.items()},
        }
    ).T
    # assert_frame_equal raises AssertionError on mismatch (policy value).
    assert_frame_equal(value, expected_value)
    # assert_frame_equal raises AssertionError on mismatch (interval).
    assert_frame_equal(interval, expected_interval)
    # check relative estimated policy value when the average of bandit_feedback["reward"] is zero
    zero_reward_bandit_feedback = deepcopy(synthetic_slate_bandit_feedback)
    zero_reward_bandit_feedback["reward"] = np.zeros(
        zero_reward_bandit_feedback["reward"].shape[0]
    )
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=zero_reward_bandit_feedback, ope_estimators=[sips, sips3]
    )
    value, _ = ope_.summarize_off_policy_estimates(
        evaluation_policy_pscore=evaluation_policy_pscore,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
    expected_value = pd.DataFrame(
        {
            "sips": mock_policy_value + sips.eps,
            "sips3": mock_policy_value + sips3.eps,
        },
        index=["estimated_policy_value"],
    ).T
    # Division by a zero mean reward: the relative column must be NaN.
    expected_value["relative_estimated_policy_value"] = np.nan
    assert_frame_equal(value, expected_value)
# Invalid (metric, ground_truth_policy_value) combinations for
# evaluate_performance_of_estimators / summarize_estimators_comparison,
# together with the exception type each must raise and the message pattern
# matched via pytest.raises(..., match=...).
invalid_input_of_evaluation_performance_of_estimators = [
    ("foo", 0.3, ValueError, "metric must be either 'relative-ee' or 'se'"),
    (
        "se",
        1,
        TypeError,
        "`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'int'>.",
    ),
    (
        "se",
        "a",
        TypeError,
        "`ground_truth_policy_value` must be an instance of <class 'float'>, not <class 'str'>.",
    ),
    (
        "relative-ee",
        0.0,
        ValueError,
        "ground_truth_policy_value must be non-zero when metric is relative-ee",
    ),
]
# Valid (metric, ground_truth_policy_value, description) combinations; note
# a zero ground truth is acceptable for 'se' but not for 'relative-ee'.
valid_input_of_evaluation_performance_of_estimators = [
    ("se", 0.0, "metric is se and ground_truth_policy_value is 0.0"),
    ("relative-ee", 1.0, "metric is relative-ee and ground_truth_policy_value is 1.0"),
]
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "metric, ground_truth_policy_value, err, description_2",
    invalid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_invalid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    metric,
    ground_truth_policy_value,
    err,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of evaluate_performance_of_estimators using invalid data
    """
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[iips]
    )
    # Run the same invalid call against both public entry points; each must
    # reject it with the expected exception and message.
    for entry_point in (
        ope_.evaluate_performance_of_estimators,
        ope_.summarize_estimators_comparison,
    ):
        with pytest.raises(err, match=f"{description_2}*"):
            _ = entry_point(
                ground_truth_policy_value=ground_truth_policy_value,
                evaluation_policy_pscore_item_position=evaluation_policy_pscore_item_position,
                metric=metric,
            )
@pytest.mark.parametrize(
    "evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, description_1",
    valid_input_of_create_estimator_inputs,
)
@pytest.mark.parametrize(
    "metric, ground_truth_policy_value, description_2",
    valid_input_of_evaluation_performance_of_estimators,
)
def test_meta_evaluate_performance_of_estimators_using_valid_input_data(
    evaluation_policy_pscore,
    evaluation_policy_pscore_item_position,
    evaluation_policy_pscore_cascade,
    description_1: str,
    metric,
    ground_truth_policy_value,
    description_2: str,
    synthetic_slate_bandit_feedback: BanditFeedback,
) -> None:
    """
    Test the response of evaluate_performance_of_estimators using valid data.

    Builds the expected per-estimator metric (relative estimation error or
    squared error) from the mocked policy values, then checks both the dict
    returned by evaluate_performance_of_estimators and the DataFrame returned
    by summarize_estimators_comparison.

    BUGFIX: the final check used ``assert_frame_equal(...), "msg"`` — the
    trailing string only built a throwaway tuple; assert_frame_equal raises
    AssertionError by itself, so the dead string was removed.
    """
    if metric == "relative-ee":
        # calculate relative-ee: |(estimate - truth) / truth|
        eval_metric_ope_dict = {
            "sips": np.abs(
                (mock_policy_value + sips.eps - ground_truth_policy_value)
                / ground_truth_policy_value
            ),
            "sips3": np.abs(
                (mock_policy_value + sips3.eps - ground_truth_policy_value)
                / ground_truth_policy_value
            ),
        }
    else:
        # calculate se: (estimate - truth)^2
        eval_metric_ope_dict = {
            "sips": (mock_policy_value + sips.eps - ground_truth_policy_value) ** 2,
            "sips3": (mock_policy_value + sips3.eps - ground_truth_policy_value) ** 2,
        }
    # check performance estimators
    ope_ = SlateOffPolicyEvaluation(
        bandit_feedback=synthetic_slate_bandit_feedback, ope_estimators=[sips, sips3]
    )
    performance = ope_.evaluate_performance_of_estimators(
        ground_truth_policy_value=ground_truth_policy_value,
        evaluation_policy_pscore=evaluation_policy_pscore,
        metric=metric,
    )
    for k, v in performance.items():
        assert k in eval_metric_ope_dict, "Invalid key of performance response"
        assert v == eval_metric_ope_dict[k], "Invalid value of performance response"
    performance_df = ope_.summarize_estimators_comparison(
        ground_truth_policy_value=ground_truth_policy_value,
        evaluation_policy_pscore=evaluation_policy_pscore,
        metric=metric,
    )
    # Raises AssertionError on mismatch (performance summary).
    assert_frame_equal(
        performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T
    )
| 2,070 | 0 | 22 |
ba8e540faa742cb0c08a2833ab89a8271fb5c208 | 6,100 | py | Python | unityparser/view_factory.py | adrianogil/SublimeUnityIntel | b3f3d686654c4dd62b3a792488d63bfd8105ce6a | [
"MIT"
] | null | null | null | unityparser/view_factory.py | adrianogil/SublimeUnityIntel | b3f3d686654c4dd62b3a792488d63bfd8105ce6a | [
"MIT"
] | null | null | null | unityparser/view_factory.py | adrianogil/SublimeUnityIntel | b3f3d686654c4dd62b3a792488d63bfd8105ce6a | [
"MIT"
] | null | null | null | import os, sys
# Resolve this module's real location, then prepend it and its 'popup'
# subfolder to sys.path so the sibling modules imported below can be found.
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
__popup_path__ = os.path.join(__path__, 'popup')
# print(__path__)
if __path__ not in sys.path:
    sys.path.insert(0, __path__)
if __popup_path__ not in sys.path:
    sys.path.insert(0, __popup_path__)
from csharp_element import CSharpElement
from csharp_reference import CSharpReference
# One module per popup flavor; each is used via its print_popup(...) below.
import popup.yaml_reference_popup
import popup.yaml_gameobject_popup
import popup.yaml_transform_popup
import popup.csharp_reference_popup
import popup.csharp_class_summary_popup
import popup.csharp_method_summary_popup
import popup.csharp_class_inherits_diagram_popup
import popup.git_whatchanged_commit_popup
import popup.git_summary_list_popup
## Popups ##
| 37.195122 | 88 | 0.621803 | import os, sys
# Resolve this module's real location, then prepend it and its 'popup'
# subfolder to sys.path so the sibling modules imported below can be found.
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
__popup_path__ = os.path.join(__path__, 'popup')
# print(__path__)
if __path__ not in sys.path:
    sys.path.insert(0, __path__)
if __popup_path__ not in sys.path:
    sys.path.insert(0, __popup_path__)
from csharp_element import CSharpElement
from csharp_reference import CSharpReference
# One module per popup flavor; each is used by ViewFactory.print_*_popup.
import popup.yaml_reference_popup
import popup.yaml_gameobject_popup
import popup.yaml_transform_popup
import popup.csharp_reference_popup
import popup.csharp_class_summary_popup
import popup.csharp_method_summary_popup
import popup.csharp_class_inherits_diagram_popup
import popup.git_whatchanged_commit_popup
import popup.git_summary_list_popup
class ViewFactory:
    """Glue between a Sublime Text view and the symbolic parser.

    Shows HTML popups whose links dispatch to callables registered through
    ``register_action``, and builds the navigation callbacks (open a file,
    jump to a row) that are handed to the popup renderer modules.
    """

    def __init__(self, view, symbolic_parser):
        self.view = view
        self.symbolic_parser = symbolic_parser
        self.symbolic_data = symbolic_parser.symbolic_data
        self.last_selected_action_id = -1
        self.view_actions = {}

        def dispatch(raw_id):
            # Popup hrefs arrive as strings; normalize before the lookup.
            chosen = int(raw_id)
            self.last_selected_action_id = chosen
            if chosen in self.view_actions:
                self.view_actions[chosen]()

        self.selection_action = dispatch
        self.last_popup_action = None

    def clear_actions(self):
        """Forget every registered popup action."""
        self.view_actions = {}

    def register_action(self, action_id, action):
        """Map popup href ``action_id`` to a zero-argument callable."""
        self.view_actions[action_id] = action

    def show_popup(self, html, width=300):
        """Show ``html`` in a popup whose links go through the dispatcher."""
        self.view.show_popup(html, on_navigate=self.selection_action, max_width=width)

    def hide_popup(self):
        """Close the popup if one is currently visible."""
        if self.view.is_popup_visible():
            self.view.hide_popup()

    def add_text_on_position(self, text, line):
        """Insert ``text`` at ``line`` via the custom insert command."""
        self.view.window().active_view().run_command(
            "insert_text_on_position",
            {"text": text, "line": line}
        )

    def select_text_on_position(self, line, start_pos, end_pos):
        """Select the span between ``start_pos`` and ``end_pos`` on ``line``."""
        self.view.window().active_view().run_command(
            "select_text_on_position",
            {"line": line, "begin_text": start_pos, "end_text": end_pos}
        )

    def get_showpopup(self):
        """Return a (text, action) popup helper bound to this factory's view."""
        def show_popup(text, action):
            self.view.show_popup(text, on_navigate=action)
        return show_popup

    def get_open_file_action(self, file):
        """Return a thunk that opens ``file`` (bound now, invoked later)."""
        def open_file():
            self.view.window().open_file(file)
        return open_file

    def get_open_file(self):
        """Return a callback that opens whichever file it is handed."""
        def open_file(file):
            self.view.window().open_file(file)
        return open_file

    def get_goto_file_reference_action(self, file, line):
        """Return a thunk that jumps to ``line`` of ``file``."""
        def go_to_reference():
            if self.view.window().active_view():
                target_row = line
                target_col = 1
                print("Trying to go to line " + str(target_row))
                self.view.window().active_view().run_command(
                    "goto_row_col",
                    {"row": target_row, "col": target_col, "file": file}
                )
        return go_to_reference

    def get_goto_reference_action(self, yaml_id):
        """Return a thunk that jumps to the row of YAML object ``yaml_id``."""
        def go_to_reference():
            if self.view.window().active_view():
                # The row is resolved lazily, when the thunk actually runs.
                target_row = self.symbolic_parser.get_current_file_data()['row_by_id'][yaml_id]
                target_col = 1
                print("Trying to go to line " + str(target_row))
                self.view.window().active_view().run_command(
                    "goto_row_col",
                    {"row": target_row, "col": target_col}
                )
        return go_to_reference

    def get_goto_reference(self):
        """Return a callback that jumps to the row of the YAML id it is given."""
        def go_to_reference(id):
            if self.view.window().active_view():
                target_row = self.symbolic_parser.get_current_file_data()['row_by_id'][id]
                target_col = 1
                print("Trying to go to line " + str(target_row))
                self.view.window().active_view().run_command(
                    "goto_row_col",
                    {"row": target_row, "col": target_col}
                )
        return go_to_reference

    def get_goto_line_action(self, line):
        """Return a thunk that jumps to ``line`` (column 1)."""
        def go_to_line():
            if self.view.window().active_view():
                target_row = line
                target_col = 1
                print("Trying to go to line " + str(target_row))
                self.view.window().active_view().run_command(
                    "goto_row_col",
                    {"row": target_row, "col": target_col}
                )
        return go_to_line

    def get_goto_line(self):
        """Return a callback that jumps to the line number it is given."""
        def go_to_line(line):
            if self.view.window().active_view():
                target_row = line
                target_col = 1
                print("Trying to go to line " + str(target_row))
                self.view.window().active_view().run_command(
                    "goto_row_col",
                    {"row": target_row, "col": target_col}
                )
        return go_to_line

    ## Popups ##
    def print_yaml_ref_popup(self, class_instance):
        """Render the YAML reference popup."""
        popup.yaml_reference_popup.print_popup(class_instance, self)

    def print_csharp_ref_popup(self, class_instance):
        """Render the C# reference popup."""
        popup.csharp_reference_popup.print_popup(class_instance, self)

    def print_yaml_go_popup(self, go_model):
        """Render the GameObject popup."""
        popup.yaml_gameobject_popup.print_popup(go_model, self)

    def print_yaml_transform_popup(self, go_model):
        """Render the Transform popup."""
        popup.yaml_transform_popup.print_popup(go_model, self)

    def print_csharp_class_summary_popup(self, class_instance):
        """Render the C# class summary popup."""
        popup.csharp_class_summary_popup.print_popup(class_instance, self)

    def print_csharp_method_summary_popup(self, method_instance):
        """Render the C# method summary popup."""
        popup.csharp_method_summary_popup.print_popup(method_instance, self)

    def print_csharp_class_inherits_diagram_popup(self, class_instance):
        """Render the C# inheritance diagram popup."""
        popup.csharp_class_inherits_diagram_popup.print_popup(class_instance, self)

    def print_git_whatchanged_commit_popup(self, git_data):
        """Render the git what-changed popup."""
        popup.git_whatchanged_commit_popup.print_popup(git_data, self)

    def print_git_summary_list_popup(self, git_data):
        """Render the git summary list popup."""
        popup.git_summary_list_popup.print_popup(git_data, self)
| 4,673 | -3 | 663 |
0fa697b9befc7417a9ae9af626234a8357c50e5d | 485 | py | Python | operators/utils/http_connection/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | operators/utils/http_connection/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | operators/utils/http_connection/script.py | thhapke/dilocal | 1d3c1b7b1a4513e9bff7efd77f680f66014ff499 | [
"MIT"
] | null | null | null | # Mock apis needs to be commented before used within SAP Data Intelligence
#from diadmin.dimockapi.mock_api import mock_api
#api = mock_api(__file__)
import os
import json
import requests
import http.client
from base64 import b64encode
api.add_generator(gen) | 22.045455 | 74 | 0.740206 | # Mock apis needs to be commented before used within SAP Data Intelligence
#from diadmin.dimockapi.mock_api import mock_api
#api = mock_api(__file__)
import os
import json
import requests
import http.client
from base64 import b64encode
def gen():
    """Emit one message carrying the configured HTTP connection attributes."""
    connection_attrs = api.config.http_connection
    outgoing = api.Message(attributes=connection_attrs, body=None)
    # data type: string
    api.send('output', outgoing)
api.add_generator(gen)
41345779e4ff29c248e42e597575a0a23cdd48b2 | 5,772 | py | Python | nessusha/nessusha/Archive/v3.py | SanPersie/NessusHa | c7663d7ff58e3a5eaf25505a862af3d9b9a52f09 | [
"MIT"
] | null | null | null | nessusha/nessusha/Archive/v3.py | SanPersie/NessusHa | c7663d7ff58e3a5eaf25505a862af3d9b9a52f09 | [
"MIT"
] | null | null | null | nessusha/nessusha/Archive/v3.py | SanPersie/NessusHa | c7663d7ff58e3a5eaf25505a862af3d9b9a52f09 | [
"MIT"
] | null | null | null | from tkinter import filedialog
from bs4 import *
import re
from pprint import *
import pprint
import xlsxwriter
from tkinter import *
# from tkinter.filedialog import askopenfilename
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = filedialog.askopenfilename() # show an "Open" dialog box and return the path to the selected file
# NOTE(review): str.replace returns a new string and this result is discarded,
# so the line is a no-op. Do NOT simply assign it back: reformatforprint()
# extracts the output name with a regex that expects forward slashes.
filename.replace("/", "\\\\")
rawhtml = open(filename,
               encoding="utf-8").readlines()
# Pipeline: collect hosts, parse per-host check results, pivot, write .xlsx.
hhosts =allhosts()
haha =foo(hhosts)
# print(type(haha))
# print(haha)
a, b =reformat(haha)
# print(a)
reformatforprint(a, b)
print("Done! Next!")
| 34.562874 | 119 | 0.472107 | from tkinter import filedialog
from bs4 import *
import re
from pprint import *
import pprint
import xlsxwriter
from tkinter import *
# from tkinter.filedialog import askopenfilename
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = filedialog.askopenfilename() # show an "Open" dialog box and return the path to the selected file
# NOTE(review): no-op — str.replace returns a new string that is discarded here.
filename.replace("/", "\\\\")
# Whole report is kept in memory; all parsing below is line-oriented.
rawhtml = open(filename,
               encoding="utf-8").readlines()
def allhosts(lines=None):
    """Collect the unique host addresses reported in the scan HTML.

    Hosts appear in headings of the form ``<h2 xmlns="">a.b.c.d ...</h2>``.

    Args:
        lines: iterable of HTML lines to scan. Defaults to the module-level
            ``rawhtml`` (the file picked in the open dialog), so existing
            ``allhosts()`` callers are unaffected.

    Returns:
        set[str]: dotted-decimal host addresses.
    """
    if lines is None:
        lines = rawhtml
    # Compile once and search each line once (the original called re.findall
    # twice per line with a fresh pattern string).
    host_pattern = re.compile(r'(?<=<h2 xmlns="">)((\d+\.)+(\d+))(?=(\(*.+)|(</h2>))')
    myhosts = set()
    for line in lines:
        # findall yields one tuple of capture groups per match; element 0 of
        # the first match is the full dotted address.
        matches = host_pattern.findall(line)
        if matches:
            myhosts.add(matches[0][0])
    return myhosts
def foo(hosts):
    """Parse the report lines (module-level ``rawhtml``) into a nested dict.

    Returns ``{host: {check_name: {"Policy Value": ..., "Host Value": ...,
    "Compliance": ...}}}``; fields that were never seen stay "NA".

    NOTE(review): this relies on document order — ``check`` and
    ``compliance`` are assigned when a check-header line is seen and reused
    by the 'Policy Value' and host-header branches, so a report whose first
    relevant line is not a check header would raise NameError. The
    "Compliance" field is never updated from its "NA" default (the
    compliance prefix is folded into "Host Value" instead), and ``blah``
    is unused.
    """
    dct = {}
    blah = []
    for host in hosts:
        dct[host] = {}
    for i in range(len(rawhtml)):
        # Check headers are bold white spans; the inline background color
        # encodes the compliance status.
        if re.findall('(?<=font-weight: bold; font-size: 14px; line-height: 20px; color: #fff;">)(.*?)(?=<)',
                      rawhtml[i]): # Look for check
            check = re.findall('(?<=font-weight: bold; font-size: 14px; line-height: 20px; color: #fff;">)(.*?)(?=<)',
                               rawhtml[i])[0]
            # Color code -> prefix: red = non-compliant, green = compliant,
            # orange = not applicable.
            if "#d43f3a" in rawhtml[i]:
                compliance = "NC - "
            elif "#3fae49" in rawhtml[i]:
                compliance = "C - "
            elif "#ee9336" in rawhtml[i]:
                compliance = "NA - "
            # Add the default value into the dictionary
            for eachhost in dct:
                # dct[eachhost][check] ={"Policy Value": "NA", "Host Value": "NA",
                #                                "Compliance": "NA"}
                if check not in dct[eachhost].keys():
                    dct[eachhost][check] = {"Policy Value": "NA", "Host Value": "NA",
                                            "Compliance": "NA"}
                else:
                    continue
        elif 'Policy Value' in rawhtml[i]: # Look for desired value
            # The desired value sits two lines below the 'Policy Value' label.
            polvalue = re.findall('(?<=>)(.*?)(?=<)', rawhtml[i + 2])[0]
            for eachhost in dct:
                dct[eachhost][check]["Policy Value"] =polvalue
        elif re.findall('(?<=<h2 xmlns="">)((\d+\.)+(\d+))(?=(\(*.+)|(</h2>))', rawhtml[i]):
            hostname = re.findall('(?<=<h2 xmlns="">)((\d+\.)+(\d+))(?=(\(*.+)|(</h2>))', rawhtml[i])[0][0]
            value =[]
            # The host's reported value(s) also sit two lines below; multiple
            # fragments are joined with newlines before storing.
            if re.findall('(?<=>)(.+?)(?=<div)', rawhtml[i + 2]): # Look for host value
                value += re.findall('(?<=>)(.+?)(?=<div)', rawhtml[i + 2])
                if len(value) <=1:
                    dct[hostname][check]["Host Value"] =compliance +str(value[0])
                else:
                    addedvalue = "\n".join(map(str, value))
                    dct[hostname][check]["Host Value"] =compliance +str(addedvalue)
            else:
                continue
    return dct
def reformat(mydic):
    """Pivot the per-host parse results into a per-check mapping.

    Args:
        mydic: mapping ``host -> check name -> result dict`` where each
            result dict contains at least a "Host Value" entry (as built
            by ``foo``).

    Returns:
        tuple: ``(optdic, listofhosts)`` where ``optdic`` maps
        ``check -> host -> host value`` and ``listofhosts`` lists every
        host in input order (including hosts with no checks).
    """
    optdic = {}
    listofhosts = []
    for host, checks in mydic.items():
        listofhosts.append(host)
        for check, result in checks.items():
            # setdefault replaces the original duplicated "first time we see
            # this check" branch; hosts with no checks simply add nothing.
            optdic.setdefault(check, {})[host] = result["Host Value"]
    return optdic, listofhosts
def reformatforprint(newdic, hosts):
    """Write the pivoted results to ``<input basename>_Output.xlsx``.

    Args:
        newdic: mapping ``check -> host -> host value`` (output of reformat()).
        hosts: list of host addresses; one worksheet column per host.

    Side effects:
        Creates an .xlsx workbook with one row per check, conditional
        red/green formatting for values prefixed "NC - " / "C - ", and
        reads the module-level ``filename`` chosen in the open dialog.
    """
    row = 1
    colum = 1
    dicofhosts = {}
    # Basename of the selected report, e.g. ".../scan.html" -> "scan".
    outputpath = re.findall(r"(?<=/)(\w+)(?=\.\D+)", filename)[0]
    # BUGFIX: the original used '%r', which repr()-quotes the name and
    # produced a file literally called 'scan'_Output.xlsx (with quotes).
    workbook = xlsxwriter.Workbook('%s_Output.xlsx' % outputpath)
    worksheet = workbook.add_worksheet()
    worksheet.set_column(0, 0, 51)
    worksheet.set_column(1, len(hosts), 18)
    # Wrap content
    cell_format = workbook.add_format()
    cell_format.set_text_wrap()
    # Light red fill with dark red text.
    formatr = workbook.add_format({'bg_color': '#FFC7CE',
                                   'font_color': '#9C0006'})
    # Green fill with dark green text.
    formatg = workbook.add_format({'bg_color': '#C6EFCE',
                                   'font_color': '#006100'})
    bold = workbook.add_format({'bold': 1})  # NOTE(review): unused
    # Header row: one column per host, remembering each host's column index.
    for host in hosts:
        worksheet.write(0, colum, host)
        dicofhosts[host] = colum
        colum += 1
    # One row per check; fill each reporting host's cell with its value.
    for check in newdic:
        worksheet.write(row, 0, check, cell_format)
        for thehost in newdic[check]:
            worksheet.write(row, dicofhosts[thehost], newdic[check][thehost], cell_format)
        row += 1
    # Color cells by their compliance prefix (see foo()).
    worksheet.conditional_format('A1:Z999', {'type': 'text',
                                             'criteria': 'begins with',
                                             'value': 'NC - ',
                                             'format': formatr})
    worksheet.conditional_format('A1:Z999', {'type': 'text',
                                             'criteria': 'begins with',
                                             'value': 'C - ',
                                             'format': formatg})
    workbook.close()
# Driver: gather hosts, parse per-host results, pivot to per-check layout,
# then emit the formatted workbook.
hhosts =allhosts()
haha =foo(hhosts)
# print(type(haha))
# print(haha)
a, b =reformat(haha)
# print(a)
reformatforprint(a, b)
print("Done! Next!")
| 5,003 | 0 | 100 |